diff --git a/.github/workflows/check-website-links.yml b/.github/workflows/check-website-links.yml index 6726ddb16a51d..64293b927f79b 100644 --- a/.github/workflows/check-website-links.yml +++ b/.github/workflows/check-website-links.yml @@ -1,4 +1,5 @@ name: CheckLinks + on: push: branches: @@ -12,12 +13,13 @@ jobs: checklinks: name: Check website links runs-on: ubuntu-latest + steps: - uses: actions/checkout@v2 - name: Ruby uses: ruby/setup-ruby@v1 with: - ruby-version: 2.6 + ruby-version: 3.3 bundler-cache: true - name: Build jekyll website with drafts run: bundle exec jekyll build --drafts @@ -25,7 +27,7 @@ jobs: - name: Use Node.js uses: actions/setup-node@v3 with: - node-version: 19.x + node-version: 22.x - name: Install dependencies run: npm install @@ -37,7 +39,28 @@ jobs: run: | sudo mv ./build/* ./_site rm -rf ./_site/src - - name: Check for broken links run: | - bundle exec htmlproofer --assume_extension --checks_to_ignore ImageCheck,ScriptCheck --only_4xx --http_status_ignore 429,403 --allow_hash_href --url_ignore "https://onnxruntime.ai/docs/reference/api/c-api.html,https://www.onnxruntime.ai/docs/reference/execution-providers/TensorRT-ExecutionProvider.html#c-api-example,https://www.onnxruntime.ai/docs/resources/graph-optimizations.html,onnxruntime/capi/onnxruntime_pybind11_state.html,https://github.com/microsoft/onnx-converters-private/issues/new/choose,https://aka.ms/onnx/exportissue,https://aka.ms/onnx/board" --log-level :info ./_site + bundle exec htmlproofer ./_site \ + --only_4xx \ + --ignore-status-codes 429,403 \ + --allow_hash_href \ + --allow_missing_href \ + --ignore_urls "/.*linkedin\.com.*/,https://onnxruntime.ai/docs/reference/api/c-api.html,https://www.onnxruntime.ai/docs/reference/execution-providers/TensorRT-ExecutionProvider.html#c-api-example,https://www.onnxruntime.ai/docs/resources/graph-optimizations.html,onnxruntime/capi/onnxruntime_pybind11_state.html,https://github.com/microsoft/onnx-converters-private/issues/new/choose,https://aka.ms/onnx/exportissue,https://aka.ms/onnx/board" \ + --no-check-external-hash + # - name: Check for broken links + # uses: chabad360/htmlproofer@master + # with: + # directory: "./_site" + # # The directory to scan + # arguments: | + # --no-check_external_hash + # --assume_extension + # --only_4xx + # --ignore_status_codes 429,403,999 + # --allow_missing_href + # --allow_hash_href + # --checks 'Links' + # --log-level :info + # --ignore_urls "^https://linkedin.com,https://onnxruntime.ai/docs/reference/api/c-api.html,https://www.onnxruntime.ai/docs/reference/execution-providers/TensorRT-ExecutionProvider.html#c-api-example,https://www.onnxruntime.ai/docs/resources/graph-optimizations.html,onnxruntime/capi/onnxruntime_pybind11_state.html,https://github.com/microsoft/onnx-converters-private/issues/new/choose,https://aka.ms/onnx/exportissue,https://aka.ms/onnx/board" + # # The arguments to pass to HTMLProofer \ No newline at end of file diff --git a/_sass/color_schemes/onnxruntime.scss b/_sass/color_schemes/onnxruntime.scss index 8766cefe3d32d..cae24fdd46e10 100644 --- a/_sass/color_schemes/onnxruntime.scss +++ b/_sass/color_schemes/onnxruntime.scss @@ -13,6 +13,7 @@ $btn-primary-color: #226aca; // } // 2024 December Accessibility changes .highlight .s { color: #3c7a3b ;} +.highlight .py {color: #a25f00;} // Initial Theme .highlight .hll { background-color: #ffffcc; } .highlight { background: #ffffff; } @@ -22,7 +23,7 @@ $btn-primary-color: #226aca; .highlight .o { color: #333333; } .highlight .ch { color: #707070 ; } .highlight 
.cm { color: #707070 ; } -.highlight .cp { color: #557799; } +.highlight .cp { color: #507191; } .highlight .cpf { color: #707070 ; } .highlight .c1 { color: #707070 ; } .highlight .cs { color: #cc0000; font-weight: bold; } @@ -52,7 +53,7 @@ $btn-primary-color: #226aca; .highlight .ni { color: #880000; font-weight: bold; } .highlight .ne { font-weight: bold; color: #eb0000; } .highlight .nf { color: #0066BB; font-weight: bold; } -.highlight .nl { font-weight: bold; color: #8f6f00; } +.highlight .nl { font-weight: bold; color: #876900; } .highlight .nn { font-weight: bold; color: #0d77a2 ; } .highlight .nt { color: #007700; } .highlight .nv { color: #996633; } @@ -68,7 +69,7 @@ $btn-primary-color: #226aca; .highlight .sc { color: #0044DD; } .highlight .dl { background-color: #fff0f0; } .highlight .sd { color: #d54220; } -.highlight .s2 { background-color: #fff0f0; } +.highlight .s2 { color: #3c7a3b ; background-color: #fff0f0; } .highlight .se { color: #666666; font-weight: bold; background-color: #fff0f0; } .highlight .sh { background-color: #fff0f0; } .highlight .si { background-color: #eeeeee; } diff --git a/docs/build/eps.md b/docs/build/eps.md index bce2d1defb248..ee0041a4b8362 100644 --- a/docs/build/eps.md +++ b/docs/build/eps.md @@ -271,7 +271,7 @@ See more information on the OpenVINO™ Execution Provider [here](../execution-p *2024.3 is the current recommended OpenVINO™ version. [OpenVINO™ 2023.3](https://docs.openvino.ai/2023.3/home.html) is minimal OpenVINO™ version requirement.* 2. Configure the target hardware with specific follow on instructions: - * To configure Intel® Processor Graphics(GPU) please follow these instructions: [Windows](https://docs.openvino.ai/latest/openvino_docs_install_guides_configurations_for_intel_gpu.html#gpu-guide-windows), [Linux](https://docs.openvino.ai/latest/openvino_docs_install_guides_configurations_for_intel_gpu.html#linux) + * To configure Intel® Processor Graphics(GPU) please follow these instructions: [Windows](https://docs.openvino.ai/2024/get-started/configurations/configurations-intel-gpu.html#windows), [Linux](https://docs.openvino.ai/2024/get-started/configurations/configurations-intel-gpu.html#linux) 3. Initialize the OpenVINO™ environment by running the setupvars script as shown below. This is a required step: @@ -306,7 +306,7 @@ See more information on the OpenVINO™ Execution Provider [here](../execution-p * `--use_openvino` builds the OpenVINO™ Execution Provider in ONNX Runtime. * ``: Specifies the default hardware target for building OpenVINO™ Execution Provider. This can be overriden dynamically at runtime with another option (refer to [OpenVINO™-ExecutionProvider](../execution-providers/OpenVINO-ExecutionProvider.md#summary-of-options) for more details on dynamic device selection). Below are the options for different Intel target devices. -Refer to [Intel GPU device naming convention](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_GPU.html#device-naming-convention) for specifying the correct hardware target in cases where both integrated and discrete GPU's co-exist. +Refer to [Intel GPU device naming convention](https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.html#device-naming-convention) for specifying the correct hardware target in cases where both integrated and discrete GPU's co-exist. 
| Hardware Option | Target Device | | --------------- | ------------------------| diff --git a/docs/build/inferencing.md b/docs/build/inferencing.md index 125623ef28399..0403613ed8cd0 100644 --- a/docs/build/inferencing.md +++ b/docs/build/inferencing.md @@ -29,7 +29,7 @@ Basic CPU build cd onnxruntime ``` -* Install [Python 3.x](http://python.org/). +* Install [Python 3.x](https://python.org/). * Install [cmake-3.27](https://cmake.org/download/) or higher. @@ -394,7 +394,7 @@ This option is very fast and allows the package to be built in minutes, but is c TLDR; Go to https://www.linaro.org/downloads/, get "64-bit Armv8 Cortex-A, little-endian" and "Linux Targeted", not "Bare-Metal Targeted". Extract it to your build machine and add the bin folder to your $PATH env. Then skip this part. - You can use [GCC](https://gcc.gnu.org/) or [Clang](http://clang.llvm.org/). Both work, but instructions here are based on GCC. + You can use [GCC](https://gcc.gnu.org/) or [Clang](https://clang.llvm.org/). Both work, but instructions here are based on GCC. In GCC terms: * "build" describes the type of system on which GCC is being configured and compiled. @@ -412,7 +412,7 @@ This option is very fast and allows the package to be built in minutes, but is c COLLECT_GCC=/usr/bin/aarch64-linux-gnu-gcc COLLECT_LTO_WRAPPER=/usr/libexec/gcc/aarch64-linux-gnu/9/lto-wrapper Target: aarch64-linux-gnu - Configured with: ../gcc-9.2.1-20190827/configure --bindir=/usr/bin --build=x86_64-redhat-linux-gnu --datadir=/usr/share --disable-decimal-float --disable-dependency-tracking --disable-gold --disable-libgcj --disable-libgomp --disable-libmpx --disable-libquadmath --disable-libssp --disable-libunwind-exceptions --disable-shared --disable-silent-rules --disable-sjlj-exceptions --disable-threads --with-ld=/usr/bin/aarch64-linux-gnu-ld --enable-__cxa_atexit --enable-checking=release --enable-gnu-unique-object --enable-initfini-array --enable-languages=c,c++ --enable-linker-build-id --enable-lto --enable-nls --enable-obsolete --enable-plugin --enable-targets=all --exec-prefix=/usr --host=x86_64-redhat-linux-gnu --includedir=/usr/include --infodir=/usr/share/info --libexecdir=/usr/libexec --localstatedir=/var --mandir=/usr/share/man --prefix=/usr --program-prefix=aarch64-linux-gnu- --sbindir=/usr/sbin --sharedstatedir=/var/lib --sysconfdir=/etc --target=aarch64-linux-gnu --with-bugurl=http://bugzilla.redhat.com/bugzilla/ --with-gcc-major-version-only --with-isl --with-newlib --with-plugin-ld=/usr/bin/aarch64-linux-gnu-ld --with-sysroot=/usr/aarch64-linux-gnu/sys-root --with-system-libunwind --with-system-zlib --without-headers --enable-gnu-indirect-function --with-linker-hash-style=gnu + Configured with: ../gcc-9.2.1-20190827/configure --bindir=/usr/bin --build=x86_64-redhat-linux-gnu --datadir=/usr/share --disable-decimal-float --disable-dependency-tracking --disable-gold --disable-libgcj --disable-libgomp --disable-libmpx --disable-libquadmath --disable-libssp --disable-libunwind-exceptions --disable-shared --disable-silent-rules --disable-sjlj-exceptions --disable-threads --with-ld=/usr/bin/aarch64-linux-gnu-ld --enable-__cxa_atexit --enable-checking=release --enable-gnu-unique-object --enable-initfini-array --enable-languages=c,c++ --enable-linker-build-id --enable-lto --enable-nls --enable-obsolete --enable-plugin --enable-targets=all --exec-prefix=/usr --host=x86_64-redhat-linux-gnu --includedir=/usr/include --infodir=/usr/share/info --libexecdir=/usr/libexec --localstatedir=/var --mandir=/usr/share/man --prefix=/usr 
--program-prefix=aarch64-linux-gnu- --sbindir=/usr/sbin --sharedstatedir=/var/lib --sysconfdir=/etc --target=aarch64-linux-gnu --with-bugurl=https://bugzilla.redhat.com/bugzilla/ --with-gcc-major-version-only --with-isl --with-newlib --with-plugin-ld=/usr/bin/aarch64-linux-gnu-ld --with-sysroot=/usr/aarch64-linux-gnu/sys-root --with-system-libunwind --with-system-zlib --without-headers --enable-gnu-indirect-function --with-linker-hash-style=gnu Thread model: single gcc version 9.2.1 20190827 (Red Hat Cross 9.2.1-3) (GCC) ``` diff --git a/docs/execution-providers/OpenVINO-ExecutionProvider.md b/docs/execution-providers/OpenVINO-ExecutionProvider.md index fa71f70b0c277..6a64703b39abc 100644 --- a/docs/execution-providers/OpenVINO-ExecutionProvider.md +++ b/docs/execution-providers/OpenVINO-ExecutionProvider.md @@ -90,7 +90,7 @@ To use csharp api for openvino execution provider create a custom nuget package. ### OpenCL queue throttling for GPU devices -Enables [OpenCL queue throttling](https://docs.openvino.ai/latest/groupov_runtime_ocl_gpu_prop_cpp_api.html?highlight=throttling) for GPU devices. Reduces CPU utilization when using GPUs with OpenVINO EP. +Enables [OpenCL queue throttling](https://docs.openvino.ai/2024/api/c_cpp_api/group__ov__runtime__ocl__gpu__prop__cpp__api.html) for GPU devices. Reduces CPU utilization when using GPUs with OpenVINO EP. ### Model caching @@ -118,7 +118,7 @@ Int8 models are supported on CPU, GPU and NPU. OpenVINO™ Execution Provider now supports ONNX models that store weights in external files. It is especially useful for models larger than 2GB because of protobuf limitations. -See the [OpenVINO™ ONNX Support documentation](https://docs.openvino.ai/latest/classov_1_1Core.html). +See the [OpenVINO™ ONNX Support documentation](https://docs.openvino.ai/2024/openvino-workflow/model-preparation/convert-model-onnx.html). Converting and Saving an ONNX Model to External Data: Use the ONNX API's.[documentation](https://github.com/onnx/onnx/blob/master/docs/ExternalData.md#converting-and-saving-an-onnx-model-to-external-data). @@ -177,7 +177,7 @@ Use `AUTO:..` as the device name to delegate selection of an From the application point of view, this is just another device that handles all accelerators in full system. For more information on Auto-Device plugin of OpenVINO™, please refer to the -[Intel OpenVINO™ Auto Device Plugin](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_AUTO.html). +[Intel OpenVINO™ Auto Device Plugin](https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.html#automatic-device-selection). ### Heterogeneous Execution for OpenVINO™ Execution Provider @@ -186,7 +186,7 @@ The heterogeneous execution enables computing for inference on one network on se * To utilize accelerator's power and calculate the heaviest parts of the network on the accelerator and execute unsupported layers on fallback devices like the CPU to utilize all available hardware more efficiently during one inference. For more information on Heterogeneous plugin of OpenVINO™, please refer to the -[Intel OpenVINO™ Heterogeneous Plugin](https://docs.openvino.ai/latest/openvino_docs_OV_UG_Hetero_execution.html). +[Intel OpenVINO™ Heterogeneous Plugin](https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/hetero-execution.html). 
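Since several of the hunks above revolve around OpenVINO™ device selection (`device_type`, `AUTO`, `HETERO`), a small Python sketch of how those options reach the runtime may help. This is an illustration only: the model path is a placeholder, and it assumes an onnxruntime package or build with the OpenVINO™ Execution Provider enabled (for example, one built with `--use_openvino` as in `docs/build/eps.md` above).

```python
# Minimal sketch, assuming an onnxruntime build/package that includes the
# OpenVINO Execution Provider. "model.onnx" is a placeholder path.
import onnxruntime as ort

session = ort.InferenceSession(
    "model.onnx",
    providers=[
        # HETERO runs supported subgraphs on the GPU and falls back to the CPU,
        # matching the heterogeneous-execution section above.
        ("OpenVINOExecutionProvider", {"device_type": "HETERO:GPU,CPU"}),
        "CPUExecutionProvider",  # last-resort fallback if the OpenVINO EP rejects a node
    ],
)

print(session.get_providers())  # confirms which providers were actually registered
```

Substituting `"AUTO:GPU,CPU"` or a specific device such as `"GPU.1"` for the `device_type` value exercises the auto-device selection and the GPU device-naming convention linked above.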
### Multi-Device Execution for OpenVINO EP @@ -196,7 +196,7 @@ Multi-Device plugin automatically assigns inference requests to available comput * More consistent performance, since the devices can now share the inference burden (so that if one device is becoming too busy, another device can take more of the load) For more information on Multi-Device plugin of OpenVINO™, please refer to the -[Intel OpenVINO™ Multi Device Plugin](https://docs.openvino.ai/latest/openvino_docs_OV_UG_Running_on_multiple_devices.html). +[Intel OpenVINO™ Multi Device Plugin](https://docs.openvino.ai/2024/openvino-workflow/running-inference/inference-devices-and-modes/gpu-device.html#multi-stream-execution). ### Export OpenVINO Compiled Blob Export the OpenVINO compiled blob as an ONNX model. Using this ONNX model for subsequent inferences avoids model recompilation and could have a positive impact on Session creation time. This feature is currently enabled for fully supported models only. It complies with the ORT session config keys diff --git a/docs/extensions/add-op.md b/docs/extensions/add-op.md index eeb3bf762980a..17d4d47fe4b64 100644 --- a/docs/extensions/add-op.md +++ b/docs/extensions/add-op.md @@ -70,7 +70,7 @@ the custom operator kernel C++ code example can be found [operators](https://git * the third libraries API docs integrated in ONNXRuntime Extensions the can be used in C++ code - OpenCV API docs https://docs.opencv.org/4.x/ - Google SentencePiece Library docs https://github.com/google/sentencepiece/blob/master/doc/api.md - - dlib(matrix and ML library) C++ API docs http://dlib.net/algorithms.html + - dlib(matrix and ML library) C++ API docs https://dlib.net/algorithms.html - BlingFire Library https://github.com/microsoft/BlingFire - Google RE2 Library https://github.com/google/re2/wiki/CplusplusAPI - JSON library https://json.nlohmann.me/api/basic_json/ diff --git a/docs/genai/tutorials/finetune.md b/docs/genai/tutorials/finetune.md index 5d0302b896dfc..3dd739d80340b 100644 --- a/docs/genai/tutorials/finetune.md +++ b/docs/genai/tutorials/finetune.md @@ -65,7 +65,7 @@ Olive generates models and adapters in ONNX format. These models and adapters ca Note: this operations requires a system with an NVIDIA GPU, with CUDA installed - Use the `olive fine-tune` command: https://microsoft.github.io/Olive/features/cli.html#finetune + Use the `olive fine-tune` command: https://microsoft.github.io/Olive/how-to/cli/cli-finetune.html Here is an example usage of the command: @@ -75,12 +75,12 @@ Olive generates models and adapters in ONNX format. These models and adapters ca 2. Optionally, quantize your model - Use the `olive quantize` command: https://microsoft.github.io/Olive/features/cli.html#quantize + Use the `olive quantize` command: https://microsoft.github.io/Olive/how-to/cli/cli-quantize.html 3. Generate the ONNX model and adapter using the quantized model - Use the `olive auto-opt` command for this step: https://microsoft.github.io/Olive/features/cli.html#auto-opt + Use the `olive auto-opt` command for this step: https://microsoft.github.io/Olive/how-to/cli/cli-auto-opt.html The `--adapter path` can either be a HuggingFace adapter reference, or a path to the adapter you fine-tuned above. 
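To show where the artifacts from `olive auto-opt` end up, here is a hedged sketch of the consuming side using the onnxruntime-genai Python bindings. The `Adapters`, `load`, and `set_active_adapter` names are assumptions based on the GenAI adapter API referenced at the end of this tutorial, and the paths and adapter name are placeholders; the tutorial's own `app.py` remains the authoritative example.

```python
# Hedged sketch only: the class/method names below (Adapters, load,
# set_active_adapter) are assumptions about the onnxruntime-genai adapter API;
# all paths and the adapter name are placeholders.
import onnxruntime_genai as og

model = og.Model("model/")                           # output folder from `olive auto-opt`
adapters = og.Adapters(model)
adapters.load("adapter.onnx_adapter", "my-adapter")  # hypothetical adapter file and name

tokenizer = og.Tokenizer(model)
params = og.GeneratorParams(model)
params.set_search_options(max_length=128)

generator = og.Generator(model, params)
generator.set_active_adapter(adapters, "my-adapter")
generator.append_tokens(tokenizer.encode("Why is the sky blue?"))

while not generator.is_done():
    generator.generate_next_token()
print(tokenizer.decode(generator.get_sequence(0)))
```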
@@ -162,4 +162,4 @@ python app.py -m -a <.onnx_adapter files> -t -s ## References * [Python API docs](../api/python.md#adapter-class) -* [Olive CLI docs](https://microsoft.github.io/Olive/features/cli.html) +* [Olive CLI docs](https://microsoft.github.io/Olive/how-to/index.html#working-with-the-cli) diff --git a/docs/get-started/with-c.md b/docs/get-started/with-c.md index 8817a35c72717..362579b0f7225 100644 --- a/docs/get-started/with-c.md +++ b/docs/get-started/with-c.md @@ -61,7 +61,7 @@ is as follows * Call ```Run()``` as usual * **Share allocator(s) between sessions:** * *Description*: This feature allows multiple sessions in the same process to use the same allocator(s). - * *Scenario*: You've several sessions in the same process and see high memory usage. One of the reasons for this is as follows. Each session creates its own CPU allocator which is arena based by default. [ORT implements](https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/bfc_arena.h) a simplified version of an arena allocator that is based on [Doug Lea's best-first with coalescing algorithm](http://gee.cs.oswego.edu/dl/html/malloc.html). Each allocator lives in its own session. It allocates a large region of memory during init time and thereafter it chunks, coalesces and extends this initial region as per allocation/deallocation demands. Overtime the arena ends up with unused chunks of memory per session. Moreover, the memory allocated by the arena is never returned to the system; once allocated it always remains allocated. All these factors add up when using multiple sessions (each with its own arena) thereby increasing the overall memory consumption of the process. Hence it becomes important to share the arena allocator between sessions. + * *Scenario*: You've several sessions in the same process and see high memory usage. One of the reasons for this is as follows. Each session creates its own CPU allocator which is arena based by default. [ORT implements](https://github.com/microsoft/onnxruntime/blob/main/onnxruntime/core/framework/bfc_arena.h) a simplified version of an arena allocator that is based on [Doug Lea's best-first with coalescing algorithm](https://gee.cs.oswego.edu/dl/html/malloc.html). Each allocator lives in its own session. It allocates a large region of memory during init time and thereafter it chunks, coalesces and extends this initial region as per allocation/deallocation demands. Overtime the arena ends up with unused chunks of memory per session. Moreover, the memory allocated by the arena is never returned to the system; once allocated it always remains allocated. All these factors add up when using multiple sessions (each with its own arena) thereby increasing the overall memory consumption of the process. Hence it becomes important to share the arena allocator between sessions. * *Usage*: * Create and register a shared allocator with the env using the ```CreateAndRegisterAllocator``` API. This allocator is then reused by all sessions that use the same env instance unless a session chooses to override this by setting ```session_state.use_env_allocators``` to "0". 
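The shared-allocator scenario above is easier to follow with a short end-to-end script. The sketch below uses the Python bindings rather than the C API quoted in the text; `create_and_register_allocator` and the `"session.use_env_allocators"` config key are the rough Python counterparts of `CreateAndRegisterAllocator` and `session_state.use_env_allocators`, and the model paths are placeholders.

```python
# Sketch of sharing one arena-based CPU allocator across sessions via the env,
# assuming the standard onnxruntime Python bindings. Model paths are placeholders.
import onnxruntime as ort

# Register a single arena allocator on the global environment.
mem_info = ort.OrtMemoryInfo(
    "Cpu", ort.OrtAllocatorType.ORT_ARENA_ALLOCATOR, 0, ort.OrtMemType.DEFAULT
)
ort.create_and_register_allocator(mem_info, None)  # None -> default arena configuration

so = ort.SessionOptions()
# Opt sessions in to the env allocator instead of creating one arena per session.
so.add_session_config_entry("session.use_env_allocators", "1")

sess_a = ort.InferenceSession("model_a.onnx", sess_options=so, providers=["CPUExecutionProvider"])
sess_b = ort.InferenceSession("model_b.onnx", sess_options=so, providers=["CPUExecutionProvider"])
# Both sessions now draw on the shared arena, instead of each growing its own.
```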
diff --git a/docs/get-started/with-python.md b/docs/get-started/with-python.md index a93d9ec4a7c5a..82791aa3e75a4 100644 --- a/docs/get-started/with-python.md +++ b/docs/get-started/with-python.md @@ -281,4 +281,4 @@ For Python compiler version notes, see [this page](https://github.com/microsoft/ - [Python Tutorials](../tutorials/api-basics) * [TensorFlow with ONNX Runtime](../tutorials/tf-get-started.md) * [PyTorch with ONNX Runtime](https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html) -* [scikit-learn with ONNX Runtime](http://onnx.ai/sklearn-onnx/index_tutorial.html) +* [scikit-learn with ONNX Runtime](https://onnx.ai/sklearn-onnx/index_tutorial.html) diff --git a/docs/tutorials/OpenVINO_EP_samples/squeezenet_classification_cpp.md b/docs/tutorials/OpenVINO_EP_samples/squeezenet_classification_cpp.md index d0759ac028d55..46d32902393b7 100644 --- a/docs/tutorials/OpenVINO_EP_samples/squeezenet_classification_cpp.md +++ b/docs/tutorials/OpenVINO_EP_samples/squeezenet_classification_cpp.md @@ -14,7 +14,7 @@ The source code for this sample is available [here](https://github.com/microsoft # How to build ## Prerequisites -1. [The Intel® Distribution of OpenVINO toolkit](https://docs.openvinotoolkit.org/latest/index.html) +1. [The Intel® Distribution of OpenVINO toolkit](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/overview.html) 2. Use opencv (use the same opencv package that comes builtin with Intel® Distribution of OpenVINO toolkit) diff --git a/docs/tutorials/OpenVINO_EP_samples/tiny_yolo_v2_object_detection_python.md b/docs/tutorials/OpenVINO_EP_samples/tiny_yolo_v2_object_detection_python.md index e6c6e756a2087..3ee8c610ef9d6 100644 --- a/docs/tutorials/OpenVINO_EP_samples/tiny_yolo_v2_object_detection_python.md +++ b/docs/tutorials/OpenVINO_EP_samples/tiny_yolo_v2_object_detection_python.md @@ -14,7 +14,7 @@ The source code for this sample is available [here](https://github.com/microsoft # How to build ## Prerequisites -1. [The Intel® Distribution of OpenVINO toolkit](https://docs.openvinotoolkit.org/latest/index.html) +1. [The Intel® Distribution of OpenVINO toolkit](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/overview.html) 2. Download the latest tinyYOLOv2 model from the ONNX Model Zoo. This model was adapted from [ONNX Model Zoo](https://github.com/onnx/models).Download the latest version of the [tinyYOLOv2](https://github.com/onnx/models/tree/main/validated/vision/object_detection_segmentation/tiny-yolov2) model from here. diff --git a/docs/tutorials/csharp/yolov3_object_detection_csharp.md b/docs/tutorials/csharp/yolov3_object_detection_csharp.md index dce5c44694eea..56f00b2a758eb 100644 --- a/docs/tutorials/csharp/yolov3_object_detection_csharp.md +++ b/docs/tutorials/csharp/yolov3_object_detection_csharp.md @@ -23,7 +23,7 @@ The source code for this sample is available [here](https://github.com/microsoft ## Prerequisites 1. Install [.NET Core 3.1](https://dotnet.microsoft.com/download/dotnet-core/3.1) or higher for you OS (Mac, Windows or Linux). -2. [The Intel® Distribution of OpenVINO toolkit](https://docs.openvinotoolkit.org/latest/index.html) +2. [The Intel® Distribution of OpenVINO toolkit](https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/overview.html) 3. Use any sample Image as input to the sample. 
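The three OpenVINO™ EP samples touched above share the same skeleton, so a compact Python sketch of that skeleton is added here for orientation. File names are placeholders, the provider list assumes the OpenVINO™ EP is present in your onnxruntime package or build, and the real samples layer model-specific pre/post-processing (anchor boxes, class labels, thresholds) on top.

```python
# Generic sketch of the sample flow, with placeholder file names; it assumes an
# onnxruntime package/build that exposes the OpenVINO Execution Provider.
import numpy as np
import onnxruntime as ort
from PIL import Image

session = ort.InferenceSession(
    "tinyyolov2-8.onnx",  # placeholder: the model downloaded from the ONNX Model Zoo
    providers=["OpenVINOExecutionProvider", "CPUExecutionProvider"],
)

inp = session.get_inputs()[0]                       # query the input instead of hardcoding it
_, c, h, w = [d if isinstance(d, int) else 1 for d in inp.shape]  # assumes an NCHW input

img = Image.open("input.jpg").convert("RGB").resize((w, h))
x = np.asarray(img, dtype=np.float32).transpose(2, 0, 1)[np.newaxis]  # HWC -> NCHW, add batch

outputs = session.run(None, {inp.name: x})
print([o.shape for o in outputs])                   # raw tensors for the sample's post-processing
```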
diff --git a/docs/tutorials/on-device-training/ios-app.md b/docs/tutorials/on-device-training/ios-app.md index e61bab68596ff..91a0e9ed39f7b 100644 --- a/docs/tutorials/on-device-training/ios-app.md +++ b/docs/tutorials/on-device-training/ios-app.md @@ -964,5 +964,5 @@ Congratulations! You have successfully built an iOS application that can train a -[Back to top](#contents) +[Back to top](#table-of-contents) diff --git a/docs/tutorials/traditional-ml.md b/docs/tutorials/traditional-ml.md index 29fce1bd3e6b4..0d7987f0a6323 100644 --- a/docs/tutorials/traditional-ml.md +++ b/docs/tutorials/traditional-ml.md @@ -17,10 +17,10 @@ ONNX Runtime supports [ONNX-ML](https://github.com/onnx/onnx/blob/master/docs/Op {:toc} ## Convert model to ONNX -* [Scikit-learn conversion](http://onnx.ai/sklearn-onnx/tutorial_1_simple.html) -* [Scikit-learn custom conversion](http://onnx.ai/sklearn-onnx/tutorial_2_new_converter.html) -* [XGBoost conversion](http://onnx.ai/sklearn-onnx/auto_tutorial/plot_gexternal_xgboost.html) -* [LightGBM conversion](http://onnx.ai/sklearn-onnx/auto_tutorial/plot_gexternal_lightgbm.html) +* [Scikit-learn conversion](https://onnx.ai/sklearn-onnx/tutorial_1_simple.html) +* [Scikit-learn custom conversion](https://onnx.ai/sklearn-onnx/tutorial_2_new_converter.html) +* [XGBoost conversion](https://onnx.ai/sklearn-onnx/auto_tutorial/plot_gexternal_xgboost.html) +* [LightGBM conversion](https://onnx.ai/sklearn-onnx/auto_tutorial/plot_gexternal_lightgbm.html) * [ONNXMLTools samples](https://github.com/onnx/onnxmltools/tree/master/docs/examples) ## Deploy model diff --git a/package.json b/package.json index 683971995f411..e244b186b7f8d 100644 --- a/package.json +++ b/package.json @@ -36,7 +36,7 @@ "prettier-plugin-svelte": "^2.10.1", "svelte": "^4.0.5", "svelte-check": "^3.4.3", - "svelte-highlight": "^7.3.0", + "svelte-highlight": "^7.7.0", "svelte-preprocess-import-assets": "^1.0.1", "svelte-sitemap": "^2.6.0", "tailwindcss": "^3.3.3", diff --git a/src/app.html b/src/app.html index cdfdad8b3f2dc..5942b992b8802 100644 --- a/src/app.html +++ b/src/app.html @@ -36,7 +36,7 @@ }, propertyConfiguration: { // Properties Plugin configuration - gpcDataSharingOptIn: false, + gpcDataSharingOptIn: false, callback: { userConsentDetails: _getWcpUserConsentDetails }, diff --git a/src/images/ONNX-Dark.svelte b/src/images/ONNX-Dark.svelte index 1146f894ff920..d47caae5ca913 100644 --- a/src/images/ONNX-Dark.svelte +++ b/src/images/ONNX-Dark.svelte @@ -3,7 +3,7 @@ export let height: number = 175; - + - + + + diff --git a/src/lib/components/ui/InfiniteMovingCards/InfiniteMovingCards.svelte b/src/lib/components/ui/InfiniteMovingCards/InfiniteMovingCards.svelte index 6c56315258661..cdd4f6d84d9aa 100644 --- a/src/lib/components/ui/InfiniteMovingCards/InfiniteMovingCards.svelte +++ b/src/lib/components/ui/InfiniteMovingCards/InfiniteMovingCards.svelte @@ -59,22 +59,26 @@ }; const toggleScroll = () => { - if (scrollerRef) { - const currentState = window.getComputedStyle(scrollerRef).animationPlayState; - scrollerRef.style.animationPlayState = currentState === 'running' ? 'paused' : 'running'; - } - }; + if (scrollerRef) { + const currentState = window.getComputedStyle(scrollerRef).animationPlayState; + scrollerRef.style.animationPlayState = currentState === 'running' ? 
'paused' : 'running'; + } + }; - const handleKeyDown = (event: { key: string; preventDefault: () => void; }) => { - if (event.key === 'Enter' || event.key === ' ') { - event.preventDefault(); // Prevent default spacebar scrolling behavior - toggleScroll(); - } - }; + const handleKeyDown = (event: { key: string; preventDefault: () => void }) => { + if (event.key === 'Enter' || event.key === ' ') { + event.preventDefault(); // Prevent default spacebar scrolling behavior + toggleScroll(); + } + };
[markup hunk not recoverable: the HTML tags wrapping the {#each items as item, idx (item.alt)} / {item.alt} / {/each} block were stripped during extraction; only these Svelte expressions and their re-indentation survive in this hunk]
diff --git a/src/routes/+layout.svelte b/src/routes/+layout.svelte index bb044615fad0b..2b45599770192 100644 --- a/src/routes/+layout.svelte +++ b/src/routes/+layout.svelte @@ -2,7 +2,7 @@ import '../app.css'; import Header from './components/header.svelte'; import Footer from './components/footer.svelte'; - import oneLight from 'svelte-highlight/styles/one-light'; + import a11yLight from 'svelte-highlight/styles/a11y-light'; import { fade } from 'svelte/transition'; import { page } from '$app/stores'; export let data; @@ -10,27 +10,33 @@ - {@html oneLight} + {@html a11yLight} {#if !data.pathname.startsWith('/blogs/')} - ONNX Runtime | {data.pathname == '/' - ? 'Home' - : data.pathname.substring(1).charAt(0).toUpperCase() + data.pathname.substring(2)} - - - + ONNX Runtime | {data.pathname == '/' + ? 'Home' + : data.pathname.substring(1).charAt(0).toUpperCase() + data.pathname.substring(2)} + + + {/if} - + @@ -41,8 +47,6 @@ - -
{#if !$page.url.pathname.startsWith('/blogs/')} diff --git a/src/routes/+page.svelte b/src/routes/+page.svelte index a99341fec4d66..3f0b66e5ae29c 100644 --- a/src/routes/+page.svelte +++ b/src/routes/+page.svelte @@ -42,12 +42,15 @@ } }); }); - let description = 'Cross-platform accelerated machine learning. Built-in optimizations speed up training and inferencing with your existing technology stack.' - let image = 'https://i.ibb.co/0YBy62j/ORT-icon-for-light-bg.png' - let imageSquare = 'https://i.ibb.co/0YBy62j/ORT-icon-for-light-bg.png' - let authors = [''] - let keywords = 'onnx runtime, onnx, onnxruntime, onnx runtime performance, onnx runtime training, onnx runtime inference, onnx runtime cross-platform, onnx runtime cross platform, onnx runtime crossplatform, onnx runtime training and inference, onnx runtime training & inference, onnx runtime training inference, onnx runtime training, onnx runtime inference, onnx runtime cross-platform, onnx runtime cross platform, onnx runtime crossplatform, onnx runtime training and inference, onnx runtime training & inference, onnx runtime training inference' + let description = + 'Cross-platform accelerated machine learning. Built-in optimizations speed up training and inferencing with your existing technology stack.'; + let image = 'https://i.ibb.co/0YBy62j/ORT-icon-for-light-bg.png'; + let imageSquare = 'https://i.ibb.co/0YBy62j/ORT-icon-for-light-bg.png'; + let authors = ['']; + let keywords = + 'onnx runtime, onnx, onnxruntime, onnx runtime performance, onnx runtime training, onnx runtime inference, onnx runtime cross-platform, onnx runtime cross platform, onnx runtime crossplatform, onnx runtime training and inference, onnx runtime training & inference, onnx runtime training inference, onnx runtime training, onnx runtime inference, onnx runtime cross-platform, onnx runtime cross platform, onnx runtime crossplatform, onnx runtime training and inference, onnx runtime training & inference, onnx runtime training inference'; + @@ -55,9 +58,9 @@ - + - + @@ -75,7 +78,7 @@