From 5b9369e93c704f55fd6a4e49934f41dccffe55a5 Mon Sep 17 00:00:00 2001 From: mindest <30493312+mindest@users.noreply.github.com> Date: Tue, 23 Jul 2024 04:37:32 +0800 Subject: [PATCH] Fix typos according to reviewdog report. (#21335) ### Description Fix typos based on reviewdog report but with some exceptions/corrections. --- .gitattributes | 2 +- ThirdPartyNotices.txt | 2 +- cmake/onnxruntime.cmake | 2 +- .../composable_kernel/Fix_Clang_Build.patch | 2 +- .../default/partials/title.tmpl.partial | 2 +- dockerfiles/README.md | 4 +- .../onnx-inference-byoc-gpu-cpu-aks.ipynb | 4 +- .../platform/EigenNonBlockingThreadPool.h | 2 +- .../core/providers/cuda/cuda_context.h | 12 +- .../core/providers/cuda/cuda_resource.h | 2 +- .../core/providers/rocm/rocm_context.h | 9 +- .../core/providers/rocm/rocm_resource.h | 2 +- .../core/session/onnxruntime_c_api.h | 19 ++-- .../core/session/onnxruntime_lite_custom_op.h | 2 +- java/build.gradle | 2 +- .../main/java/ai/onnxruntime/OnnxRuntime.java | 2 +- .../onnxruntime/providers/package-info.java | 2 +- java/src/test/java/sample/ScoreMNIST.java | 2 +- .../backends/webgl/glsl-coordinate-lib.ts | 2 +- js/web/lib/onnxjs/backends/webgl/ops/pack.ts | 2 +- .../cpu/attnlstm/deep_cpu_attn_lstm.h | 2 +- .../cpu/transformers/sampling_cpu_helper.h | 2 +- onnxruntime/core/codegen/common/common.cc | 2 +- onnxruntime/core/codegen/mti/common.h | 2 +- .../passes/scheduler/schedule_utils.cc | 4 +- .../codegen/passes/scheduler/schedule_utils.h | 4 +- .../passes/scheduler/tvm_schedule_builder.cc | 2 +- .../passes/weight_layout/weight_layout.h | 2 +- onnxruntime/core/common/logging/logging.cc | 2 +- onnxruntime/core/common/status.cc | 2 +- .../core/framework/allocation_planner.cc | 4 +- .../core/framework/allocation_planner.h | 6 +- .../framework/device_stream_collection.cc | 3 +- onnxruntime/core/framework/execution_frame.h | 2 +- .../partial_graph_execution_state.cc | 2 +- .../framework/sequential_execution_plan.h | 6 +- .../core/framework/sequential_executor.cc | 2 +- onnxruntime/core/framework/session_options.h | 2 +- onnxruntime/core/framework/session_state.cc | 4 +- onnxruntime/core/framework/sparse_tensor.cc | 4 +- onnxruntime/core/framework/tensorprotoutils.h | 6 +- onnxruntime/core/framework/utils.h | 2 +- .../mickey/gemm/warp/quantb_meta_loader.h | 8 +- onnxruntime/core/mlas/lib/convolve.cpp | 4 +- .../core/optimizer/attention_fusion_helper.h | 4 +- .../free_dim_override_transformer.cc | 4 +- .../core/optimizer/insert_cast_transformer.cc | 6 +- .../onnx_transpose_optimization.cc | 4 +- .../core/providers/acl/nn/batch_norm.cc | 2 +- onnxruntime/core/providers/acl/nn/pool.cc | 2 +- .../providers/armnn/activation/activations.cc | 2 +- onnxruntime/core/providers/armnn/math/gemm.h | 2 +- .../core/providers/armnn/nn/batch_norm.cc | 2 +- onnxruntime/core/providers/armnn/nn/conv.cc | 2 +- onnxruntime/core/providers/armnn/nn/pool.cc | 4 +- .../math/einsum_utils/einsum_auxiliary_ops.cc | 4 +- .../einsum_typed_compute_processor.cc | 2 +- .../cpu/object_detection/roialign.cc | 2 +- .../providers/cpu/sequence/sequence_ops.cc | 2 +- .../core/providers/cpu/tensor/unique.cc | 2 +- .../core/providers/cuda/cuda_allocator.cc | 2 +- .../core/providers/cuda/cuda_stream_handle.cc | 2 +- .../cuda/math/softmax_blockwise_impl.cuh | 2 +- onnxruntime/core/providers/cuda/nn/conv.cc | 4 +- .../cuda/object_detection/roialign_impl.cu | 106 +++++++++--------- .../providers/cuda/reduction/reduction_ops.cc | 2 +- .../core/providers/cuda/tensor/resize_impl.cu | 6 +- 
.../providers/cuda/tensor/transpose_impl.cu | 4 +- .../src/Operators/DmlOperatorFusedMatMul.cpp | 2 +- .../providers/dnnl/dnnl_execution_provider.cc | 2 +- .../providers/dnnl/dnnl_node_capability.cc | 6 +- .../core/providers/dnnl/subgraph/dnnl_conv.h | 8 +- .../providers/dnnl/subgraph/dnnl_convgrad.cc | 2 +- .../providers/dnnl/subgraph/dnnl_convgrad.h | 4 +- .../dnnl/subgraph/dnnl_dequantizelinear.cc | 2 +- .../providers/dnnl/subgraph/dnnl_matmul.cc | 8 +- .../providers/dnnl/subgraph/dnnl_reduce.cc | 8 +- .../dnnl/subgraph/dnnl_subgraph_primitive.h | 2 +- .../providers/dnnl/subgraph/dnnl_transpose.cc | 5 +- .../providers/migraphx/migraphx_allocator.cc | 2 +- .../migraphx/migraphx_stream_handle.cc | 2 +- .../nnapi_lib/nnapi_implementation.cc | 2 +- .../rknpu/rknpu_execution_provider.cc | 4 +- onnxruntime/core/providers/rocm/nn/conv.cc | 2 +- .../providers/rocm/reduction/reduction_ops.cc | 2 +- .../core/providers/rocm/rocm_allocator.cc | 2 +- .../core/providers/rocm/rocm_stream_handle.cc | 2 +- .../webnn/webnn_execution_provider.cc | 2 +- onnxruntime/core/session/IOBinding.cc | 2 +- onnxruntime/core/session/inference_session.cc | 2 +- onnxruntime/core/session/inference_session.h | 2 +- onnxruntime/core/util/qmath.h | 8 +- .../python/onnxruntime_pybind_schema.cc | 2 +- .../onnxruntime_pybind_sparse_tensor.cc | 4 +- .../python/onnxruntime_pybind_state.cc | 18 +-- .../python/tools/quantization/calibrate.py | 4 +- .../tools/quantization/operators/direct_q8.py | 2 +- .../python/tools/quantization/quant_utils.py | 2 +- .../python/tools/transformers/README.md | 2 +- .../python/tools/transformers/benchmark.py | 2 +- .../transformers/large_model_exporter.py | 2 +- .../models/gpt2/benchmark_gpt2.py | 2 +- .../models/gpt2/convert_to_onnx.py | 2 +- .../transformers/models/gpt2/gpt2_tester.py | 2 +- .../PyTorch_Bert-Squad_OnnxRuntime_GPU.ipynb | 2 +- .../tools/transformers/onnx_model_phi.py | 2 +- .../tools/transformers/onnx_model_tnlr.py | 2 +- .../python/tools/transformers/optimizer.py | 2 +- .../tools/transformers/shape_optimizer.py | 2 +- .../contrib_ops/attention_lstm_op_test.cc | 16 +-- .../test/framework/allocation_planner_test.cc | 2 +- .../test/framework/inference_session_test.cc | 2 +- onnxruntime/test/framework/tunable_op_test.cc | 2 +- .../test/fuzzing/include/BetaDistribution.h | 4 +- onnxruntime/test/fuzzing/src/test.cpp | 2 +- onnxruntime/test/ir/graph_test.cc | 4 +- .../test/ir/schema_registry_manager_test.cc | 4 +- .../test/mlas/unittest/test_fgemm_fixture.h | 2 +- .../test/mlas/unittest/test_halfgemm.cpp | 2 +- .../test/mlas/unittest/test_pool2d_fixture.h | 2 +- .../test/mlas/unittest/test_pool3d_fixture.h | 2 +- .../test/mlas/unittest/test_qgemm_fixture.h | 2 +- .../test/mlas/unittest/test_sbgemm.cpp | 4 +- .../mlas/unittest/test_symm_qgemm_fixture.h | 2 +- .../optimizer/transpose_optimizer_test.cc | 4 +- onnxruntime/test/perftest/ReadMe.txt | 2 +- onnxruntime/test/perftest/ort_test_session.cc | 2 +- .../platform/android/cxa_demangle_test.cc | 2 +- .../providers/cpu/controlflow/scan_test.cc | 2 +- .../cpu/rnn/deep_cpu_lstm_op_test.cc | 4 +- .../cuda/test_cases/allocator_cuda_test.cc | 2 +- .../matmul_post_op_transform_test.cc | 6 +- .../internal_testing_partitioning_tests.cc | 2 +- .../test/providers/qnn/qnn_ep_context_test.cc | 4 +- .../test/python/onnxruntime_test_python.py | 2 +- .../python/onnxruntime_test_python_mlops.py | 2 +- .../onnxruntime_test_python_sparse_matmul.py | 2 +- .../test_quantize_static_resnet.py | 2 +- .../python/transformers/test_generation.py | 2 +- 
onnxruntime/test/shared_lib/test_inference.cc | 6 +- .../model_creation_for_testing.ipynb | 2 +- .../self_attention_megatron_basic_test.py | 2 +- ...transpose_optimizer_shared_initializers.py | 2 +- onnxruntime/wasm/api.h | 2 +- .../core/framework/adasum/adasum_interface.h | 2 +- .../core/framework/ortmodule_graph_builder.cc | 2 +- .../orttraining/core/framework/pipeline.cc | 2 +- .../core/graph/mixed_precision_transformer.cc | 2 +- .../core/graph/optimizer_graph_builder.h | 2 +- .../core/graph/pipeline_transformer.cc | 4 +- .../core/graph/training_op_defs.cc | 22 ++-- .../core/optimizer/graph_transformer_config.h | 2 +- .../core/session/training_session.cc | 4 +- orttraining/orttraining/models/bert/main.cc | 16 +-- orttraining/orttraining/models/mnist/main.cc | 16 +-- .../models/runner/training_runner.cc | 2 +- .../python/training/onnxblock/blocks.py | 4 +- .../python/training/ortmodule/__init__.py | 2 +- .../cuda/fused_ops/fused_ops_frontend.cpp | 6 +- .../test/distributed/partition_utils.h | 6 +- .../orttraining/test/graph/bert_toy_fetches.h | 2 +- .../python/orttraining_test_ortmodule_api.py | 2 +- .../test/python/qat_poc_example/qat.py | 2 +- .../training_ops/cuda/cross_entropy_test.cc | 2 +- .../orttraining/training_api/optimizer.cc | 6 +- .../orttraining/training_api/optimizer.h | 2 +- .../cpu/activation/activations_grad.cc | 2 +- .../training_ops/cuda/math/div_grad_impl.cu | 2 +- .../training_ops/cuda/optimizer/lamb.cc | 2 +- .../training_ops/cuda/optimizer/lamb_impl.cu | 2 +- .../tools/scripts/layer_norm_transform.py | 2 +- orttraining/tools/scripts/model_transform.py | 2 +- .../tools/scripts/opset12_model_transform.py | 2 +- rust/onnxruntime-sys/examples/c_api_sample.rs | 4 +- .../src/tensor/ort_output_tensor.rs | 2 +- tools/ci_build/build.py | 4 +- .../azure-pipelines/web-ci-pipeline.yml | 2 +- tools/python/util/android/android.py | 2 +- winml/adapter/winml_adapter_session.cpp | 2 +- ...rosoft.AI.MachineLearning.Experimental.idl | 2 +- winml/api/Windows.AI.MachineLearning.idl | 2 +- winml/lib/Api/LearningModelBinding.cpp | 2 +- winml/lib/Api/impl/NumericData.h | 2 +- .../test/api/LearningModelSessionAPITest.cpp | 2 +- winml/test/common/googleTestMacros.h | 2 +- winml/test/common/taefTestMacros.h | 2 +- winml/test/common/test.h | 6 +- winml/test/image/imagetests.cpp | 4 +- winml/test/model/model_tests.cpp | 2 +- 189 files changed, 380 insertions(+), 360 deletions(-) diff --git a/.gitattributes b/.gitattributes index 41eae6dac52f5..8bfd419922d6b 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,4 @@ -# This sets the default behaviour, overriding core.autocrlf +# This sets the default behavior, overriding core.autocrlf * text=auto # All source files should have unix line-endings in the repository, diff --git a/ThirdPartyNotices.txt b/ThirdPartyNotices.txt index 8ec770da22159..6a11f414361bd 100644 --- a/ThirdPartyNotices.txt +++ b/ThirdPartyNotices.txt @@ -4820,7 +4820,7 @@ SOFTWARE. ---------------------------------------------------------------------------- -This is the MIT/Expat Licence. For more information see: +This is the MIT/Expat License. For more information see: 1. 
http://www.opensource.org/licenses/mit-license.php diff --git a/cmake/onnxruntime.cmake b/cmake/onnxruntime.cmake index 21ae0947f3788..0e89c2f14d34b 100644 --- a/cmake/onnxruntime.cmake +++ b/cmake/onnxruntime.cmake @@ -150,7 +150,7 @@ endif() if(CMAKE_SYSTEM_NAME STREQUAL "Android" AND onnxruntime_MINIMAL_BUILD) # target onnxruntime is a shared library, the dummy __cxa_demangle is only attach to it to avoid - # affecting downstream ort library users with the behaviour of dummy __cxa_demangle. So the dummy + # affecting downstream ort library users with the behavior of dummy __cxa_demangle. So the dummy # __cxa_demangle must not expose to libonnxruntime_common.a. It works as when the linker is # creating the DSO, our dummy __cxa_demangle always comes before libc++abi.a so the # __cxa_demangle in libc++abi.a is discarded, thus, huge binary size reduction. diff --git a/cmake/patches/composable_kernel/Fix_Clang_Build.patch b/cmake/patches/composable_kernel/Fix_Clang_Build.patch index 0352f8eb9bb34..73ece647d82c7 100644 --- a/cmake/patches/composable_kernel/Fix_Clang_Build.patch +++ b/cmake/patches/composable_kernel/Fix_Clang_Build.patch @@ -44,7 +44,7 @@ index c23746e7f..bc326c8b5 100644 find_package(HIP REQUIRED) # Override HIP version in config.h, if necessary. @@ -269,12 +248,6 @@ if( DEFINED CK_OVERRIDE_HIP_VERSION_PATCH ) - message(STATUS "CK_HIP_VERSION_PATCH overriden with ${CK_OVERRIDE_HIP_VERSION_PATCH}") + message(STATUS "CK_HIP_VERSION_PATCH overridden with ${CK_OVERRIDE_HIP_VERSION_PATCH}") endif() message(STATUS "Build with HIP ${HIP_VERSION}") -link_libraries(hip::device) diff --git a/csharp/ApiDocs/_exported_templates/default/partials/title.tmpl.partial b/csharp/ApiDocs/_exported_templates/default/partials/title.tmpl.partial index 38c62fe55f603..fd589fd74877c 100644 --- a/csharp/ApiDocs/_exported_templates/default/partials/title.tmpl.partial +++ b/csharp/ApiDocs/_exported_templates/default/partials/title.tmpl.partial @@ -39,7 +39,7 @@ Event {{name.0.value}} Operator {{name.0.value}} {{/inOperator}} {{#inEii}} -Explict Interface Implementation {{name.0.value}} +Explicit Interface Implementation {{name.0.value}} {{/inEii}} {{#inVariable}} Variable {{name.0.value}} diff --git a/dockerfiles/README.md b/dockerfiles/README.md index a2e99d66d4654..008587a01082b 100644 --- a/dockerfiles/README.md +++ b/dockerfiles/README.md @@ -32,7 +32,7 @@ docker run -it onnxruntime-source ``` -The docker file supports both x86_64 and ARM64(aarch64). You may use docker's "--platform" parameter to explictly specify which CPU architecture you want to build. For example: +The docker file supports both x86_64 and ARM64(aarch64). You may use docker's "--platform" parameter to explicitly specify which CPU architecture you want to build. For example: ```bash docker build --platform linux/arm64/v8 -f Dockerfile.source @@ -274,7 +274,7 @@ Note: You may add --use_tensorrt and --tensorrt_home options if you wish to use Note: Resulting Docker image will have ONNX Runtime installed in /usr, and ONNX Runtime wheel copied to /onnxruntime directory. Nothing else from ONNX Runtime source tree will be copied/installed to the image. -Note: When running the container you built in Docker, please either use 'nvidia-docker' command instead of 'docker', or use Docker command-line options to make sure NVIDIA runtime will be used and appropiate files mounted from host. Otherwise, CUDA libraries won't be found. 
You can also [set NVIDIA runtime as default in Docker](https://github.com/dusty-nv/jetson-containers#docker-default-runtime). +Note: When running the container you built in Docker, please either use 'nvidia-docker' command instead of 'docker', or use Docker command-line options to make sure NVIDIA runtime will be used and appropriate files mounted from host. Otherwise, CUDA libraries won't be found. You can also [set NVIDIA runtime as default in Docker](https://github.com/dusty-nv/jetson-containers#docker-default-runtime). ## MIGraphX **Ubuntu 20.04, ROCm6.0, MIGraphX** diff --git a/docs/python/notebooks/onnx-inference-byoc-gpu-cpu-aks.ipynb b/docs/python/notebooks/onnx-inference-byoc-gpu-cpu-aks.ipynb index be34a812c77db..c1278b63a84d3 100644 --- a/docs/python/notebooks/onnx-inference-byoc-gpu-cpu-aks.ipynb +++ b/docs/python/notebooks/onnx-inference-byoc-gpu-cpu-aks.ipynb @@ -64,7 +64,7 @@ "If you are using an Azure Machine Learning Notebook VM, you are all set. Otherwise, please follow the [Azure ML configuration notebook](https://github.com/Azure/MachineLearningNotebooks/blob/master/configuration.ipynb) to set up your environment.\n", "\n", "### Install additional packages needed for this Notebook\n", - "You need to install the popular plotting library matplotlib, the image manipulation library opencv, and the onnx library in the conda environment where Azure Maching Learning SDK is installed.\n", + "You need to install the popular plotting library matplotlib, the image manipulation library opencv, and the onnx library in the conda environment where Azure Machine Learning SDK is installed.\n", "\n", "```\n", "(myenv) $ pip install matplotlib onnx opencv-python\n", @@ -79,7 +79,7 @@ "source": [ "## 1. Obtain a model from the ONNX Model Zoo\n", "\n", - "For more information on the Facial Emotion Recognition (FER+) model, you can explore the notebook explaning how to deploy [FER+ with ONNX Runtime on an ACI Instance](onnx-inference-facial-expression-recognition-deploy.ipynb)." + "For more information on the Facial Emotion Recognition (FER+) model, you can explore the notebook explaining how to deploy [FER+ with ONNX Runtime on an ACI Instance](onnx-inference-facial-expression-recognition-deploy.ipynb)." ] }, { diff --git a/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h b/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h index f9b694efb936f..e33007102e198 100644 --- a/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h +++ b/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h @@ -1129,7 +1129,7 @@ class ThreadPoolTempl : public onnxruntime::concurrency::ExtendedThreadPoolInter // // Ensure that the ThreadPoolParallelSection has sufficient workers to // execute a loop with degree of parallelism n. We track the number - // of workers already avaiable to the parallel section, prior to + // of workers already available to the parallel section, prior to // submitting tasks to the work queues to make up the total. 
// // Each worker will call in to worker_fn(idx) with a per-worker thread diff --git a/include/onnxruntime/core/providers/cuda/cuda_context.h b/include/onnxruntime/core/providers/cuda/cuda_context.h index 9ada01673d4d9..462b31bb433a5 100644 --- a/include/onnxruntime/core/providers/cuda/cuda_context.h +++ b/include/onnxruntime/core/providers/cuda/cuda_context.h @@ -53,7 +53,8 @@ struct CudaContext : public CustomOpContext { cudnn_conv_use_max_workspace = FetchResource(kernel_ctx, CudaResource::cudnn_conv_use_max_workspace_t); cudnn_conv1d_pad_to_nc1d = FetchResource(kernel_ctx, CudaResource::cudnn_conv1d_pad_to_nc1d_t); - enable_skip_layer_norm_strict_mode = FetchResource(kernel_ctx, CudaResource::enable_skip_layer_norm_strict_mode_t); + enable_skip_layer_norm_strict_mode = FetchResource( + kernel_ctx, CudaResource::enable_skip_layer_norm_strict_mode_t); prefer_nhwc = FetchResource(kernel_ctx, CudaResource::prefer_nhwc_t); use_tf32 = FetchResource(kernel_ctx, CudaResource::use_tf32_t); } @@ -61,13 +62,16 @@ struct CudaContext : public CustomOpContext { template T FetchResource(const OrtKernelContext& kernel_ctx, CudaResource resource_type) { if constexpr (sizeof(T) > sizeof(void*)) { - ORT_CXX_API_THROW("void* is not large enough to hold resource type: " + std::to_string(resource_type), OrtErrorCode::ORT_INVALID_ARGUMENT); + ORT_CXX_API_THROW("void* is not large enough to hold resource type: " + std::to_string(resource_type), + OrtErrorCode::ORT_INVALID_ARGUMENT); } const auto& ort_api = Ort::GetApi(); void* resource = {}; - OrtStatus* status = ort_api.KernelContext_GetResource(&kernel_ctx, ORT_CUDA_RESOUCE_VERSION, resource_type, &resource); + OrtStatus* status = ort_api.KernelContext_GetResource( + &kernel_ctx, ORT_CUDA_RESOURCE_VERSION, resource_type, &resource); if (status) { - ORT_CXX_API_THROW("Failed to fetch cuda ep resource, resouce type: " + std::to_string(resource_type), OrtErrorCode::ORT_RUNTIME_EXCEPTION); + ORT_CXX_API_THROW("Failed to fetch cuda ep resource, resource type: " + std::to_string(resource_type), + OrtErrorCode::ORT_RUNTIME_EXCEPTION); } T t = {}; memcpy(&t, &resource, sizeof(T)); diff --git a/include/onnxruntime/core/providers/cuda/cuda_resource.h b/include/onnxruntime/core/providers/cuda/cuda_resource.h index 00e7dec5727d1..555023c442c01 100644 --- a/include/onnxruntime/core/providers/cuda/cuda_resource.h +++ b/include/onnxruntime/core/providers/cuda/cuda_resource.h @@ -3,7 +3,7 @@ #include "core/providers/resource.h" -#define ORT_CUDA_RESOUCE_VERSION 3 +#define ORT_CUDA_RESOURCE_VERSION 3 enum CudaResource : int { cuda_stream_t = cuda_resource_offset, // 10000 diff --git a/include/onnxruntime/core/providers/rocm/rocm_context.h b/include/onnxruntime/core/providers/rocm/rocm_context.h index 5f04289a8c6e0..f187e0cbb3a89 100644 --- a/include/onnxruntime/core/providers/rocm/rocm_context.h +++ b/include/onnxruntime/core/providers/rocm/rocm_context.h @@ -23,21 +23,24 @@ struct RocmContext : public CustomOpContext { void* resource = {}; OrtStatus* status = nullptr; - status = ort_api.KernelContext_GetResource(&kernel_ctx, ORT_ROCM_RESOUCE_VERSION, RocmResource::hip_stream_t, &resource); + status = ort_api.KernelContext_GetResource( + &kernel_ctx, ORT_ROCM_RESOURCE_VERSION, RocmResource::hip_stream_t, &resource); if (status) { ORT_CXX_API_THROW("failed to fetch hip stream", OrtErrorCode::ORT_RUNTIME_EXCEPTION); } hip_stream = reinterpret_cast(resource); resource = {}; - status = ort_api.KernelContext_GetResource(&kernel_ctx, ORT_ROCM_RESOUCE_VERSION, 
RocmResource::miopen_handle_t, &resource); + status = ort_api.KernelContext_GetResource( + &kernel_ctx, ORT_ROCM_RESOURCE_VERSION, RocmResource::miopen_handle_t, &resource); if (status) { ORT_CXX_API_THROW("failed to fetch miopen handle", OrtErrorCode::ORT_RUNTIME_EXCEPTION); } miopen_handle = reinterpret_cast(resource); resource = {}; - status = ort_api.KernelContext_GetResource(&kernel_ctx, ORT_ROCM_RESOUCE_VERSION, RocmResource::rocblas_handle_t, &resource); + status = ort_api.KernelContext_GetResource( + &kernel_ctx, ORT_ROCM_RESOURCE_VERSION, RocmResource::rocblas_handle_t, &resource); if (status) { ORT_CXX_API_THROW("failed to fetch rocblas handle", OrtErrorCode::ORT_RUNTIME_EXCEPTION); } diff --git a/include/onnxruntime/core/providers/rocm/rocm_resource.h b/include/onnxruntime/core/providers/rocm/rocm_resource.h index 53f26c13e93e0..772447a1809d8 100644 --- a/include/onnxruntime/core/providers/rocm/rocm_resource.h +++ b/include/onnxruntime/core/providers/rocm/rocm_resource.h @@ -3,7 +3,7 @@ #include "core/providers/resource.h" -#define ORT_ROCM_RESOUCE_VERSION 1 +#define ORT_ROCM_RESOURCE_VERSION 1 enum RocmResource : int { hip_stream_t = rocm_resource_offset, diff --git a/include/onnxruntime/core/session/onnxruntime_c_api.h b/include/onnxruntime/core/session/onnxruntime_c_api.h index 5c61963a2f39c..5aafdd149e889 100644 --- a/include/onnxruntime/core/session/onnxruntime_c_api.h +++ b/include/onnxruntime/core/session/onnxruntime_c_api.h @@ -473,13 +473,13 @@ typedef struct OrtCUDAProviderOptions { /** \brief Enable TunableOp for using. * Set it to 1/0 to enable/disable TunableOp. Otherwise, it is disabled by default. - * This option can be overriden by environment variable ORT_CUDA_TUNABLE_OP_ENABLE. + * This option can be overridden by environment variable ORT_CUDA_TUNABLE_OP_ENABLE. */ int tunable_op_enable; /** \brief Enable TunableOp for tuning. * Set it to 1/0 to enable/disable TunableOp tuning. Otherwise, it is disabled by default. - * This option can be overriden by environment variable ORT_CUDA_TUNABLE_OP_TUNING_ENABLE. + * This option can be overridden by environment variable ORT_CUDA_TUNABLE_OP_TUNING_ENABLE. */ int tunable_op_tuning_enable; @@ -562,13 +562,13 @@ typedef struct OrtROCMProviderOptions { /** \brief Enable TunableOp for using. * Set it to 1/0 to enable/disable TunableOp. Otherwise, it is disabled by default. - * This option can be overriden by environment variable ORT_ROCM_TUNABLE_OP_ENABLE. + * This option can be overridden by environment variable ORT_ROCM_TUNABLE_OP_ENABLE. */ int tunable_op_enable; /** \brief Enable TunableOp for tuning. * Set it to 1/0 to enable/disable TunableOp tuning. Otherwise, it is disabled by default. - * This option can be overriden by environment variable ORT_ROCM_TUNABLE_OP_TUNING_ENABLE. + * This option can be overridden by environment variable ORT_ROCM_TUNABLE_OP_TUNING_ENABLE. */ int tunable_op_tuning_enable; @@ -2798,7 +2798,7 @@ struct OrtApi { * "initial_growth_chunk_size_bytes": (Possible) Size of the second allocation in the arena. * Only relevant if arena strategy is `kNextPowerOfTwo`. Use -1 to allow ORT to choose the default. * "max_power_of_two_extend_bytes": The maximum enxtend size if arena strategy is `kNextPowerOfTwo`. - * It is not an allocation limit, it is only a limit for extention when requested byte is less than the limit. + * It is not an allocation limit, it is only a limit for extension when requested byte is less than the limit. 
* When requested bytes is more than the limit, allocator will still return as requested. * Use -1 to allow ORT to choose the default 1GB for max_power_of_two_extend_bytes. * Ultimately, the allocation size is determined by the allocation memory request. @@ -4467,13 +4467,14 @@ struct OrtApi { * E.g. a cuda stream or a cublas handle * * \param context - Kernel context - * \param resouce_version - Version of the resource + * \param resource_version - Version of the resource * \param resource_id - Type of resource * \param resource - A pointer to returned resource * * \since Version 1.16. */ - ORT_API2_STATUS(KernelContext_GetResource, _In_ const OrtKernelContext* context, _In_ int resouce_version, _In_ int resource_id, _Outptr_ void** resource); + ORT_API2_STATUS(KernelContext_GetResource, _In_ const OrtKernelContext* context, _In_ int resource_version, + _In_ int resource_id, _Outptr_ void** resource); /** \brief Set user logging function * @@ -4528,10 +4529,10 @@ struct OrtApi { ORT_API2_STATUS(ShapeInferContext_GetAttribute, _In_ const OrtShapeInferContext* context, _In_ const char* attr_name, _Outptr_ const OrtOpAttr** attr); /** - * Set type and shape info of an ouput + * Set type and shape info of an output * * \param[in] context - * \param[in] index The index of the ouput + * \param[in] index The index of the output * \param[out] info Type shape info of the output * * \since Version 1.17. diff --git a/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h b/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h index ee60f25da115e..57a64380faeb0 100644 --- a/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h +++ b/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h @@ -403,7 +403,7 @@ using Variadic = TensorArray; Note: OrtLiteCustomOp inherits from OrtCustomOp to bridge tween a custom func/struct and ort core. The lifetime of an OrtLiteCustomOp instance is managed by customer code, not ort, so: -1. DO NOT cast OrtLiteCustomOp to OrtCustomOp and release since there is no virtual destructor in the hierachy. +1. DO NOT cast OrtLiteCustomOp to OrtCustomOp and release since there is no virtual destructor in the hierarchy. 2. OrtLiteCustomFunc and OrtLiteCustomStruct, as two sub-structs, can be released in form of OrtLiteCustomOp since all members are kept in the OrtLiteCustomOp, hence memory could still be recycled properly. Further, OrtCustomOp is a c struct bearing no v-table, so offspring structs are by design to be of zero virtual functions to maintain cast safety. diff --git a/java/build.gradle b/java/build.gradle index 3219b082994ff..8b4d5429b0f70 100644 --- a/java/build.gradle +++ b/java/build.gradle @@ -54,7 +54,7 @@ java { targetCompatibility = JavaVersion.VERSION_1_8 } -// This jar tasks serves as a CMAKE signalling +// This jar tasks serves as a CMAKE signaling // mechanism. The jar will be overwritten by allJar task jar { } diff --git a/java/src/main/java/ai/onnxruntime/OnnxRuntime.java b/java/src/main/java/ai/onnxruntime/OnnxRuntime.java index f552badd4f83e..b80debdde47c4 100644 --- a/java/src/main/java/ai/onnxruntime/OnnxRuntime.java +++ b/java/src/main/java/ai/onnxruntime/OnnxRuntime.java @@ -438,7 +438,7 @@ private static String mapLibraryName(String library) { /** * Extracts the providers array from the C API, converts it into an EnumSet. * - *
Throws IllegalArgumentException if a provider isn't recognised (note this exception should + *
Throws IllegalArgumentException if a provider isn't recognized (note this exception should * only happen during development of ONNX Runtime, if it happens at any other point, file an issue * on GitHub). * diff --git a/java/src/main/java/ai/onnxruntime/providers/package-info.java b/java/src/main/java/ai/onnxruntime/providers/package-info.java index 1f1e70a589f3a..33c24c6139f52 100644 --- a/java/src/main/java/ai/onnxruntime/providers/package-info.java +++ b/java/src/main/java/ai/onnxruntime/providers/package-info.java @@ -3,5 +3,5 @@ * Licensed under the MIT License. */ -/** Classes for controlling the behaviour of ONNX Runtime Execution Providers. */ +/** Classes for controlling the behavior of ONNX Runtime Execution Providers. */ package ai.onnxruntime.providers; diff --git a/java/src/test/java/sample/ScoreMNIST.java b/java/src/test/java/sample/ScoreMNIST.java index 6ecbc5cd56d10..efc7ef9fd6e47 100644 --- a/java/src/test/java/sample/ScoreMNIST.java +++ b/java/src/test/java/sample/ScoreMNIST.java @@ -242,7 +242,7 @@ public static void writeDataSKL(float[][] data, int[] indices, float[] values) { /** * Find the maximum probability and return it's index. * - * @param probabilities The probabilites. + * @param probabilities The probabilities. * @return The index of the max. */ public static int pred(float[] probabilities) { diff --git a/js/web/lib/onnxjs/backends/webgl/glsl-coordinate-lib.ts b/js/web/lib/onnxjs/backends/webgl/glsl-coordinate-lib.ts index 1f2b27c7bdea8..717233182ed8a 100644 --- a/js/web/lib/onnxjs/backends/webgl/glsl-coordinate-lib.ts +++ b/js/web/lib/onnxjs/backends/webgl/glsl-coordinate-lib.ts @@ -1234,7 +1234,7 @@ export class CoordsGlslLib extends GlslLib { } /** - * This is the main function to map from the given texture coordiantes (s,t) + * This is the main function to map from the given texture coordinates (s,t) * to logical indices for the output * There will only be one single variation of this * Also see coordsToOffset and offsetToIndices for input-specific versions diff --git a/js/web/lib/onnxjs/backends/webgl/ops/pack.ts b/js/web/lib/onnxjs/backends/webgl/ops/pack.ts index 42a275a96fb8a..37ef8c8fe2435 100644 --- a/js/web/lib/onnxjs/backends/webgl/ops/pack.ts +++ b/js/web/lib/onnxjs/backends/webgl/ops/pack.ts @@ -85,7 +85,7 @@ function getOutOfBoundsCondition(rank: number, shape: readonly number[], dims: s } /** - * code snippet to sample input texture with output coordiantes + * code snippet to sample input texture with output coordinates */ function getOutput(shape: readonly number[], dims: string[]): string { const rank = shape.length; diff --git a/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.h b/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.h index 326b2d8dc4925..bce8fd118e957 100644 --- a/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.h +++ b/onnxruntime/contrib_ops/cpu/attnlstm/deep_cpu_attn_lstm.h @@ -19,7 +19,7 @@ using onnxruntime::rnn::detail::Direction; using onnxruntime::rnn::detail::MakeDirection; // The class represents DeepCPU implementation of a long short term memory (LSTM) plus a Bahdanau Attention wraper. -// The equivilent python usage could be checked int the corresponding op test directory, attention_lstm_data_gen.py. +// The equivalent python usage could be checked int the corresponding op test directory, attention_lstm_data_gen.py. // Also please note that detail implementation re-used lot of code from current ONNXRuntime LSTM operator, refactor // is needed in future if this is become part of ONNX. 
class DeepCpuAttnLstmOp final : public OpKernel { diff --git a/onnxruntime/contrib_ops/cpu/transformers/sampling_cpu_helper.h b/onnxruntime/contrib_ops/cpu/transformers/sampling_cpu_helper.h index 413ef596cd118..2f41746c1d4e7 100644 --- a/onnxruntime/contrib_ops/cpu/transformers/sampling_cpu_helper.h +++ b/onnxruntime/contrib_ops/cpu/transformers/sampling_cpu_helper.h @@ -152,7 +152,7 @@ Status Sample(AllocatorPtr& allocator, 1, generator, *sampled_idx)); - // TODO: update presense_mask() + // TODO: update presence_mask() #ifdef DEBUG_GENERATION dumper->Print("sampled_idx", *sampled_idx); #endif diff --git a/onnxruntime/core/codegen/common/common.cc b/onnxruntime/core/codegen/common/common.cc index c2ae4ddba584e..818b919e99ef2 100644 --- a/onnxruntime/core/codegen/common/common.cc +++ b/onnxruntime/core/codegen/common/common.cc @@ -159,7 +159,7 @@ std::unique_ptr ToCapacity(const onnxruntime::GraphViewer& gr ORT_THROW_IF_ERROR(node.ForEachWithIndex(node.ImplicitInputDefs(), process_input_fn)); // Handle outouts - // two cases are considerd as outputs + // two cases are considered as outputs // 1. Output NodeArg is not used by any Node // 2. Output NodeArg is used by at least one Node out of this subgraph. // Note a NodeArg can be used by Nodes in and out of the subgraph at the same time. diff --git a/onnxruntime/core/codegen/mti/common.h b/onnxruntime/core/codegen/mti/common.h index 87bce55715ee1..d71e740b9284a 100644 --- a/onnxruntime/core/codegen/mti/common.h +++ b/onnxruntime/core/codegen/mti/common.h @@ -8,7 +8,7 @@ #define MTI_ASSERT(condition) \ if (!(condition)) { \ - std::string error_msg = "Not satsified: " #condition \ + std::string error_msg = "Not satisfied: " #condition \ ": line " + \ std::to_string(__LINE__) + \ " in file " + std::string(__FILE__) + "\n"; \ diff --git a/onnxruntime/core/codegen/passes/scheduler/schedule_utils.cc b/onnxruntime/core/codegen/passes/scheduler/schedule_utils.cc index 3595229bbe132..76c2ad509c401 100644 --- a/onnxruntime/core/codegen/passes/scheduler/schedule_utils.cc +++ b/onnxruntime/core/codegen/passes/scheduler/schedule_utils.cc @@ -74,7 +74,7 @@ bool ShouldTryVectorization( // Check the schedule of tensor // If it is not scheduled, try to vectorize it. // Note TryVectorization has to use with compute_root. -// Therefore, there is a safty check of tensor's schedule +// Therefore, there is a safety check of tensor's schedule bool TryVectorization( const tvm::Tensor& tensor, int64_t natural_vector_size, @@ -124,7 +124,7 @@ bool TryVectorization( // Check the schedule of tensor // If it is not scheduled, try to add compute_inline on it. // Note TryInlineSchedule cannot be used with compute_root. -// Therefore, there is a safty check of tensor's schedule. +// Therefore, there is a safety check of tensor's schedule. bool TryInlineSchedule( const tvm::Tensor& tensor, ScheduleContext& ctx) { diff --git a/onnxruntime/core/codegen/passes/scheduler/schedule_utils.h b/onnxruntime/core/codegen/passes/scheduler/schedule_utils.h index 757366b551cf8..4a0781f94d385 100644 --- a/onnxruntime/core/codegen/passes/scheduler/schedule_utils.h +++ b/onnxruntime/core/codegen/passes/scheduler/schedule_utils.h @@ -34,7 +34,7 @@ bool ShouldTryVectorization( // Check the schedule of tensor // If it is not scheduled, try to vectorize it. // Note TryVectorization has to use with compute_root. 
-// Therefore, there is a safty check of tensor's schedule +// Therefore, there is a safety check of tensor's schedule bool TryVectorization( const tvm::Tensor& tensor, int64_t natural_vector_size, @@ -43,7 +43,7 @@ bool TryVectorization( // Check the schedule of tensor // If it is not scheduled, try to add compute_inline on it. // Note TryInlineSchedule cannot be used with compute_root. -// Therefore, there is a safty check of tensor's schedule. +// Therefore, there is a safety check of tensor's schedule. bool TryInlineSchedule( const tvm::Tensor& tensor, ScheduleContext& ctx); diff --git a/onnxruntime/core/codegen/passes/scheduler/tvm_schedule_builder.cc b/onnxruntime/core/codegen/passes/scheduler/tvm_schedule_builder.cc index 6f0ffa14e8abb..2c8250198fa5f 100644 --- a/onnxruntime/core/codegen/passes/scheduler/tvm_schedule_builder.cc +++ b/onnxruntime/core/codegen/passes/scheduler/tvm_schedule_builder.cc @@ -39,7 +39,7 @@ void TVMScheduleBuilder::DumpAllSchedulers() const { d->ForEach([&stream](const std::string& key, Scheduler* op) { stream << "Key " << key - << ", Creater " << op->Name() << std::endl; + << ", Creator " << op->Name() << std::endl; }); ++count; diff --git a/onnxruntime/core/codegen/passes/weight_layout/weight_layout.h b/onnxruntime/core/codegen/passes/weight_layout/weight_layout.h index af61641a74937..1b45a38e7e24e 100644 --- a/onnxruntime/core/codegen/passes/weight_layout/weight_layout.h +++ b/onnxruntime/core/codegen/passes/weight_layout/weight_layout.h @@ -13,7 +13,7 @@ namespace tvm_codegen { using CoordTransFunc = std::function(const tvm::Array&)>; -// WeightLayout is data layout trasnformer for weight/initializer +// WeightLayout is data layout transformer for weight/initializer class WeightLayout { public: // Static function to return unique string as a key diff --git a/onnxruntime/core/common/logging/logging.cc b/onnxruntime/core/common/logging/logging.cc index ad6f666a2d989..a086c90ea4b14 100644 --- a/onnxruntime/core/common/logging/logging.cc +++ b/onnxruntime/core/common/logging/logging.cc @@ -56,7 +56,7 @@ LoggingManager* LoggingManager::GetDefaultInstance() { return static_cast(DefaultLoggerManagerInstance().load()); } -// GSL_SUPRESS(i.22) is broken. Ignore the warnings for the static local variables that are trivial +// GSL_SUPPRESS(i.22) is broken. Ignore the warnings for the static local variables that are trivial // and should not have any destruction order issues via pragmas instead. // https://developercommunity.visualstudio.com/content/problem/249706/gslsuppress-does-not-work-for-i22-c-core-guideline.html #ifdef _MSC_VER diff --git a/onnxruntime/core/common/status.cc b/onnxruntime/core/common/status.cc index 4ffc7adaac88d..e824a66eaed58 100644 --- a/onnxruntime/core/common/status.cc +++ b/onnxruntime/core/common/status.cc @@ -70,7 +70,7 @@ std::string Status::ToString() const { return result; } -// GSL_SUPRESS(i.22) is broken. Ignore the warnings for the static local variables that are trivial +// GSL_SUPPRESS(i.22) is broken. Ignore the warnings for the static local variables that are trivial // and should not have any destruction order issues via pragmas instead. 
// https://developercommunity.visualstudio.com/content/problem/249706/gslsuppress-does-not-work-for-i22-c-core-guideline.html #ifdef _MSC_VER diff --git a/onnxruntime/core/framework/allocation_planner.cc b/onnxruntime/core/framework/allocation_planner.cc index 7747058f0d0aa..5dca4cf6c165b 100644 --- a/onnxruntime/core/framework/allocation_planner.cc +++ b/onnxruntime/core/framework/allocation_planner.cc @@ -1073,7 +1073,7 @@ class PlannerImpl { #ifdef ORT_ENABLE_STREAM // assume we already have a baseline reuse plan (no memory reuse at all) - // this funciton will optimize the plan by building a reuse plan with stream safety. + // this function will optimize the plan by building a reuse plan with stream safety. Status OptimizeReusePlanForMultiStream() { InlinedHashMap dependent_counter; for (const auto& it : dependence_graph_) { @@ -2012,7 +2012,7 @@ class PlannerImpl { for (auto* output : node->OutputDefs()) { if (output->Exists()) { if (std::find(it->InputDefs().begin(), it->InputDefs().end(), output) != it->InputDefs().end()) { - output_consumed_in_subgraph = false; // output direclty consumed in current graph + output_consumed_in_subgraph = false; // output directly consumed in current graph OrtValueIndex output_arg_idx; ORT_THROW_IF_ERROR(ort_value_name_idx_map_.GetIdx(output->Name(), output_arg_idx)); // there are two cases we need notification: diff --git a/onnxruntime/core/framework/allocation_planner.h b/onnxruntime/core/framework/allocation_planner.h index 10ea5920b8809..aa62f218d9ff6 100644 --- a/onnxruntime/core/framework/allocation_planner.h +++ b/onnxruntime/core/framework/allocation_planner.h @@ -53,7 +53,7 @@ class SequentialPlannerContext : public ISequentialPlannerContext { public: SequentialPlannerContext(ExecutionMode execution_mode, ExecutionOrder execution_order, bool enable_memory_reuse) : execution_mode_(execution_mode), - exection_order_(execution_order), + execution_order_(execution_order), enable_memory_reuse_(enable_memory_reuse) { } @@ -63,13 +63,13 @@ class SequentialPlannerContext : public ISequentialPlannerContext { bool IsParallelExecutionEnabled() const override { return execution_mode_ == ExecutionMode::ORT_PARALLEL; } - ExecutionOrder GetExecutionOrder() const override { return exection_order_; } + ExecutionOrder GetExecutionOrder() const override { return execution_order_; } bool GetEnableMemoryReuse() const override { return enable_memory_reuse_; } private: ExecutionMode execution_mode_ = ExecutionMode::ORT_SEQUENTIAL; - ExecutionOrder exection_order_ = ExecutionOrder::DEFAULT; + ExecutionOrder execution_order_ = ExecutionOrder::DEFAULT; bool enable_memory_reuse_ = true; }; diff --git a/onnxruntime/core/framework/device_stream_collection.cc b/onnxruntime/core/framework/device_stream_collection.cc index 13948289e1c37..8d15e03c2e5ce 100644 --- a/onnxruntime/core/framework/device_stream_collection.cc +++ b/onnxruntime/core/framework/device_stream_collection.cc @@ -93,7 +93,8 @@ class DeviceStreamCollectionImpl { const AllocatorMap& allocators_; bool is_main_graph_ = false; // This is used in ExecutionFrame when memory pattern is enabled, to allocate the peak size memory - // labelled this stream in the current thread, instead of the default stream which will be used in all the threads (thus caused thread safe issue) + // labeled this stream in the current thread, instead of the default stream which will be used in all the threads + // (thus caused thread safe issue) std::unique_ptr root_stream_; OrtDevice root_stream_device_; void 
ReleaseSingleStreamBuffers(); diff --git a/onnxruntime/core/framework/execution_frame.h b/onnxruntime/core/framework/execution_frame.h index 18d210ffd48f7..de571f86f1c77 100644 --- a/onnxruntime/core/framework/execution_frame.h +++ b/onnxruntime/core/framework/execution_frame.h @@ -167,7 +167,7 @@ class ExecutionFrame final : public IExecutionFrame { } // This function try retrieve the inferred shapes for the given NodeArg index. - // If the retrival is sucessful, this function returns true and false otherwise. + // If the retrival is successful, this function returns true and false otherwise. bool TryGetInferredShape(int index, TensorShape& shape) const override; #if !defined(ORT_MINIMAL_BUILD) && defined(ORT_MEMORY_PROFILE) diff --git a/onnxruntime/core/framework/partial_graph_execution_state.cc b/onnxruntime/core/framework/partial_graph_execution_state.cc index a053634adbe35..ce0572927d94a 100644 --- a/onnxruntime/core/framework/partial_graph_execution_state.cc +++ b/onnxruntime/core/framework/partial_graph_execution_state.cc @@ -50,7 +50,7 @@ PartialGraphExecutionState::~PartialGraphExecutionState() { DeviceStreamCollection* PartialGraphExecutionState::GetDeviceStreamCollection(const SessionState& session_state) { if (device_stream_collection_ == nullptr) { device_stream_collection_ = session_state.AcquireDeviceStreamCollection(); - // the life-time of partial graph execution state is in-consistant with session, + // the life-time of partial graph execution state is inconsistent with session, // so we can't make sure it is safe to return the device stream collection to // session when deconstruct partial graph execution state. // so let's always delete the stream collections. diff --git a/onnxruntime/core/framework/sequential_execution_plan.h b/onnxruntime/core/framework/sequential_execution_plan.h index 62c66bc6f336c..d9472e404c0e4 100644 --- a/onnxruntime/core/framework/sequential_execution_plan.h +++ b/onnxruntime/core/framework/sequential_execution_plan.h @@ -106,7 +106,7 @@ struct SequentialExecutionPlan : public ExecutionPlanBase { // types of steps: // 1. Kernel Launch // 2. Activate notification - // 3. Wait on a notificaiton + // 3. Wait on a notification class ExecutionStep { public: ExecutionStep(NodeIndex node_index) : node_index_(node_index) {} @@ -122,7 +122,7 @@ struct SequentialExecutionPlan : public ExecutionPlanBase { protected: NodeIndex node_index_; }; - // LogicStream is a sequence of execution steps that can be executed independetly. + // LogicStream is a sequence of execution steps that can be executed independently. // The steps within a sequence are executed in order, and happened on the same device. struct LogicStream { std::vector> steps_; @@ -160,7 +160,7 @@ struct SequentialExecutionPlan : public ExecutionPlanBase { std::vector notification_owners; // key: notification index. // value: {stream_idx, step_idx} - // giving a notificaiton, we used this map to figure out what is the downstream steps it need to trigger. + // giving a notification, we used this map to figure out what is the downstream steps it need to trigger. 
InlinedHashMap>> downstream_map; size_t num_barriers{0}; diff --git a/onnxruntime/core/framework/sequential_executor.cc b/onnxruntime/core/framework/sequential_executor.cc index a374e381a2b0e..aa762ca32fdb4 100644 --- a/onnxruntime/core/framework/sequential_executor.cc +++ b/onnxruntime/core/framework/sequential_executor.cc @@ -442,7 +442,7 @@ onnxruntime::Status ExecuteKernel(StreamExecutionContext& ctx, if (p_kernel->KernelDef().OpName() == "YieldOp") { // Do not execute YieldOp (it is an no-op anyways). // Decrement the reference count of tensors that are not needed beyond this point. - // REVEIW(codemzs): The current model assumes the intermediate tensors that are exported + // REVIEW(codemzs): The current model assumes the intermediate tensors that are exported // as graph outputs are owned by ORT, the risk of caller freeing the tensor or manipulating tensor // memory lingers while the tensor is used downstream after the export. ctx.RecycleNodeInputs(idx); diff --git a/onnxruntime/core/framework/session_options.h b/onnxruntime/core/framework/session_options.h index 46bfc3630303c..8d4db36106f28 100644 --- a/onnxruntime/core/framework/session_options.h +++ b/onnxruntime/core/framework/session_options.h @@ -62,7 +62,7 @@ enum class ExecutionPriority : int { struct FreeDimensionOverride { std::string dim_identifier; - FreeDimensionOverrideType dim_identifer_type; + FreeDimensionOverrideType dim_identifier_type; int64_t dim_value; }; diff --git a/onnxruntime/core/framework/session_state.cc b/onnxruntime/core/framework/session_state.cc index 42fb7b392283a..a88f36f63639c 100644 --- a/onnxruntime/core/framework/session_state.cc +++ b/onnxruntime/core/framework/session_state.cc @@ -22,9 +22,9 @@ using namespace ::onnxruntime::common; namespace onnxruntime { #ifdef ORT_ENABLE_STREAM -static inline std::string GetWaitKey(const OrtDevice::DeviceType notificaiton_device_type, +static inline std::string GetWaitKey(const OrtDevice::DeviceType notification_device_type, const OrtDevice::DeviceType executor_device_type) { - return std::to_string(notificaiton_device_type) + ":" + std::to_string(executor_device_type); + return std::to_string(notification_device_type) + ":" + std::to_string(executor_device_type); } class StreamCommandHandleRegistryImpl : public IStreamCommandHandleRegistry { diff --git a/onnxruntime/core/framework/sparse_tensor.cc b/onnxruntime/core/framework/sparse_tensor.cc index a3bcea4762d3e..4e40e3dd81ca2 100644 --- a/onnxruntime/core/framework/sparse_tensor.cc +++ b/onnxruntime/core/framework/sparse_tensor.cc @@ -551,7 +551,7 @@ Status SparseTensor::Copy(const IDataTransfer& data_transfer, SparseTensor& dst_ } if (Values().Shape().Size() > 0) { - // This instance may either have a contigious buffer which we can copy in one shot + // This instance may either have a contiguous buffer which we can copy in one shot // or it can point to users buffers, in which case we have to copy each buffer individually // strings can not be memcpyed albeit always on CPU. 
if (p_data_ != nullptr) { @@ -569,7 +569,7 @@ Status SparseTensor::Copy(const IDataTransfer& data_transfer, SparseTensor& dst_ ORT_RETURN_IF_ERROR(data_transfer.CopyTensor(src, dst)); } } else { - // non-contiguos buffer + // non-contiguous buffer if (is_string) { CopyStrings(Values(), result_values); } else { diff --git a/onnxruntime/core/framework/tensorprotoutils.h b/onnxruntime/core/framework/tensorprotoutils.h index aabfc0487f3e0..e5197adcb94ec 100644 --- a/onnxruntime/core/framework/tensorprotoutils.h +++ b/onnxruntime/core/framework/tensorprotoutils.h @@ -151,7 +151,7 @@ common::Status GetExtDataFromTensorProto(const Env& env, const std::filesystem:: // the data location is external. i.e. it does not load the external data. // However if AttributeProto contains SparseTensorProto then it converts the data into dense tensor proto // (including loading external data when applicable). -// model_path is used for contructing full path for external_data +// model_path is used for constructing full path for external_data // tensor_name specifies the name for the new TensorProto TensorProto common::Status ConstantNodeProtoToTensorProto(const ONNX_NAMESPACE::NodeProto& node, const std::filesystem::path& model_path, @@ -165,7 +165,7 @@ common::Status ConstantNodeProtoToTensorProto(const ONNX_NAMESPACE::NodeProto& n // Convert a SparseTensorProto to a dense TensorProto // If the SparseTensorProto contains external data then it loads the data and converts to dense tensor proto // The resulting TensorProto will contain the data as raw data. -// model_path is used for contructing full path for external_data +// model_path is used for constructing full path for external_data common::Status SparseTensorProtoToDenseTensorProto(const ONNX_NAMESPACE::SparseTensorProto& sparse, const std::filesystem::path& model_path, ONNX_NAMESPACE::TensorProto& dense); @@ -174,7 +174,7 @@ common::Status SparseTensorProtoToDenseTensorProto(const ONNX_NAMESPACE::SparseT // Convert a TensorProto to a SparseTensorProto // If the tensorproto contains external data then it loads the data and converts to sparse tensor // The resulting SparseTensorProto will contain the data as raw data -// model_path is used for contructing full path for external_data +// model_path is used for constructing full path for external_data common::Status DenseTensorToSparseTensorProto(const ONNX_NAMESPACE::TensorProto& dense, const std::filesystem::path& model_path, ONNX_NAMESPACE::SparseTensorProto& sparse); diff --git a/onnxruntime/core/framework/utils.h b/onnxruntime/core/framework/utils.h index 17cf9671b70eb..afdb5a2cb27f5 100644 --- a/onnxruntime/core/framework/utils.h +++ b/onnxruntime/core/framework/utils.h @@ -47,7 +47,7 @@ void ConstructStrings(void* p_data, int64_t elements); ///
/// Destroy std::string objects in the contiquous chunk of memory -/// by explicitely invoking ~string(); +/// by explicitly invoking ~string(); /// /// /// diff --git a/onnxruntime/core/mickey/gemm/warp/quantb_meta_loader.h b/onnxruntime/core/mickey/gemm/warp/quantb_meta_loader.h index 4a784a1a49109..79c582279f2c8 100644 --- a/onnxruntime/core/mickey/gemm/warp/quantb_meta_loader.h +++ b/onnxruntime/core/mickey/gemm/warp/quantb_meta_loader.h @@ -37,12 +37,12 @@ void weightsMinuEight2Half(uint32_t const &weights, // // For element 0, 1, 4, 5, we have 0x000?000?, set the high bits // to 0x6400, essentially we set the exponent bits to 25, effective - // exp = 25 - 15 = 10, with explicity hight bit, the value is + // exp = 25 - 15 = 10, with explicitly hight bit, the value is // 2^10 + q_w. // // Similarly for element 2, 3, 6, 7, we have 0x00?000?, set the // high bits to 0x5400, essentially we set the exponent bits to 21, - // effective exp = 21 - 15 = 6, with explicity hight bit, the value + // effective exp = 21 - 15 = 6, with explicitly hight bit, the value // is 2^6 + q_w. // // 1.125 instruction per weight, 9 instructions in total. @@ -86,12 +86,12 @@ void weights2Half([[maybe_unused]] uint32_t const &weights, // // For element 0, 1, 4, 5, we have 0x000?000?, set the high bits // to 0x6400, essentially we set the exponent bits to 25, effective - // exp = 25 - 15 = 10, with explicity hight bit, the value is + // exp = 25 - 15 = 10, with explicitly hight bit, the value is // 2^10 + q_w. // // Similarly for element 2, 3, 6, 7, we have 0x00?000?, set the // high bits to 0x5400, essentially we set the exponent bits to 21, - // effective exp = 21 - 15 = 6, with explicity hight bit, the value + // effective exp = 21 - 15 = 6, with explicitly hight bit, the value // is 2^6 + q_w. // // 1.125 instruction per weight, 9 instructions in total. diff --git a/onnxruntime/core/mlas/lib/convolve.cpp b/onnxruntime/core/mlas/lib/convolve.cpp index 5d2c35fbfb406..ec79641559c6b 100644 --- a/onnxruntime/core/mlas/lib/convolve.cpp +++ b/onnxruntime/core/mlas/lib/convolve.cpp @@ -61,7 +61,7 @@ Routine Description: This implementation supports sampling a portion of the convolution patches. This avoids the need to allocate very large buffers to store - all of the convolution patches at once, when the underyling GEMM + all of the convolution patches at once, when the underlying GEMM implementation will already break up the operation into panels. Multiple threads can also be used to process different portions of the image. @@ -267,7 +267,7 @@ Routine Description: This implementation supports sampling a portion of the convolution patches. This avoids the need to allocate very large buffers to store - all of the convolution patches at once, when the underyling GEMM + all of the convolution patches at once, when the underlying GEMM implementation will already break up the operation into panels. Multiple threads can also be used to process different portions of the image. 
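(Editorial aside, not part of the patch.) The quantb_meta_loader.h comments above describe packing 4-bit quantized weights into fp16 lanes by presetting the exponent field: OR-ing a weight q into 0x6400 yields the half value 2^10 + q, and OR-ing q shifted left by 4 into 0x5400 yields 2^6 + q. Below is a minimal sketch, assuming only Python and numpy, that verifies this arithmetic:

```python
import numpy as np

# Illustrative check only (not code from this patch): confirm the fp16
# exponent-preset trick described in the quantb_meta_loader.h comments.
def half_from_bits(bits: int) -> float:
    """Reinterpret a 16-bit pattern as an IEEE half-precision value."""
    return float(np.array([bits], dtype=np.uint16).view(np.float16)[0])

for q in range(16):                          # a 4-bit quantized weight, 0..15
    lo = half_from_bits(0x6400 | q)          # q in mantissa bits 0-3 -> 2^10 + q
    hi = half_from_bits(0x5400 | (q << 4))   # q in mantissa bits 4-7 -> 2^6 + q
    assert lo == 1024 + q and hi == 64 + q
print("exponent-preset trick holds for all 4-bit weights")
```

The kernel can then subtract the 2^10 / 2^6 offsets (and the zero point) in half precision, which is presumably why the comment counts roughly one instruction per converted weight.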
diff --git a/onnxruntime/core/optimizer/attention_fusion_helper.h b/onnxruntime/core/optimizer/attention_fusion_helper.h index ca744adddbeec..267a82b72670c 100644 --- a/onnxruntime/core/optimizer/attention_fusion_helper.h +++ b/onnxruntime/core/optimizer/attention_fusion_helper.h @@ -1118,8 +1118,8 @@ bool CheckNodesInPathV(const Graph& graph, const Node& reshape, const Node& tran head_size = v_reshape_shape[3]; // Check reshape for attention output has shape input (0, 0, -1) or (0, 0, N*H) - // In DistilBert, the reshape after qkv paths can not be fused during reshape fusion, so we do not have the correspondig - // initializer. We need to get the shape information from the input of concat. + // In DistilBert, the reshape after qkv paths can not be fused during reshape fusion, so we do not have the + // corresponding initializer. We need to get the shape information from the input of concat. InlinedVector reshape_shape; if (!optimizer_utils::AppendTensorFromInitializer(graph, *(reshape.InputDefs()[1]), reshape_shape)) { if (CheckDistilBertReshapeShape(graph, reshape, hidden_size, record_node_idx, logger)) { diff --git a/onnxruntime/core/optimizer/free_dim_override_transformer.cc b/onnxruntime/core/optimizer/free_dim_override_transformer.cc index 0d162b5238b18..bce73a0dcec45 100644 --- a/onnxruntime/core/optimizer/free_dim_override_transformer.cc +++ b/onnxruntime/core/optimizer/free_dim_override_transformer.cc @@ -22,9 +22,9 @@ FreeDimensionOverrideTransformer::FreeDimensionOverrideTransformer(gsl::span floating point casts could be optimised but this is left to an explicit cast optimisation pass. + // For instance, certain integral -> floating point casts could be optimized but + // this is left to an explicit cast optimisation pass. // The comparison with "InsertedPrecisionFreeCast_" reflects cast nodes that are inserted by InsertCastTransformer. - // Such casts should not be considered as loss of precision - the inserted upcasts (f16 -> f32) and downcasts (f32 -> f16) are inserted to support kernels when on a CPU EP without F16 support. + // Such casts should not be considered as loss of precision - the inserted upcasts (f16 -> f32) and + // downcasts (f32 -> f16) are inserted to support kernels when on a CPU EP without F16 support. auto src_type_group = GetTypeGroup(src_type); auto dst_type_group = GetTypeGroup(dst_type); if (Unknown == src_type_group || Unknown == dst_type_group) { diff --git a/onnxruntime/core/optimizer/transpose_optimization/onnx_transpose_optimization.cc b/onnxruntime/core/optimizer/transpose_optimization/onnx_transpose_optimization.cc index d4ed9c4e26cc6..bdb6a44bddaaf 100644 --- a/onnxruntime/core/optimizer/transpose_optimization/onnx_transpose_optimization.cc +++ b/onnxruntime/core/optimizer/transpose_optimization/onnx_transpose_optimization.cc @@ -1258,7 +1258,7 @@ static int EstimateTransposeValueCost(const api::GraphRef& graph, std::string_vi std::unique_ptr producer_node = graph.GetNodeProducingOutput(input); if (producer_node != nullptr) { - // this handles cancelling out a Transpose or Squeeze added to a shared initializer that was updated + // this handles canceling out a Transpose or Squeeze added to a shared initializer that was updated // by TransposeInputImpl Case 1 or UnqueezeInput Case 1. 
// - if a shared initializer is not broadcast, we have -> Transpose -> DQ // - if a shared initializer is broadcast, we have -> Transpose -> Squeeze -> DQ and need @@ -1992,7 +1992,7 @@ static bool HandleTile(HandlerArgs& args) { constexpr HandlerInfo tile_handler = {&FirstInput, &HandleTile}; -// Helper to remove cancelling Transpose -> Transpose or +// Helper to remove canceling Transpose -> Transpose or // Transpose -> Reshape nodes. static void RemoveCancelingTransposeNodes(HandlerArgs& args) { // Input to 1st transpose diff --git a/onnxruntime/core/providers/acl/nn/batch_norm.cc b/onnxruntime/core/providers/acl/nn/batch_norm.cc index eb6a10074f1db..be0e57c5c0543 100755 --- a/onnxruntime/core/providers/acl/nn/batch_norm.cc +++ b/onnxruntime/core/providers/acl/nn/batch_norm.cc @@ -118,7 +118,7 @@ Status BatchNorm::Compute(OpKernelContext* context) const { ACLImportMemory(tbatch_norm.b->allocator(), (void*)b_data, B->Shape().Size() * 4); ACLImportMemory(tbatch_norm.scale->allocator(), (void*)scale_data, S->Shape().Size() * 4); - // allocate space for input tensor to accomodate paddings and strides + // allocate space for input tensor to accommodate paddings and strides tbatch_norm.in->allocator()->allocate(); tbatch_norm.layer = std::move(layer); diff --git a/onnxruntime/core/providers/acl/nn/pool.cc b/onnxruntime/core/providers/acl/nn/pool.cc index 8fbcba3ed87a7..01d9bc0302c3a 100644 --- a/onnxruntime/core/providers/acl/nn/pool.cc +++ b/onnxruntime/core/providers/acl/nn/pool.cc @@ -121,7 +121,7 @@ ACLNEPool PoolOperation(onnxruntime::OpKernelContext* context, layer->configure(tpool.in.get(), tpool.out.get(), pool_info); } - // allocate space for input tensor to accomodate paddings and strides + // allocate space for input tensor to accommodate paddings and strides tpool.in->allocator()->allocate(); tpool.layer = std::move(layer); diff --git a/onnxruntime/core/providers/armnn/activation/activations.cc b/onnxruntime/core/providers/armnn/activation/activations.cc index 93017c26271f7..7ab7a14f7e206 100644 --- a/onnxruntime/core/providers/armnn/activation/activations.cc +++ b/onnxruntime/core/providers/armnn/activation/activations.cc @@ -56,7 +56,7 @@ Status Relu::Compute(OpKernelContext* context) const { armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32); activation->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); - // Optimise ArmNN network + // Optimize ArmNN network armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*myNetwork, {armnn::Compute::CpuAcc}, Relu::run->GetDeviceSpec()); if (optNet == nullptr) { diff --git a/onnxruntime/core/providers/armnn/math/gemm.h b/onnxruntime/core/providers/armnn/math/gemm.h index 4f77c4afb725a..039a9c3b75adb 100644 --- a/onnxruntime/core/providers/armnn/math/gemm.h +++ b/onnxruntime/core/providers/armnn/math/gemm.h @@ -130,7 +130,7 @@ class Gemm : public onnxruntime::Gemm { armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32); fc_armnn->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); - // Optimise ArmNN network + // Optimize ArmNN network armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*myNetwork, {armnn::Compute::CpuAcc}, Gemm::run->GetDeviceSpec()); if (optNet == nullptr) { diff --git a/onnxruntime/core/providers/armnn/nn/batch_norm.cc b/onnxruntime/core/providers/armnn/nn/batch_norm.cc index e9d8e6fb47852..9a7821d81bdb1 100755 --- a/onnxruntime/core/providers/armnn/nn/batch_norm.cc +++ b/onnxruntime/core/providers/armnn/nn/batch_norm.cc @@ -89,7 +89,7 @@ Status 
BatchNorm::Compute(OpKernelContext* context) const { armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32); layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); - // Optimise ArmNN network + // Optimize ArmNN network armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*myNetwork, {armnn::Compute::CpuAcc}, BatchNorm::run->GetDeviceSpec()); if (optNet == nullptr) { diff --git a/onnxruntime/core/providers/armnn/nn/conv.cc b/onnxruntime/core/providers/armnn/nn/conv.cc index 674e927ffc324..db261e67ecd00 100644 --- a/onnxruntime/core/providers/armnn/nn/conv.cc +++ b/onnxruntime/core/providers/armnn/nn/conv.cc @@ -266,7 +266,7 @@ Status Conv::Compute(OpKernelContext* context) const { activation->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); } - // Optimise ArmNN network + // Optimize ArmNN network armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*myNetwork, {armnn::Compute::CpuAcc}, Conv::run->GetDeviceSpec()); if (optNet == nullptr) { diff --git a/onnxruntime/core/providers/armnn/nn/pool.cc b/onnxruntime/core/providers/armnn/nn/pool.cc index c4eeb17779fcb..9d25b4eed2db4 100644 --- a/onnxruntime/core/providers/armnn/nn/pool.cc +++ b/onnxruntime/core/providers/armnn/nn/pool.cc @@ -161,7 +161,7 @@ Status Pool::Compute(OpKernelContext* context) const { armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32); pool_armnn->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); - // Optimise ArmNN network + // Optimize ArmNN network armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*myNetwork, {armnn::Compute::CpuAcc}, Pool::run->GetDeviceSpec()); if (optNet == nullptr) { @@ -250,7 +250,7 @@ Status MaxPoolV8::Compute(OpKernelContext* context) const { armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Float32); pool_armnn->GetOutputSlot(0).SetTensorInfo(outputTensorInfo); - // Optimise ArmNN network + // Optimize ArmNN network armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*myNetwork, {armnn::Compute::CpuAcc}, MaxPoolV8::run->GetDeviceSpec()); if (optNet == nullptr) { diff --git a/onnxruntime/core/providers/cpu/math/einsum_utils/einsum_auxiliary_ops.cc b/onnxruntime/core/providers/cpu/math/einsum_utils/einsum_auxiliary_ops.cc index 91e7955aa9fbe..a602a85fc2737 100644 --- a/onnxruntime/core/providers/cpu/math/einsum_utils/einsum_auxiliary_ops.cc +++ b/onnxruntime/core/providers/cpu/math/einsum_utils/einsum_auxiliary_ops.cc @@ -290,9 +290,9 @@ std::unique_ptr Transpose(const Tensor& input, const TensorShape& input_ // and it will de-allocate the memory for this intermediate tensor when it goes out of scope std::unique_ptr output = std::make_unique(input.DataType(), output_dims, allocator); - TensorShape overriden_shape(input_shape_override); + TensorShape overridden_shape(input_shape_override); - auto status = device_transpose_func(permutation, input, *output, &overriden_shape, einsum_cuda_assets); + auto status = device_transpose_func(permutation, input, *output, &overridden_shape, einsum_cuda_assets); if (!status.IsOK()) { ORT_THROW(ONNXRUNTIME, FAIL, "Einsum op: Transpose failed: ", status.ErrorMessage()); diff --git a/onnxruntime/core/providers/cpu/math/einsum_utils/einsum_typed_compute_processor.cc b/onnxruntime/core/providers/cpu/math/einsum_utils/einsum_typed_compute_processor.cc index a362bb06220d8..343ed485a150a 100644 --- a/onnxruntime/core/providers/cpu/math/einsum_utils/einsum_typed_compute_processor.cc +++ b/onnxruntime/core/providers/cpu/math/einsum_utils/einsum_typed_compute_processor.cc @@ -209,7 +209,7 @@ std::unique_ptr 
EinsumTypedComputeProcessor::PairwiseOperandProcess(c if (current_left && IsTransposeReshapeForEinsum(left_permutation, current_left->Shape().GetDims(), reshaped_dims)) { - // This can be done because curent_* tensors (if they exist) and output tensors are + // This can be done because current_* tensors (if they exist) and output tensors are // intermediate tensors and cannot be input tensors to the Einsum node itself // (which are immutable). // Covered by ExplicitEinsumAsTensorContractionReshapeLeft. diff --git a/onnxruntime/core/providers/cpu/object_detection/roialign.cc b/onnxruntime/core/providers/cpu/object_detection/roialign.cc index ac1bb111494fd..ead2ccaef002e 100644 --- a/onnxruntime/core/providers/cpu/object_detection/roialign.cc +++ b/onnxruntime/core/providers/cpu/object_detection/roialign.cc @@ -135,7 +135,7 @@ static void PreCalcForBilinearInterpolate(const int64_t height, const int64_t wi T w3 = ly * hx; T w4 = ly * lx; - // save weights and indeces + // save weights and indices PreCalc pc; pc.pos1 = y_low * width + x_low; pc.pos2 = y_low * width + x_high; diff --git a/onnxruntime/core/providers/cpu/sequence/sequence_ops.cc b/onnxruntime/core/providers/cpu/sequence/sequence_ops.cc index 2913f4ac32b6e..7a27b04ece7cf 100644 --- a/onnxruntime/core/providers/cpu/sequence/sequence_ops.cc +++ b/onnxruntime/core/providers/cpu/sequence/sequence_ops.cc @@ -317,7 +317,7 @@ Status SequenceConstruct::Compute(OpKernelContext* context) const { const auto* X = context->Input(input_idx); if (input_idx > 0 && X->DataType() != first_dtype) { return ORT_MAKE_STATUS(ONNXRUNTIME, INVALID_ARGUMENT, - "Violation of the requirment that all input tensors must have the same data type."); + "Violation of the requirement that all input tensors must have the same data type."); } } diff --git a/onnxruntime/core/providers/cpu/tensor/unique.cc b/onnxruntime/core/providers/cpu/tensor/unique.cc index ab99d87da83fd..92c163a0f08a1 100644 --- a/onnxruntime/core/providers/cpu/tensor/unique.cc +++ b/onnxruntime/core/providers/cpu/tensor/unique.cc @@ -51,7 +51,7 @@ ONNX_OPERATOR_SET_SCHEMA( 1, "indices", "A 1-D INT64 tensor " - "containing indices of 'Y' elements' first occurance in 'X'. " + "containing indices of 'Y' elements' first occurrence in 'X'. " "When 'axis' is provided, it contains indices to subtensors in input 'X' on the 'axis'. " "When 'axis' is not provided, it contains indices to values in the flattened input tensor. ", "tensor(int64)", diff --git a/onnxruntime/core/providers/cuda/cuda_allocator.cc b/onnxruntime/core/providers/cuda/cuda_allocator.cc index 314aa1062f1b0..2189af8e0ee2d 100644 --- a/onnxruntime/core/providers/cuda/cuda_allocator.cc +++ b/onnxruntime/core/providers/cuda/cuda_allocator.cc @@ -60,7 +60,7 @@ void* CUDAExternalAllocator::Alloc(size_t size) { if (size > 0) { p = alloc_(size); - // review(codemzs): ORT_ENFORCE does not seem appropiate. + // review(codemzs): ORT_ENFORCE does not seem appropriate. 
ORT_ENFORCE(p != nullptr); } diff --git a/onnxruntime/core/providers/cuda/cuda_stream_handle.cc b/onnxruntime/core/providers/cuda/cuda_stream_handle.cc index 58e57572131b1..14b75d2383b58 100644 --- a/onnxruntime/core/providers/cuda/cuda_stream_handle.cc +++ b/onnxruntime/core/providers/cuda/cuda_stream_handle.cc @@ -179,7 +179,7 @@ Status CudaStream::CleanUpOnRunEnd() { } void* CudaStream::GetResource(int version, int id) const { - ORT_ENFORCE(version <= ORT_CUDA_RESOUCE_VERSION, "resource version unsupported!"); + ORT_ENFORCE(version <= ORT_CUDA_RESOURCE_VERSION, "resource version unsupported!"); void* resource{}; switch (id) { case CudaResource::cuda_stream_t: diff --git a/onnxruntime/core/providers/cuda/math/softmax_blockwise_impl.cuh b/onnxruntime/core/providers/cuda/math/softmax_blockwise_impl.cuh index 6cb65ea8e739c..8bb87035cdc6d 100644 --- a/onnxruntime/core/providers/cuda/math/softmax_blockwise_impl.cuh +++ b/onnxruntime/core/providers/cuda/math/softmax_blockwise_impl.cuh @@ -30,7 +30,7 @@ dim3 SoftMax_getBlockSize(int ILP, uint64_t dim_size) { uint64_t max_block_size = std::min(dim_size / ILP, static_cast(max_threads)); // In the vectorized case we want to trade off allowing more of the buffers to be accessed - // in a vectorized way against wanting a larger block size to get better utilisation. + // in a vectorized way against wanting a larger block size to get better utilization. // In general with ILP you can have (ILP-1)/ILP of the buffer accessed vectorised, at the risk // of having a very small block size. We choose to keep >= 1/2 of the buffer vectorised while // allowing a larger block size. diff --git a/onnxruntime/core/providers/cuda/nn/conv.cc b/onnxruntime/core/providers/cuda/nn/conv.cc index e05786248cbcf..764feadcf4cb3 100644 --- a/onnxruntime/core/providers/cuda/nn/conv.cc +++ b/onnxruntime/core/providers/cuda/nn/conv.cc @@ -15,7 +15,7 @@ namespace onnxruntime { namespace cuda { // Op Set 11 for Conv only update document to clearify default dilations and strides value. -// which are already convered by op set 11 cpu versoin, so simply add declaration. +// which are already covered by op set 11 cpu version, so simply add declaration. #define REGISTER_KERNEL_TYPED(T, DOMAIN, NHWC) \ ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \ Conv, \ @@ -269,7 +269,7 @@ Status Conv::UpdateState(OpKernelContext* context, bool bias_expected) // especially for EXHAUSTIVE algo search which may result in a better algo selection. // ORTModule uses different algo search options (HEURISTIC, and use max workspace size) compared to // inference build (EXHAUSTIVE, 32M workspace size). We observed better perf when we pad input shape - // [N,C,D] to [N,C,1,D], expecially on A100, and especially for ConvGrad. + // [N,C,D] to [N,C,1,D], especially on A100, and especially for ConvGrad. // PyTorch also pads to [N,C,1,D]. For inference build, we still pad it to [N, C, D, 1] as this seems // to be the sweet spot for all algo search options: EXHAUSTIVE, HEURISTIC, and DEFAULT. // See PR #7348 and #7702 for more context.
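For reference, the [N,C,D] -> [N,C,1,D] padding described in the conv.cc comment above amounts to a plain reshape before the tensor reaches a 2-D convolution path; a minimal NumPy sketch, with arbitrary sizes, illustrative only and not part of this patch or of onnxruntime:

import numpy as np

x = np.random.rand(8, 16, 128).astype(np.float32)  # 1-D conv input, layout [N, C, D]

# Layout mentioned above for training/ConvGrad: [N, C, 1, D]
x_train = x.reshape(8, 16, 1, 128)

# Layout mentioned above for the inference build: [N, C, D, 1]
x_infer = x.reshape(8, 16, 128, 1)

print(x_train.shape, x_infer.shape)  # (8, 16, 1, 128) (8, 16, 128, 1)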
diff --git a/onnxruntime/core/providers/cuda/object_detection/roialign_impl.cu b/onnxruntime/core/providers/cuda/object_detection/roialign_impl.cu index 537ad0a8b9efe..10053c630ab66 100644 --- a/onnxruntime/core/providers/cuda/object_detection/roialign_impl.cu +++ b/onnxruntime/core/providers/cuda/object_detection/roialign_impl.cu @@ -20,7 +20,7 @@ namespace onnxruntime { namespace cuda { - + template __device__ T bilinear_interpolate( const T* bottom_data, @@ -73,8 +73,8 @@ __device__ T bilinear_interpolate( T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; T val = is_mode_avg - ? (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4) // mode Avg - : max(max(max(w1 * v1, w2 * v2), w3 * v3), w4 * v4); // mode Max + ? (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4) // mode Avg + : max(max(max(w1 * v1, w2 * v2), w3 * v3), w4 * v4); // mode Max return val; } @@ -116,7 +116,7 @@ __global__ void RoIAlignForward( T roi_width = roi_end_w - roi_start_w; T roi_height = roi_end_h - roi_start_h; - if (!half_pixel) { // backward compatiblity + if (!half_pixel) { // backward compatibility // Force malformed ROIs to be 1x1 roi_width = max(roi_width, (T)1.); roi_height = max(roi_height, (T)1.); @@ -129,29 +129,29 @@ __global__ void RoIAlignForward( // We use roi_bin_grid to sample the grid and mimic integral int roi_bin_grid_h = (sampling_ratio > 0) - ? sampling_ratio - : _Ceil(roi_height / pooled_height); // e.g., = 2 + ? sampling_ratio + : _Ceil(roi_height / pooled_height); // e.g., = 2 int roi_bin_grid_w = (sampling_ratio > 0) ? sampling_ratio : _Ceil(roi_width / pooled_width); // We do average (integral) pooling inside a bin - const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 T output_val = 0.; bool max_flag = false; - for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 { const T y = roi_start_h + ph * bin_size_h + - static_cast(iy + .5f) * bin_size_h / - static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 + static_cast(iy + .5f) * bin_size_h / + static_cast(roi_bin_grid_h); // e.g., 0.5, 1.5 for (int ix = 0; ix < roi_bin_grid_w; ix++) { const T x = roi_start_w + pw * bin_size_w + - static_cast(ix + .5f) * bin_size_w / - static_cast(roi_bin_grid_w); + static_cast(ix + .5f) * bin_size_w / + static_cast(roi_bin_grid_w); T val = bilinear_interpolate( offset_bottom_data, height, width, y, x, is_mode_avg, index); - + if (is_mode_avg) { output_val += val; } else { @@ -174,24 +174,24 @@ __global__ void RoIAlignForward( template void RoiAlignImpl( - cudaStream_t stream, - const int64_t nthreads, - const T* bottom_data, - const T spatial_scale, - const int64_t channels, - const int64_t height, - const int64_t width, - const int64_t pooled_height, - const int64_t pooled_width, - const int64_t sampling_ratio, - const T* bottom_rois, - int64_t roi_cols, - T* top_data, - const bool is_mode_avg, - const bool half_pixel, - const int64_t* batch_indices_ptr) { - int blocksPerGrid = (int)(ceil(static_cast(nthreads) / GridDim::maxThreadsPerBlock)); - RoIAlignForward<<>>( + cudaStream_t stream, + const int64_t nthreads, + const T* bottom_data, + const T spatial_scale, + const int64_t channels, + const int64_t height, + const int64_t width, + const int64_t pooled_height, + const int64_t pooled_width, + const int64_t sampling_ratio, + const T* bottom_rois, + int64_t roi_cols, + T* top_data, + const bool is_mode_avg, + const bool half_pixel, + const int64_t* batch_indices_ptr) { + int 
blocksPerGrid = (int)(ceil(static_cast(nthreads) / GridDim::maxThreadsPerBlock)); + RoIAlignForward<<>>( nthreads, bottom_data, spatial_scale, @@ -206,30 +206,30 @@ void RoiAlignImpl( top_data, is_mode_avg, half_pixel, - batch_indices_ptr); + batch_indices_ptr); } -#define SPECIALIZED_IMPL(T) \ - template void RoiAlignImpl( \ - cudaStream_t stream, \ - const int64_t nthreads, \ - const T* bottom_data, \ - const T spatial_scale, \ - const int64_t channels, \ - const int64_t height, \ - const int64_t width, \ - const int64_t pooled_height, \ - const int64_t pooled_width, \ - const int64_t sampling_ratio, \ - const T* bottom_rois, \ - int64_t roi_cols, \ - T* top_data, \ - const bool is_mode_avg, \ - const bool half_pixel, \ - const int64_t* batch_indices_ptr); +#define SPECIALIZED_IMPL(T) \ + template void RoiAlignImpl( \ + cudaStream_t stream, \ + const int64_t nthreads, \ + const T* bottom_data, \ + const T spatial_scale, \ + const int64_t channels, \ + const int64_t height, \ + const int64_t width, \ + const int64_t pooled_height, \ + const int64_t pooled_width, \ + const int64_t sampling_ratio, \ + const T* bottom_rois, \ + int64_t roi_cols, \ + T* top_data, \ + const bool is_mode_avg, \ + const bool half_pixel, \ + const int64_t* batch_indices_ptr); SPECIALIZED_IMPL(float) SPECIALIZED_IMPL(double) - -} // namespace cuda -} // namespace onnxruntime + +} // namespace cuda +} // namespace onnxruntime diff --git a/onnxruntime/core/providers/cuda/reduction/reduction_ops.cc b/onnxruntime/core/providers/cuda/reduction/reduction_ops.cc index bc78e577c5052..c921339ee6f33 100644 --- a/onnxruntime/core/providers/cuda/reduction/reduction_ops.cc +++ b/onnxruntime/core/providers/cuda/reduction/reduction_ops.cc @@ -115,7 +115,7 @@ Status ReduceKernel::ReduceKernelShared( CUDNN_RETURN_IF_ERROR(cudnnGetReductionIndicesSize(cudnn_handle, reduce_desc, input_tensor, output_tensor, &indices_bytes)); auto indices_cuda = GetScratchBuffer(indices_bytes, stream); - // need to allocate a separate buffer for ArgMin/ArgMax comparsion output + // need to allocate a separate buffer for ArgMin/ArgMax comparison output auto output_count = output_shape.Size(); if (ReduceTensorIndices == CUDNN_REDUCE_TENSOR_NO_INDICES) { diff --git a/onnxruntime/core/providers/cuda/tensor/resize_impl.cu b/onnxruntime/core/providers/cuda/tensor/resize_impl.cu index e788f24052985..a96d4c82a7fdc 100644 --- a/onnxruntime/core/providers/cuda/tensor/resize_impl.cu +++ b/onnxruntime/core/providers/cuda/tensor/resize_impl.cu @@ -234,15 +234,15 @@ __global__ void _ResizeNearestKernel( int output_index = static_cast(id); int input_index = 0; - int extrapolation_occured = 0; + int extrapolation_occurred = 0; for (int axis = 0; axis < rank; ++axis) { int dim = 0; output_div_pitches[axis].divmod(output_index, dim, output_index); const NearestMappingInfo& mi = dims_mapping[prefix_dim_sum[axis] + dim]; - extrapolation_occured += mi.extrapolate_; + extrapolation_occurred += mi.extrapolate_; input_index += input_strides[axis] * mi.origin_; } - output_data[id] = extrapolation_occured ? extrapolation_value : input_data[input_index]; + output_data[id] = extrapolation_occurred ? 
extrapolation_value : input_data[input_index]; } struct LinearMappingInfo { diff --git a/onnxruntime/core/providers/cuda/tensor/transpose_impl.cu b/onnxruntime/core/providers/cuda/tensor/transpose_impl.cu index 6344845359b32..602514d1c8227 100644 --- a/onnxruntime/core/providers/cuda/tensor/transpose_impl.cu +++ b/onnxruntime/core/providers/cuda/tensor/transpose_impl.cu @@ -145,7 +145,7 @@ bool CanDoTranspose4DParallelizeMultipleElementsPerThreadInInnermostDim(const cu (input_dims[3] % num_elements_per_thread) == 0 && input_dims[1] <= prop.maxGridSize[1] && input_dims[0] <= prop.maxGridSize[2]) { - // There are 2 constrains when luanching the kernels + // There are 2 constrains when launching the kernels // 1. block_size_x * block_size_y <= prop.maxThreadsPerBlock // 2. block_size_y * num_block_ext >= input_dims[2] int64_t block_size_x = input_dims[3] / num_elements_per_thread; @@ -261,7 +261,7 @@ bool CanDoTranspose4DParallelizeOneElementPerThread(const cudaDeviceProp& prop, if (input_dims[3] <= prop.maxThreadsPerBlock && input_dims[1] <= prop.maxGridSize[1] && input_dims[0] <= prop.maxGridSize[2]) { - // There are 2 constrains when luanching the kernels + // There are 2 constrains when launching the kernels // 1. block_size_x * block_size_y <= prop.maxThreadsPerBlock // 2. block_size_y * num_block_ext >= input_dims[2] int64_t block_size_x = input_dims[3]; diff --git a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorFusedMatMul.cpp b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorFusedMatMul.cpp index 0bc543c56f7d1..a0c9289a87156 100644 --- a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorFusedMatMul.cpp +++ b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorFusedMatMul.cpp @@ -44,7 +44,7 @@ class DmlOperatorFusedMatMul : public DmlOperator // At this point, we have manipulated input/output shapes and strides and // we do not care about actual input shapes present in the model (.onnx file). - // Create the TensorDesc with the manipulated input shapes becuase we don't want incorrect + // Create the TensorDesc with the manipulated input shapes because we don't want incorrect // broadcasting to be happen inside TensorDesc constructor. std::vector> inputIndices = { 0, 1, std::nullopt }; gsl::span inputShapes[2] = {sizesA, sizesB}; diff --git a/onnxruntime/core/providers/dnnl/dnnl_execution_provider.cc b/onnxruntime/core/providers/dnnl/dnnl_execution_provider.cc index 3271dab13f675..ffda84921a3ee 100644 --- a/onnxruntime/core/providers/dnnl/dnnl_execution_provider.cc +++ b/onnxruntime/core/providers/dnnl/dnnl_execution_provider.cc @@ -344,7 +344,7 @@ Status DnnlExecutionProvider::Compile(const std::vector& fuse auto input_tensor = ctx.GetInput(i); auto tensor_info = input_tensor.GetTensorTypeAndShapeInfo(); auto shape = tensor_info.GetShape(); - // dnnl expectes non-const data + // dnnl expects non-const data void* inputBuffer = const_cast(input_tensor.GetTensorRawData()); inputs.emplace( input_name, diff --git a/onnxruntime/core/providers/dnnl/dnnl_node_capability.cc b/onnxruntime/core/providers/dnnl/dnnl_node_capability.cc index 5db52f29a93cf..01f44e91fd49c 100644 --- a/onnxruntime/core/providers/dnnl/dnnl_node_capability.cc +++ b/onnxruntime/core/providers/dnnl/dnnl_node_capability.cc @@ -431,7 +431,7 @@ bool DnnlMatMulIntegerNodeCapability::IsDimensionSupported(const Node* node, con } } - // if shape nullptr, not enough information to reject it. 
attempt to run it (no gaurantee) + // if shape nullptr, not enough information to reject it. attempt to run it (no guarantee) if (node_inputs[0]->Shape() == nullptr || node_inputs[1]->Shape() == nullptr) { return true; } @@ -465,7 +465,7 @@ bool DnnlSumNodeCapability::Supported(const Node* node, const GraphViewer& graph } // OneDNN version of Sum does not support Numpy style broadcasting. -// If the dimentions of all inputs do not match return false +// If the dimensions of all inputs do not match return false bool DnnlSumNodeCapability::IsDimensionSupported(const Node* node) const { auto node_inputs = node->InputDefs(); // find first non-null shape @@ -615,7 +615,7 @@ bool DnnlReshapeNodeCapability::Supported(const Node* node, const GraphViewer& g } bool DnnlReshapeNodeCapability::IsDimensionSupported(const Node* node) const { auto node_inputs = node->InputDefs(); - // We can not reshape a one dimentional tensor to a scalar output + // We can not reshape a one dimensional tensor to a scalar output if (node_inputs[1]->Shape() != nullptr && node_inputs[1]->Shape()->dim_size() == 1 && node_inputs[1]->Shape()->dim(0).dim_value() == 0) { diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_conv.h b/onnxruntime/core/providers/dnnl/subgraph/dnnl_conv.h index 831b10c3e147f..1af9e503e7816 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_conv.h +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_conv.h @@ -32,9 +32,9 @@ class DnnlConv { private: /* - * Return the infered padding. + * Return the inferred padding. * - * The padding will be based on the specified padding or will infered based on the + * The padding will be based on the specified padding or will be inferred based on the * Onnx 'auto_pad' attributes. * * This will return the padding in the format specified in the Onnx specification.
@@ -47,9 +47,9 @@ class DnnlConv { const dnnl::memory::dims& dilations, const std::vector& kernel_shape, const dnnl::memory::dims& strides); - /* Get the padding left values from the infered pads */ + /* Get the padding left values from the inferred pads */ dnnl::memory::dims GetPaddingLeft(const std::vector& onnx_padding, ConvShape shape); - /* Get the padding right values from the infered pads */ + /* Get the padding right values from the inferred pads */ dnnl::memory::dims GetPaddingRight(const std::vector& onnx_padding, ConvShape shape); /* diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_convgrad.cc b/onnxruntime/core/providers/dnnl/subgraph/dnnl_convgrad.cc index 21218e24c17d6..e05693f3e5f2e 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_convgrad.cc +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_convgrad.cc @@ -40,7 +40,7 @@ ConvGrad: (According to OnnxRuntime discovered using code inspection and Onnx do Attributes (auto_pad, dilations, group, kernel_shap, pads, and strides) should be the same as the forward pass Conv operator -To acheive Everything specified in the OnnxRuntime ConvGrad we must use both: +To achieve Everything specified in the OnnxRuntime ConvGrad we must use both: 1) dnnl::convolution_backward_data - used to calculate (dX) diff_src 2) dnnl::convolution_backward_weights - used to calculate (dW) diff_weights and (dB) diff_bias */ diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_convgrad.h b/onnxruntime/core/providers/dnnl/subgraph/dnnl_convgrad.h index 3a27788745ef0..c45c85859c25e 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_convgrad.h +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_convgrad.h @@ -39,9 +39,9 @@ class DnnlConvGrad { std::vector GetKernelShape(DnnlNode& node); /* Get the 'pads' attribute */ dnnl::memory::dims GetPads(DnnlNode& node, ConvShape shape); - /* Get the padding left values from the infered pads */ + /* Get the padding left values from the inferred pads */ dnnl::memory::dims GetPaddingLeft(const std::vector& onnx_padding, ConvShape shape); - /* Get the padding right values from the infered pads */ + /* Get the padding right values from the inferred pads */ dnnl::memory::dims GetPaddingRight(const std::vector& onnx_padding, ConvShape shape); /* * Get the 'dilations' attribute. 
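For reference, the inferred padding that the GetPaddingLeft/GetPaddingRight helpers documented above split apart follows the usual ONNX auto_pad convention; a minimal Python sketch with a hypothetical helper name, not the DnnlConv/DnnlConvGrad implementation:

def infer_same_padding(in_dim, stride, kernel, dilation=1, upper=True):
    """Return (pad_begin, pad_end) for ONNX SAME_UPPER / SAME_LOWER auto_pad."""
    eff_kernel = (kernel - 1) * dilation + 1
    out_dim = (in_dim + stride - 1) // stride            # ceil(in_dim / stride)
    total = max((out_dim - 1) * stride + eff_kernel - in_dim, 0)
    small, big = total // 2, total - total // 2
    # SAME_UPPER puts the extra padding at the end, SAME_LOWER at the beginning.
    return (small, big) if upper else (big, small)

print(infer_same_padding(7, 2, 3))  # (1, 1)
print(infer_same_padding(8, 2, 3))  # (0, 1) for SAME_UPPER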
diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_dequantizelinear.cc b/onnxruntime/core/providers/dnnl/subgraph/dnnl_dequantizelinear.cc index 074df058806e5..ac668aad1bb4a 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_dequantizelinear.cc +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_dequantizelinear.cc @@ -68,7 +68,7 @@ void DnnlDequantizeLinear::CreatePrimitive(DnnlSubgraphPrimitive& sp, DnnlNode& auto dst_md = dnnl::memory::desc(x_md.get_dims(), node.Output(OUT_Y).Type(), dnnl::memory::format_tag::any); dnnl::memory dst_mem; - // If zero point exists and we are NOT dequantizing int32, then substract zp from x and scale + // If zero point exists and we are NOT dequantizing int32, then subtract zp from x and scale if (isZeroPointUseful && (x_mem.get_desc().get_data_type() != dnnl::memory::data_type::s32)) { // Get Zero point auto x_zp_mem = sp.GetMemory(node.Input(IN_X_ZERO_POINT)); diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_matmul.cc b/onnxruntime/core/providers/dnnl/subgraph/dnnl_matmul.cc index 54528011850be..82a9e9f3ec898 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_matmul.cc +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_matmul.cc @@ -126,7 +126,7 @@ void DnnlMatMul::CreatePrimitive(DnnlSubgraphPrimitive& sp, DnnlNode& node) { } // The reorder from above will get the memory in the right order. The next few lines will create a memory and memory descriptor - // that will have the correct dimentions and correct memory::format + // that will have the correct dimensions and correct memory::format transposedA_md = dnnl::memory::desc(transposedA_dims, node.Input(IN_A).Type(), sp.GetDnnlFormat(transposedA_dims.size())); transposedA_mem = dnnl::memory(transposedA_md, eng, nullptr); void* handle = intermediateA_mem.get_data_handle(); @@ -146,7 +146,7 @@ void DnnlMatMul::CreatePrimitive(DnnlSubgraphPrimitive& sp, DnnlNode& node) { } // The reorder from above will get the memory in the right order. The next few lines will create a memory and memory descriptor - // that will have the correct dimentions and correct memory::format + // that will have the correct dimensions and correct memory::format transposedB_md = dnnl::memory::desc(transposedB_dims, node.Input(IN_B).Type(), sp.GetDnnlFormat(transposedB_dims.size())); transposedB_mem = dnnl::memory(transposedB_md, eng, nullptr); void* handle = intermediateB_mem.get_data_handle(); @@ -193,8 +193,8 @@ void DnnlMatMul::CreatePrimitive(DnnlSubgraphPrimitive& sp, DnnlNode& node) { create a post op binary with possible unsqueezing in order to make sure onednn properly broadcast current limitation 1. is no unsqueeze for matmul output as it is not exposed due to post op fusion - 2. the third input has to be reordered to plain format (eg, no memory format propogation if the third input is internal to subgraph) - 3. adding 1s to front (unsqueeze/expand) in logical dims would possibly fail if physcial layout is not plain format + 2. the third input has to be reordered to plain format (eg, no memory format propagation if the third input is internal to subgraph) + 3. 
adding 1s to front (unsqueeze/expand) in logical dims would possibly fail if physical layout is not plain format */ dnnl::primitive_attr attr; if (has_postop_fusion) { diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_reduce.cc b/onnxruntime/core/providers/dnnl/subgraph/dnnl_reduce.cc index f49fdd7e9bde1..b19411e61767c 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_reduce.cc +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_reduce.cc @@ -135,16 +135,16 @@ void DnnlReduce::CreatePrimitive(DnnlSubgraphPrimitive& sp, DnnlNode& node) { * shape reduction. For this reason we have code paths that are taken if the source dimensions and * destination dimensions are equal that will not call the reduction op. * - * "ReduceLogSum" is equivelent to Log(ReduceSum(input)) + * "ReduceLogSum" is equivalent to Log(ReduceSum(input)) * - if the reduction op is called then the eltwise_log post op will added to the reduction primitive. * - if the reduction op is not called then the eltwise_log primitive is added as its own primitive * - NOTE "ReduceLogSum" follows the code flow of "All other reduce ops" with the exception of the added * post op and an extra check if src_dims == dest_dims. - * "ReduceLogSumExp" is equivelent to Log(ReduceSum(Exp(input))) + * "ReduceLogSumExp" is equivalent to Log(ReduceSum(Exp(input))) * - if the reduction op is called then the eltwise_exp primitive is added before the reduction op * the eletwise_log post op will be added to the reduction primitive * - if the reduction op is not called then the input is not modified since Log(Exp(input) == input - * "ReduceSumSquare" is equivelent to ReduceSum(Square(input)) + * "ReduceSumSquare" is equivalent to ReduceSum(Square(input)) * - the eltwise_square primitive is added before the reduction op * - if the source and destination dimensions are not equal the reduction op is called * All other reduce ops @@ -298,7 +298,7 @@ void DnnlReduce::CreatePrimitive(DnnlSubgraphPrimitive& sp, DnnlNode& node) { dnnl::memory squeeze_mem = dnnl::memory(squeeze_md, dnnl_engine, nullptr); // if the src and dst dims are equal then we will have a valid data handle here. // Otherwise we must get the data handle at runtime using the AddReshape function. - // reading the data handle directy is more efficent if is it possible. + // reading the data handle directly is more efficient if is it possible. if (!src_and_dst_dims_equal) { squeeze_mem.set_data_handle(reduce_dst_mem.get_data_handle()); } else { diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_subgraph_primitive.h b/onnxruntime/core/providers/dnnl/subgraph/dnnl_subgraph_primitive.h index f97268465e46e..a7e49b54d4507 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_subgraph_primitive.h +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_subgraph_primitive.h @@ -65,7 +65,7 @@ class DnnlSubgraphPrimitive { dnnl::memory::desc GetOutputInfo(std::string name); bool IsScalarOutput(const std::string& name); bool IsDynamic(); - // All Scalar inputs are automatically converterted to a one dimentional tensor when used in OneDNN + // All Scalar inputs are automatically converterted to a one dimensional tensor when used in OneDNN // If the input being a scalar affects the operator this function can be used to determine if the // original input from ORT was a scalar. 
bool IsScalar(const DnnlTensor& tensor); diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_transpose.cc b/onnxruntime/core/providers/dnnl/subgraph/dnnl_transpose.cc index 3a7f45c72f27f..b74dbf97a2547 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_transpose.cc +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_transpose.cc @@ -56,7 +56,8 @@ void DnnlTranspose::CreatePrimitive(DnnlSubgraphPrimitive& sp, DnnlNode& node) { strides_inverse.push_back(strides[ndata_dims - i - 1]); } - // Memory descriptor describes the memory reorder but will not have the correct output dimentions or the correct dnnl::memory::format + // Memory descriptor describes the memory reorder but will not have the correct output dimensions + // or the correct dnnl::memory::format dnnl::memory::desc intermediate_md = dnnl::memory::desc(data_dims, node.Input(IN_DATA).Type(), strides); dnnl::memory intermediate_mem = dnnl::memory(intermediate_md, dnnl_engine); @@ -65,7 +66,7 @@ void DnnlTranspose::CreatePrimitive(DnnlSubgraphPrimitive& sp, DnnlNode& node) { {DNNL_ARG_TO, intermediate_mem}}); // The reorder from above will get the memory in the right order. The next few lines will create a memory and memory descriptor - // that will have the correct dimentions and correct memory::format + // that will have the correct dimensions and correct memory::format dnnl::memory::desc transposed_md = dnnl::memory::desc(transposed_dims, node.Input(IN_DATA).Type(), sp.GetDnnlFormat(data_dims.size())); dnnl::memory transposed_mem = dnnl::memory(transposed_md, dnnl_engine, nullptr); void* handle = intermediate_mem.get_data_handle(); diff --git a/onnxruntime/core/providers/migraphx/migraphx_allocator.cc b/onnxruntime/core/providers/migraphx/migraphx_allocator.cc index 0693eea056416..c9db31e8744a7 100644 --- a/onnxruntime/core/providers/migraphx/migraphx_allocator.cc +++ b/onnxruntime/core/providers/migraphx/migraphx_allocator.cc @@ -42,7 +42,7 @@ void* MIGraphXExternalAllocator::Alloc(size_t size) { if (size > 0) { p = alloc_(size); - // review(codemzs): ORT_ENFORCE does not seem appropiate. + // review(codemzs): ORT_ENFORCE does not seem appropriate. ORT_ENFORCE(p != nullptr); } diff --git a/onnxruntime/core/providers/migraphx/migraphx_stream_handle.cc b/onnxruntime/core/providers/migraphx/migraphx_stream_handle.cc index 9c5bb4ecf5c97..e8e349af75aba 100644 --- a/onnxruntime/core/providers/migraphx/migraphx_stream_handle.cc +++ b/onnxruntime/core/providers/migraphx/migraphx_stream_handle.cc @@ -123,7 +123,7 @@ Status MIGraphXStream::CleanUpOnRunEnd() { } void* MIGraphXStream::GetResource(int version, int id) const { - ORT_ENFORCE(version <= ORT_ROCM_RESOUCE_VERSION, "resource version unsupported!"); + ORT_ENFORCE(version <= ORT_ROCM_RESOURCE_VERSION, "resource version unsupported!"); void* resource{}; switch (id) { case RocmResource::hip_stream_t: diff --git a/onnxruntime/core/providers/nnapi/nnapi_builtin/nnapi_lib/nnapi_implementation.cc b/onnxruntime/core/providers/nnapi/nnapi_builtin/nnapi_lib/nnapi_implementation.cc index cdf1075beb827..91d85efd09c65 100644 --- a/onnxruntime/core/providers/nnapi/nnapi_builtin/nnapi_lib/nnapi_implementation.cc +++ b/onnxruntime/core/providers/nnapi/nnapi_builtin/nnapi_lib/nnapi_implementation.cc @@ -228,7 +228,7 @@ const NnApi LoadNnApi() { nnapi.ASharedMemory_create = getASharedMemory_create(); #else // Mock ASharedMemory_create only if libneuralnetworks.so was successfully - // loaded. This ensures identical behaviour on platforms which use this + // loaded. 
This ensures identical behavior // implementation, but don't have libneuralnetworks.so library, and // platforms which use nnapi_implementation_disabled.cc stub. if (libneuralnetworks != nullptr) { diff --git a/onnxruntime/core/providers/rknpu/rknpu_execution_provider.cc b/onnxruntime/core/providers/rknpu/rknpu_execution_provider.cc index 64d8f235840bc..44b34f4b4ce6c 100644 --- a/onnxruntime/core/providers/rknpu/rknpu_execution_provider.cc +++ b/onnxruntime/core/providers/rknpu/rknpu_execution_provider.cc @@ -28,7 +28,7 @@ constexpr const char* RKNPU = "Rknpu"; struct RknpuFuncState { std::string uniq_input_shape; - std::unique_ptr exector; + std::unique_ptr exector; ONNX_NAMESPACE::ModelProto model_proto; std::unordered_map input_map; std::unordered_map output_map; @@ -282,7 +282,7 @@ common::Status RknpuExecutionProvider::Compile(const std::vector p = std::make_unique(); rk::nn::Graph* graph = new rk::nn::Graph(); - *p = {"", std::unique_ptr(new rk::nn::Exection(graph)), + *p = {"", std::unique_ptr(new rk::nn::Execution(graph)), model_proto_[context->node_name], input_info_[context->node_name], output_info_[context->node_name], std::vector{}, std::vector{}}; diff --git a/onnxruntime/core/providers/rocm/nn/conv.cc b/onnxruntime/core/providers/rocm/nn/conv.cc index a2b587a56466f..d7f47d07a8fec 100644 --- a/onnxruntime/core/providers/rocm/nn/conv.cc +++ b/onnxruntime/core/providers/rocm/nn/conv.cc @@ -12,7 +12,7 @@ namespace onnxruntime { namespace rocm { // Op Set 11 for Conv only update document to clearify default dilations and strides value. -// which are already convered by op set 11 cpu versoin, so simply add declaration. +// which are already covered by op set 11 cpu version, so simply add declaration. #define REGISTER_KERNEL_TYPED(T) \ ONNX_OPERATOR_VERSIONED_TYPED_KERNEL_EX( \ Conv, \ diff --git a/onnxruntime/core/providers/rocm/reduction/reduction_ops.cc b/onnxruntime/core/providers/rocm/reduction/reduction_ops.cc index 820745b22f614..11073ab3584eb 100644 --- a/onnxruntime/core/providers/rocm/reduction/reduction_ops.cc +++ b/onnxruntime/core/providers/rocm/reduction/reduction_ops.cc @@ -226,7 +226,7 @@ Status ReduceKernel::ReduceKernelShared( MIOPEN_RETURN_IF_ERROR(miopenGetReductionIndicesSize(miopen_handle, reduce_desc, input_tensor, output_tensor, &indices_bytes)); auto indices_rocm = GetScratchBuffer(indices_bytes, stream); - // need to allocate a separate buffer for ArgMin/ArgMax comparsion output + // need to allocate a separate buffer for ArgMin/ArgMax comparison output auto output_count = output_shape.Size(); if (ReduceTensorIndices == MIOPEN_REDUCE_TENSOR_NO_INDICES) { diff --git a/onnxruntime/core/providers/rocm/rocm_allocator.cc b/onnxruntime/core/providers/rocm/rocm_allocator.cc index 8645b791d4b0f..4a11b158c2cce 100644 --- a/onnxruntime/core/providers/rocm/rocm_allocator.cc +++ b/onnxruntime/core/providers/rocm/rocm_allocator.cc @@ -60,7 +60,7 @@ void* ROCMExternalAllocator::Alloc(size_t size) { if (size > 0) { p = alloc_(size); - // review(codemzs): ORT_ENFORCE does not seem appropiate. + // review(codemzs): ORT_ENFORCE does not seem appropriate.
ORT_ENFORCE(p != nullptr); } diff --git a/onnxruntime/core/providers/rocm/rocm_stream_handle.cc b/onnxruntime/core/providers/rocm/rocm_stream_handle.cc index 0c0f64a8bfaf0..ef5689fc9a2d0 100644 --- a/onnxruntime/core/providers/rocm/rocm_stream_handle.cc +++ b/onnxruntime/core/providers/rocm/rocm_stream_handle.cc @@ -140,7 +140,7 @@ Status RocmStream::CleanUpOnRunEnd() { } void* RocmStream::GetResource(int version, int id) const { - ORT_ENFORCE(version <= ORT_ROCM_RESOUCE_VERSION, "resource version unsupported!"); + ORT_ENFORCE(version <= ORT_ROCM_RESOURCE_VERSION, "resource version unsupported!"); void* resource{}; switch (id) { case RocmResource::hip_stream_t: diff --git a/onnxruntime/core/providers/webnn/webnn_execution_provider.cc b/onnxruntime/core/providers/webnn/webnn_execution_provider.cc index e839d6d17b7d9..0da0dfc6dfb26 100644 --- a/onnxruntime/core/providers/webnn/webnn_execution_provider.cc +++ b/onnxruntime/core/providers/webnn/webnn_execution_provider.cc @@ -329,7 +329,7 @@ common::Status WebNNExecutionProvider::Compile(const std::vector InferenceSession::GetOverridableI } } - // returns a list of initializers that can be overriden. + // returns a list of initializers that can be overridden. return std::make_pair(common::Status::OK(), &model_->MainGraph().GetOverridableInitializers()); } diff --git a/onnxruntime/core/session/inference_session.h b/onnxruntime/core/session/inference_session.h index e1cd085d2c271..9662095bf0ed3 100644 --- a/onnxruntime/core/session/inference_session.h +++ b/onnxruntime/core/session/inference_session.h @@ -386,7 +386,7 @@ class InferenceSession { * @param run_options run options. * @param mutable_feeds inputs owned by client code and will be released as long as the feeds be set in session states. * Then the feeds will purely managed in the session states. - * @param fetches outputs produced after the executin of this function. + * @param fetches outputs produced after the execution of this function. * @param state State of the graph needed to resume partial graph run. * @param feeds_fetches_manager Contains feed/fetches name to internal indices mapping and information for device * copy/checks. 
diff --git a/onnxruntime/core/util/qmath.h b/onnxruntime/core/util/qmath.h index c982a7aa2e7e0..1b2180da95058 100644 --- a/onnxruntime/core/util/qmath.h +++ b/onnxruntime/core/util/qmath.h @@ -552,7 +552,7 @@ struct BlockedQuantizeLinear { std::ptrdiff_t N, const std::ptrdiff_t quant_block_size, const std::ptrdiff_t thread_block_size, bool saturate) { ORT_UNUSED_PARAMETER(saturate); - // to avoid a byte being writen from mutiple threads, use 2 * N as thread block + // to avoid a byte being written from mutiple threads, use 2 * N as thread block ORT_UNUSED_PARAMETER(thread_block_size); constexpr auto low = static_cast(TOut::min_val); constexpr auto high = static_cast(TOut::max_val); @@ -637,7 +637,7 @@ struct BlockedQuantizeLinear { ORT_UNUSED_PARAMETER(saturate); constexpr auto low = static_cast(TOut::min_val); constexpr auto high = static_cast(TOut::max_val); - // to avoid a byte being writen from mutiple threads, use 2 * K as thread block + // to avoid a byte being written from mutiple threads, use 2 * K as thread block auto size_thread_block = 2 * K; auto quant_block_num_K = (K + quant_block_size - 1) / quant_block_size; auto num_thread_block = (M + 1) / 2; @@ -697,7 +697,7 @@ struct BlockedQuantizeLinear { std::ptrdiff_t N, const std::ptrdiff_t quant_block_size, const std::ptrdiff_t thread_block_size, bool saturate) { ORT_UNUSED_PARAMETER(saturate); - // to avoid a byte being writen from mutiple threads, use 2 * N as thread block + // to avoid a byte being written from mutiple threads, use 2 * N as thread block ORT_UNUSED_PARAMETER(thread_block_size); constexpr auto low = static_cast(TOut::min_val); constexpr auto high = static_cast(TOut::max_val); @@ -786,7 +786,7 @@ struct BlockedQuantizeLinear { ORT_UNUSED_PARAMETER(saturate); constexpr auto low = static_cast(TOut::min_val); constexpr auto high = static_cast(TOut::max_val); - // to avoid a byte being writen from mutiple threads, use 2 * K as thread block + // to avoid a byte being written from mutiple threads, use 2 * K as thread block auto size_thread_block = 2 * K; auto quant_block_num_K = (K + quant_block_size - 1) / quant_block_size; auto num_thread_block = (M + 1) / 2; diff --git a/onnxruntime/python/onnxruntime_pybind_schema.cc b/onnxruntime/python/onnxruntime_pybind_schema.cc index 218b59688b01c..c5757095e2e1e 100644 --- a/onnxruntime/python/onnxruntime_pybind_schema.cc +++ b/onnxruntime/python/onnxruntime_pybind_schema.cc @@ -15,7 +15,7 @@ void addGlobalSchemaFunctions(pybind11::module& m) { "get_all_operator_schema", []() -> const std::vector { return ONNX_NAMESPACE::OpSchemaRegistry::get_all_schemas_with_history(); }, - "Return a vector of OpSchema all registed operators"); + "Return a vector of OpSchema all registered operators"); m.def( "get_all_opkernel_def", []() -> const std::vector { std::vector result; diff --git a/onnxruntime/python/onnxruntime_pybind_sparse_tensor.cc b/onnxruntime/python/onnxruntime_pybind_sparse_tensor.cc index db0b2e392b29f..7dcead113ac4f 100644 --- a/onnxruntime/python/onnxruntime_pybind_sparse_tensor.cc +++ b/onnxruntime/python/onnxruntime_pybind_sparse_tensor.cc @@ -41,7 +41,7 @@ struct MakeDType { /// /// The function creates a numpy array that points to -/// data stored within the corresponing tensor. Parent object +/// data stored within the corresponding tensor. Parent object /// holds a reference to the object that owns the data so it /// does not disappear. 
/// @@ -396,7 +396,7 @@ void addSparseTensorMethods(pybind11::module& m) { }) // pybind apparently has a bug with returning enums from def_property_readonly or methods // returning a method object instead of the enumeration value - // so we are using def_property and throw on a potential modificaiton + // so we are using def_property and throw on a potential modification .def_property( "format", [](const PySparseTensor* py_tensor) -> OrtSparseFormat { const SparseTensor& tensor = py_tensor->Instance(); diff --git a/onnxruntime/python/onnxruntime_pybind_state.cc b/onnxruntime/python/onnxruntime_pybind_state.cc index e13285c60e69f..d7155b2b6899a 100644 --- a/onnxruntime/python/onnxruntime_pybind_state.cc +++ b/onnxruntime/python/onnxruntime_pybind_state.cc @@ -152,7 +152,7 @@ void AsyncCallback(void* user_data, OrtValue** outputs, size_t num_outputs, OrtS } else { // acquire GIL to safely: // 1) invoke python callback - // 2) create, manipulate, and destory python objects + // 2) create, manipulate, and destroy python objects py::gil_scoped_acquire acquire; invoke_callback(); } @@ -946,7 +946,7 @@ std::unique_ptr CreateExecutionProviderInstance( provider_options_map); // This variable is never initialized because the APIs by which it should be initialized are deprecated, - // however they still exist are are in-use. Neverthless, it is used to return CUDAAllocator, + // however they still exist and are in-use. Nevertheless, it is used to return CUDAAllocator, // hence we must try to initialize it here if we can since FromProviderOptions might contain // external CUDA allocator. external_allocator_info = info.external_allocator_info; @@ -973,14 +973,17 @@ std::unique_ptr CreateExecutionProviderInstance( const ROCMExecutionProviderInfo info = GetRocmExecutionProviderInfo(rocm_provider_info, provider_options_map); - // This variable is never initialized because the APIs by which is it should be initialized are deprecated, however they still - // exist are are in-use. Neverthless, it is used to return ROCMAllocator, hence we must try to initialize it here if we can - // since FromProviderOptions might contain external ROCM allocator. + // This variable is never initialized because the APIs by which it should be initialized are deprecated, + // however they still exist and are in-use. Nevertheless, it is used to return ROCMAllocator, hence we must + // try to initialize it here if we can since FromProviderOptions might contain external ROCM allocator. external_allocator_info = info.external_allocator_info; return rocm_provider_info->CreateExecutionProviderFactory(info)->CreateProvider(); } else { if (!Env::Default().GetEnvironmentVar("ROCM_PATH").empty()) { - ORT_THROW("ROCM_PATH is set but ROCM wasn't able to be loaded. Please install the correct version of ROCM and MIOpen as mentioned in the GPU requirements page, make sure they're in the PATH, and that your GPU is supported."); + ORT_THROW( + "ROCM_PATH is set but ROCM wasn't able to be loaded. Please install the correct version " + "of ROCM and MIOpen as mentioned in the GPU requirements page, make sure they're in the PATH, " + "and that your GPU is supported."); } } #endif @@ -1389,7 +1392,8 @@ void addGlobalMethods(py::module& m) { LogDeprecationWarning("set_openvino_device", "OpenVINO execution provider option \"device_type\""); openvino_device_type = device_type; }, - "Set the prefered OpenVINO device type to be used.
If left unset, the device type selected during build time will be used."); + "Set the preferred OpenVINO device type to be used. If left unset, " + "the device type selected during build time will be used."); // TODO remove deprecated global config m.def( "get_openvino_device", []() -> std::string { diff --git a/onnxruntime/python/tools/quantization/calibrate.py b/onnxruntime/python/tools/quantization/calibrate.py index 10492ae419817..65875d09102bd 100644 --- a/onnxruntime/python/tools/quantization/calibrate.py +++ b/onnxruntime/python/tools/quantization/calibrate.py @@ -812,7 +812,7 @@ def collect_absolute_value(self, name_to_arr): hist_edges = hist_edges.astype(data_arr_np.dtype) assert ( data_arr_np.dtype != np.float64 - ), "only float32 or float16 is supported, every constant must be explicetly typed" + ), "only float32 or float16 is supported, every constant must be explicitly typed" self.histogram_dict[tensor] = (hist, hist_edges, min_value, max_value) else: old_histogram = self.histogram_dict[tensor] @@ -834,7 +834,7 @@ def collect_absolute_value(self, name_to_arr): hist[: len(old_hist)] += old_hist assert ( data_arr_np.dtype != np.float64 - ), "only float32 or float16 is supported, every constant must be explicetly typed" + ), "only float32 or float16 is supported, every constant must be explicitly typed" self.histogram_dict[tensor] = (hist, hist_edges, min(old_min, min_value), max(old_max, max_value)) def collect_value(self, name_to_arr): diff --git a/onnxruntime/python/tools/quantization/operators/direct_q8.py b/onnxruntime/python/tools/quantization/operators/direct_q8.py index ae9679ae8ec7a..de610a4c01326 100644 --- a/onnxruntime/python/tools/quantization/operators/direct_q8.py +++ b/onnxruntime/python/tools/quantization/operators/direct_q8.py @@ -13,7 +13,7 @@ def quantize(self): node = self.node if not self.quantizer.force_quantize_no_input_check: - # Keep backward compatiblity + # Keep backward compatibility # Quantize when input[0] is quantized already. Otherwise keep it. quantized_input_value = self.quantizer.find_quantized_value(node.input[0]) if quantized_input_value is None: diff --git a/onnxruntime/python/tools/quantization/quant_utils.py b/onnxruntime/python/tools/quantization/quant_utils.py index e4a9b867b1482..0fdef4ef6f6d3 100644 --- a/onnxruntime/python/tools/quantization/quant_utils.py +++ b/onnxruntime/python/tools/quantization/quant_utils.py @@ -357,7 +357,7 @@ def quantize_data( - when data `type == int8`, from `[-m , m]` -> :math:`[-(2^{b-1}-1), 2^{b-1}-1]` where `m = max(abs(rmin), abs(rmax))` - and add necessary intermediate nodes to trasnform quantized weight to full weight using the equation + and add necessary intermediate nodes to transform quantized weight to full weight using the equation :math:`r = S(q-z)`, where diff --git a/onnxruntime/python/tools/transformers/README.md b/onnxruntime/python/tools/transformers/README.md index 547d1a883c165..4f147219f19f1 100644 --- a/onnxruntime/python/tools/transformers/README.md +++ b/onnxruntime/python/tools/transformers/README.md @@ -29,7 +29,7 @@ Models not in the list may only be partially optimized or not optimized at all. - **hidden_size**: (*default: 768*) BERT-base and BERT-large has 768 and 1024 hidden nodes respectively. - **input_int32**: (*optional*) - Exported model ususally uses int64 tensor as input. If this flag is specified, int32 tensors will be used as input, and it could avoid un-necessary Cast nodes and get better performance. + Exported model usually uses int64 tensor as input. 
If this flag is specified, int32 tensors will be used as input, and it could avoid un-necessary Cast nodes and get better performance. - **float16**: (*optional*) By default, model uses float32 in computation. If this flag is specified, half-precision float will be used. This option is recommended for NVidia GPU with Tensor Core like V100 and T4. For older GPUs, float32 is likely faster. - **use_gpu**: (*optional*) diff --git a/onnxruntime/python/tools/transformers/benchmark.py b/onnxruntime/python/tools/transformers/benchmark.py index 9baafbbfff0e3..5ec2ab4e50799 100644 --- a/onnxruntime/python/tools/transformers/benchmark.py +++ b/onnxruntime/python/tools/transformers/benchmark.py @@ -930,7 +930,7 @@ def main(): if len(results) == 0: if args.batch_sizes != [0]: - logger.warning("No any result avaiable.") + logger.warning("No any result available.") return csv_filename = args.detail_csv or f"benchmark_detail_{time_stamp}.csv" diff --git a/onnxruntime/python/tools/transformers/large_model_exporter.py b/onnxruntime/python/tools/transformers/large_model_exporter.py index 2083419087a69..0eaccc0fafcc4 100644 --- a/onnxruntime/python/tools/transformers/large_model_exporter.py +++ b/onnxruntime/python/tools/transformers/large_model_exporter.py @@ -368,7 +368,7 @@ def parse_arguments(): required=False, type=str, default=None, - help=("cache directy of huggingface, by setting this to avoid useless downloading if you have one"), + help=("cache directory of huggingface, by setting this to avoid useless downloading if you have one"), ) parser.add_argument( "--with_past", diff --git a/onnxruntime/python/tools/transformers/models/gpt2/benchmark_gpt2.py b/onnxruntime/python/tools/transformers/models/gpt2/benchmark_gpt2.py index 6d6a057574a17..7e786fce30985 100644 --- a/onnxruntime/python/tools/transformers/models/gpt2/benchmark_gpt2.py +++ b/onnxruntime/python/tools/transformers/models/gpt2/benchmark_gpt2.py @@ -193,7 +193,7 @@ def main(args): config = AutoConfig.from_pretrained(args.model_name_or_path, torchscript=args.torchscript, cache_dir=cache_dir) model = model_class.from_pretrained(args.model_name_or_path, config=config, cache_dir=cache_dir) - # This scirpt does not support float16 for PyTorch. + # This script does not support float16 for PyTorch.
# if args.float16: # model.half() diff --git a/onnxruntime/python/tools/transformers/models/gpt2/convert_to_onnx.py b/onnxruntime/python/tools/transformers/models/gpt2/convert_to_onnx.py index 27e3899c11b7a..0ab26308295a9 100644 --- a/onnxruntime/python/tools/transformers/models/gpt2/convert_to_onnx.py +++ b/onnxruntime/python/tools/transformers/models/gpt2/convert_to_onnx.py @@ -105,7 +105,7 @@ def parse_arguments(argv=None): required=False, type=float, default=0, - help="the aboslute and relative tolerance for parity verification", + help="the absolute and relative tolerance for parity verification", ) parser.add_argument( diff --git a/onnxruntime/python/tools/transformers/models/gpt2/gpt2_tester.py b/onnxruntime/python/tools/transformers/models/gpt2/gpt2_tester.py index f4705bef6a988..6bfcb0368eaaa 100644 --- a/onnxruntime/python/tools/transformers/models/gpt2/gpt2_tester.py +++ b/onnxruntime/python/tools/transformers/models/gpt2/gpt2_tester.py @@ -137,7 +137,7 @@ def __init__( self.has_position_ids = position_ids is not None self.has_attention_mask = attention_mask is not None - # Emtpy past state for first inference + # Empty past state for first inference self.past = [] past_shape = [ 2, diff --git a/onnxruntime/python/tools/transformers/notebooks/PyTorch_Bert-Squad_OnnxRuntime_GPU.ipynb b/onnxruntime/python/tools/transformers/notebooks/PyTorch_Bert-Squad_OnnxRuntime_GPU.ipynb index 43c31e1ea45ac..7295ae1436c99 100644 --- a/onnxruntime/python/tools/transformers/notebooks/PyTorch_Bert-Squad_OnnxRuntime_GPU.ipynb +++ b/onnxruntime/python/tools/transformers/notebooks/PyTorch_Bert-Squad_OnnxRuntime_GPU.ipynb @@ -1665,7 +1665,7 @@ "### Packing Mode (Effective Transformer)\n", "\n", "When padding ratio is high, it is helpful to use packing mode, also known as [effective transformer](https://github.com/bytedance/effective_transformer).\n", - "This feature requires onnxruntime-gpu verison 1.16 or later. \n", + "This feature requires onnxruntime-gpu version 1.16 or later. \n", "\n", "In below example, average sequence length after removing paddings is 32, the sequence length with paddings is 128. We can see 3x throughput with packing mode (QPS increased from 1617 to 5652)." ] diff --git a/onnxruntime/python/tools/transformers/onnx_model_phi.py b/onnxruntime/python/tools/transformers/onnx_model_phi.py index 05a27ba487f4d..5df765033578b 100644 --- a/onnxruntime/python/tools/transformers/onnx_model_phi.py +++ b/onnxruntime/python/tools/transformers/onnx_model_phi.py @@ -65,7 +65,7 @@ def __call__(self, x): return x -# TODO: move to a seperate file +# TODO: move to a separate file class Fission(Fusion): def __init__( self, diff --git a/onnxruntime/python/tools/transformers/onnx_model_tnlr.py b/onnxruntime/python/tools/transformers/onnx_model_tnlr.py index 98235de6ba6fd..f5a47b19d67fc 100644 --- a/onnxruntime/python/tools/transformers/onnx_model_tnlr.py +++ b/onnxruntime/python/tools/transformers/onnx_model_tnlr.py @@ -17,7 +17,7 @@ class FusionTnlrAttention(FusionAttention): """ Fuse TNLR Attention subgraph into one Attention node. - TNLR Attention has extra addtion after qk nodes and adopts [S, B, NH] as I/O shape. + TNLR Attention has extra addition after qk nodes and adopts [S, B, NH] as I/O shape. 
""" def __init__( diff --git a/onnxruntime/python/tools/transformers/optimizer.py b/onnxruntime/python/tools/transformers/optimizer.py index 5f161674b614e..06264b426d1e5 100644 --- a/onnxruntime/python/tools/transformers/optimizer.py +++ b/onnxruntime/python/tools/transformers/optimizer.py @@ -531,7 +531,7 @@ def _parse_arguments(): "--disable_symbolic_shape_infer", required=False, action="store_true", - help="diable symoblic shape inference", + help="diable symbolic shape inference", ) parser.set_defaults(disable_symbolic_shape_infer=False) diff --git a/onnxruntime/python/tools/transformers/shape_optimizer.py b/onnxruntime/python/tools/transformers/shape_optimizer.py index 503930b23229f..17fd54f19baf2 100644 --- a/onnxruntime/python/tools/transformers/shape_optimizer.py +++ b/onnxruntime/python/tools/transformers/shape_optimizer.py @@ -3,7 +3,7 @@ # Licensed under the MIT License. # -------------------------------------------------------------------------- -# This tool is not used directly in bert optimization. It could assist developing the optimization script on the following senarios: +# This tool is not used directly in bert optimization. It could assist developing the optimization script on the following scenarios: # (1) It could simplify graph by removing many sub-graphs related to reshape. # (2) It could reduce extra inputs and outputs to fit other tools. The script compare_bert_results.py or bert_perf_test.py requires 3 inputs. diff --git a/onnxruntime/test/contrib_ops/attention_lstm_op_test.cc b/onnxruntime/test/contrib_ops/attention_lstm_op_test.cc index e78b3528c11a4..0634f545e6f7b 100644 --- a/onnxruntime/test/contrib_ops/attention_lstm_op_test.cc +++ b/onnxruntime/test/contrib_ops/attention_lstm_op_test.cc @@ -266,8 +266,8 @@ static const std::vector s_M_2batch{0.1f, -0.25f, 1.0f, 1.0f, -1.0f, -1.5 0.1f, -0.25f, 0.5f, -0.25f, -1.25f, 0.25f, -1.0f, 1.5f, -1.25f}; // real seq lens for memory -static std::vector s_mem_seq_lenghts{3}; -static const std::vector s_mem_seq_lenghts_2batch{3, 2}; +static std::vector s_mem_seq_lengths{3}; +static const std::vector s_mem_seq_lengths_2batch{3, 2}; // [batch_size=1, input_max_step=3, input_only_depth=3] static std::vector s_X_T_data{ @@ -352,7 +352,7 @@ TEST(AttnLSTMTest, ForwardLstmWithBahdanauAMZeroAttention) { RunAttnLstmTest( X_data, W_data, R_data, Y_data, Y_h_data, Y_c_data, - s_memory_layer_weight, s_query_layer_weight, s_attn_v, s_M_data, &s_mem_seq_lenghts, &zero_attn_layer_weight, + s_memory_layer_weight, s_query_layer_weight, s_attn_v, s_M_data, &s_mem_seq_lengths, &zero_attn_layer_weight, input_only_depth, batch_size, cell_hidden_size, input_max_step, memory_max_step, memory_depth, am_attn_size, aw_attn_size, &B_data, nullptr, nullptr, nullptr, &s_seq_lengths, @@ -389,7 +389,7 @@ TEST(AttnLSTMTest, ForwardLstmWithBahdanauAM) { RunAttnLstmTest( X_data, W_data, R_data, Y_data, Y_h_data, Y_c_data, - s_memory_layer_weight, s_query_layer_weight, s_attn_v, s_M_data, &s_mem_seq_lenghts, &s_attn_layer_weight, + s_memory_layer_weight, s_query_layer_weight, s_attn_v, s_M_data, &s_mem_seq_lengths, &s_attn_layer_weight, input_only_depth, batch_size, cell_hidden_size, input_max_step, memory_max_step, memory_depth, am_attn_size, aw_attn_size, &B_data, nullptr, nullptr, nullptr, &s_seq_lengths, @@ -428,7 +428,7 @@ TEST(AttnLSTMTest, ForwardLstmWithBahdanauAMShortenSeqLength) { RunAttnLstmTest( X_data, W_data, R_data, Y_data, Y_h_data, Y_c_data, - s_memory_layer_weight, s_query_layer_weight, s_attn_v, s_M_data, &s_mem_seq_lenghts, 
&s_attn_layer_weight, + s_memory_layer_weight, s_query_layer_weight, s_attn_v, s_M_data, &s_mem_seq_lengths, &s_attn_layer_weight, input_only_depth, batch_size, cell_hidden_size, input_max_step, memory_max_step, memory_depth, am_attn_size, aw_attn_size, &B_data, nullptr, nullptr, nullptr, &shortenSeqLen, @@ -467,7 +467,7 @@ TEST(AttnLSTMTest, ReverseLstmWithBahdanauAMShortenSeqLength) { RunAttnLstmTest( X_data, W_data, R_data, Y_data, Y_h_data, Y_c_data, - s_memory_layer_weight, s_query_layer_weight, s_attn_v, s_M_data, &s_mem_seq_lenghts, &s_attn_layer_weight, + s_memory_layer_weight, s_query_layer_weight, s_attn_v, s_M_data, &s_mem_seq_lengths, &s_attn_layer_weight, input_only_depth, batch_size, cell_hidden_size, input_max_step, memory_max_step, memory_depth, am_attn_size, aw_attn_size, &B_data, nullptr, nullptr, nullptr, &shortenSeqLen, @@ -521,7 +521,7 @@ TEST(AttnLSTMTest, BidirectionLstmWithBahdanauAMShortenSeqLength) { RunAttnLstmTest( X_data, d_W_data, d_R_data, Y_data, Y_h_data, Y_c_data, - d_memory_layer_weight, d_query_layer_weight, d_attn_v, s_M_data, &s_mem_seq_lenghts, &d_attn_layer_weight, + d_memory_layer_weight, d_query_layer_weight, d_attn_v, s_M_data, &s_mem_seq_lengths, &d_attn_layer_weight, input_only_depth, batch_size, cell_hidden_size, input_max_step, memory_max_step, memory_depth, am_attn_size, aw_attn_size, &d_B_data, nullptr, nullptr, nullptr, &shortenSeqLen, @@ -578,7 +578,7 @@ TEST(AttnLSTMTest, BidirectionLstmWithBahdanauAM2BatchShortenSeqLen) { RunAttnLstmTest( X_data, d_W_data, d_R_data, Y_data, Y_h_data, Y_c_data, - d_memory_layer_weight, d_query_layer_weight, d_attn_v, s_M_2batch, &s_mem_seq_lenghts_2batch, &d_attn_layer_weight, + d_memory_layer_weight, d_query_layer_weight, d_attn_v, s_M_2batch, &s_mem_seq_lengths_2batch, &d_attn_layer_weight, input_only_depth, batch2Size, cell_hidden_size, inputMaxStep4, memory_max_step, memory_depth, am_attn_size, aw_attn_size, &d_B_data, nullptr, nullptr, nullptr, &s_seq_lengths_2batch, diff --git a/onnxruntime/test/framework/allocation_planner_test.cc b/onnxruntime/test/framework/allocation_planner_test.cc index bf15a9d35b56a..26e40b25930c8 100644 --- a/onnxruntime/test/framework/allocation_planner_test.cc +++ b/onnxruntime/test/framework/allocation_planner_test.cc @@ -1288,7 +1288,7 @@ TEST_F(PlannerTest, MultiStream) { CreatePlan({}, false); - EXPECT_EQ(GetState().GetExecutionPlan()->execution_plan.size(), 2) << "2 logic streams for CPU and CUDA seperately"; + EXPECT_EQ(GetState().GetExecutionPlan()->execution_plan.size(), 2) << "2 logic streams for CPU and CUDA separately"; EXPECT_EQ(GetState().GetExecutionPlan()->execution_plan[0]->steps_.size(), 6) << "CPU stream has 6 steps"; EXPECT_NE(strstr(typeid(*GetState().GetExecutionPlan()->execution_plan[0]->steps_[0]).name(), "LaunchKernelStep"), nullptr) << "0th step: LaunchKernelStep for node 1"; EXPECT_NE(strstr(typeid(*GetState().GetExecutionPlan()->execution_plan[0]->steps_[1]).name(), "LaunchKernelStep"), nullptr) << "1st step: LaunchKernelStep for node 2"; diff --git a/onnxruntime/test/framework/inference_session_test.cc b/onnxruntime/test/framework/inference_session_test.cc index 84389c1d9711c..8b230db351edc 100644 --- a/onnxruntime/test/framework/inference_session_test.cc +++ b/onnxruntime/test/framework/inference_session_test.cc @@ -1400,7 +1400,7 @@ TEST(ExecutionProviderTest, OpKernelInfoCanReadConfigOptions) { so.session_logid = "ExecutionProviderTest.OpKernelInfoCanReadConfigOptions"; // add a config key that if read causes the Fuse op kernel to throw in 
the ctor. this is just to test the value is passed - // through in the simplest way, as the kernel is constructed in InferenceSession::Intialize so we don't need to + // through in the simplest way, as the kernel is constructed in InferenceSession::Initialize so we don't need to // actually run the model. ASSERT_STATUS_OK(so.config_options.AddConfigEntry("ThrowInKernelCtor", "1")); diff --git a/onnxruntime/test/framework/tunable_op_test.cc b/onnxruntime/test/framework/tunable_op_test.cc index 6fe0754db40d3..53aa949647c77 100644 --- a/onnxruntime/test/framework/tunable_op_test.cc +++ b/onnxruntime/test/framework/tunable_op_test.cc @@ -668,7 +668,7 @@ TEST(TuningContext, TunableOpRespectTuningContext) { ASSERT_TRUE(status.IsOK()); ASSERT_EQ(last_run, "FastFull"); - // After TunableOp(...), the result entry is corretly written. + // After TunableOp(...), the result entry is correctly written. ASSERT_EQ(mgr.Lookup(op.Signature()).size(), 1u); ASSERT_EQ(mgr.Lookup(op.Signature(), params.Signature()), tuning::TunableVecAddSelectFast::kFastFullId); } diff --git a/onnxruntime/test/fuzzing/include/BetaDistribution.h b/onnxruntime/test/fuzzing/include/BetaDistribution.h index c5c59922d864c..40e42a598c85a 100644 --- a/onnxruntime/test/fuzzing/include/BetaDistribution.h +++ b/onnxruntime/test/fuzzing/include/BetaDistribution.h @@ -83,7 +83,7 @@ class BetaDistribution { calc_type highest_probability_temp = highest_probability; highest_probability = std::max({highest_probability_temp, distribution(sample)}); - // A new sample number with a higher probabilty has been found + // A new sample number with a higher probability has been found // if (highest_probability > highest_probability_temp) { likely_number = sample; @@ -137,7 +137,7 @@ class BetaDistribution { } } - // Generate the probabilty of having this number + // Generate the probability of having this number // inline calc_type distribution(calc_type randVar) { if (randVar > max() || randVar < min()) { diff --git a/onnxruntime/test/fuzzing/src/test.cpp b/onnxruntime/test/fuzzing/src/test.cpp index 0d51af6b6b0fa..490f7dd4d37a3 100644 --- a/onnxruntime/test/fuzzing/src/test.cpp +++ b/onnxruntime/test/fuzzing/src/test.cpp @@ -365,7 +365,7 @@ int main(int argc, char* argv[]) { std::ifstream ortModelStream(ort_model_file, std::ifstream::in | std::ifstream::binary); ortModelStream.read(model_data.data(), num_bytes); ortModelStream.close(); - // Currently mutations are generated by using XOR of a byte with the preceeding byte at a time. + // Currently mutations are generated by using XOR of a byte with the preceding byte at a time. // Other possible ways may be considered in future, for example swaping two bytes randomly at a time. Logger::testLog << "Starting Test" << Logger::endl; for (size_t& i = run_stats.iteration; i < num_bytes - 1; i++) { diff --git a/onnxruntime/test/ir/graph_test.cc b/onnxruntime/test/ir/graph_test.cc index 5fc036790b765..f6b7bdb1a001c 100644 --- a/onnxruntime/test/ir/graph_test.cc +++ b/onnxruntime/test/ir/graph_test.cc @@ -464,7 +464,7 @@ TEST_F(GraphTest, LocalCustomRegistry) { // Tests the case where function op and function body ops belong to different domains. // Tests that such a model can be loaded successfully, function body initialization is -// successful and domain and verison mapping for each node is successful (by verifying +// successful and domain and version mapping for each node is successful (by verifying // op schema for each of the function body nodes can be found). 
TEST_F(GraphTest, FunctionOpsetImportTest) { std::shared_ptr model; @@ -481,7 +481,7 @@ TEST_F(GraphTest, FunctionOpsetImportTest) { // phase .i.e. Init function body only if none of EPs have a kernel matching the function op // then this check will not hold true and should be removed. - // We delay the funciton instantiate untill partition the graph + // We delay the function instantiate until partition the graph // this check is no longer valid anymore. /*ASSERT_TRUE(!schema->HasFunction() && !schema->HasContextDependentFunction());*/ continue; diff --git a/onnxruntime/test/ir/schema_registry_manager_test.cc b/onnxruntime/test/ir/schema_registry_manager_test.cc index 704c84343173a..52c286d187e53 100644 --- a/onnxruntime/test/ir/schema_registry_manager_test.cc +++ b/onnxruntime/test/ir/schema_registry_manager_test.cc @@ -89,7 +89,7 @@ TEST(SchemaRegistryManager, OpsetRegTest) { // registry2 has:(op1,domain1,version2) ASSERT_TRUE(registry2->GetSchema("Op1", 1, "Domain1") == nullptr); ASSERT_TRUE(registry2->GetSchema("Op1", 2, "Domain1") != nullptr); - // Fail because this registery doesn't have the information of opset3 + // Fail because this registry doesn't have the information of opset3 ASSERT_TRUE(registry2->GetSchema("Op1", 3, "Domain1") == nullptr); std::shared_ptr registry3 = std::make_shared(); @@ -126,7 +126,7 @@ TEST(SchemaRegistryManager, OpsetRegTest) { // Note that "Op5" has SinceVersion equal to 1, but a V1 operator set was already registered // without this operator. This would normally be invalid, and the registry with the missing // operator could trigger the operator lookup to fail. Version 1 is a special case to allow - // for experimental operators, and is accomplished by not reducing the targetted version to + // for experimental operators, and is accomplished by not reducing the targeted version to // zero in OnnxRuntimeOpSchemaRegistry::GetSchemaAndHistory. // TODO - Consider making the registration algorithm robust to this invalid usage in general ASSERT_TRUE(manager.GetSchema("Op5", 5, "Domain1")->since_version() == 1); diff --git a/onnxruntime/test/mlas/unittest/test_fgemm_fixture.h b/onnxruntime/test/mlas/unittest/test_fgemm_fixture.h index 05c6a0098eecb..53b3edafdf84f 100644 --- a/onnxruntime/test/mlas/unittest/test_fgemm_fixture.h +++ b/onnxruntime/test/mlas/unittest/test_fgemm_fixture.h @@ -9,7 +9,7 @@ #include // -// Short Execute() test helper to register each test seperately by all parameters. +// Short Execute() test helper to register each test separately by all parameters. // template class FgemmShortExecuteTest : public MlasTestFixture> { diff --git a/onnxruntime/test/mlas/unittest/test_halfgemm.cpp b/onnxruntime/test/mlas/unittest/test_halfgemm.cpp index 2a478675d09eb..aafdcc14c0028 100644 --- a/onnxruntime/test/mlas/unittest/test_halfgemm.cpp +++ b/onnxruntime/test/mlas/unittest/test_halfgemm.cpp @@ -17,7 +17,7 @@ Module Name: #include "test_halfgemm.h" // -// Short Execute() test helper to register each test seperately by all parameters. +// Short Execute() test helper to register each test separately by all parameters. 
// template class HalfGemmShortExecuteTest : public MlasTestFixture> { diff --git a/onnxruntime/test/mlas/unittest/test_pool2d_fixture.h b/onnxruntime/test/mlas/unittest/test_pool2d_fixture.h index 2ede8c3f0ab11..cb748bbaccce0 100644 --- a/onnxruntime/test/mlas/unittest/test_pool2d_fixture.h +++ b/onnxruntime/test/mlas/unittest/test_pool2d_fixture.h @@ -7,7 +7,7 @@ #include "test_pool2d.h" // -// Short Execute() test helper to register each test seperately by all parameters. +// Short Execute() test helper to register each test separately by all parameters. // template class Pooling2dShortExecuteTest : public MlasTestFixture { diff --git a/onnxruntime/test/mlas/unittest/test_pool3d_fixture.h b/onnxruntime/test/mlas/unittest/test_pool3d_fixture.h index 00f95bb00b9ae..e3d2aebc39cec 100644 --- a/onnxruntime/test/mlas/unittest/test_pool3d_fixture.h +++ b/onnxruntime/test/mlas/unittest/test_pool3d_fixture.h @@ -7,7 +7,7 @@ #include "test_pool3d.h" // -// Short Execute() test helper to register each test seperately by all parameters. +// Short Execute() test helper to register each test separately by all parameters. // template class Pooling3dShortExecuteTest : public MlasTestFixture> { diff --git a/onnxruntime/test/mlas/unittest/test_qgemm_fixture.h b/onnxruntime/test/mlas/unittest/test_qgemm_fixture.h index b2657fbde9afa..40f688a16ecca 100644 --- a/onnxruntime/test/mlas/unittest/test_qgemm_fixture.h +++ b/onnxruntime/test/mlas/unittest/test_qgemm_fixture.h @@ -7,7 +7,7 @@ #include "test_qgemm.h" // -// Short Execute() test helper to register each test seperately by all parameters. +// Short Execute() test helper to register each test separately by all parameters. // template class QgemmShortExecuteTest; diff --git a/onnxruntime/test/mlas/unittest/test_sbgemm.cpp b/onnxruntime/test/mlas/unittest/test_sbgemm.cpp index 941de8f05061f..f85fe97776dc1 100644 --- a/onnxruntime/test/mlas/unittest/test_sbgemm.cpp +++ b/onnxruntime/test/mlas/unittest/test_sbgemm.cpp @@ -20,7 +20,7 @@ Module Name: #include "test_sbgemm.h" // -// Short Execute() test helper to register each test seperately by all parameters. +// Short Execute() test helper to register each test separately by all parameters. // template class SBGemmShortExecuteTest : public MlasTestFixture> { @@ -76,7 +76,7 @@ class SBGemmShortExecuteTest : public MlasTestFixture class SymmQgemmShortExecuteTest; diff --git a/onnxruntime/test/optimizer/transpose_optimizer_test.cc b/onnxruntime/test/optimizer/transpose_optimizer_test.cc index ea2823916798e..5ecbf4967b044 100644 --- a/onnxruntime/test/optimizer/transpose_optimizer_test.cc +++ b/onnxruntime/test/optimizer/transpose_optimizer_test.cc @@ -4883,7 +4883,7 @@ static void CheckSharedInitializerHandling(bool broadcast) { // test we re-use a modified shared initializer wherever possible. model has one initializer that is used by 3 DQ nodes // and one initializer that is used by 2 Add nodes. both cases should be handled with the initializer being -// modified in-place for the first usage, and the Transpose added to the second usage being cancelled out when the +// modified in-place for the first usage, and the Transpose added to the second usage being canceled out when the // original Transpose at the start of the model is pushed down. 
TEST(TransposeOptimizerTests, SharedInitializerHandling) { CheckSharedInitializerHandling(/*broadcast*/ false); @@ -4899,7 +4899,7 @@ TEST(TransposeOptimizerTests, SharedInitializerHandlingBroadcast) { } // Unit test where EstimateTransposeValueCost must look past a DQ -> Squeeze to see the Transponse of a shared -// initializer for the overall cost of pushing the Transpose throught the second Where to be negative. +// initializer for the overall cost of pushing the Transpose through the second Where to be negative. TEST(TransposeOptimizerTests, SharedInitializerHandlingBroadcast2) { auto model_uri = ORT_TSTR("testdata/transpose_optimizer_shared_initializers_broadcast2.onnx"); diff --git a/onnxruntime/test/perftest/ReadMe.txt b/onnxruntime/test/perftest/ReadMe.txt index 4142beefbd034..9c0dbf5d673e7 100644 --- a/onnxruntime/test/perftest/ReadMe.txt +++ b/onnxruntime/test/perftest/ReadMe.txt @@ -10,7 +10,7 @@ Options: -h: help Model path and input data dependency: - Performance test uses the same input structure as onnx_test_runner. It requrires the direcotry trees as below: + Performance test uses the same input structure as onnx_test_runner. It requires the directory trees as below: --ModelName --test_data_set_0 diff --git a/onnxruntime/test/perftest/ort_test_session.cc b/onnxruntime/test/perftest/ort_test_session.cc index 92d732fba2a0a..0e4f0d0cad3f4 100644 --- a/onnxruntime/test/perftest/ort_test_session.cc +++ b/onnxruntime/test/perftest/ort_test_session.cc @@ -941,7 +941,7 @@ bool OnnxRuntimeTestSession::PopulateGeneratedInputTestData(int32_t seed) { auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); std::vector input_node_dim = tensor_info.GetShape(); - // free dimensions are treated as 1 if not overriden + // free dimensions are treated as 1 if not overridden for (int64_t& dim : input_node_dim) { if (dim == -1) { dim = 1; diff --git a/onnxruntime/test/platform/android/cxa_demangle_test.cc b/onnxruntime/test/platform/android/cxa_demangle_test.cc index 47f149c4d3a22..dbb050ce623f4 100644 --- a/onnxruntime/test/platform/android/cxa_demangle_test.cc +++ b/onnxruntime/test/platform/android/cxa_demangle_test.cc @@ -27,7 +27,7 @@ TEST(DummyCxaDemangleTest, Alloc) { ASSERT_STREQ(output_buffer, input); std::free(output_buffer); - // verify status can be omited + // verify status can be omitted char* output_buffer2 = __cxa_demangle(input, nullptr, nullptr, nullptr); ASSERT_STREQ(output_buffer2, input); std::free(output_buffer2); diff --git a/onnxruntime/test/providers/cpu/controlflow/scan_test.cc b/onnxruntime/test/providers/cpu/controlflow/scan_test.cc index e5f3956438b7a..6bf2fc63ab165 100644 --- a/onnxruntime/test/providers/cpu/controlflow/scan_test.cc +++ b/onnxruntime/test/providers/cpu/controlflow/scan_test.cc @@ -155,7 +155,7 @@ static common::Status CreateSubgraph(Graph& graph, RunOptions& options, const st graph.AddNode("add", "Add", "Add 1 to the loop state", inputs, outputs); } - // subgraph with multiple inputs and outputs to test variadic behaviour. + // subgraph with multiple inputs and outputs to test variadic behavior.
// 2 inputs of 2 that are concatenated and then split into 4 outputs of 1 // Concat node diff --git a/onnxruntime/test/providers/cpu/rnn/deep_cpu_lstm_op_test.cc b/onnxruntime/test/providers/cpu/rnn/deep_cpu_lstm_op_test.cc index e73a1b492cc05..3b7e93b8f7668 100644 --- a/onnxruntime/test/providers/cpu/rnn/deep_cpu_lstm_op_test.cc +++ b/onnxruntime/test/providers/cpu/rnn/deep_cpu_lstm_op_test.cc @@ -284,7 +284,7 @@ TEST(LSTMTest, MixedSequenceLengths) { } // we don't have numpy output for this, but by testing twice and swapping which batch is smaller - // we can largely verify the behaviour by comparing to ForwardSimpleWeightsNoBiasTwoRows output. + // we can largely verify the behavior by comparing to ForwardSimpleWeightsNoBiasTwoRows output. std::vector seq_lengths{1, 2}; std::vector Y_data{ @@ -333,7 +333,7 @@ TEST(LSTMTest, MixedSequenceLengthsReverse) { } // we don't have numpy output for this, but by testing twice and swapping which batch is smaller - // we can largely verify the behaviour by comparing to ReverseSimpleWeightsNoBiasTwoRows output. + // we can largely verify the behavior by comparing to ReverseSimpleWeightsNoBiasTwoRows output. std::vector seq_lengths{1, 2}; std::vector Y_data{ diff --git a/onnxruntime/test/providers/cuda/test_cases/allocator_cuda_test.cc b/onnxruntime/test/providers/cuda/test_cases/allocator_cuda_test.cc index 27a0696acb599..b413d04fe81e8 100644 --- a/onnxruntime/test/providers/cuda/test_cases/allocator_cuda_test.cc +++ b/onnxruntime/test/providers/cuda/test_cases/allocator_cuda_test.cc @@ -14,7 +14,7 @@ namespace test { TEST(AllocatorTest, CUDAAllocatorTest) { OrtDevice::DeviceId cuda_device_id = 0; - // ensure CUDA device is avaliable. + // ensure CUDA device is available. CUDA_CALL_THROW(cudaSetDevice(cuda_device_id)); AllocatorCreationInfo default_memory_info( diff --git a/onnxruntime/test/providers/dnnl/transformer/matmul_post_op_transform_test.cc b/onnxruntime/test/providers/dnnl/transformer/matmul_post_op_transform_test.cc index a13fa91366aaf..1274efedbeb61 100644 --- a/onnxruntime/test/providers/dnnl/transformer/matmul_post_op_transform_test.cc +++ b/onnxruntime/test/providers/dnnl/transformer/matmul_post_op_transform_test.cc @@ -14,10 +14,10 @@ * The tests validate that if a fusion occures the expected output matches * the output of each graph if they had not be done separatly. * - * Unfortantly there is no hook to actually check that the fussion occured + * Unfortunately there is no hook to actually check that the fusion occurred * other than inspecting debug logs. * - * The 8 tests use patterns that we have seen in actual models durring testing. + * The 8 tests use patterns that we have seen in actual models during testing. * Other tests validate that non-associative ops work as expected. We are able * to fuse the output of matmul divided by another value but we can not fuse * the a value divided by the output of matmul. Similar with Subtraction. @@ -673,7 +673,7 @@ TEST(DnnlMatMulFusion, matmul_div_sub_1) { // in the matmul post op fusion to check that the 32 post op // limit is not exceded.
// to do this we just run the matmul->[add->mul->sub-div] 9 times -// input params are shared accross multiple ops +// input params are shared across multiple ops class Dnnl_matmul_36_post_ops_PostOpTester : public OpTester { public: explicit Dnnl_matmul_36_post_ops_PostOpTester(int opset_version = 7) diff --git a/onnxruntime/test/providers/internal_testing/internal_testing_partitioning_tests.cc b/onnxruntime/test/providers/internal_testing/internal_testing_partitioning_tests.cc index 8cf7efe14b1c9..d58db5178032d 100644 --- a/onnxruntime/test/providers/internal_testing/internal_testing_partitioning_tests.cc +++ b/onnxruntime/test/providers/internal_testing/internal_testing_partitioning_tests.cc @@ -83,7 +83,7 @@ TEST(InternalTestingEP, TestSortResultsInSinglePartition) { } // mode has Resize op with optional input roi which is just a placeholder. -// partition funtion should skip the placeholder inputs. +// partition function should skip the placeholder inputs. TEST(InternalTestingEP, TestResizeWithOptionalInput) { // Resize op has optional input roi which is just a placeholder const ORTCHAR_T* model_path = ORT_TSTR("testdata/model_resize_empty_optional_input.onnx"); diff --git a/onnxruntime/test/providers/qnn/qnn_ep_context_test.cc b/onnxruntime/test/providers/qnn/qnn_ep_context_test.cc index 012845f5eb161..a3768cb98f584 100644 --- a/onnxruntime/test/providers/qnn/qnn_ep_context_test.cc +++ b/onnxruntime/test/providers/qnn/qnn_ep_context_test.cc @@ -654,7 +654,7 @@ TEST_F(QnnHTPBackendTests, QnnContextBinary2InputsTest) { // Context binary only contains a single QNN graph, generated context cache model (detached mode) only has 1 EPContext node // Create another Onnx model which also reference to the bin file, // but the node name is not same with the QNN graph name inside the bin file. -// This is to support backward compitable for the models generated before the PR that +// This is to support backward compatible for the models generated before the PR that // make context generation support multi-partition TEST_F(QnnHTPBackendTests, QnnContextBinaryCache_SingleNodeNameNotMatchGraphNameInCtx) { ProviderOptions provider_options; @@ -732,7 +732,7 @@ TEST_F(QnnHTPBackendTests, QnnContextBinaryCache_SingleNodeNameNotMatchGraphName ASSERT_EQ(std::remove(context_bin.string().c_str()), 0); } -// Model has 2 EPContext nodes, both with main_context=1 and embeded context binary +// Model has 2 EPContext nodes, both with main_context=1 and embedded context binary TEST_F(QnnHTPBackendTests, QnnMultiContextEmbeded) { ProviderOptions provider_options; #if defined(_WIN32) diff --git a/onnxruntime/test/python/onnxruntime_test_python.py b/onnxruntime/test/python/onnxruntime_test_python.py index 892e7de8bb6ed..32eac6f7638c1 100644 --- a/onnxruntime/test/python/onnxruntime_test_python.py +++ b/onnxruntime/test/python/onnxruntime_test_python.py @@ -1783,7 +1783,7 @@ def test_multiple_devices(self): return # https://github.com/microsoft/onnxruntime/issues/18432. 
Make sure device Id is properly set - # Scenario 1, 3 sessions created with differnt device Id under IOBinding + # Scenario 1, 3 sessions created with different device Id under IOBinding sessions = [] for i in range(3): sessions.append( diff --git a/onnxruntime/test/python/onnxruntime_test_python_mlops.py b/onnxruntime/test/python/onnxruntime_test_python_mlops.py index 6cdf820c8a0e9..8b6b029c57752 100644 --- a/onnxruntime/test/python/onnxruntime_test_python_mlops.py +++ b/onnxruntime/test/python/onnxruntime_test_python_mlops.py @@ -173,7 +173,7 @@ def test_run_model_mlnet(self): # In memory, the size of each element is fixed and equal to the # longest element. We cannot use bytes because numpy is trimming # every final 0 for strings and bytes before creating the array - # (to save space). It does not have this behaviour for void + # (to save space). It does not have this behavior for void # but as a result, numpy does not know anymore the size # of each element, they all have the same size. c1 = np.array([b"A\0A\0\0", b"B\0B\0\0", b"C\0C\0\0"], np.void).reshape(1, 3) diff --git a/onnxruntime/test/python/onnxruntime_test_python_sparse_matmul.py b/onnxruntime/test/python/onnxruntime_test_python_sparse_matmul.py index 22a09ef565d59..fe64aac54951b 100644 --- a/onnxruntime/test/python/onnxruntime_test_python_sparse_matmul.py +++ b/onnxruntime/test/python/onnxruntime_test_python_sparse_matmul.py @@ -54,7 +54,7 @@ def test_run_sparse_output_only(self): def test_run_contrib_sparse_mat_mul(self): """ - Mutliple sparse COO tensor to dense + Multiple sparse COO tensor to dense """ common_shape = [9, 9] # inputs and oputputs same shape A_values = np.array( # noqa: N806 diff --git a/onnxruntime/test/python/quantization/test_quantize_static_resnet.py b/onnxruntime/test/python/quantization/test_quantize_static_resnet.py index 1efa283af6881..d105f647c813b 100644 --- a/onnxruntime/test/python/quantization/test_quantize_static_resnet.py +++ b/onnxruntime/test/python/quantization/test_quantize_static_resnet.py @@ -87,7 +87,7 @@ def test_quantize_static_resnet(self): # * uint8([128, 128, ..., 127, ...]) if per_channel is True # QLinearConv : zero point of per-channel filter must be same. # That's why the quantization forces a symmetric quantization into INT8. - # zero_point is guaranted to be zero whatever the channel is. + # zero_point is guaranteed to be zero whatever the channel is. with open(qdq_file, "rb") as f: onx = onnx.load(f) diff --git a/onnxruntime/test/python/transformers/test_generation.py b/onnxruntime/test/python/transformers/test_generation.py index 33ec1bd7728fe..88f870e92d558 100644 --- a/onnxruntime/test/python/transformers/test_generation.py +++ b/onnxruntime/test/python/transformers/test_generation.py @@ -47,7 +47,7 @@ def setUp(self): "Test best way to invest", # "The AI community building the future", # "The selloff in tech shares deepened", - # "Abortion rights take centre stage", + # "Abortion rights take center stage", ] self.enable_cuda = torch.cuda.is_available() and "CUDAExecutionProvider" in get_available_providers() self.remove_onnx_files() diff --git a/onnxruntime/test/shared_lib/test_inference.cc b/onnxruntime/test/shared_lib/test_inference.cc index eacd41e6b9c6d..52491a179c2ce 100644 --- a/onnxruntime/test/shared_lib/test_inference.cc +++ b/onnxruntime/test/shared_lib/test_inference.cc @@ -1620,7 +1620,7 @@ TEST(CApiTest, test_custom_op_openvino_wrapper_library) { // It has memory leak. 
The OrtCustomOpDomain created in custom_op_library.cc:RegisterCustomOps function was not freed #if defined(__ANDROID__) TEST(CApiTest, DISABLED_test_custom_op_library) { -// To accomodate a reduced op build pipeline +// To accommodate a reduced op build pipeline #elif defined(REDUCED_OPS_BUILD) && defined(USE_CUDA) TEST(CApiTest, DISABLED_test_custom_op_library) { #else @@ -1674,7 +1674,7 @@ TestInference(*ort_env, CUSTOM_OP_LIBRARY_TEST_MODEL_URI, inputs, "outp // Has memory leak #if defined(__ANDROID__) || defined(ABSL_HAVE_ADDRESS_SANITIZER) TEST(CApiTest, DISABLED_test_custom_op_shape_infer_attr) { -// To accomodate a reduced op build pipeline +// To accommodate a reduced op build pipeline #elif defined(REDUCED_OPS_BUILD) && defined(USE_CUDA) TEST(CApiTest, DISABLED_test_custom_op_shape_infer_attr) { #else @@ -1705,7 +1705,7 @@ TEST(CApiTest, test_custom_op_shape_infer_attr) { // It has memory leak. The OrtCustomOpDomain created in custom_op_library.cc:RegisterCustomOps function was not freed #if defined(__ANDROID__) TEST(CApiTest, test_custom_op_library_copy_variadic) { -// To accomodate a reduced op build pipeline +// To accommodate a reduced op build pipeline #elif defined(REDUCED_OPS_BUILD) && defined(USE_CUDA) TEST(CApiTest, test_custom_op_library_copy_variadic) { #else diff --git a/onnxruntime/test/testdata/transform/model_creation_for_testing.ipynb b/onnxruntime/test/testdata/transform/model_creation_for_testing.ipynb index f8af2d8a9f6e8..e6118e3b53b1d 100644 --- a/onnxruntime/test/testdata/transform/model_creation_for_testing.ipynb +++ b/onnxruntime/test/testdata/transform/model_creation_for_testing.ipynb @@ -309,7 +309,7 @@ " helper.make_node('Slice', ['E', 'startsE', 'endsE', 'axesE', 'stepsE'], ['F']),\n", " # Will be removed.\n", " helper.make_node('Slice', ['F', 'startsF', 'endsF', 'axesF'], ['G']),\n", - " # Will not be removed because of endsG appearing in graph inputs (can be overriden).\n", + " # Will not be removed because of endsG appearing in graph inputs (can be overridden).\n", " helper.make_node('Slice', ['G', 'startsG', 'endsG'], ['H']),\n", " helper.make_node('Max', ['H'], ['I']),\n", " # Will not be removed because node output participates in graph output.\n", diff --git a/onnxruntime/test/testdata/transform/model_parallel/self_attention_megatron_basic_test.py b/onnxruntime/test/testdata/transform/model_parallel/self_attention_megatron_basic_test.py index c57024538f5b2..306ad7d37403a 100644 --- a/onnxruntime/test/testdata/transform/model_parallel/self_attention_megatron_basic_test.py +++ b/onnxruntime/test/testdata/transform/model_parallel/self_attention_megatron_basic_test.py @@ -7,7 +7,7 @@ hidden_per_attention = 2 # Self-attention. -# Handle self-attension. +# Handle self-attention. # MatMul->Add->Split->Reshape->Transpose->MatMul->Div->Mul->Sub->Softmax->Dropout->MatMul->Transpose->Reshape->MatMul->Add # |->Reshape->Transpose->| | # |->Reshape->Transpose------------------------------------------>| diff --git a/onnxruntime/test/testdata/transpose_optimizer_shared_initializers.py b/onnxruntime/test/testdata/transpose_optimizer_shared_initializers.py index d710c796fb0ad..293c5aafe7f0c 100644 --- a/onnxruntime/test/testdata/transpose_optimizer_shared_initializers.py +++ b/onnxruntime/test/testdata/transpose_optimizer_shared_initializers.py @@ -59,7 +59,7 @@ def create_model_with_Where(): # noqa 'Where' is the operator name initializer and other usage. We need to use Where as we require more than 2 inputs. 
The `condition` input will be having a Transpose pushed through it will have a negative cost. The `X` input will have a positive cost which cancels out the negative value. - The `Y` input will be a shared initializer that is braodcast. If we don't find the Transpose to make the cost of it + The `Y` input will be a shared initializer that is broadcast. If we don't find the Transpose to make the cost of it negative we will not push the Transpose though. If we only have 2 inputs, the broadcast initializer will always cost less due to its smaller rank, meaning we don't diff --git a/onnxruntime/wasm/api.h b/onnxruntime/wasm/api.h index 2cd1515d191c8..0730559c4375b 100644 --- a/onnxruntime/wasm/api.h +++ b/onnxruntime/wasm/api.h @@ -3,7 +3,7 @@ // NOTE: This file contains declarations of exported functions as WebAssembly API. // Unlike a normal C-API, the purpose of this API is to make emcc to generate correct exports for the WebAssembly. The -// macro "EMSCRIPTEN_KEEPALIVE" helps the compiler to mark the function as an exported funtion of the WebAssembly +// macro "EMSCRIPTEN_KEEPALIVE" helps the compiler to mark the function as an exported function of the WebAssembly // module. Users are expected to consume those functions from JavaScript side. #pragma once diff --git a/orttraining/orttraining/core/framework/adasum/adasum_interface.h b/orttraining/orttraining/core/framework/adasum/adasum_interface.h index e872da78fdcf5..d7dc62336421c 100644 --- a/orttraining/orttraining/core/framework/adasum/adasum_interface.h +++ b/orttraining/orttraining/core/framework/adasum/adasum_interface.h @@ -138,7 +138,7 @@ class AdasumInterface { // first n-1 levels are skipped. This is useful when the // communication inside the node is implemented using another // reduce-scatter algorithm, e.g. the one in NCCL, which may be - // desireable on some hardware configurations. When + // desirable on some hardware configurations. When // start_level>1, tensor_counts must be set according to the // slices owned by this rank. // communicator: the communicator to reduce with. diff --git a/orttraining/orttraining/core/framework/ortmodule_graph_builder.cc b/orttraining/orttraining/core/framework/ortmodule_graph_builder.cc index e01456ee3d769..593a8be399bd6 100644 --- a/orttraining/orttraining/core/framework/ortmodule_graph_builder.cc +++ b/orttraining/orttraining/core/framework/ortmodule_graph_builder.cc @@ -223,7 +223,7 @@ void OrtModuleGraphBuilder::GetFrontierTensors() { for (const auto& param : graph_info_.initializer_names_to_train) { std::vector consumer_nodes = graph.GetConsumerNodes(param); // Initial support is limited to caching Cast output. This can - // be extended to accomodate more ops whose result depends only + // be extended to accommodate more ops whose result depends only // on the weight tensor which is a WIP. for (const Node* node : consumer_nodes) { if (node != nullptr && node->OpType() == "Cast") { diff --git a/orttraining/orttraining/core/framework/pipeline.cc b/orttraining/orttraining/core/framework/pipeline.cc index 3b0a63bb2a71a..3614637ca0987 100644 --- a/orttraining/orttraining/core/framework/pipeline.cc +++ b/orttraining/orttraining/core/framework/pipeline.cc @@ -193,7 +193,7 @@ std::vector PipelineScheduler::FindForwardComputeTime(const std::vector 0 && t <= forward_time.at(s - 1)) { - // Foward of the s-th stage must happen after forward of (s-1)-th stage. + // Forward of the s-th stage must happen after forward of (s-1)-th stage. // Note that forward_time[s] is the time slot of the s-th stage. 
continue; } diff --git a/orttraining/orttraining/core/graph/mixed_precision_transformer.cc b/orttraining/orttraining/core/graph/mixed_precision_transformer.cc index 1bed983cde64d..a4143e7c817fd 100644 --- a/orttraining/orttraining/core/graph/mixed_precision_transformer.cc +++ b/orttraining/orttraining/core/graph/mixed_precision_transformer.cc @@ -46,7 +46,7 @@ static const std::unordered_map> stage1_fp32_node_ }; // Currently the list here is same as stage1 above due to empty FP32_Nodes. -// It's possibile we will have more FP32 nodes added, this map will also be extended. +// It's possible we will have more FP32 nodes added, this map will also be extended. static const std::unordered_map> stage2_fp32_node_args = { {"Dropout", {1}}, {"DropoutGrad", {2}}, diff --git a/orttraining/orttraining/core/graph/optimizer_graph_builder.h b/orttraining/orttraining/core/graph/optimizer_graph_builder.h index b79bde28c0d9c..d33902379cb5e 100644 --- a/orttraining/orttraining/core/graph/optimizer_graph_builder.h +++ b/orttraining/orttraining/core/graph/optimizer_graph_builder.h @@ -125,7 +125,7 @@ class OptimizerGraphBuilder { GraphAugmenter::GraphDefs& graph_defs, std::unordered_map>& weight_to_opt_mapping); - // This function can be overriden by child classes to have different logic + // This function can be overridden by child classes to have different logic // for building optimizers. virtual Status BuildOptimizerNode( const std::unique_ptr& opt_builder, diff --git a/orttraining/orttraining/core/graph/pipeline_transformer.cc b/orttraining/orttraining/core/graph/pipeline_transformer.cc index a58cca0acd014..f989d53aa85d5 100644 --- a/orttraining/orttraining/core/graph/pipeline_transformer.cc +++ b/orttraining/orttraining/core/graph/pipeline_transformer.cc @@ -446,7 +446,7 @@ void FindPipelineLandmarks( // // The input graph is a pipeline's stage, which contains some Send's and Recv's. // -// For diferent pipeline stages, they have different communication patterns as +// For different pipeline stages, they have different communication patterns as // shown below. // // 1. First stage: @@ -1615,7 +1615,7 @@ Status ApplyPipelinePartitionToMainGraph(Graph& graph, send_nodes, recv_nodes, stage_to_rank)); - // Take care of weights that are shared accross stages. + // Take care of weights that are shared across stages. 
ORT_RETURN_IF_ERROR(HandleSharedInitializer(graph, send_nodes, recv_nodes)); std::set visited_outputs; diff --git a/orttraining/orttraining/core/graph/training_op_defs.cc b/orttraining/orttraining/core/graph/training_op_defs.cc index 20122d378a246..2a8d2de982e79 100644 --- a/orttraining/orttraining/core/graph/training_op_defs.cc +++ b/orttraining/orttraining/core/graph/training_op_defs.cc @@ -1737,7 +1737,7 @@ void RegisterTrainingOpSchemas() { propagateShapeAndTypeFromFirstInput(ctx); }); - // TODO: Depreacate this schema when training support is udpated to opset-12 + // TODO: Deprecate this schema when training support is updated to opset-12 ONNX_CONTRIB_OPERATOR_SCHEMA(GatherND) .SetDomain(kOnnxDomain) .SinceVersion(1) @@ -1820,7 +1820,7 @@ Example 4: .Input(0, "shape", "The shape of source data input of GatherND.", "T1") .Input(1, "indices", "Tensor of rank q >= 1.", "Tind") .Input(2, "update", "The gradient of the output.", "T") - .Output(0, "output", "Tensor graident of the input.", "T") + .Output(0, "output", "Tensor gradient of the input.", "T") .TypeConstraint( "T", {"tensor(float16)", "tensor(float)", "tensor(double)", "tensor(bfloat16)"}, @@ -2493,7 +2493,7 @@ Example 4: .SetSupportLevel(OpSchema::SupportType::EXPERIMENTAL) .SetDoc( "Returns the reduction axes for computing gradients of s0 op s1 with broadcast." - "The ouput axes are deterministic from last to first. " + "The output axes are deterministic from last to first. " "Output is an empty vector when no reduction is necessary for the corresponding input.") .Input(0, "a_shape", "The 1st input shape as Tensor.", "T") .Input(1, "b_shape", "The 2nd input shape as Tensor.", "T") @@ -2530,7 +2530,7 @@ Example 4: ONNX_CONTRIB_OPERATOR_SCHEMA(GistBinarizeDecoder) .SetDomain(kMSDomain) .SinceVersion(1) - .Input(0, "X", "compresssed input", "T1") + .Input(0, "X", "compressed input", "T1") .Output(0, "Y", "uncompressed output", "T") .Attr("to", "The data type to which the elements of the input tensor are cast. " @@ -2568,7 +2568,7 @@ Example 4: ONNX_CONTRIB_OPERATOR_SCHEMA(GistPack1Decoder) .SetDomain(kMSDomain) .SinceVersion(1) - .Input(0, "X", "1 bit compresssed input", "T1") + .Input(0, "X", "1 bit compressed input", "T1") .Output(0, "Y", "uncompressed output", "T") .Attr("to", "The data type to which the elements of the input tensor are cast. " @@ -2606,7 +2606,7 @@ Example 4: ONNX_CONTRIB_OPERATOR_SCHEMA(GistPack8Decoder) .SetDomain(kMSDomain) .SinceVersion(1) - .Input(0, "X", "compresssed input", "T1") + .Input(0, "X", "compressed input", "T1") .Output(0, "Y", "uncompressed output", "T") .Attr("to", "The data type to which the elements of the input tensor are cast. " @@ -2682,7 +2682,7 @@ Example 4: ONNX_CONTRIB_OPERATOR_SCHEMA(GistPackMsfp15Decoder) .SetDomain(kMSDomain) .SinceVersion(1) - .Input(0, "X", "compresssed input", "T1") + .Input(0, "X", "compressed input", "T1") .Output(0, "Y", "uncompressed output", "T") .Attr("to", "The data type to which the elements of the input tensor are cast. " @@ -3191,7 +3191,7 @@ Return true if all elements are true and false otherwise. "Strictly must be one of the types from DataType enum in TensorProto", AttributeProto::INT) .Attr("fuse_outputs", - "If true, fuse all outputs into one continous buffer.", + "If true, fuse all outputs into one continuous buffer.", AttributeProto::INT, static_cast(0)) .TypeConstraint( @@ -3240,7 +3240,7 @@ Return true if all elements are true and false otherwise.
.Input(1, "scale", "Scale scalar tensor.", "ScaleT") .Output(0, "output", "The scaled output tensor.", "T") .Attr("scale_down", - "If true, the output tensor is input tensor devided by scale, " + "If true, the output tensor is input tensor divided by scale, " "otherwise, it's input tensor multiplied by scale. " "The default value is false.", AttributeProto::INT, @@ -3636,7 +3636,7 @@ Return true if all elements are true and false otherwise. fail_shape_inference("RecordEvent must have at least (num_outputs + 1) inputs."); // note: if num_input > num_output + 1, - // the additional inputs (idx >= num_ouput + 1) are regarded as dependencies + // the additional inputs (idx >= num_output + 1) are regarded as dependencies // which are only used for maintain topological order for (size_t i = 0; i < ctx.getNumOutputs(); ++i) { propagateElemTypeFromInputToOutput(ctx, i + 1, i); @@ -3689,7 +3689,7 @@ Return true if all elements are true and false otherwise. fail_shape_inference("WaitEvent must have at least 1 output."); // note: if num_input > num_output + 1, - // the additional inputs (idx >= num_ouput + 1) are regarded as dependencies + // the additional inputs (idx >= num_output + 1) are regarded as dependencies // which are only used for maintain topological order for (size_t i = 0; i < ctx.getNumOutputs(); ++i) { propagateElemTypeFromInputToOutput(ctx, i + 1, i); diff --git a/orttraining/orttraining/core/optimizer/graph_transformer_config.h b/orttraining/orttraining/core/optimizer/graph_transformer_config.h index c496e36689de1..a2b44689f9ef0 100644 --- a/orttraining/orttraining/core/optimizer/graph_transformer_config.h +++ b/orttraining/orttraining/core/optimizer/graph_transformer_config.h @@ -17,7 +17,7 @@ struct TrainingGraphTransformerConfiguration : public GraphTransformerConfigurat bool attn_dropout_recompute{false}; // Enable recompute of Gelu activation output to save memory bool gelu_recompute{false}; - // Enable recompute of transformer layer ouput to save memory + // Enable recompute of transformer layer output to save memory bool transformer_layer_recompute{false}; // Number of layers to apply recompute int number_recompute_layers{0}; diff --git a/orttraining/orttraining/core/session/training_session.cc b/orttraining/orttraining/core/session/training_session.cc index 1bf08fa55ca88..87a7cbc0375a4 100644 --- a/orttraining/orttraining/core/session/training_session.cc +++ b/orttraining/orttraining/core/session/training_session.cc @@ -1425,7 +1425,7 @@ std::unordered_set TrainingSession::GetTrainableModelInitializers( #if defined(USE_CUDA) && defined(ORT_USE_NCCL) && defined(USE_NCCL_P2P) // Create NCCL's communication plan. In runtime, we will provide details such -// as pointer to sent/recieved data and the size of the data in byte. See how +// as pointer to sent/received data and the size of the data in byte. See how // Send and Recv call SubmitSendAndWait and SubmitRecvAndWait, respectively. void PipelineTrainingSession::LaunchNcclService(const int pipeline_stage_id) { ORT_ENFORCE(pipeline_stage_id >= 0, "Pipeline stage ID cannot be negative."); @@ -1444,7 +1444,7 @@ void PipelineTrainingSession::LaunchNcclService(const int pipeline_stage_id) { // In this time slot, stage "pipeline_stage_id" sendss data to "task.peer_rank". nccl_service.PlanSend(task.peer_rank); } else if (task.type == pipeline::PipelineTask::Type::Recv) { - // In this time slot, stage "pipeline_stage_id" recieves data from "task.peer_rank". 
+ // In this time slot, stage "pipeline_stage_id" receives data from "task.peer_rank". nccl_service.PlanRecv(task.peer_rank); } } diff --git a/orttraining/orttraining/models/bert/main.cc b/orttraining/orttraining/models/bert/main.cc index 33d0d0346a48a..22cdd9351a206 100644 --- a/orttraining/orttraining/models/bert/main.cc +++ b/orttraining/orttraining/models/bert/main.cc @@ -204,12 +204,14 @@ Status ParseArguments(int argc, char* argv[], BertParameters& params, OrtParamet ("data_parallel_size", "Data parallel group size.", cxxopts::value()->default_value("1")) ("horizontal_parallel_size", "Horizontal model parallel group size.", cxxopts::value()->default_value("1")) ("pipeline_parallel_size", "Number of pipeline stages.", cxxopts::value()->default_value("1")) - ("pipeline_stage_paths", "Specify the forward ONNX files for pipeline evaluation.", cxxopts::value>()->default_value("")) - ("cut_group_info", "Specify the cutting info for graph partition (pipeline only). An example of a cut_group_info of " - "size two is: 1393:407-1463/1585/1707,2369:407-2439/2561/2683. Here, the cut info is split by ',', with the first " - "cut_info equal to 1393:407-1463/1585/1707, and second cut_info equal to 2369:407-2439/2561/2683. Each CutEdge is " - "seperated by ':'. If consumer nodes need to be specified, specify them after producer node with a '-' delimiter and " - "separate each consumer node with a '/'. ", cxxopts::value>()->default_value("")) + ("pipeline_stage_paths", "Specify the forward ONNX files for pipeline evaluation.", + cxxopts::value>()->default_value("")) + ("cut_group_info", "Specify the cutting info for graph partition (pipeline only). An example of a cut_group_info " + "of size two is: 1393:407-1463/1585/1707,2369:407-2439/2561/2683. Here, the cut info is split by ',', with the " + "first cut_info equal to 1393:407-1463/1585/1707, and second cut_info equal to 2369:407-2439/2561/2683. Each " + "CutEdge is separated by ':'. If consumer nodes need to be specified, specify them after producer node with a " + "'-' delimiter and separate each consumer node with a '/'. ", + cxxopts::value>()->default_value("")) ("enable_grad_norm_clip", "Specify whether to enable gradient clipping for optimizers.", cxxopts::value()->default_value("true")) ("enable_gelu_approximation", "Specify whether to enable GELU approximation.", @@ -572,7 +574,7 @@ float GetLossValue(const Tensor& loss_tensor) { // use this table mapping to define what to be stored in mapped_dimensions, and ultimately in json structure // Be mindful on the position, if it's invalid or out of bound, the property population process will be -// either incorrect or aborted. Also make sure to substract the index position by 1 to get valid correspondent value +// either incorrect or aborted. 
Also make sure to subtract the index position by 1 to get valid correspondent value // namely, in the graph, sequence is at position 1, but in initial tensor shape vector loaded from training data is at position 0, // batch is not part of the initial tensor shape vector till later // see GetTensorDimensionsFromInputs() in training_util.h and training_runner.cc for more details diff --git a/orttraining/orttraining/models/mnist/main.cc b/orttraining/orttraining/models/mnist/main.cc index a2fc6909a86a6..8aaa6b1ebf7f2 100644 --- a/orttraining/orttraining/models/mnist/main.cc +++ b/orttraining/orttraining/models/mnist/main.cc @@ -51,7 +51,8 @@ Status ParseArguments(int argc, char* argv[], MnistParameters& params) { cxxopts::value()->default_value("mnist_data")) ("log_dir", "The directory to write tensorboard events.", cxxopts::value()->default_value("")) - ("use_profiler", "Collect runtime profile data during this training run.", cxxopts::value()->default_value("false")) + ("use_profiler", "Collect runtime profile data during this training run.", + cxxopts::value()->default_value("false")) ("use_gist", "Whether to use GIST encoding/decoding.") ("gist_op", "Opearator type(s) to which GIST is applied.", cxxopts::value()->default_value("0")) ("gist_compr", "Compression type used for GIST", cxxopts::value()->default_value("GistPack8")) @@ -66,11 +67,12 @@ Status ParseArguments(int argc, char* argv[], MnistParameters& params) { ("data_parallel_size", "Data parallel group size.", cxxopts::value()->default_value("1")) ("horizontal_parallel_size", "Horizontal model parallel group size.", cxxopts::value()->default_value("1")) ("pipeline_parallel_size", "Number of pipeline stages.", cxxopts::value()->default_value("1")) - ("cut_group_info", "Specify the cutting info for graph partition (pipeline only). An example of a cut_group_info of " - "size two is: 1393:407-1463/1585/1707,2369:407-2439/2561/2683. Here, the cut info is split by ',', with the first " - "cut_info equal to 1393:407-1463/1585/1707, and second cut_info equal to 2369:407-2439/2561/2683. Each CutEdge is " - "seperated by ':'. If consumer nodes need to be specified, specify them after producer node with a '-' delimiter and " - "separate each consumer node with a '/'. ", cxxopts::value>()->default_value("")) + ("cut_group_info", "Specify the cutting info for graph partition (pipeline only). An example of a cut_group_info " + "of size two is: 1393:407-1463/1585/1707,2369:407-2439/2561/2683. Here, the cut info is split by ',', with the " + "first cut_info equal to 1393:407-1463/1585/1707, and second cut_info equal to 2369:407-2439/2561/2683. Each " + "CutEdge is separated by ':'. If consumer nodes need to be specified, specify them after producer node with a " + "'-' delimiter and separate each consumer node with a '/'. 
", + cxxopts::value>()->default_value("")) ("evaluation_period", "How many training steps to make before making an evaluation.", cxxopts::value()->default_value("1")); // clang-format on @@ -301,7 +303,7 @@ int main(int argc, char* args[]) { } if (testData->NumSamples() == 0) { - printf("Warning: No data loaded - run cancelled.\n"); + printf("Warning: No data loaded - run canceled.\n"); return -1; } diff --git a/orttraining/orttraining/models/runner/training_runner.cc b/orttraining/orttraining/models/runner/training_runner.cc index 6421f7c81f7fb..dae6f613f4329 100644 --- a/orttraining/orttraining/models/runner/training_runner.cc +++ b/orttraining/orttraining/models/runner/training_runner.cc @@ -1188,7 +1188,7 @@ Status TrainingRunner::Evaluate(TrainingSession& session, IDataLoader& data_load fetch_names, &fetches)); - // Assume that user-specified fetches are avaliable only on the last pipeline stage. + // Assume that user-specified fetches are available only on the last pipeline stage. // When there is no pipeline, all pipeline_context_.pipeline_stage_id should be 0 and // params_.pipeline_parallel_size is 1. Thus, the following condition is always true if there // is no pipeline. diff --git a/orttraining/orttraining/python/training/onnxblock/blocks.py b/orttraining/orttraining/python/training/onnxblock/blocks.py index 80f07c3738a7e..ed68171cc6f9c 100644 --- a/orttraining/orttraining/python/training/onnxblock/blocks.py +++ b/orttraining/orttraining/python/training/onnxblock/blocks.py @@ -403,12 +403,12 @@ def __init__(self, like: str): def build(self, input_name: Optional[str] = None): cloned_input = None with contextlib.suppress(LookupError): - # Supress LookupError because we want to try to get the input from the output if it's not found in the inputs + # Suppress LookupError because we want to try to get the input from the output if it's not found in the inputs cloned_input = copy.deepcopy(_graph_utils.get_input_from_input_name(self.base, self._like)) if cloned_input is None: with contextlib.suppress(LookupError): - # Supress LookupError because we deal with the case where no input or output was found later. + # Suppress LookupError because we deal with the case where no input or output was found later. 
cloned_input = copy.deepcopy(_graph_utils.get_output_from_output_name(self.base, self._like)) if cloned_input is None: diff --git a/orttraining/orttraining/python/training/ortmodule/__init__.py b/orttraining/orttraining/python/training/ortmodule/__init__.py index 20e3493395b3d..4bc470c633437 100644 --- a/orttraining/orttraining/python/training/ortmodule/__init__.py +++ b/orttraining/orttraining/python/training/ortmodule/__init__.py @@ -194,7 +194,7 @@ def export_context(): ), ) -# Initalized ORT's random seed with pytorch's initial seed +# Initialized ORT's random seed with pytorch's initial seed # in case user has set pytorch seed before importing ORTModule set_seed(torch.initial_seed() % sys.maxsize) diff --git a/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cuda/fused_ops/fused_ops_frontend.cpp b/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cuda/fused_ops/fused_ops_frontend.cpp index 19ba6b17aba02..4e9db732b5385 100644 --- a/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cuda/fused_ops/fused_ops_frontend.cpp +++ b/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cuda/fused_ops/fused_ops_frontend.cpp @@ -10,7 +10,7 @@ const size_t EMIT_NUM = 4; -// This will avoid the copies when doing implict Python list <==> C++ std::vector<> conversion. +// This will avoid the copies when doing implicit Python list <==> C++ std::vector<> conversion. PYBIND11_MAKE_OPAQUE(std::vector); // This function is adapted from microsoft/DeepSpeed fused_adam_frontend.cpp @@ -150,7 +150,7 @@ void unscale_fp16_grads_into_fp32_grads(std::vector& all_fp16_params if (idx_to_fp32_from_fp16_params.size() > 0) { auto mem_buffer = MemoryBuffer(memory_buffer_size, idx_to_fp32_from_fp16_params.begin()->second); - const size_t emit_threshhold = memory_buffer_size / EMIT_NUM; + const size_t emit_threshold = memory_buffer_size / EMIT_NUM; size_t acc_size = 0; std::vector partial_new_fp32_grads; @@ -167,7 +167,7 @@ void unscale_fp16_grads_into_fp32_grads(std::vector& all_fp16_params partial_new_fp32_grads.emplace_back(idx_to_fp32_from_fp16_params[idx].grad()); partial_fp16_grads_needing_unscale.emplace_back(fp16_grads_needing_unscale[fp32_from_fp16_param_idx]); - if (acc_size > emit_threshhold || fp32_from_fp16_param_idx == idx_to_fp32_from_fp16_params.size() - 1) { + if (acc_size > emit_threshold || fp32_from_fp16_param_idx == idx_to_fp32_from_fp16_params.size() - 1) { if (partial_fp16_grads_needing_unscale.size() > 0) { std::vector> tensor_lists; tensor_lists.emplace_back(partial_fp16_grads_needing_unscale); diff --git a/orttraining/orttraining/test/distributed/partition_utils.h b/orttraining/orttraining/test/distributed/partition_utils.h index 1369b493655b6..c22d0a3eb2f93 100644 --- a/orttraining/orttraining/test/distributed/partition_utils.h +++ b/orttraining/orttraining/test/distributed/partition_utils.h @@ -338,7 +338,7 @@ common::Status SplitGraph(Graph& graph, // but nodeA, nodeB belong to parition0, nodeC belongs to parition1, and nodeD belongs to parition2. // This means we need to cut edge nodeA->nodeC for the first partition and nodeA->nodeD for the second partition. // - // During the first cut, we identify the edge nodeA->nodeC, for this edge, based on the origional node_arg, + // During the first cut, we identify the edge nodeA->nodeC, for this edge, based on the original node_arg, // we create a new node_arg, called updated_node_arg. 
The inserted send node will take the original node_arg // as input and the inserted recv node will take the updated_node_arg as the output. // And we update updated_node_args with updated_node_args[original_node_arg] = updated_node_arg @@ -414,7 +414,7 @@ common::Status SplitGraph(Graph& graph, auto producer_node = graph.GetMutableProducerNode(id.node_arg_name); if (!producer_node) { return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Cannot find producer node of node_arg with name: ", id.node_arg_name, - ". Wrong cutting infomation."); + ". Wrong cutting information."); } // once we find out the producer node for id.node_arg_name, find which output index that leads @@ -606,7 +606,7 @@ Status CutBasedApplyPipelinePartitionToMainGraph( ORT_RETURN_IF_ERROR(GenerateSubgraph(graph, recv_nodes.back())); } - // Post check to ensure the curent partition is correct and matches with Send/Recv nodes inserted during split. + // Post check to ensure the current partition is correct and matches with Send/Recv nodes inserted during split. Node* send_node{nullptr}; Node* recv_node{nullptr}; for (auto& node : graph.Nodes()) { diff --git a/orttraining/orttraining/test/graph/bert_toy_fetches.h b/orttraining/orttraining/test/graph/bert_toy_fetches.h index 5bfc5da742cd4..71465c142f127 100644 --- a/orttraining/orttraining/test/graph/bert_toy_fetches.h +++ b/orttraining/orttraining/test/graph/bert_toy_fetches.h @@ -8,7 +8,7 @@ namespace onnxruntime { namespace test { -// Avoid this arrary being initialized on stack. +// Avoid this array being initialized on stack. // Limit the number of arguments to compile with clang. constexpr std::array bert_embeddings_position_embeddings_weight_grad = {-0.009673337, 0.015859816, -0.0060598925, 0.0061725015, 0.0686829, 0.031034196, -0.041214723, 0.04238321, -0.045230567, -0.03455956, 0.037526406, 0.019020742, -0.008562718, -0.030574083, -0.012788322, -0.0008712788, -0.041134313, 0.027024698, -0.012437805, 0.059991226, -0.026614683, -0.06257652, -0.020100333, -0.03510955, 0.05741506, 0.068152145, -0.065179504, 0.038520053, 0.019393224, 0.03954512, 0.006873767, -0.084907904, -0.0050477944, 0.0012708178, 0.0030560307, -0.032130327, -0.0144646885, -0.016298112, -0.042901997, 0.07588, 0.01613088, -0.018301323, -0.010611727, 0.005544794, -0.014955264, -0.016850606, 0.022336477, -0.0030460241, -0.014482946, 0.00859436, -0.014712406, 0.03867981, -0.022954227, 0.015440098, -0.005059921, 0.0035975706, 0.01880927, 0.062380753, 0.02279159, 0.0036130734, 0.029864375, -0.022658946, -0.0069784625, -0.06653513, -0.01116233, 0.021000436, -0.028701056, -0.024398895, 0.011476517, 0.032129377, -0.04200533, 0.05585559, 0.027091827, -0.03708192, -0.029153917, 0.014818583, -0.03863439, -0.03299714, 0.026062695, 0.027578063, -0.033457935, 0.023994414, -0.00042527216, 0.020991987, -0.043016825, 0.03330429, -0.0051043453, -0.061040144, 0.02476727, 0.07664442, -0.0109203905, 0.046167813, 0.05265824, -0.009806289, -0.032828216, -0.053807136, -0.018357445, -0.0060726395, 0.012883636, -0.03604291, -0.020931121, -0.017016709, -0.06521842, 0.09689566, 0.010757825, -0.014480298, -0.011673617, 0.014982184, -0.011422393, -0.015741495, 0.021494215, -0.013776923, -0.017716365, 0.02294489, -0.00073889084, 0.036582764, -0.013822639, 0.0075510093, -0.015371518, 0.012141101, 0.009292599, 0.0632079, 0.023068016, -0.0034772623, 0.033849746, -0.009428004, -0.0021826755, -0.07218023, -0.00040298235, 0.008162888, -0.009084097, -0.025772562, 0.01697198, 0.0096272295, -0.05384024, 0.054271728, 0.0061686123, -0.012313863, 
-0.010857888, 0.011092398, -0.017863888, -0.023245087, 0.0147367595, 0.0022649313, -0.0307159, 0.004318953, 0.0035282676, 0.026500994, -0.029873395, 0.0049419748, -0.007642911, -0.02280794, 0.016169535, 0.059451614, 0.015289053, 0.021232026, 0.042667653, -0.0034166733, -0.014750072, -0.05480911, 0.0012827339, -0.00061177486, 0.008855328, -0.014449824, -0.008173137, -0.033359475, -0.06602954, 0.074186556, -0.0031156093, 0.0009635263, -0.0151721025, 0.007254398, 0.015830085, 0.009578684, -0.0053947777, -0.020233134, -0.016644966, 0.002484738, -0.019542504, 0.026349604, -0.017563643, -0.005398605, 0.0013201954, 0.034780584, 0.007976923, 0.054721735, 0.015226502, -0.001414868, 0.030154174, 0.011785319, 0.0033271122, -0.07897424, 0.01796715, -0.00018319988, 0.006205301, -0.019297902, 0.03912447, 0.0022418862, -0.048669476, 0.031012537, -0.0155599145, -0.01757, -0.0011392199, 0.016611777, 0.008555129, -0.017760677, -0.02604977, 0.014489464, -0.041648414, -0.017570462, 0.005586198, 0.03271513, -0.04649407, -0.038035538, 2.2510882e-05, -0.006990753, 0.043797504, 0.0970251, 0.0041649155, 0.020328937, 0.058848612, -0.008414367, -0.026458042, -0.06685481}; static std::unordered_map> BERT_TOY_FETCHES = { diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py index 541473b1561db..6f5b03685e801 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py @@ -264,7 +264,7 @@ class UnusedBeginParameterNet(torch.nn.Module): def __init__(self, input_size, hidden_size1, hidden_size2, num_classes): super().__init__() - # fc1 is an unused initializer (which is in the begining of initializer list) + # fc1 is an unused initializer (which is in the beginning of initializer list) # which will be dropped after export self.fc1 = torch.nn.Linear(input_size, hidden_size1) self.relu = torch.nn.ReLU() diff --git a/orttraining/orttraining/test/python/qat_poc_example/qat.py b/orttraining/orttraining/test/python/qat_poc_example/qat.py index dcc9e116fda7d..4378118b71b9f 100644 --- a/orttraining/orttraining/test/python/qat_poc_example/qat.py +++ b/orttraining/orttraining/test/python/qat_poc_example/qat.py @@ -24,7 +24,7 @@ onnx.save(onnx_model, os.path.join(model_dir, f"{model_name}.onnx")) logging.info( - "Begining Quantization process for model saved at: %s", + "Beginning Quantization process for model saved at: %s", os.path.join(model_dir, f"{model_name}.onnx"), ) logging.info("Skipping model preprocessing step. As QAT requires a un preprocessed model.") diff --git a/orttraining/orttraining/test/training_ops/cuda/cross_entropy_test.cc b/orttraining/orttraining/test/training_ops/cuda/cross_entropy_test.cc index d36f9b307ec70..61bd9c19f3541 100644 --- a/orttraining/orttraining/test/training_ops/cuda/cross_entropy_test.cc +++ b/orttraining/orttraining/test/training_ops/cuda/cross_entropy_test.cc @@ -1036,7 +1036,7 @@ TEST(CrossEntropyTest, SoftmaxCrossEntropyLossInternalGrad_TinySizeTensorFloatIn std::vector index_dims{8}; std::vector weight_dims{2}; std::vector dX_dims{8, 2}; - // Set run_cpu_baseline_seperately = True because CPU kernel did not support multiple type support + // Set run_cpu_baseline_separately = True because CPU kernel did not support multiple type support // for input and output. 
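The `UnusedBeginParameterNet` comment above notes that `fc1` is dropped at export because nothing consumes it; a small self-contained check of that behavior is sketched below (hypothetical stand-in module and sizes, not the test's exact network):

```python
import io

import onnx
import torch


class UnusedBeginParameterNet(torch.nn.Module):
    """Hypothetical stand-in: fc1 is declared first but never used in forward()."""

    def __init__(self, input_size=4, hidden_size1=8, hidden_size2=6, num_classes=2):
        super().__init__()
        self.fc1 = torch.nn.Linear(input_size, hidden_size1)  # unused on purpose
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(input_size, hidden_size2)
        self.out = torch.nn.Linear(hidden_size2, num_classes)

    def forward(self, x):
        return self.out(self.relu(self.fc2(x)))


buf = io.BytesIO()
torch.onnx.export(UnusedBeginParameterNet(), torch.randn(1, 4), buf)
model = onnx.load_model_from_string(buf.getvalue())

# fc1.weight / fc1.bias should be absent: tracing only records ops that run,
# so initializers no node consumes are not exported.
print([init.name for init in model.graph.initializer])
```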
TestSoftmaxCrossEntropyLossInternalGrad(dY_dims, log_prob_dims, index_dims, weight_dims, dX_dims, "mean", -1, 5e-2, false /*has_bias*/); diff --git a/orttraining/orttraining/training_api/optimizer.cc b/orttraining/orttraining/training_api/optimizer.cc index 4647f890729f4..e42752b3a2d55 100644 --- a/orttraining/orttraining/training_api/optimizer.cc +++ b/orttraining/orttraining/training_api/optimizer.cc @@ -205,7 +205,7 @@ Optimizer::Optimizer(const ModelIdentifiers& model_identifiers, // by invoking ConstructOptimizerStateAndInputs(). ORT_THROW_IF_ERROR(ConstructOptimizerStateAndInputs()); } else { - delay_optimizer_state_contruction_ = true; + delay_optimizer_state_construction_ = true; } } else { ORT_THROW_IF_ERROR(LoadStateDict(state_->optimizer_checkpoint_state)); @@ -256,7 +256,7 @@ void Optimizer::Initialize(const ModelIdentifiers& model_identifiers, } Status Optimizer::Step() { - if (delay_optimizer_state_contruction_) { + if (delay_optimizer_state_construction_) { ORT_RETURN_IF_ERROR(ConstructOptimizerStateAndInputs()); } @@ -343,7 +343,7 @@ Status Optimizer::ConstructOptimizerStateAndInputs() { ORT_RETURN_IF_ERROR(GenerateMomentumNamedStates(state_->optimizer_checkpoint_state)); ORT_RETURN_IF_ERROR(ConstructInputs()); - delay_optimizer_state_contruction_ = false; + delay_optimizer_state_construction_ = false; return Status::OK(); } diff --git a/orttraining/orttraining/training_api/optimizer.h b/orttraining/orttraining/training_api/optimizer.h index 5b908acf7c9e3..a0717563a8bd0 100644 --- a/orttraining/orttraining/training_api/optimizer.h +++ b/orttraining/orttraining/training_api/optimizer.h @@ -166,7 +166,7 @@ struct Optimizer { int32_t group_count_{0}; - bool delay_optimizer_state_contruction_{false}; + bool delay_optimizer_state_construction_{false}; }; } // namespace api diff --git a/orttraining/orttraining/training_ops/cpu/activation/activations_grad.cc b/orttraining/orttraining/training_ops/cpu/activation/activations_grad.cc index d3f2f9c7a8767..40497467a31a5 100644 --- a/orttraining/orttraining/training_ops/cpu/activation/activations_grad.cc +++ b/orttraining/orttraining/training_ops/cpu/activation/activations_grad.cc @@ -82,7 +82,7 @@ Status ComputeGeluGradDX(gsl::span dY, gsl::span X, gsl::span< static constexpr T kBeta = static_cast(kGamma * kAlpha * 3.0f); // - // Commented out EIGEN implentation due to EIGEN bug. + // Commented out EIGEN implementation due to EIGEN bug. // On Windows Release build with GPU enabled, kAlpha * EIGEN_X below would produce pure 0 // result, even though neither kAlpha nor EIGEN_X is zero. // Given that CPU kernel is mostly for conformance check, where performance is not of high diff --git a/orttraining/orttraining/training_ops/cuda/math/div_grad_impl.cu b/orttraining/orttraining/training_ops/cuda/math/div_grad_impl.cu index 56520337fe683..a468c756ef74d 100644 --- a/orttraining/orttraining/training_ops/cuda/math/div_grad_impl.cu +++ b/orttraining/orttraining/training_ops/cuda/math/div_grad_impl.cu @@ -10,7 +10,7 @@ namespace onnxruntime { namespace cuda { -// for now this operator classes are no different than a funciton. +// for now this operator classes are no different than a function. // Eventually once multiple binary gradient ops are needed, we will pass // its instance from API instead of direct function call. 
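The `delay_optimizer_state_construction_` flag renamed above implements a lazy-initialization pattern: state construction is deferred until the first `Step()` call. A minimal sketch of the idea (hypothetical Python class, not the ORT training API):

```python
class LazyStateOptimizer:
    """Hypothetical sketch: optimizer state is built on the first step() call
    when it could not be constructed eagerly in __init__."""

    def __init__(self, params, state_available: bool):
        self._params = list(params)
        self._state = None
        self._delay_state_construction = not state_available
        if state_available:
            self._construct_state()

    def _construct_state(self):
        # e.g. allocate momentum buffers once parameter information is known
        self._state = {i: {"m": 0.0, "v": 0.0} for i, _ in enumerate(self._params)}
        self._delay_state_construction = False

    def step(self):
        if self._delay_state_construction:
            self._construct_state()
        # ... apply the parameter update using self._state ...
```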
template diff --git a/orttraining/orttraining/training_ops/cuda/optimizer/lamb.cc b/orttraining/orttraining/training_ops/cuda/optimizer/lamb.cc index 501c48e687e98..1152c98447444 100644 --- a/orttraining/orttraining/training_ops/cuda/optimizer/lamb.cc +++ b/orttraining/orttraining/training_ops/cuda/optimizer/lamb.cc @@ -582,7 +582,7 @@ Status LambOptimizer::Compute // Allocate a buffer in byte for reduction API calls. size_t rbs = compute_reduction_buffer_size(max_tensor_size); - // Enlarge reduction buffer to accomodate multi-tensor reduction kernel as well + // Enlarge reduction buffer to accommodate multi-tensor reduction kernel as well constexpr int tensor_group_size = 4; // w, d, w_norm, d_norm constexpr int max_blocks = ChunkGroup::max_block_count; constexpr size_t multitensor_block_reduce_buffer_size = 2 * max_blocks * sizeof(CudaT2); diff --git a/orttraining/orttraining/training_ops/cuda/optimizer/lamb_impl.cu b/orttraining/orttraining/training_ops/cuda/optimizer/lamb_impl.cu index fd55f7c30ff75..f59f5f7dc9c33 100644 --- a/orttraining/orttraining/training_ops/cuda/optimizer/lamb_impl.cu +++ b/orttraining/orttraining/training_ops/cuda/optimizer/lamb_impl.cu @@ -192,7 +192,7 @@ __device__ __forceinline__ void _LambUpdateRule( T2* w_new, T3* g_new, T_MIXED_PRECISION_FP* w_mixed_precision_new) { - // Confidence coefficeint of this update. + // Confidence coefficient of this update. const T2 ratio = (w_norm != T2(0.0f) && r_norm != T2(0.0f)) ? T2(eta) * _Max(T2(ratio_min), _Min(T2(ratio_max), _Sqrt(w_norm / r_norm))) : T2(eta); // Compute delta using the saved update direction. diff --git a/orttraining/tools/scripts/layer_norm_transform.py b/orttraining/tools/scripts/layer_norm_transform.py index b397d1d26a456..bc6fe0eaf8b29 100644 --- a/orttraining/tools/scripts/layer_norm_transform.py +++ b/orttraining/tools/scripts/layer_norm_transform.py @@ -164,7 +164,7 @@ def main(): vocab_size = 30528 # Create a fake data point. - vocab_size = 30528 # It shoudl match the value from BERT config file. + vocab_size = 30528 # It should match the value from BERT config file. input_ids = np.random.randint(low=0, high=vocab_size, size=(batch, sq_length), dtype=np.int64) segment_ids = np.random.randint(low=0, high=2, size=(batch, sq_length), dtype=np.int64) input_mask = np.ones((batch, sq_length), dtype=np.int64) diff --git a/orttraining/tools/scripts/model_transform.py b/orttraining/tools/scripts/model_transform.py index f0cf53990eac3..2fb1936ff2184 100644 --- a/orttraining/tools/scripts/model_transform.py +++ b/orttraining/tools/scripts/model_transform.py @@ -269,7 +269,7 @@ def process_dropout(model): del model.graph.node[d] -# Also need to set following line differently for differnt verison of bert +# Also need to set following line differently for different version of bert # expand_out.name = '412' def add_expand_shape(model): expand_out = model.graph.value_info.add() diff --git a/orttraining/tools/scripts/opset12_model_transform.py b/orttraining/tools/scripts/opset12_model_transform.py index e8c2263a39c32..790bdc34e1ff7 100644 --- a/orttraining/tools/scripts/opset12_model_transform.py +++ b/orttraining/tools/scripts/opset12_model_transform.py @@ -2,7 +2,7 @@ # Licensed under the MIT License. 
# # This converter is an internal util to upgrade existing bert/gpt-2 models, -# which were previously transformed/optimized from orginal model, to Opset 12 +# which were previously transformed/optimized from original model, to Opset 12 # version as well as replacing deprecated node, i.e., TrainableDropout with # the "Dropout" node matching the Opset 12 Spec. Typically, a model to # be run by this scripts would have "_optimized" substring in its model name, diff --git a/rust/onnxruntime-sys/examples/c_api_sample.rs b/rust/onnxruntime-sys/examples/c_api_sample.rs index e8c9ca8f09a5a..3cfb9d76029a0 100644 --- a/rust/onnxruntime-sys/examples/c_api_sample.rs +++ b/rust/onnxruntime-sys/examples/c_api_sample.rs @@ -31,8 +31,8 @@ fn main() { assert_ne!(g_ort, std::ptr::null_mut()); //************************************************************************* - // initialize enviroment...one enviroment per process - // enviroment maintains thread pools and other state info + // initialize environment...one environment per process + // environment maintains thread pools and other state info let mut env_ptr: *mut OrtEnv = std::ptr::null_mut(); let env_name = std::ffi::CString::new("test").unwrap(); let status = unsafe { diff --git a/rust/onnxruntime/src/tensor/ort_output_tensor.rs b/rust/onnxruntime/src/tensor/ort_output_tensor.rs index 006fbdba6cdb8..83663c0d303f8 100644 --- a/rust/onnxruntime/src/tensor/ort_output_tensor.rs +++ b/rust/onnxruntime/src/tensor/ort_output_tensor.rs @@ -70,7 +70,7 @@ impl Drop for OrtOutputTensor { } } -/// An Ouput tensor with the ptr and the item that will copy from the ptr. +/// An Output tensor with the ptr and the item that will copy from the ptr. #[derive(Debug)] pub struct WithOutputTensor<'a, T> { #[allow(dead_code)] diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py index 54f7b6c3a8fa7..98d9ba22b7190 100644 --- a/tools/ci_build/build.py +++ b/tools/ci_build/build.py @@ -718,7 +718,7 @@ def convert_arg_line_to_args(self, arg_line): # Code coverage parser.add_argument( - "--code_coverage", action="store_true", help="Generate code coverage when targetting Android (only)." + "--code_coverage", action="store_true", help="Generate code coverage when targeting Android (only)." ) # lazy tensor support. 
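As background for the opset-12 converter described at the top of this hunk group, the core node replacement it performs is roughly the sketch below (simplified and hypothetical: the real script also rewires the dropout ratio/training-mode inputs and updates the opset imports rather than only renaming the op):

```python
import onnx

model = onnx.load("bert_optimized.onnx")  # hypothetical input model name

for node in model.graph.node:
    if node.op_type == "TrainableDropout":
        # Deprecated training-time dropout becomes the opset-12 Dropout op.
        node.op_type = "Dropout"
        node.domain = ""  # Dropout lives in the default ONNX domain

onnx.save(model, "bert_optimized_opset12.onnx")
```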
@@ -2749,7 +2749,7 @@ def main(): cmake_extra_args += ["-D", "BUILD_AS_ARM64X=ARM64EC"] cmake_extra_args += ["-G", args.cmake_generator] # Cannot test on host build machine for cross-compiled - # builds (Override any user-defined behaviour for test if any) + # builds (Override any user-defined behavior for test if any) if args.test: log.warning( "Cannot test on host build machine for cross-compiled " diff --git a/tools/ci_build/github/azure-pipelines/web-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/web-ci-pipeline.yml index 24809ccfdec1f..036becb7df077 100644 --- a/tools/ci_build/github/azure-pipelines/web-ci-pipeline.yml +++ b/tools/ci_build/github/azure-pipelines/web-ci-pipeline.yml @@ -35,7 +35,7 @@ parameters: default: 'nightly (@dev)' variables: - # pipeline should define the following varaibles + # pipeline should define the following variables # ExtraBuildArgs # VersionSuffix diff --git a/tools/python/util/android/android.py b/tools/python/util/android/android.py index fd25d8bc147cd..dd2dcce01bf4a 100644 --- a/tools/python/util/android/android.py +++ b/tools/python/util/android/android.py @@ -30,7 +30,7 @@ def filename(name, windows_extension): sdk_root = Path(sdk_root).resolve(strict=True) return SdkToolPaths( - # do not use sdk_root/tools/emulator as that is superceeded by sdk_root/emulator/emulator + # do not use sdk_root/tools/emulator as that is superseded by sdk_root/emulator/emulator emulator=str((sdk_root / "emulator" / filename("emulator", "exe")).resolve(strict=True)), adb=str((sdk_root / "platform-tools" / filename("adb", "exe")).resolve(strict=True)), sdkmanager=str( diff --git a/winml/adapter/winml_adapter_session.cpp b/winml/adapter/winml_adapter_session.cpp index fa91978b564ba..5e27d8fb9a985 100644 --- a/winml/adapter/winml_adapter_session.cpp +++ b/winml/adapter/winml_adapter_session.cpp @@ -310,7 +310,7 @@ ORT_API_STATUS_IMPL( winrt::Windows::Foundation::Collections::IMap override_map = winrt::single_threaded_map(); for (auto freeDimOverride : session_options.free_dimension_overrides) { - if (freeDimOverride.dim_identifer_type == onnxruntime::FreeDimensionOverrideType::Name) { + if (freeDimOverride.dim_identifier_type == onnxruntime::FreeDimensionOverrideType::Name) { override_map.Insert( winrt::to_hstring(freeDimOverride.dim_identifier), static_cast(freeDimOverride.dim_value) ); diff --git a/winml/api/Microsoft.AI.MachineLearning.Experimental.idl b/winml/api/Microsoft.AI.MachineLearning.Experimental.idl index ad39a1ed7e684..3322c76f6eef2 100644 --- a/winml/api/Microsoft.AI.MachineLearning.Experimental.idl +++ b/winml/api/Microsoft.AI.MachineLearning.Experimental.idl @@ -128,7 +128,7 @@ namespace ROOT_NS.AI.MachineLearning.Experimental { Boolean CloseModelOnJoin { get; set; }; //! The JoinedNodePrefix property specifies whether the nodes of the second model should have a specific prefixed in the joined model. - //! Node names must be unique or empty. By enabling this, the engine can specifiy the prefix, or eliminate it entirely in cases + //! Node names must be unique or empty. By enabling this, the engine can specify the prefix, or eliminate it entirely in cases //! where the model is known to contain no duplicate node names. //! The default value for CloseModelOnJoin is a new random GUID. 
String JoinedNodePrefix { get; set; }; diff --git a/winml/api/Windows.AI.MachineLearning.idl b/winml/api/Windows.AI.MachineLearning.idl index 2b55fa8c7a95c..59c58ba80efca 100644 --- a/winml/api/Windows.AI.MachineLearning.idl +++ b/winml/api/Windows.AI.MachineLearning.idl @@ -9,7 +9,7 @@ import "windows.media.idl"; #ifndef WINDOWSAI_RAZZLE_BUILD // Pull in definition for DualApiPartitionAttribute, because the WinML IDL // does not build in the OS Repo, and needs to access internal definitions for -// various custom attirbute definitions. +// various custom attribute definitions. import "dualapipartitionattribute.idl"; import "windows.graphics.directx.direct3d11.idl"; import "windows.graphics.imaging.idl"; diff --git a/winml/lib/Api/LearningModelBinding.cpp b/winml/lib/Api/LearningModelBinding.cpp index 17440f6f0a561..222fdba986dcb 100644 --- a/winml/lib/Api/LearningModelBinding.cpp +++ b/winml/lib/Api/LearningModelBinding.cpp @@ -30,7 +30,7 @@ static winml::ILearningModelFeatureDescriptor FindValidBinding( uint32_t size; WINML_THROW_IF_FAILED(descriptor_native->GetName(&feature_name, &size)); - // Case insensetive comparison of onnx name in feature descriptor, and passed in name + // Case insensitive comparison of onnx name in feature descriptor, and passed in name if (_wcsicmp(feature_name, name.c_str()) == 0) { return descriptor; } diff --git a/winml/lib/Api/impl/NumericData.h b/winml/lib/Api/impl/NumericData.h index 71c61b3c29f6f..129c7cbf1f294 100644 --- a/winml/lib/Api/impl/NumericData.h +++ b/winml/lib/Api/impl/NumericData.h @@ -15,7 +15,7 @@ class numeric_data : public _winml::idata { size_t num_elements, size_t element_size_in_bytes, wfc::IIterable const& buffers ); - // Privte constructor as this type should be created as a shared_ptr + // Private constructor as this type should be created as a shared_ptr numeric_data(size_t num_elements, size_t element_size_in_bytes, wfc::IIterable const& buffers); gsl::span buffer_at(size_t index); gsl::span combined_buffer(); diff --git a/winml/test/api/LearningModelSessionAPITest.cpp b/winml/test/api/LearningModelSessionAPITest.cpp index d6e70e35e3a6d..587f3e28928ae 100644 --- a/winml/test/api/LearningModelSessionAPITest.cpp +++ b/winml/test/api/LearningModelSessionAPITest.cpp @@ -315,7 +315,7 @@ static void NamedDimensionOverride() { LearningModelDevice device(nullptr); WINML_EXPECT_NO_THROW(device = LearningModelDevice(LearningModelDeviceKind::Cpu)); - // the model input shape. the batch size, n, is overriden to 5 + // the model input shape. 
the batch size, n, is overridden to 5 uint32_t n = 5; int64_t c = 3, h = 720, w = 720; diff --git a/winml/test/common/googleTestMacros.h b/winml/test/common/googleTestMacros.h index 2f493c9b6d6b9..111abd1c3914e 100644 --- a/winml/test/common/googleTestMacros.h +++ b/winml/test/common/googleTestMacros.h @@ -64,7 +64,7 @@ #define INSTANTIATE_TEST_SUITE_P INSTANTIATE_TEST_CASE_P #endif -#define WINML_SKIP_TEST(message) WINML_SUPRESS_UNREACHABLE_BELOW(GTEST_SKIP() << message) +#define WINML_SKIP_TEST(message) WINML_SUPPRESS_UNREACHABLE_BELOW(GTEST_SKIP() << message) #define WINML_EXPECT_NO_THROW(statement) EXPECT_NO_THROW(statement) #define WINML_EXPECT_TRUE(statement) EXPECT_TRUE(statement) diff --git a/winml/test/common/taefTestMacros.h b/winml/test/common/taefTestMacros.h index 48119ff293fc8..3f6377c0a56b2 100644 --- a/winml/test/common/taefTestMacros.h +++ b/winml/test/common/taefTestMacros.h @@ -48,7 +48,7 @@ using namespace WEX::TestExecution; } #define WINML_SKIP_TEST(message) \ - WINML_SUPRESS_UNREACHABLE_BELOW( \ + WINML_SUPPRESS_UNREACHABLE_BELOW( \ Log::Result(TestResults::Skipped, std::wstring_convert>().from_bytes(message).c_str()); \ return; \ ) diff --git a/winml/test/common/test.h b/winml/test/common/test.h index f5adce2b40602..b7afa5dbb5f21 100644 --- a/winml/test/common/test.h +++ b/winml/test/common/test.h @@ -18,9 +18,9 @@ constexpr bool alwaysTrue() { constexpr bool alwaysFalse() { return false; } -#define WINML_SUPRESS_UNREACHABLE_BELOW(statement) \ - if (alwaysTrue()) { \ - statement; \ +#define WINML_SUPPRESS_UNREACHABLE_BELOW(statement) \ + if (alwaysTrue()) { \ + statement; \ } #ifdef BUILD_TAEF_TEST diff --git a/winml/test/image/imagetests.cpp b/winml/test/image/imagetests.cpp index 2251954c59e4c..b408c0315f94a 100644 --- a/winml/test/image/imagetests.cpp +++ b/winml/test/image/imagetests.cpp @@ -211,12 +211,12 @@ class ImageTests : public ::testing::Test { bool ShouldSkip( const std::wstring& model_file_name, const std::wstring& image_file_name, const InputImageSource input_image_source ) { - // Case that the tensor's shape doesn't match model's shape should be skiped + // Case that the tensor's shape doesn't match model's shape should be skipped if ((L"1080.jpg" == image_file_name || L"kitten_224.png" == image_file_name) && (InputImageSource::FromGPUResource == input_image_source || InputImageSource::FromCPUResource == input_image_source)) { return true; } - // Case that the images's shape doesn't match model's shape which expects free dimension should be skiped. + // Case that the images's shape doesn't match model's shape which expects free dimension should be skipped. // Because the fns-candy is not real model that can handle free dimensional input if ((L"1080.jpg" == image_file_name || L"kitten_224.png" == image_file_name) && L"fns-candy_Bgr8_freeDimInput.onnx" == model_file_name) { return true; diff --git a/winml/test/model/model_tests.cpp b/winml/test/model/model_tests.cpp index 27d74d7d6b034..859914014b8bb 100644 --- a/winml/test/model/model_tests.cpp +++ b/winml/test/model/model_tests.cpp @@ -170,7 +170,7 @@ std::string GetTestDataPath() { testDataPath.replace(environmentVariableFetchSuceeded, testDataPathFolderName.length(), testDataPathFolderName); } else { throw std::exception( - "WINML_TEST_DATA_PATH environment variable path needs to be shorter to accomodate the maximum path size of %d\n", + "WINML_TEST_DATA_PATH environment variable path needs to be shorter to accommodate the maximum path size of %d\n", MAX_PATH ); }
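The `NamedDimensionOverride` test above pins the named batch dimension `n` to 5 before creating the session; the equivalent pattern through the onnxruntime Python API is sketched below (hypothetical model path; assumes the model declares a symbolic batch dimension named `n`):

```python
import numpy as np
import onnxruntime as ort

so = ort.SessionOptions()
# Pin the symbolic dimension named "n" to 5, mirroring the WinML test above.
so.add_free_dimension_override_by_name("n", 5)

sess = ort.InferenceSession("model_with_named_batch_dim.onnx", so,
                            providers=["CPUExecutionProvider"])

x = np.zeros((5, 3, 720, 720), dtype=np.float32)  # n=5, c=3, h=720, w=720
outputs = sess.run(None, {sess.get_inputs()[0].name: x})
```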