From c203d89958b4b0b23ce6967decc48afdd06b7ddb Mon Sep 17 00:00:00 2001 From: Justin Chu Date: Wed, 24 Jul 2024 11:50:11 -0700 Subject: [PATCH] Update ruff and clang-format versions (#21479) ruff -> 0.5.4 clang-format -> 18 --- cgmanifests/generate_cgmanifest.py | 2 +- .../tools/ValidateNativeDelegateAttributes.py | 2 +- include/onnxruntime/core/common/exceptions.h | 6 +- .../core/framework/stream_handles.h | 2 +- include/onnxruntime/core/platform/Barrier.h | 2 +- .../platform/EigenNonBlockingThreadPool.h | 10 +- .../core/providers/custom_op_context.h | 2 +- .../experimental_onnxruntime_cxx_api.h | 6 +- .../core/session/onnxruntime_cxx_api.h | 4 +- .../core/session/onnxruntime_lite_custom_op.h | 2 +- .../contrib_ops/cpu/cpu_contrib_kernels.cc | 134 +- .../contrib_ops/cpu/crop_and_resize.cc | 2 +- .../cuda/bert/flash_attention/alibi.h | 2 +- .../cuda/bert/flash_attention/mask.h | 2 +- .../cuda/bert/flash_attention/softmax.h | 2 +- .../contrib_ops/cuda/cuda_contrib_kernels.cc | 364 +- onnxruntime/core/framework/ex_lib_loader.h | 2 +- .../core/graph/contrib_ops/contrib_defs.cc | 4 +- .../transpose_optimization/optimizer_api.h | 8 +- onnxruntime/core/platform/path_lib.h | 6 +- .../core/providers/coreml/model/model.mm | 2 +- .../providers/cpu/cpu_execution_provider.cc | 3092 ++++++++--------- .../cpu/ml/tree_ensemble_classifier.cc | 18 +- .../core/providers/cpu/ml/treeregressor.cc | 18 +- .../object_detection/non_max_suppression.cc | 4 +- .../cpu/object_detection/roialign.cc | 6 +- .../core/providers/cpu/tensor/expand.cc | 10 +- .../providers/cuda/cuda_execution_provider.cc | 1788 +++++----- onnxruntime/core/providers/cuda/cuda_graph.h | 4 +- .../core/providers/cuda/cuda_profiler.h | 2 +- .../core/providers/cuda/nn/conv_transpose.h | 2 +- .../core/providers/cuda/nvtx_profile.h | 8 +- .../providers/cuda/shared_inc/cuda_utils.h | 2 +- .../core/providers/cuda/tensor/cast_op.cc | 32 +- .../providers/dnnl/dnnl_node_capability.h | 2 +- 
.../providers/dnnl/subgraph/dnnl_subgraph.h | 2 +- onnxruntime/core/providers/js/allocator.h | 2 +- onnxruntime/core/providers/js/data_transfer.h | 4 +- .../builder/opbuilder/expand_op_builder.cc | 2 +- .../qnn/builder/opbuilder/pad_op_builder.cc | 2 +- .../qnn/builder/qnn_quant_params_wrapper.cc | 2 +- .../core/providers/rocm/rocm_profiler.h | 2 +- .../shared_library/provider_host_api.h | 4 +- .../tensorrt_execution_provider_custom_ops.h | 4 +- .../vitisai/vitisai_provider_factory.cc | 2 +- .../builders/impl/elementwise_op_builder.h | 2 +- .../vsinpu/builders/op_builder_factory.h | 7 +- .../python/onnxruntime_pybind_iobinding.cc | 9 +- .../python/onnxruntime_pybind_ortvalue.cc | 136 +- .../onnxruntime_pybind_sparse_tensor.cc | 3 +- .../python/onnxruntime_pybind_state.cc | 88 +- onnxruntime/python/onnxruntime_validation.py | 17 +- .../tools/pytorch_export_contrib_ops.py | 2 +- .../python/tools/quantization/calibrate.py | 2 +- .../python/tools/symbolic_shape_infer.py | 44 +- .../python/tools/tensorrt/perf/benchmark.py | 12 +- .../python/tools/tensorrt/perf/perf_utils.py | 2 +- .../perf/setup_scripts/setup_onnx_zoo.py | 2 +- .../python/tools/transformers/benchmark.py | 2 +- .../tools/transformers/bert_test_data.py | 6 +- .../tools/transformers/fusion_attention.py | 2 +- .../python/tools/transformers/fusion_utils.py | 2 +- .../bart/utils/export_summarization_edinit.py | 2 +- .../export_summarization_enc_dec_past.py | 2 +- .../models/bart/utils/onnx_inference.py | 4 +- .../models/stable_diffusion/engine_builder.py | 2 +- .../pipeline_stable_diffusion.py | 4 +- .../test/framework/allocation_planner_test.cc | 2 +- onnxruntime/test/onnx/OrtValueList.h | 2 +- .../test/onnx/microbenchmark/activation.cc | 2 +- .../qdq_transformer_fastmath_test.cc | 2 +- .../test/optimizer/qdq_transformer_test.cc | 2 +- .../reduction_test_cases_generator.py | 8 +- .../test/providers/cpu/tensor/pad_test.cc | 12 +- .../test/providers/qnn/qnn_basic_test.cc | 10 +- 
.../test/python/onnx_backend_test_series.py | 2 +- .../test/python/transformers/rotary_flash.py | 3 - .../generate_tiny_keras2onnx_bert_models.py | 4 +- .../generate_tiny_gpt2_model.py | 4 +- onnxruntime/test/shared_lib/custom_op_utils.h | 20 +- onnxruntime/test/testdata/CNTK/gen.py | 4 +- .../core/framework/adasum/adasum_mpi.cc | 3 +- .../orttraining/core/framework/pipeline.h | 2 +- .../torch/custom_function_register.h | 2 +- .../orttraining/core/framework/torch/gil.h | 2 +- .../core/framework/torch/torch_proxy.h | 4 +- .../orttraining/core/graph/graph_augmenter.h | 6 +- .../core/graph/loss_func/loss_func_common.h | 2 +- .../core/graph/pipeline_transformer.cc | 2 +- .../core/optimizer/megatron_transformer.cc | 4 +- .../core/session/training_session.h | 6 +- orttraining/orttraining/lazy_tensor/flags.h | 2 +- orttraining/orttraining/models/bert/main.cc | 3 +- .../orttraining/models/pipeline_poc/main.cc | 54 +- .../orttraining/models/runner/training_util.h | 4 +- .../python/orttraining_pybind_state.cc | 24 +- .../python/training/ort_triton/kernel/_mm.py | 2 +- .../python/training/ortmodule/_utils.py | 2 +- .../cpu/torch_interop_utils/ctx_pool.h | 4 +- .../test/distributed/partition_utils.h | 2 +- ...orttraining_test_hierarchical_ortmodule.py | 2 +- .../orttraining_test_model_transform.py | 2 +- .../python/orttraining_test_ortmodule_api.py | 12 +- ...training_test_ortmodule_bert_classifier.py | 2 +- ...test_ortmodule_bert_classifier_autocast.py | 2 +- ...g_test_ortmodule_deepspeed_zero_stage_1.py | 2 +- .../orttraining_test_ortmodule_onnx_ops.py | 12 +- .../python/orttraining_test_ortmodule_poc.py | 2 +- .../test/python/orttraining_test_utilities.py | 4 +- .../training_ops/function_op_test_utils.cc | 2 +- .../cpu/torch/torch_custom_function_kernel.h | 2 +- .../cuda/cuda_training_kernels.cc | 466 +-- .../rocm/rocm_training_kernels.cc | 374 +- .../tools/scripts/gpt2_model_transform.py | 2 +- orttraining/tools/scripts/model_transform.py | 2 +- pyproject.toml | 1 + 
requirements-lintrunner.txt | 8 +- tools/ci_build/build.py | 22 +- tools/ci_build/gen_def.py | 10 +- tools/ci_build/reduce_op_kernels.py | 2 +- tools/ci_build/replace_urls_in_deps.py | 6 +- .../upload_python_package_to_azure_storage.py | 2 +- tools/doc/rename_folders.py | 14 +- .../nuget/generate_nuspec_for_native_nuget.py | 6 +- tools/python/onnx_test_data_utils.py | 2 +- .../util/mobile_helpers/usability_checker.py | 2 +- .../util/reduced_build_config_parser.py | 2 +- winml/lib/Api.Image/CpuDetensorizer.h | 9 +- winml/lib/Api.Image/CpuTensorizer.h | 12 +- winml/lib/Api.Image/D3DDeviceCache.cpp | 24 +- winml/lib/Api.Image/EventTimer.h | 4 +- .../lib/Api.Image/ImageConversionHelpers.cpp | 11 +- winml/lib/Api.Image/ImageConverter.cpp | 3 +- .../Api.Image/TensorToVideoFrameConverter.cpp | 13 +- .../Api.Image/VideoFrameToTensorConverter.cpp | 38 +- .../Api.Image/inc/ConverterResourceStore.h | 2 +- winml/lib/Api/FeatureValues.h | 82 +- winml/lib/Api/ImageFeatureValue.cpp | 14 +- winml/lib/Api/LearningModel.cpp | 2 +- winml/lib/Api/LearningModelSession.cpp | 4 +- winml/lib/Api/NumericData.cpp | 12 +- winml/lib/Api/impl/FeatureCompatibility.h | 6 +- winml/lib/Common/CommonDeviceHelpers.cpp | 6 +- ...er_backed_random_access_stream_reference.h | 5 +- winml/test/api/raw/winml_microsoft.h | 108 +- winml/test/api/raw/winml_windows.h | 112 +- winml/test/image/imagetests.cpp | 21 +- winml/test/model/compare_feature_value.cpp | 3 +- winml/test/model/model_tests.cpp | 10 +- winml/test/model/skip_model_tests.h | 6 +- winml/test/scenario/cppwinrt/NoisyReluCpu.h | 6 +- winml/test/scenario/cppwinrt/ReluCpu.h | 6 +- 152 files changed, 3781 insertions(+), 3842 deletions(-) diff --git a/cgmanifests/generate_cgmanifest.py b/cgmanifests/generate_cgmanifest.py index 3cecbb0cc977f..52bd3f58645f2 100644 --- a/cgmanifests/generate_cgmanifest.py +++ b/cgmanifests/generate_cgmanifest.py @@ -73,7 +73,7 @@ def add_github_dep(name, parsed_url): return # Make a REST call to convert to tag to a git 
commit url = f"https://api.github.com/repos/{org_name}/{repo_name}/git/refs/tags/{tag}" - print("requesting %s ..." % url) + print(f"requesting {url} ...") res = requests.get(url, auth=(args.username, args.token)) response_json = res.json() tag_object = response_json["object"] diff --git a/csharp/tools/ValidateNativeDelegateAttributes.py b/csharp/tools/ValidateNativeDelegateAttributes.py index acd6c173bfeb0..7431cc8d9d288 100644 --- a/csharp/tools/ValidateNativeDelegateAttributes.py +++ b/csharp/tools/ValidateNativeDelegateAttributes.py @@ -19,7 +19,7 @@ def check_all_delegates_have_unmanaged_function_pointer_attribute(file: pathlib. line_num = 0 with open(str(file.resolve(strict=True))) as f: prev_line = "" - for line in f.readlines(): + for line in f: line_num += 1 # strip so it's easier to deal with commented out lines. diff --git a/include/onnxruntime/core/common/exceptions.h b/include/onnxruntime/core/common/exceptions.h index 18c117f12ad7d..494a770b8db98 100644 --- a/include/onnxruntime/core/common/exceptions.h +++ b/include/onnxruntime/core/common/exceptions.h @@ -17,13 +17,13 @@ namespace onnxruntime { class NotImplementedException : public std::logic_error { public: - explicit NotImplementedException(const char* _Message = "Function not yet implemented") noexcept : std::logic_error(_Message){}; - explicit NotImplementedException(const std::string& _Message = "Function not yet implemented") noexcept : std::logic_error(_Message){}; + explicit NotImplementedException(const char* _Message = "Function not yet implemented") noexcept : std::logic_error(_Message) {}; + explicit NotImplementedException(const std::string& _Message = "Function not yet implemented") noexcept : std::logic_error(_Message) {}; }; class TypeMismatchException : public std::logic_error { public: - TypeMismatchException() noexcept : logic_error("Type mismatch"){}; + TypeMismatchException() noexcept : logic_error("Type mismatch") {}; }; class OnnxRuntimeException : public std::exception { diff
--git a/include/onnxruntime/core/framework/stream_handles.h b/include/onnxruntime/core/framework/stream_handles.h index 9c987f10ccadb..01631e1fb2aa6 100644 --- a/include/onnxruntime/core/framework/stream_handles.h +++ b/include/onnxruntime/core/framework/stream_handles.h @@ -32,7 +32,7 @@ class Stream { return {}; }; // block the host thread until all the tasks in the stream finished. - virtual void Flush(){}; + virtual void Flush() {}; // The framework may reuse the stream instance for multiple iterations. // This is the API that provide a chance to let the device stream cleanup // resource at the end of a iteration. diff --git a/include/onnxruntime/core/platform/Barrier.h b/include/onnxruntime/core/platform/Barrier.h index 915cfc50953ed..1148b052bd9af 100644 --- a/include/onnxruntime/core/platform/Barrier.h +++ b/include/onnxruntime/core/platform/Barrier.h @@ -76,6 +76,6 @@ class Barrier { // Multiple threads can wait on the same Notification object, // but only one caller must call Notify() on the object. 
struct Notification : Barrier { - Notification() : Barrier(1){}; + Notification() : Barrier(1) {}; }; } // namespace onnxruntime diff --git a/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h b/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h index e33007102e198..d4411a6d72356 100644 --- a/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h +++ b/include/onnxruntime/core/platform/EigenNonBlockingThreadPool.h @@ -219,18 +219,18 @@ class ThreadPoolProfiler { WAIT_REVOKE, MAX_EVENT }; - ThreadPoolProfiler(int, const CHAR_TYPE*){}; + ThreadPoolProfiler(int, const CHAR_TYPE*) {}; ~ThreadPoolProfiler() = default; ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(ThreadPoolProfiler); - void Start(){}; + void Start() {}; std::string Stop() { return "not available for minimal build"; } - void LogStart(){}; + void LogStart() {}; void LogEnd(ThreadPoolEvent){}; void LogEndAndStart(ThreadPoolEvent){}; void LogStartAndCoreAndBlock(std::ptrdiff_t){}; void LogCoreAndBlock(std::ptrdiff_t){}; - void LogThreadId(int){}; - void LogRun(int){}; + void LogThreadId(int) {}; + void LogRun(int) {}; std::string DumpChildThreadStat() { return {}; } }; #else diff --git a/include/onnxruntime/core/providers/custom_op_context.h b/include/onnxruntime/core/providers/custom_op_context.h index 8f3d2476d4fdb..b10126da8e0fb 100644 --- a/include/onnxruntime/core/providers/custom_op_context.h +++ b/include/onnxruntime/core/providers/custom_op_context.h @@ -6,5 +6,5 @@ // CustomOpContext defines an interface allowing a custom op to access ep-specific resources. 
struct CustomOpContext { CustomOpContext() = default; - virtual ~CustomOpContext(){}; + virtual ~CustomOpContext() {}; }; \ No newline at end of file diff --git a/include/onnxruntime/core/session/experimental_onnxruntime_cxx_api.h b/include/onnxruntime/core/session/experimental_onnxruntime_cxx_api.h index 9e4ceffc44bfd..c1a7839ff22fa 100644 --- a/include/onnxruntime/core/session/experimental_onnxruntime_cxx_api.h +++ b/include/onnxruntime/core/session/experimental_onnxruntime_cxx_api.h @@ -24,9 +24,9 @@ namespace Experimental { struct Session : Ort::Session { Session(Env& env, std::basic_string& model_path, SessionOptions& options) - : Ort::Session(env, model_path.data(), options){}; + : Ort::Session(env, model_path.data(), options) {}; Session(Env& env, void* model_data, size_t model_data_length, SessionOptions& options) - : Ort::Session(env, model_data, model_data_length, options){}; + : Ort::Session(env, model_data, model_data_length, options) {}; // overloaded Run() with sensible defaults std::vector Run(const std::vector& input_names, @@ -52,7 +52,7 @@ struct Session : Ort::Session { struct Value : Ort::Value { Value(OrtValue* p) - : Ort::Value(p){}; + : Ort::Value(p) {}; template static Ort::Value CreateTensor(T* p_data, size_t p_data_element_count, const std::vector& shape); diff --git a/include/onnxruntime/core/session/onnxruntime_cxx_api.h b/include/onnxruntime/core/session/onnxruntime_cxx_api.h index 8091fd4cfc2a3..5d974e1ff5185 100644 --- a/include/onnxruntime/core/session/onnxruntime_cxx_api.h +++ b/include/onnxruntime/core/session/onnxruntime_cxx_api.h @@ -2175,8 +2175,8 @@ struct Op : detail::Base { /// struct ShapeInferContext { struct SymbolicInteger { - SymbolicInteger(int64_t i) : i_(i), is_int_(true){}; - SymbolicInteger(const char* s) : s_(s), is_int_(false){}; + SymbolicInteger(int64_t i) : i_(i), is_int_(true) {}; + SymbolicInteger(const char* s) : s_(s), is_int_(false) {}; SymbolicInteger(const SymbolicInteger&) = default; 
SymbolicInteger(SymbolicInteger&&) = default; diff --git a/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h b/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h index 57a64380faeb0..ce87d8c56d3fe 100644 --- a/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h +++ b/include/onnxruntime/core/session/onnxruntime_lite_custom_op.h @@ -29,7 +29,7 @@ class ArgBase { ArgBase(OrtKernelContext* ctx, size_t indice, bool is_input) : ctx_(ctx), indice_(indice), is_input_(is_input) {} - virtual ~ArgBase(){}; + virtual ~ArgBase() {}; protected: struct KernelContext ctx_; diff --git a/onnxruntime/contrib_ops/cpu/cpu_contrib_kernels.cc b/onnxruntime/contrib_ops/cpu/cpu_contrib_kernels.cc index 90a51fda0b188..84f9ca88ecf55 100644 --- a/onnxruntime/contrib_ops/cpu/cpu_contrib_kernels.cc +++ b/onnxruntime/contrib_ops/cpu/cpu_contrib_kernels.cc @@ -267,83 +267,83 @@ Status RegisterQuantizationKernels(KernelRegistry& kernel_registry) { Status RegisterCpuContribKernels(KernelRegistry& kernel_registry) { static const BuildKernelCreateInfoFn function_table[] = { - BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing - BuildKernelCreateInfo, + BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing + BuildKernelCreateInfo, - // add more kernels here - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + // add more kernels here + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_SPARSE_TENSORS) - BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, // backward compatibility - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, // backward compatibility + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #ifndef ORT_MINIMAL_BUILD - BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // These ops were experimental ops in onnx domain which have been removed now. 
We add them here as - // contrib ops to main backward compatibility - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // These ops were experimental ops in onnx domain which have been removed now. We add them here as + // contrib ops to main backward compatibility + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #ifdef ENABLE_ATEN - BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif #ifdef ENABLE_TRAINING_OPS - // Should remove the shrunken_gather include from ENABLE_TRAINING_OPS once 1). compute optimizer is enabled for inference or - // 2). this is needed by inference for other purpose. - BuildKernelCreateInfo, + // Should remove the shrunken_gather include from ENABLE_TRAINING_OPS once 1). compute optimizer is enabled for inference or + // 2). this is needed by inference for other purpose. 
+ BuildKernelCreateInfo, #endif }; diff --git a/onnxruntime/contrib_ops/cpu/crop_and_resize.cc b/onnxruntime/contrib_ops/cpu/crop_and_resize.cc index 1863522c1643c..533d62f5e7486 100644 --- a/onnxruntime/contrib_ops/cpu/crop_and_resize.cc +++ b/onnxruntime/contrib_ops/cpu/crop_and_resize.cc @@ -173,7 +173,7 @@ void CropAndResizeForward(const TensorShape& output_shape, } } } // for pw - } // for ph + } // for ph }, 0); // for n } diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/alibi.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/alibi.h index 5d94190ecbeb9..18d36cfd88d60 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/alibi.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/alibi.h @@ -17,7 +17,7 @@ struct Alibi { const int max_seqlen_k, max_seqlen_q; __forceinline__ __device__ Alibi(const float alibi_slope, const int max_seqlen_k, const int max_seqlen_q) - : alibi_slope(alibi_slope), max_seqlen_k(max_seqlen_k), max_seqlen_q(max_seqlen_q){}; + : alibi_slope(alibi_slope), max_seqlen_k(max_seqlen_k), max_seqlen_q(max_seqlen_q) {}; template __forceinline__ __device__ void apply_alibi(Tensor& tensor, diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/mask.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/mask.h index b225e5e3be559..0998155eba635 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/mask.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/mask.h @@ -116,7 +116,7 @@ struct Mask { __forceinline__ __device__ Mask(const int max_seqlen_k, const int max_seqlen_q, const int window_size_left, const int window_size_right, const float alibi_slope = 0.f) - : max_seqlen_k(max_seqlen_k), max_seqlen_q(max_seqlen_q), window_size_left(window_size_left), window_size_right(window_size_right), alibi_slope(!Has_alibi ? 0.0 : alibi_slope){}; + : max_seqlen_k(max_seqlen_k), max_seqlen_q(max_seqlen_q), window_size_left(window_size_left), window_size_right(window_size_right), alibi_slope(!Has_alibi ? 
0.0 : alibi_slope) {}; // Causal_mask: whether this particular iteration needs causal masking template diff --git a/onnxruntime/contrib_ops/cuda/bert/flash_attention/softmax.h b/onnxruntime/contrib_ops/cuda/bert/flash_attention/softmax.h index 3c205378f0177..ba678b740d376 100644 --- a/onnxruntime/contrib_ops/cuda/bert/flash_attention/softmax.h +++ b/onnxruntime/contrib_ops/cuda/bert/flash_attention/softmax.h @@ -121,7 +121,7 @@ struct Softmax { using TensorT = decltype(make_tensor(Shape>{})); TensorT row_max, row_sum; - __forceinline__ __device__ Softmax(){}; + __forceinline__ __device__ Softmax() {}; template __forceinline__ __device__ void softmax_rescale_o(Tensor0& acc_s, Tensor1& acc_o, float softmax_scale_log2) { diff --git a/onnxruntime/contrib_ops/cuda/cuda_contrib_kernels.cc b/onnxruntime/contrib_ops/cuda/cuda_contrib_kernels.cc index b237e5c24bbef..21bd5eb91c20f 100644 --- a/onnxruntime/contrib_ops/cuda/cuda_contrib_kernels.cc +++ b/onnxruntime/contrib_ops/cuda/cuda_contrib_kernels.cc @@ -231,206 +231,206 @@ KernelCreateInfo BuildKernelCreateInfo() { Status RegisterCudaContribKernels(KernelRegistry& kernel_registry) { static const BuildKernelCreateInfoFn function_table[] = { - BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, // backward compatibility - BuildKernelCreateInfo, // backward compatibility - BuildKernelCreateInfo, // backward compatibility - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // These ops were experimental ops in onnx domain which have been removed now. We add them here as - // contrib ops to maintain backward compatibility - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // TransposedMatMul is still here for backward compatibility - BuildKernelCreateInfo, // backward compatibility - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, // backward compatibility + BuildKernelCreateInfo, // backward compatibility + BuildKernelCreateInfo, // backward compatibility + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // These ops were experimental ops in onnx domain which have been removed now. We add them here as + // contrib ops to maintain backward compatibility + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // TransposedMatMul is still here for backward compatibility + BuildKernelCreateInfo, // backward compatibility + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #ifdef ENABLE_ATEN - BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif #ifdef ENABLE_TRAINING_OPS - // Should remove the shrunken_gather include from ENABLE_TRAINING_OPS once - // 1). compute optimizer is enabled for inference or - // 2). this is needed by inference for other purpose. - BuildKernelCreateInfo, + // Should remove the shrunken_gather include from ENABLE_TRAINING_OPS once + // 1). compute optimizer is enabled for inference or + // 2). this is needed by inference for other purpose. 
+ BuildKernelCreateInfo, #endif #if defined(ORT_USE_NCCL) - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif }; diff --git a/onnxruntime/core/framework/ex_lib_loader.h b/onnxruntime/core/framework/ex_lib_loader.h index cc353a7521786..d7ea5db3e5a26 100644 --- a/onnxruntime/core/framework/ex_lib_loader.h +++ b/onnxruntime/core/framework/ex_lib_loader.h @@ -20,7 +20,7 @@ class ExLibLoader { virtual ~ExLibLoader(); protected: - virtual void PreUnloadLibrary(void* /*handle*/){}; + virtual void PreUnloadLibrary(void* /*handle*/) {}; std::map dso_name_data_map_; diff --git a/onnxruntime/core/graph/contrib_ops/contrib_defs.cc b/onnxruntime/core/graph/contrib_ops/contrib_defs.cc index 
dea8775c89a30..2d51658953282 100644 --- a/onnxruntime/core/graph/contrib_ops/contrib_defs.cc +++ b/onnxruntime/core/graph/contrib_ops/contrib_defs.cc @@ -2665,10 +2665,10 @@ ONNX_MS_OPERATOR_SET_SCHEMA(CropAndResize, 1, #if !defined(DISABLE_FLOAT8_TYPES) #define GEMM_FLOAT8_TYPES \ - { "tensor(float8e4m3fn)", "tensor(float8e5m2)", "tensor(float16)", "tensor(bfloat16)", "tensor(float)" } + {"tensor(float8e4m3fn)", "tensor(float8e5m2)", "tensor(float16)", "tensor(bfloat16)", "tensor(float)"} #else #define GEMM_FLOAT8_TYPES \ - { "tensor(float16)", "tensor(bfloat16)", "tensor(float)" } + {"tensor(float16)", "tensor(bfloat16)", "tensor(float)"} #endif ONNX_MS_OPERATOR_SET_SCHEMA(GemmFloat8, 1, diff --git a/onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h b/onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h index c042bb0059ac2..e7d2d32809fc5 100644 --- a/onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h +++ b/onnxruntime/core/optimizer/transpose_optimization/optimizer_api.h @@ -86,7 +86,7 @@ class TensorRef { /// Flattened tensor data in bytes virtual std::vector Data() const = 0; - virtual ~TensorRef(){}; + virtual ~TensorRef() {}; }; /// @@ -131,7 +131,7 @@ class ValueInfoRef { /// Indices of dimensions to add. Indices are relative to final shape. virtual void UnsqueezeDims(const std::vector& axes) = 0; - virtual ~ValueInfoRef(){}; + virtual ~ValueInfoRef() {}; }; /// @@ -248,7 +248,7 @@ class NodeRef { /// Id virtual int64_t Id() const = 0; - virtual ~NodeRef(){}; + virtual ~NodeRef() {}; }; /// @@ -449,7 +449,7 @@ class GraphRef { /// True if output of the Graph. 
virtual bool IsGraphOutput(std::string_view name) const = 0; - virtual ~GraphRef(){}; + virtual ~GraphRef() {}; }; } // namespace api diff --git a/onnxruntime/core/platform/path_lib.h b/onnxruntime/core/platform/path_lib.h index fca8990f14821..94425a3999d42 100644 --- a/onnxruntime/core/platform/path_lib.h +++ b/onnxruntime/core/platform/path_lib.h @@ -228,11 +228,9 @@ inline std::basic_string GetLastComponent(const std::basic_strin typename std::basic_string::size_type pos = input.length(); PATH_CHAR_TYPE sep = GetPathSep(); // remove trailing backslash - for (; pos > 1 && input[pos - 1] == sep; --pos) - ; + for (; pos > 1 && input[pos - 1] == sep; --pos); input.resize(pos); - for (; pos != 0 && input[pos - 1] != sep; --pos) - ; + for (; pos != 0 && input[pos - 1] != sep; --pos); return input.substr(pos); } diff --git a/onnxruntime/core/providers/coreml/model/model.mm b/onnxruntime/core/providers/coreml/model/model.mm index 4fd822f0d0d15..4d20061820e71 100644 --- a/onnxruntime/core/providers/coreml/model/model.mm +++ b/onnxruntime/core/providers/coreml/model/model.mm @@ -502,7 +502,7 @@ Status GetMLMultiArrayCopyInfo(const MLMultiArray* _Nonnull array, class Execution { public: Execution(const std::string& path, const logging::Logger& logger, uint32_t coreml_flags); - ~Execution(){}; + ~Execution() {}; Status LoadModel(); Status Predict(const std::unordered_map& inputs, diff --git a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc index 9147107ac518a..7ac68e3a9a69d 100644 --- a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc +++ b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc @@ -1133,1568 +1133,1568 @@ KernelCreateInfo BuildKernelCreateInfo() { Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) { static const BuildKernelCreateInfoFn function_table[] = { - BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 9 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 10 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // opset 11 - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // OpSet 12 - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // REVIEW(codemzs): ConstEigenVectorArrayMap.cast, - // BuildKernelCreateInfo, BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // opset 13 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // OpSet 14 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 15 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 9 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 10 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // opset 11 + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // OpSet 12 + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // REVIEW(codemzs): ConstEigenVectorArrayMap.cast, + // BuildKernelCreateInfo, BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // opset 13 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // OpSet 14 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 15 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_OPTIONAL_TYPE) - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - // Opset 16 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 17 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 18 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + // Opset 16 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 17 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 18 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_OPTIONAL_TYPE) - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - // Opset 19 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + // Opset 19 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 20 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 20 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 21 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 21 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif }; diff --git a/onnxruntime/core/providers/cpu/ml/tree_ensemble_classifier.cc b/onnxruntime/core/providers/cpu/ml/tree_ensemble_classifier.cc index 0c45b315f0280..758066d8a84e0 100644 --- a/onnxruntime/core/providers/cpu/ml/tree_ensemble_classifier.cc +++ b/onnxruntime/core/providers/cpu/ml/tree_ensemble_classifier.cc @@ -41,16 +41,16 @@ TreeEnsembleClassifier::TreeEnsembleClassifier(const OpKernelInfo& info) : Op template 
Status TreeEnsembleClassifier::GetRemovableAttributes(InlinedVector& removable_attributes) const { - InlinedVector names { - "base_values", "nodes_falsenodeids", "nodes_featureids", "nodes_hitrates", - "nodes_missing_value_tracks_true", "nodes_modes", "nodes_nodeids", "nodes_treeids", - "nodes_truenodeids", "nodes_values", "class_ids", "class_treeids", "class_nodeids", - "class_weights", "classlabels_strings", - "classlabels_int64s" + InlinedVector names{ + "base_values", "nodes_falsenodeids", "nodes_featureids", "nodes_hitrates", + "nodes_missing_value_tracks_true", "nodes_modes", "nodes_nodeids", "nodes_treeids", + "nodes_truenodeids", "nodes_values", "class_ids", "class_treeids", "class_nodeids", + "class_weights", "classlabels_strings", + "classlabels_int64s" #if !defined(ORT_MINIMAL_BUILD) - "base_values_as_tensor", - "nodes_hitrates_as_tensor", "nodes_values_as_tensor", - "class_weights_as_tensor" + "base_values_as_tensor", + "nodes_hitrates_as_tensor", "nodes_values_as_tensor", + "class_weights_as_tensor" #endif }; removable_attributes.swap(names); diff --git a/onnxruntime/core/providers/cpu/ml/treeregressor.cc b/onnxruntime/core/providers/cpu/ml/treeregressor.cc index 17f5cf32960da..6b5b972d3c929 100644 --- a/onnxruntime/core/providers/cpu/ml/treeregressor.cc +++ b/onnxruntime/core/providers/cpu/ml/treeregressor.cc @@ -48,16 +48,16 @@ TreeEnsembleRegressor::TreeEnsembleRegressor(const OpKernelInfo& info) : OpKe template Status TreeEnsembleRegressor::GetRemovableAttributes(InlinedVector& removable_attributes) const { - InlinedVector names { - "base_values", "nodes_falsenodeids", "nodes_featureids", "nodes_hitrates", - "nodes_missing_value_tracks_true", "nodes_modes", "nodes_nodeids", "nodes_treeids", - "nodes_truenodeids", "nodes_values", - "target_ids", "target_treeids", "target_nodeids", - "target_weights" + InlinedVector names{ + "base_values", "nodes_falsenodeids", "nodes_featureids", "nodes_hitrates", + "nodes_missing_value_tracks_true", "nodes_modes", 
"nodes_nodeids", "nodes_treeids", + "nodes_truenodeids", "nodes_values", + "target_ids", "target_treeids", "target_nodeids", + "target_weights" #if !defined(ORT_MINIMAL_BUILD) - "base_values_as_tensor", - "nodes_hitrates_as_tensor", "nodes_values_as_tensor", - "class_weights_as_tensor" + "base_values_as_tensor", + "nodes_hitrates_as_tensor", "nodes_values_as_tensor", + "class_weights_as_tensor" #endif }; removable_attributes.swap(names); diff --git a/onnxruntime/core/providers/cpu/object_detection/non_max_suppression.cc b/onnxruntime/core/providers/cpu/object_detection/non_max_suppression.cc index 4a176b0726a18..721c2064fae03 100644 --- a/onnxruntime/core/providers/cpu/object_detection/non_max_suppression.cc +++ b/onnxruntime/core/providers/cpu/object_detection/non_max_suppression.cc @@ -195,8 +195,8 @@ Status NonMaxSuppression::Compute(OpKernelContext* ctx) const { } sorted_boxes.pop(); } // while - } // for class_index - } // for batch_index + } // for class_index + } // for batch_index constexpr auto last_dim = 3; const auto num_selected = selected_indices.size(); diff --git a/onnxruntime/core/providers/cpu/object_detection/roialign.cc b/onnxruntime/core/providers/cpu/object_detection/roialign.cc index ead2ccaef002e..d8c81e5cb63e5 100644 --- a/onnxruntime/core/providers/cpu/object_detection/roialign.cc +++ b/onnxruntime/core/providers/cpu/object_detection/roialign.cc @@ -251,9 +251,9 @@ void RoiAlignForward(const TensorShape& output_shape, const T* bottom_data, floa top_data[index] = output_val; } // for pw - } // for ph - } // for c - } // for n + } // for ph + } // for c + } // for n }); } } // namespace diff --git a/onnxruntime/core/providers/cpu/tensor/expand.cc b/onnxruntime/core/providers/cpu/tensor/expand.cc index 6ead2ea73460b..b0c636281bc7a 100644 --- a/onnxruntime/core/providers/cpu/tensor/expand.cc +++ b/onnxruntime/core/providers/cpu/tensor/expand.cc @@ -128,7 +128,7 @@ Status Expand::Compute(OpKernelContext* context) const { memcpy(output_data + 
output_offset, input_data + input_offset, onnxruntime::narrow(copy_byte)); output_offsets[onnxruntime::narrow(i)] = output_offset; } // for i - }; // distribute_fn + }; // distribute_fn auto per_thread_tasks = distribute_count / concurrency::ThreadPool::DegreeOfParallelism(context->GetOperatorThreadPool()); @@ -169,9 +169,9 @@ Status Expand::Compute(OpKernelContext* context) const { copy_byte >>= 1; } } // while - } // if - } // for - }; // copy_fn + } // if + } // for + }; // copy_fn if (per_thread_tasks > 20) { concurrency::ThreadPool::TryParallelFor( context->GetOperatorThreadPool(), @@ -181,7 +181,7 @@ Status Expand::Compute(OpKernelContext* context) const { } else { copy_fn(0, onnxruntime::narrow(distribute_count)); } // else - } // for + } // for return Status::OK(); } // Expand::compute diff --git a/onnxruntime/core/providers/cuda/cuda_execution_provider.cc b/onnxruntime/core/providers/cuda/cuda_execution_provider.cc index 8c03e489d298d..5771380433b35 100644 --- a/onnxruntime/core/providers/cuda/cuda_execution_provider.cc +++ b/onnxruntime/core/providers/cuda/cuda_execution_provider.cc @@ -1394,916 +1394,916 @@ KernelCreateInfo BuildKernelCreateInfo() { static Status RegisterCudaKernels(KernelRegistry& kernel_registry) { static const BuildKernelCreateInfoFn function_table[] = { - BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing + BuildKernelCreateInfo, + BuildKernelCreateInfo, #ifndef USE_CUDA_MINIMAL - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // opset 10 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // opset 11 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // OpSet 12 - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // OpSet 13 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // OpSet 14 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // OpSet 15 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 16 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 17 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 18 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Opset 19 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // opset 10 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // opset 11 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // OpSet 12 + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // OpSet 13 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, 
+ BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // OpSet 14 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // OpSet 15 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 16 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 17 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 18 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Opset 19 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #if !defined(DISABLE_FLOAT8_TYPES) - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, - // Opset 20 - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + // Opset 20 + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif }; diff --git a/onnxruntime/core/providers/cuda/cuda_graph.h b/onnxruntime/core/providers/cuda/cuda_graph.h index 064994c1f14ae..dd03db94b631c 100644 --- a/onnxruntime/core/providers/cuda/cuda_graph.h +++ b/onnxruntime/core/providers/cuda/cuda_graph.h @@ -18,7 +18,7 @@ constexpr CudaGraphAnnotation_t kCudaGraphAnnotationSkip = -1; constexpr CudaGraphAnnotation_t kCudaGraphAnnotationDefault = 0; struct CudaGraphSet { - CudaGraphSet(){}; + CudaGraphSet() {}; ~CudaGraphSet(); void Clear(); @@ -31,7 +31,7 @@ struct CudaGraphSet { }; struct CUDAGraphManager { - CUDAGraphManager(){}; + CUDAGraphManager() {}; CUDAGraphManager(cudaStream_t stream); 
~CUDAGraphManager(); diff --git a/onnxruntime/core/providers/cuda/cuda_profiler.h b/onnxruntime/core/providers/cuda/cuda_profiler.h index 88c9adc5e17b3..4930e55351615 100644 --- a/onnxruntime/core/providers/cuda/cuda_profiler.h +++ b/onnxruntime/core/providers/cuda/cuda_profiler.h @@ -33,7 +33,7 @@ class CudaProfiler final : public EpProfiler { ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(CudaProfiler); ~CudaProfiler() {} bool StartProfiling(TimePoint) override { return true; } - void EndProfiling(TimePoint, Events&) override{}; + void EndProfiling(TimePoint, Events&) override {}; void Start(uint64_t) override{}; void Stop(uint64_t) override{}; }; diff --git a/onnxruntime/core/providers/cuda/nn/conv_transpose.h b/onnxruntime/core/providers/cuda/nn/conv_transpose.h index 77c9d94162b6b..71ad3ee6e2147 100644 --- a/onnxruntime/core/providers/cuda/nn/conv_transpose.h +++ b/onnxruntime/core/providers/cuda/nn/conv_transpose.h @@ -18,7 +18,7 @@ namespace cuda { template class ConvTranspose : public CudaKernel { public: - ConvTranspose(const OpKernelInfo& info) : CudaKernel(info), conv_transpose_attrs_(info){}; + ConvTranspose(const OpKernelInfo& info) : CudaKernel(info), conv_transpose_attrs_(info) {}; Status PrePack(const Tensor& tensor, int input_idx, AllocatorPtr alloc, bool& is_packed, [[maybe_unused]] PrePackedWeights* prepacked_weights) override; Status ComputeInternal(OpKernelContext* context) const override; diff --git a/onnxruntime/core/providers/cuda/nvtx_profile.h b/onnxruntime/core/providers/cuda/nvtx_profile.h index f98745cbfc5c2..e545578a72fc4 100644 --- a/onnxruntime/core/providers/cuda/nvtx_profile.h +++ b/onnxruntime/core/providers/cuda/nvtx_profile.h @@ -45,7 +45,7 @@ enum class Color : uint32_t { class RangeCreatorBase { public: RangeCreatorBase(const std::string message, const Color color) - : message_(message), color_(color), is_begin_called_(false), is_end_called_(false){}; + : message_(message), color_(color), is_begin_called_(false), is_end_called_(false) 
{}; // Check if Begin and End are both called. // It's pointless if not all of them are called. @@ -100,7 +100,7 @@ class RangeCreatorBase { class NvtxRangeCreator final : public RangeCreatorBase { public: NvtxRangeCreator(const std::string message, const Color color) - : RangeCreatorBase(message, color){}; + : RangeCreatorBase(message, color) {}; void BeginImpl() override; void EndImpl() override; @@ -114,7 +114,7 @@ class NvtxRangeCreator final : public RangeCreatorBase { class NvtxNestedRangeCreator final : public RangeCreatorBase { public: NvtxNestedRangeCreator(const std::string message, const Color color) - : RangeCreatorBase(message, color){}; + : RangeCreatorBase(message, color) {}; void BeginImpl() override; void EndImpl() override; @@ -123,7 +123,7 @@ class NvtxNestedRangeCreator final : public RangeCreatorBase { class NvtxMarkerCreator final { public: NvtxMarkerCreator(const std::string message, const Color color) - : message_(message), color_(color){}; + : message_(message), color_(color) {}; void Mark(); private: diff --git a/onnxruntime/core/providers/cuda/shared_inc/cuda_utils.h b/onnxruntime/core/providers/cuda/shared_inc/cuda_utils.h index 1f7df9b6fc2e3..ed642754af3ba 100644 --- a/onnxruntime/core/providers/cuda/shared_inc/cuda_utils.h +++ b/onnxruntime/core/providers/cuda/shared_inc/cuda_utils.h @@ -35,7 +35,7 @@ enum class BroadcastIndexType : int32_t { template class IConstantBuffer { public: - virtual ~IConstantBuffer(){}; + virtual ~IConstantBuffer() {}; virtual const T* GetBuffer(cudaStream_t stream, size_t count) = 0; }; diff --git a/onnxruntime/core/providers/cuda/tensor/cast_op.cc b/onnxruntime/core/providers/cuda/tensor/cast_op.cc index 8e5a68e2a278e..821695bbbd42f 100644 --- a/onnxruntime/core/providers/cuda/tensor/cast_op.cc +++ b/onnxruntime/core/providers/cuda/tensor/cast_op.cc @@ -13,23 +13,23 @@ const std::vector& CastOpTypeConstraints() { // Must be done as a local static for a shared provider, to avoid the prefast warning: // 
Global initializer calls a non-constexpr function 'onnxruntime::DataTypeImpl::GetTensorType' // In a shared provider, GetTensorType is a function call into Onnxruntime and isn't constexpr - static std::vector types { - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType(), - DataTypeImpl::GetTensorType() + static std::vector types{ + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType(), + DataTypeImpl::GetTensorType() #if !defined(DISABLE_FLOAT8_TYPES) - , - DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType() + , + DataTypeImpl::GetTensorType(), DataTypeImpl::GetTensorType() #endif }; return types; diff --git a/onnxruntime/core/providers/dnnl/dnnl_node_capability.h b/onnxruntime/core/providers/dnnl/dnnl_node_capability.h index 3ed3705f6d81b..f67b70616547c 100644 --- a/onnxruntime/core/providers/dnnl/dnnl_node_capability.h +++ b/onnxruntime/core/providers/dnnl/dnnl_node_capability.h @@ -42,7 +42,7 @@ enum ORT_DataType : int { */ class DnnlNodeCapability { public: - virtual ~DnnlNodeCapability(){}; + virtual ~DnnlNodeCapability() {}; /** * virtual function expected to be implemented for different node * types. 
diff --git a/onnxruntime/core/providers/dnnl/subgraph/dnnl_subgraph.h b/onnxruntime/core/providers/dnnl/subgraph/dnnl_subgraph.h index ceac2a6f58b32..add9f440df91f 100644 --- a/onnxruntime/core/providers/dnnl/subgraph/dnnl_subgraph.h +++ b/onnxruntime/core/providers/dnnl/subgraph/dnnl_subgraph.h @@ -18,7 +18,7 @@ class DnnlNode; class DnnlNodeArg { public: DnnlNodeArg(DnnlNode* node, size_t index, bool is_output) - : node_(node), index_(index), is_output_(is_output){}; + : node_(node), index_(index), is_output_(is_output) {}; DnnlNodeArg() = default; DnnlNode* GetNode() { return node_; }; size_t GetIndex() { return index_; }; diff --git a/onnxruntime/core/providers/js/allocator.h b/onnxruntime/core/providers/js/allocator.h index 6aa8313c01f38..267015b2ea58d 100644 --- a/onnxruntime/core/providers/js/allocator.h +++ b/onnxruntime/core/providers/js/allocator.h @@ -15,7 +15,7 @@ class JsCPUAllocator : public CPUAllocator { : CPUAllocator( OrtMemoryInfo("JsCPUAllocator", OrtAllocatorType::OrtDeviceAllocator, OrtDevice(OrtDevice::CPU, OrtDevice::MemType::DEFAULT, 0), - 0, OrtMemTypeCPU)){}; + 0, OrtMemTypeCPU)) {}; }; class JsCustomAllocator : public IAllocator { diff --git a/onnxruntime/core/providers/js/data_transfer.h b/onnxruntime/core/providers/js/data_transfer.h index 3dfb19cfde5ac..6a0e8586776a2 100644 --- a/onnxruntime/core/providers/js/data_transfer.h +++ b/onnxruntime/core/providers/js/data_transfer.h @@ -11,8 +11,8 @@ namespace js { class DataTransfer : public IDataTransfer { public: - DataTransfer(){}; - ~DataTransfer(){}; + DataTransfer() {}; + ~DataTransfer() {}; bool CanCopy(const OrtDevice& src_device, const OrtDevice& dst_device) const override; diff --git a/onnxruntime/core/providers/qnn/builder/opbuilder/expand_op_builder.cc b/onnxruntime/core/providers/qnn/builder/opbuilder/expand_op_builder.cc index 9e31cf9cae21a..d0f6ce9effd9e 100644 --- a/onnxruntime/core/providers/qnn/builder/opbuilder/expand_op_builder.cc +++ 
b/onnxruntime/core/providers/qnn/builder/opbuilder/expand_op_builder.cc @@ -125,7 +125,7 @@ Status ExpandOpBuilder::ProcessInputs(QnnModelWrapper& qnn_model_wrapper, default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported."); } // switch - } // if-else + } // if-else const std::string& output_name = node_unit.Outputs()[0].node_arg.Name(); std::string shape_input_name(input_name + "_" + output_name); diff --git a/onnxruntime/core/providers/qnn/builder/opbuilder/pad_op_builder.cc b/onnxruntime/core/providers/qnn/builder/opbuilder/pad_op_builder.cc index b7455314578de..5fc6d42a8a179 100644 --- a/onnxruntime/core/providers/qnn/builder/opbuilder/pad_op_builder.cc +++ b/onnxruntime/core/providers/qnn/builder/opbuilder/pad_op_builder.cc @@ -163,7 +163,7 @@ Status ProcessConstantValue(QnnModelWrapper& qnn_model_wrapper, default: return ORT_MAKE_STATUS(ONNXRUNTIME, FAIL, "Type not supported."); } // switch - } // if-else + } // if-else QnnParamWrapper constant_value_param(node_unit.Index(), node_unit.Name(), diff --git a/onnxruntime/core/providers/qnn/builder/qnn_quant_params_wrapper.cc b/onnxruntime/core/providers/qnn/builder/qnn_quant_params_wrapper.cc index da2d517f65697..5fc4fb3db4122 100644 --- a/onnxruntime/core/providers/qnn/builder/qnn_quant_params_wrapper.cc +++ b/onnxruntime/core/providers/qnn/builder/qnn_quant_params_wrapper.cc @@ -10,7 +10,7 @@ #include "core/providers/qnn/builder/qnn_model_wrapper.h" #define ALIGN_PTR_UP(ptr, align, type) \ - reinterpret_cast((reinterpret_cast(ptr) + (align)-1) & ~((align)-1)) + reinterpret_cast((reinterpret_cast(ptr) + (align) - 1) & ~((align) - 1)) namespace onnxruntime { namespace qnn { diff --git a/onnxruntime/core/providers/rocm/rocm_profiler.h b/onnxruntime/core/providers/rocm/rocm_profiler.h index 070cca570f481..d5c7e3f273565 100644 --- a/onnxruntime/core/providers/rocm/rocm_profiler.h +++ b/onnxruntime/core/providers/rocm/rocm_profiler.h @@ -34,7 +34,7 @@ class RocmProfiler final : public EpProfiler { 
ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(RocmProfiler); ~RocmProfiler() {} bool StartProfiling(TimePoint) override { return true; } - void EndProfiling(TimePoint, Events&) override{}; + void EndProfiling(TimePoint, Events&) override {}; void Start(uint64_t) override{}; void Stop(uint64_t) override{}; }; diff --git a/onnxruntime/core/providers/shared_library/provider_host_api.h b/onnxruntime/core/providers/shared_library/provider_host_api.h index 43d661344d787..e25426b5124dd 100644 --- a/onnxruntime/core/providers/shared_library/provider_host_api.h +++ b/onnxruntime/core/providers/shared_library/provider_host_api.h @@ -24,10 +24,10 @@ struct Provider { virtual ProviderOptions GetProviderOptions(const void* /*provider options struct*/) { return {}; } // Update provider options from key-value string configuration - virtual void UpdateProviderOptions(void* /*provider options to be configured*/, const ProviderOptions& /*key-value string provider options*/){}; + virtual void UpdateProviderOptions(void* /*provider options to be configured*/, const ProviderOptions& /*key-value string provider options*/) {}; // Get provider specific custom op domain list. Provider has the resposibility to release OrtCustomOpDomain instances it creates. 
- virtual void GetCustomOpDomainList(IExecutionProviderFactory* /*pointer to factory instance*/, std::vector& /*provider custom op domain list*/){}; + virtual void GetCustomOpDomainList(IExecutionProviderFactory* /*pointer to factory instance*/, std::vector& /*provider custom op domain list*/) {}; virtual void Initialize() = 0; // Called right after loading the shared library, if this throws any errors Shutdown() will be called and the library unloaded virtual void Shutdown() = 0; // Called right before unloading the shared library diff --git a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider_custom_ops.h b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider_custom_ops.h index 54212d34aa2ce..a72de6ed75399 100644 --- a/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider_custom_ops.h +++ b/onnxruntime/core/providers/tensorrt/tensorrt_execution_provider_custom_ops.h @@ -24,8 +24,8 @@ struct TensorRTCustomKernel { : compute_stream_(compute_stream) { } - void Compute(OrtKernelContext* /*context*/){ - // The implementation is in TensorRT plugin. No need to implement it here. + void Compute(OrtKernelContext* /*context*/) { + // The implementation is in TensorRT plugin. No need to implement it here. }; private: diff --git a/onnxruntime/core/providers/vitisai/vitisai_provider_factory.cc b/onnxruntime/core/providers/vitisai/vitisai_provider_factory.cc index dc34419ef936f..453db30e1320f 100644 --- a/onnxruntime/core/providers/vitisai/vitisai_provider_factory.cc +++ b/onnxruntime/core/providers/vitisai/vitisai_provider_factory.cc @@ -46,7 +46,7 @@ struct VitisAI_Provider : Provider { } }; // Get provider specific custom op domain list. Provider has the resposibility to release OrtCustomOpDomain instances it creates. 
- void GetCustomOpDomainList(IExecutionProviderFactory*, std::vector&) override{}; + void GetCustomOpDomainList(IExecutionProviderFactory*, std::vector&) override {}; // Called right after loading the shared library, if this throws any errors Shutdown() will be called and the library unloaded void Initialize() override { initialize_vitisai_ep(); } // Called right before unloading the shared library diff --git a/onnxruntime/core/providers/vsinpu/builders/impl/elementwise_op_builder.h b/onnxruntime/core/providers/vsinpu/builders/impl/elementwise_op_builder.h index df2e429f58b2f..4c10ba01b1c2e 100644 --- a/onnxruntime/core/providers/vsinpu/builders/impl/elementwise_op_builder.h +++ b/onnxruntime/core/providers/vsinpu/builders/impl/elementwise_op_builder.h @@ -47,7 +47,7 @@ namespace npu { std::vector>& outputs, \ const NodeUnit& node_unit) override { \ LOGS_DEFAULT(INFO) << "Creating " << #onnx_op_type << " Op"; \ - auto op = graph_ep->GetGraph() -> CreateOperation(); \ + auto op = graph_ep->GetGraph()->CreateOperation(); \ (*op).BindInputs(inputs).BindOutputs(outputs); \ return true; \ ; \ diff --git a/onnxruntime/core/providers/vsinpu/builders/op_builder_factory.h b/onnxruntime/core/providers/vsinpu/builders/op_builder_factory.h index 27c148c1672c5..dc0969429b8ff 100644 --- a/onnxruntime/core/providers/vsinpu/builders/op_builder_factory.h +++ b/onnxruntime/core/providers/vsinpu/builders/op_builder_factory.h @@ -60,10 +60,9 @@ using createIOpBuildItemFunc = std::function()>; using OpBuildItemType = std::map>; static const std::map reg = { -#define REGISTER_OP_BUILDER(ONNX_NODE_TYPE, BUILDER_TYPE) \ - { \ - ONNX_NODE_TYPE, [] { return std::make_unique(); } \ - } +#define REGISTER_OP_BUILDER(ONNX_NODE_TYPE, BUILDER_TYPE) \ + { \ + ONNX_NODE_TYPE, [] { return std::make_unique(); }} REGISTER_OP_BUILDER("Add", AddOpBuilder), REGISTER_OP_BUILDER("Sub", SubOpBuilder), diff --git a/onnxruntime/python/onnxruntime_pybind_iobinding.cc 
b/onnxruntime/python/onnxruntime_pybind_iobinding.cc index 51a52dbfcb3bc..37081cd0ff2b4 100644 --- a/onnxruntime/python/onnxruntime_pybind_iobinding.cc +++ b/onnxruntime/python/onnxruntime_pybind_iobinding.cc @@ -155,11 +155,7 @@ void addIoBindingMethods(pybind11::module& m) { .def("clear_binding_outputs", [](SessionIOBinding* io_binding) -> void { io_binding->Get()->ClearOutputs(); }) - .def( - "get_outputs", [](const SessionIOBinding* io_binding) -> const std::vector& { - return io_binding->Get()->GetOutputs(); - }, - py::return_value_policy::reference_internal) + .def("get_outputs", [](const SessionIOBinding* io_binding) -> const std::vector& { return io_binding->Get()->GetOutputs(); }, py::return_value_policy::reference_internal) .def("copy_outputs_to_cpu", [](const SessionIOBinding* io_binding) -> py::list { const std::vector& outputs = io_binding->Get()->GetOutputs(); @@ -180,8 +176,7 @@ void addIoBindingMethods(pybind11::module& m) { } ++pos; } - return result; - }); + return result; }); } } // namespace python diff --git a/onnxruntime/python/onnxruntime_pybind_ortvalue.cc b/onnxruntime/python/onnxruntime_pybind_ortvalue.cc index 94235b3043bc7..d76b9032afe73 100644 --- a/onnxruntime/python/onnxruntime_pybind_ortvalue.cc +++ b/onnxruntime/python/onnxruntime_pybind_ortvalue.cc @@ -226,7 +226,7 @@ void addOrtValueMethods(pybind11::module& m) { ORT_THROW("Only OrtValues that are Tensors/SparseTensors are currently supported"); #else - ORT_THROW("Only OrtValues that are Tensors are supported in this build"); + ORT_THROW("Only OrtValues that are Tensors are supported in this build"); #endif }) .def("shape", [](const OrtValue* ort_value) -> py::list { @@ -275,26 +275,15 @@ void addOrtValueMethods(pybind11::module& m) { return *ONNX_NAMESPACE::Utils::DataTypeUtils::ToType(*type_proto); }) - .def( - "element_type", [](const OrtValue* ort_value) -> int32_t { - return GetTensorProtoType(*ort_value); - }, - "Returns an integer equal to the ONNX tensor proto type of the 
tensor or sequence. " - "This integer is one type defined by ONNX TensorProto_DataType " - "(such as onnx.TensorProto.FLOAT)." - "Raises an exception in any other case.") - .def("has_value", [](const OrtValue* ort_value) -> bool { - return ort_value->IsAllocated(); - }) - .def("is_tensor", [](const OrtValue* ort_value) -> bool { - return ort_value->IsTensor(); - }) - .def("is_sparse_tensor", [](const OrtValue* ort_value) -> bool { - return ort_value->IsSparseTensor(); - }) - .def("is_tensor_sequence", [](const OrtValue* ort_value) -> bool { - return ort_value->IsTensorSequence(); - }) + .def("element_type", [](const OrtValue* ort_value) -> int32_t { return GetTensorProtoType(*ort_value); }, + "Returns an integer equal to the ONNX tensor proto type of the tensor or sequence. " + "This integer is one type defined by ONNX TensorProto_DataType " + "(such as onnx.TensorProto.FLOAT)." + "Raises an exception in any other case.") + .def("has_value", [](const OrtValue* ort_value) -> bool { return ort_value->IsAllocated(); }) + .def("is_tensor", [](const OrtValue* ort_value) -> bool { return ort_value->IsTensor(); }) + .def("is_sparse_tensor", [](const OrtValue* ort_value) -> bool { return ort_value->IsSparseTensor(); }) + .def("is_tensor_sequence", [](const OrtValue* ort_value) -> bool { return ort_value->IsTensorSequence(); }) // Converts Tensor into a numpy array .def("numpy", [](const OrtValue* ml_value) -> py::object { ORT_ENFORCE(ml_value->IsTensor(), "Only OrtValues that are Tensors are convertible to Numpy objects"); @@ -310,37 +299,22 @@ void addOrtValueMethods(pybind11::module& m) { #else py::object obj = GetPyObjFromTensor(*ml_value, nullptr, nullptr); #endif - return obj; - }) + return obj; }) #ifdef ENABLE_TRAINING - .def( - "to_dlpack", [](OrtValue* ort_value) -> py::object { - return py::reinterpret_steal(ToDlpack(*ort_value)); - }, - "Returns a DLPack representing the tensor. 
This method does not copy the pointer shape, " - "instead, it copies the pointer value. The OrtValue must be persist until the dlpack structure " - "is consumed.") - .def_static( - "from_dlpack", [](py::object data, bool is_bool_tensor) { - return FromDlpack(data.ptr(), is_bool_tensor); - }, - py::arg("data"), py::arg("is_bool_tensor") = false, "Converts a tensor from a external library into an OrtValue by means of the __dlpack__ protocol.") - .def( - "__dlpack__", [](OrtValue* ort_value, py::object /* stream */) -> py::object { - return py::reinterpret_steal(ToDlpack(*ort_value)); - }, - py::arg("stream") = py::none(), - "Returns a DLPack representing the tensor (part of __dlpack__ protocol). " - "This method does not copy the pointer shape, instead, it copies the pointer value. " - "The OrtValue must persist until the dlpack structure is consumed.") - .def( - "__dlpack_device__", [](const OrtValue* ort_value) -> py::tuple { + .def("to_dlpack", [](OrtValue* ort_value) -> py::object { return py::reinterpret_steal(ToDlpack(*ort_value)); }, + "Returns a DLPack representing the tensor. This method does not copy the pointer shape, " + "instead, it copies the pointer value. The OrtValue must be persist until the dlpack structure " + "is consumed.") + .def_static("from_dlpack", [](py::object data, bool is_bool_tensor) { return FromDlpack(data.ptr(), is_bool_tensor); }, py::arg("data"), py::arg("is_bool_tensor") = false, "Converts a tensor from a external library into an OrtValue by means of the __dlpack__ protocol.") + .def("__dlpack__", [](OrtValue* ort_value, py::object /* stream */) -> py::object { return py::reinterpret_steal(ToDlpack(*ort_value)); }, py::arg("stream") = py::none(), + "Returns a DLPack representing the tensor (part of __dlpack__ protocol). " + "This method does not copy the pointer shape, instead, it copies the pointer value. 
" + "The OrtValue must persist until the dlpack structure is consumed.") + .def("__dlpack_device__", [](const OrtValue* ort_value) -> py::tuple { ORT_ENFORCE(ort_value->IsTensor(), "Only tensor type OrtValues are supported"); const onnxruntime::Tensor& tensor = ort_value->Get(); DLDevice device = onnxruntime::dlpack::GetDlpackDevice(*ort_value, tensor.Location().device.Id()); - return py::make_tuple(static_cast(device.device_type), device.device_id); - }, - "Returns a tuple of integers, (device, device index) (part of __dlpack__ protocol).") + return py::make_tuple(static_cast(device.device_type), device.device_id); }, "Returns a tuple of integers, (device, device index) (part of __dlpack__ protocol).") #endif ; @@ -350,13 +324,8 @@ void addOrtValueMethods(pybind11::module& m) { v->push_back(ortvalue); }) #ifdef ENABLE_TRAINING - .def( - "push_back", [](std::vector* v, py::object dlpack_tensor, const bool is_bool_tensor) { - v->push_back(FromDlpack(dlpack_tensor.ptr(), is_bool_tensor)); - }, - "Add a new OrtValue after being ownership was transferred from the DLPack structure.", py::arg("dlpack_tensor"), py::arg("is_bool_tensor") = false) - .def( - "push_back_batch", [](std::vector* v, std::vector& torch_tensors, std::vector& data_ptrs, std::vector& element_types, const std::vector>& shapes, const std::vector& devices) { + .def("push_back", [](std::vector* v, py::object dlpack_tensor, const bool is_bool_tensor) { v->push_back(FromDlpack(dlpack_tensor.ptr(), is_bool_tensor)); }, "Add a new OrtValue after being ownership was transferred from the DLPack structure.", py::arg("dlpack_tensor"), py::arg("is_bool_tensor") = false) + .def("push_back_batch", [](std::vector* v, std::vector& torch_tensors, std::vector& data_ptrs, std::vector& element_types, const std::vector>& shapes, const std::vector& devices) { for (size_t i = 0; i < torch_tensors.size(); ++i) { py::object& element_type = element_types.at(i); const std::vector& shape = shapes.at(i); @@ -377,52 +346,36 @@ 
void addOrtValueMethods(pybind11::module& m) { OrtValue ml_value; Tensor::InitOrtValue(ml_type, gsl::make_span(shape), reinterpret_cast(data_ptr), info, ml_value); v->push_back(ml_value); - } - }, - "Add a batch of OrtValue's by wrapping PyTorch tensors.") + } }, "Add a batch of OrtValue's by wrapping PyTorch tensors.") #endif .def("reserve", [](std::vector* v, const size_t len) { v->reserve(len); }) .def("shrink_to_fit", [](std::vector* v) { v->shrink_to_fit(); }) .def("__len__", [](const std::vector& v) { return v.size(); }) - .def( - "__iter__", [](const std::vector& v) { - return py::make_iterator(v.cbegin(), v.cend()); - }, - py::keep_alive<0, 1>()) - .def("__getitem__", [](const std::vector& v, const size_t idx) { - return v.at(idx); - }) - .def( - "bool_tensor_indices", [](std::vector* v) -> std::vector { + .def("__iter__", [](const std::vector& v) { return py::make_iterator(v.cbegin(), v.cend()); }, py::keep_alive<0, 1>()) + .def("__getitem__", [](const std::vector& v, const size_t idx) { return v.at(idx); }) + .def("bool_tensor_indices", [](std::vector* v) -> std::vector { std::vector indices; for (size_t i = 0; i < v->size(); ++i) { if (GetTensorProtoType((*v)[i]) == ONNX_NAMESPACE::TensorProto_DataType_BOOL) { indices.push_back(static_cast(i)); } } - return indices; - }, - "Returns the indices of every boolean tensor in this vector of OrtValue. " - "In case of a boolean tensor, method to_dlpacks returns a uint8 tensor instead of a boolean tensor. " - "If torch consumes the dlpack structure, `.to(torch.bool)` must be applied to the torch tensor " - "to get a boolean tensor.") + return indices; }, + "Returns the indices of every boolean tensor in this vector of OrtValue. " + "In case of a boolean tensor, method to_dlpacks returns a uint8 tensor instead of a boolean tensor. 
" + "If torch consumes the dlpack structure, `.to(torch.bool)` must be applied to the torch tensor " + "to get a boolean tensor.") #ifdef ENABLE_TRAINING - .def("dlpack_at", [](std::vector* v, const size_t idx) { - return py::reinterpret_steal(ToDlpack(v->at(idx))); - }) + .def("dlpack_at", [](std::vector* v, const size_t idx) { return py::reinterpret_steal(ToDlpack(v->at(idx))); }) #endif - .def( - "element_type_at", [](std::vector* v, const size_t idx) -> int32_t { - return GetTensorProtoType(v->at(idx)); - }, - "Returns an integer equal to the ONNX proto type of the tensor at position i. " - "This integer is one type defined by ONNX TensorProto_DataType " - "(such as onnx.TensorProto.FLOAT)." - "Raises an exception in any other case.", - py::arg("idx")) + .def("element_type_at", [](std::vector* v, const size_t idx) -> int32_t { return GetTensorProtoType(v->at(idx)); }, + "Returns an integer equal to the ONNX proto type of the tensor at position i. " + "This integer is one type defined by ONNX TensorProto_DataType " + "(such as onnx.TensorProto.FLOAT)." + "Raises an exception in any other case.", + py::arg("idx")) #ifdef ENABLE_TRAINING - .def( - "to_dlpacks", [](const std::vector& v, py::object to_tensor) -> py::list { + .def("to_dlpacks", [](const std::vector& v, py::object to_tensor) -> py::list { if (v.size() == 0) return py::list(); @@ -469,9 +422,8 @@ void addOrtValueMethods(pybind11::module& m) { Py_DECREF(capsule); } } - return list_dlpacks; - }, - R"pbdoc(Converts all OrtValue into tensors through DLPack protocol, the method creates + return list_dlpacks; }, + R"pbdoc(Converts all OrtValue into tensors through DLPack protocol, the method creates a DLPack structure for every tensors, then calls python function `to_tensor` to a new object consuming the DLPack structure or return a list of capsule if this function is None. @@ -488,7 +440,7 @@ It creates many tensors acquiring ownership of existing OrtValue. 
This method saves one object creation and an C++ allocation for every transferred tensor. )pbdoc", - py::arg("to_tensor")) + py::arg("to_tensor")) #endif ; diff --git a/onnxruntime/python/onnxruntime_pybind_sparse_tensor.cc b/onnxruntime/python/onnxruntime_pybind_sparse_tensor.cc index 7dcead113ac4f..1154f3b9f88b8 100644 --- a/onnxruntime/python/onnxruntime_pybind_sparse_tensor.cc +++ b/onnxruntime/python/onnxruntime_pybind_sparse_tensor.cc @@ -397,8 +397,7 @@ void addSparseTensorMethods(pybind11::module& m) { // pybind apparently has a bug with returning enums from def_property_readonly or methods // returning a method object instead of the enumeration value // so we are using def_property and throw on a potential modification - .def_property( - "format", [](const PySparseTensor* py_tensor) -> OrtSparseFormat { + .def_property("format", [](const PySparseTensor* py_tensor) -> OrtSparseFormat { const SparseTensor& tensor = py_tensor->Instance(); auto retval = OrtSparseFormat::ORT_SPARSE_UNDEFINED; switch (tensor.Format()) { diff --git a/onnxruntime/python/onnxruntime_pybind_state.cc b/onnxruntime/python/onnxruntime_pybind_state.cc index 6b5daf8cb882b..679ccce7fb07a 100644 --- a/onnxruntime/python/onnxruntime_pybind_state.cc +++ b/onnxruntime/python/onnxruntime_pybind_state.cc @@ -1425,7 +1425,7 @@ void addGlobalMethods(py::module& m) { ORT_UNUSED_PARAMETER(algo); ORT_THROW("set_cudnn_conv_algo_search is not supported in ROCM"); #else - cudnn_conv_algo_search = algo; + cudnn_conv_algo_search = algo; #endif }); // TODO remove deprecated global config @@ -1436,7 +1436,7 @@ void addGlobalMethods(py::module& m) { ORT_UNUSED_PARAMETER(use_single_stream); ORT_THROW("set_do_copy_in_default_stream is not supported in ROCM"); #else - do_copy_in_default_stream = use_single_stream; + do_copy_in_default_stream = use_single_stream; #endif }); // TODO remove deprecated global config @@ -1801,10 +1801,10 @@ Applies to session load, initialization, etc. 
Default is 0.)pbdoc") } ORT_THROW_IF_ERROR(options->value.AddExternalInitializers(names_ptrs, values_ptrs)); #else - ORT_UNUSED_PARAMETER(options); - ORT_UNUSED_PARAMETER(names); - ORT_UNUSED_PARAMETER(ort_values); - ORT_THROW("External initializers are not supported in this build."); + ORT_UNUSED_PARAMETER(options); + ORT_UNUSED_PARAMETER(names); + ORT_UNUSED_PARAMETER(ort_values); + ORT_THROW("External initializers are not supported in this build."); #endif }); @@ -1866,8 +1866,7 @@ including arg name, arg type (contains both type and shape).)pbdoc") return *(na.Type()); }, "node type") - .def( - "__str__", [](const onnxruntime::NodeArg& na) -> std::string { + .def("__str__", [](const onnxruntime::NodeArg& na) -> std::string { std::ostringstream res; res << "NodeArg(name='" << na.Name() << "', type='" << *(na.Type()) << "', shape="; auto shape = na.Shape(); @@ -1893,11 +1892,8 @@ including arg name, arg type (contains both type and shape).)pbdoc") } res << ")"; - return std::string(res.str()); - }, - "converts the node into a readable string") - .def_property_readonly( - "shape", [](const onnxruntime::NodeArg& na) -> std::vector { + return std::string(res.str()); }, "converts the node into a readable string") + .def_property_readonly("shape", [](const onnxruntime::NodeArg& na) -> std::vector { auto shape = na.Shape(); std::vector arr; if (shape == nullptr || shape->dim_size() == 0) { @@ -1914,9 +1910,7 @@ including arg name, arg type (contains both type and shape).)pbdoc") arr[i] = py::none(); } } - return arr; - }, - "node shape (assuming the node holds a tensor)"); + return arr; }, "node shape (assuming the node holds a tensor)"); py::class_ sessionObjectInitializer(m, "SessionObjectInitializer"); py::class_(m, "InferenceSession", R"pbdoc(This is the main class used to run a model.)pbdoc") @@ -2107,51 +2101,28 @@ including arg name, arg type (contains both type and shape).)pbdoc") .def_property_readonly("get_profiling_start_time_ns", [](const 
PyInferenceSession* sess) -> uint64_t { return sess->GetSessionHandle()->GetProfiling().GetStartTimeNs(); }) - .def( - "get_providers", [](const PyInferenceSession* sess) -> const std::vector& { - return sess->GetSessionHandle()->GetRegisteredProviderTypes(); - }, - py::return_value_policy::reference_internal) - .def( - "get_provider_options", [](const PyInferenceSession* sess) -> const ProviderOptionsMap& { - return sess->GetSessionHandle()->GetAllProviderOptions(); - }, - py::return_value_policy::reference_internal) - .def_property_readonly( - "session_options", [](const PyInferenceSession* sess) -> PySessionOptions* { + .def("get_providers", [](const PyInferenceSession* sess) -> const std::vector& { return sess->GetSessionHandle()->GetRegisteredProviderTypes(); }, py::return_value_policy::reference_internal) + .def("get_provider_options", [](const PyInferenceSession* sess) -> const ProviderOptionsMap& { return sess->GetSessionHandle()->GetAllProviderOptions(); }, py::return_value_policy::reference_internal) + .def_property_readonly("session_options", [](const PyInferenceSession* sess) -> PySessionOptions* { auto session_options = std::make_unique(); session_options->value = sess->GetSessionHandle()->GetSessionOptions(); - return session_options.release(); - }, - py::return_value_policy::take_ownership) - .def_property_readonly( - "inputs_meta", [](const PyInferenceSession* sess) -> const std::vector& { + return session_options.release(); }, py::return_value_policy::take_ownership) + .def_property_readonly("inputs_meta", [](const PyInferenceSession* sess) -> const std::vector& { auto res = sess->GetSessionHandle()->GetModelInputs(); OrtPybindThrowIfError(res.first); - return *(res.second); - }, - py::return_value_policy::reference_internal) - .def_property_readonly( - "outputs_meta", [](const PyInferenceSession* sess) -> const std::vector& { + return *(res.second); }, py::return_value_policy::reference_internal) + .def_property_readonly("outputs_meta", [](const 
PyInferenceSession* sess) -> const std::vector& { auto res = sess->GetSessionHandle()->GetModelOutputs(); OrtPybindThrowIfError(res.first); - return *(res.second); - }, - py::return_value_policy::reference_internal) - .def_property_readonly( - "overridable_initializers", [](const PyInferenceSession* sess) -> const std::vector& { + return *(res.second); }, py::return_value_policy::reference_internal) + .def_property_readonly("overridable_initializers", [](const PyInferenceSession* sess) -> const std::vector& { auto res = sess->GetSessionHandle()->GetOverridableInitializers(); OrtPybindThrowIfError(res.first); - return *(res.second); - }, - py::return_value_policy::reference_internal) - .def_property_readonly( - "model_meta", [](const PyInferenceSession* sess) -> const onnxruntime::ModelMetadata& { + return *(res.second); }, py::return_value_policy::reference_internal) + .def_property_readonly("model_meta", [](const PyInferenceSession* sess) -> const onnxruntime::ModelMetadata& { auto res = sess->GetSessionHandle()->GetModelMetadata(); OrtPybindThrowIfError(res.first); - return *(res.second); - }, - py::return_value_policy::reference_internal) + return *(res.second); }, py::return_value_policy::reference_internal) .def("run_with_iobinding", [](PyInferenceSession* sess, SessionIOBinding& io_binding, RunOptions* run_options = nullptr) -> void { Status status; // release GIL to allow multiple python threads to invoke Run() in parallel. 
@@ -2161,8 +2132,7 @@ including arg name, arg type (contains both type and shape).)pbdoc") else status = sess->GetSessionHandle()->Run(*run_options, *io_binding.Get()); if (!status.IsOK()) - throw std::runtime_error("Error in execution: " + status.ErrorMessage()); - }) + throw std::runtime_error("Error in execution: " + status.ErrorMessage()); }) .def("get_tuning_results", [](PyInferenceSession* sess) -> py::list { #if !defined(ORT_MINIMAL_BUILD) auto results = sess->GetSessionHandle()->GetTuningResults(); @@ -2177,8 +2147,8 @@ including arg name, arg type (contains both type and shape).)pbdoc") return ret; #else - ORT_UNUSED_PARAMETER(sess); - ORT_THROW("TunableOp and get_tuning_results are not supported in this build."); + ORT_UNUSED_PARAMETER(sess); + ORT_THROW("TunableOp and get_tuning_results are not supported in this build."); #endif }) .def("set_tuning_results", [](PyInferenceSession* sess, py::list results, bool error_on_invalid) -> void { @@ -2209,10 +2179,10 @@ including arg name, arg type (contains both type and shape).)pbdoc") throw std::runtime_error("Error in execution: " + status.ErrorMessage()); } #else - ORT_UNUSED_PARAMETER(sess); - ORT_UNUSED_PARAMETER(results); - ORT_UNUSED_PARAMETER(error_on_invalid); - ORT_THROW("TunableOp and set_tuning_results are not supported in this build."); + ORT_UNUSED_PARAMETER(sess); + ORT_UNUSED_PARAMETER(results); + ORT_UNUSED_PARAMETER(error_on_invalid); + ORT_THROW("TunableOp and set_tuning_results are not supported in this build."); #endif }); diff --git a/onnxruntime/python/onnxruntime_validation.py b/onnxruntime/python/onnxruntime_validation.py index 10d9f469863c4..81e6461e4417f 100644 --- a/onnxruntime/python/onnxruntime_validation.py +++ b/onnxruntime/python/onnxruntime_validation.py @@ -24,8 +24,7 @@ def check_distro_info(): if __my_distro_ver__ not in ["10", "11"]: warnings.warn( - "Unsupported Windows version (%s). ONNX Runtime supports Windows 10 and above, only." 
- % __my_distro_ver__ + f"Unsupported Windows version ({__my_distro_ver__}). ONNX Runtime supports Windows 10 and above, only." ) elif __my_system__ == "linux": """Although the 'platform' python module for getting Distro information works well on standard OS images @@ -54,11 +53,11 @@ def check_distro_info(): if int(__my_distro_ver__.split(".")[0]) < 11: warnings.warn( - "Unsupported macOS version (%s). ONNX Runtime supports macOS 11.0 or later." % (__my_distro_ver__) + f"Unsupported macOS version ({__my_distro_ver__}). ONNX Runtime supports macOS 11.0 or later." ) else: warnings.warn( - "Unsupported platform (%s). ONNX Runtime supports Linux, macOS and Windows platforms, only." % __my_system__ + f"Unsupported platform ({__my_system__}). ONNX Runtime supports Linux, macOS and Windows platforms, only." ) @@ -115,10 +114,10 @@ def validate_build_package_info(): cudart_version = None def print_build_package_info(): - warnings.warn("onnxruntime training package info: package_name: %s" % package_name) - warnings.warn("onnxruntime training package info: __version__: %s" % version) - warnings.warn("onnxruntime training package info: cuda_version: %s" % cuda_version) - warnings.warn("onnxruntime build info: cudart_version: %s" % cudart_version) + warnings.warn(f"onnxruntime training package info: package_name: {package_name}") + warnings.warn(f"onnxruntime training package info: __version__: {version}") + warnings.warn(f"onnxruntime training package info: cuda_version: {cuda_version}") + warnings.warn(f"onnxruntime build info: cudart_version: {cudart_version}") # collection cuda library info from current environment. 
from onnxruntime.capi.onnxruntime_collect_build_info import find_cudart_versions @@ -127,7 +126,7 @@ def print_build_package_info(): if cudart_version and local_cudart_versions and cudart_version not in local_cudart_versions: print_build_package_info() warnings.warn("WARNING: failed to find cudart version that matches onnxruntime build info") - warnings.warn("WARNING: found cudart versions: %s" % local_cudart_versions) + warnings.warn(f"WARNING: found cudart versions: {local_cudart_versions}") else: # TODO: rcom pass diff --git a/onnxruntime/python/tools/pytorch_export_contrib_ops.py b/onnxruntime/python/tools/pytorch_export_contrib_ops.py index aeb78f03dd721..d8cf3c1304219 100644 --- a/onnxruntime/python/tools/pytorch_export_contrib_ops.py +++ b/onnxruntime/python/tools/pytorch_export_contrib_ops.py @@ -22,7 +22,7 @@ def _reg(symbolic_fn: typing.Callable): - name = "::%s" % symbolic_fn.__name__ + name = f"::{symbolic_fn.__name__}" torch.onnx.register_custom_op_symbolic(name, symbolic_fn, _OPSET_VERSION) _registered_ops.add(name) diff --git a/onnxruntime/python/tools/quantization/calibrate.py b/onnxruntime/python/tools/quantization/calibrate.py index 65875d09102bd..703accbcc1c48 100644 --- a/onnxruntime/python/tools/quantization/calibrate.py +++ b/onnxruntime/python/tools/quantization/calibrate.py @@ -1076,7 +1076,7 @@ def get_entropy_threshold(self, histogram, num_quantized_bins): for i in range(num_half_quantized_bin, zero_bin_index + 1, 1): start_index = zero_bin_index - i - end_index = zero_bin_index + i + 1 if (zero_bin_index + i + 1) <= num_bins else num_bins + end_index = min(zero_bin_index + i + 1, num_bins) thresholds[i - num_half_quantized_bin] = (hist_edges[start_index], hist_edges[end_index]) diff --git a/onnxruntime/python/tools/symbolic_shape_infer.py b/onnxruntime/python/tools/symbolic_shape_infer.py index ac959d5c061f7..f88011c7a2cf9 100755 --- a/onnxruntime/python/tools/symbolic_shape_infer.py +++ b/onnxruntime/python/tools/symbolic_shape_infer.py 
@@ -24,7 +24,7 @@ def get_attribute(node, attr_name, default_value=None): def get_dim_from_proto(dim): - return getattr(dim, dim.WhichOneof("value")) if type(dim.WhichOneof("value")) is str else None # noqa: E721 + return getattr(dim, dim.WhichOneof("value")) if type(dim.WhichOneof("value")) is str else None def is_sequence(type_proto): @@ -92,19 +92,19 @@ def get_opset(mp, domain=None): def as_scalar(x): - if type(x) == list: # noqa: E721 + if type(x) is list: assert len(x) == 1 return x[0] - elif type(x) == np.ndarray: + elif type(x) is np.ndarray: return x.item() else: return x def as_list(x, keep_none): - if type(x) == list: # noqa: E721 + if type(x) is list: return x - elif type(x) == np.ndarray: + elif type(x) is np.ndarray: return list(x) elif keep_none and x is None: return None @@ -113,7 +113,7 @@ def as_list(x, keep_none): def sympy_reduce_product(x): - if type(x) == list: # noqa: E721 + if type(x) is list: value = sympy.Integer(1) for v in x: value = value * v @@ -258,7 +258,7 @@ def __init__(self, int_max, auto_merge, guess_output_rank, verbose, prefix=""): self.prefix_ = prefix def _add_suggested_merge(self, symbols, apply=False): - assert all([(type(s) == str and s in self.symbolic_dims_) or is_literal(s) for s in symbols]) # noqa: E721 + assert all([(type(s) is str and s in self.symbolic_dims_) or is_literal(s) for s in symbols]) symbols = set(symbols) for k, v in self.suggested_merge_.items(): if k in symbols: @@ -278,7 +278,7 @@ def _add_suggested_merge(self, symbols, apply=False): break if map_to is None: for s in symbols: - if type(self.symbolic_dims_[s]) == sympy.Symbol: + if type(self.symbolic_dims_[s]) is sympy.Symbol: map_to = s break # when nothing to map to, use the shorter one @@ -328,7 +328,7 @@ def _preprocess(self, in_mp): ) def _merge_symbols(self, dims): - if not all([type(d) == str for d in dims]): # noqa: E721 + if not all([type(d) is str for d in dims]): if self.auto_merge_: unique_dims = list(set(dims)) is_int = [is_literal(d) for 
d in unique_dims] @@ -408,7 +408,7 @@ def _get_shape_rank(self, node, idx): def _get_sympy_shape(self, node, idx): sympy_shape = [] for d in self._get_shape(node, idx): - if type(d) == str: # noqa: E721 + if type(d) is str: sympy_shape.append( self.symbolic_dims_[d] if d in self.symbolic_dims_ @@ -590,7 +590,7 @@ def _onnx_infer_subgraph(self, node, subgraph, use_node_input=True, inc_subgraph # for new symbolic dims from subgraph output, add to main graph symbolic dims subgraph_shapes = [get_shape_from_value_info(o) for o in symbolic_shape_inference.out_mp_.graph.output] subgraph_new_symbolic_dims = { - d for s in subgraph_shapes if s for d in s if type(d) == str and d not in self.symbolic_dims_ # noqa: E721 + d for s in subgraph_shapes if s for d in s if type(d) is str and d not in self.symbolic_dims_ } new_dims = {} for d in subgraph_new_symbolic_dims: @@ -610,7 +610,7 @@ def int_or_float(value, allow_float_values): if all([v is not None for v in values]): # some shape compute is in floating point, cast to int for sympy for i, v in enumerate(values): - if type(v) != np.ndarray: + if type(v) is not np.ndarray: continue if len(v.shape) > 1: new_v = None # ignore value for rank > 1 @@ -924,7 +924,7 @@ def _infer_Concat(self, node): # noqa: N802 if all([d == dims[0] for d in dims]): continue merged = self._merge_symbols(dims) - if type(merged) == str: # noqa: E721 + if type(merged) is str: sympy_shape[d] = self.symbolic_dims_[merged] if merged else None else: sympy_shape[d] = merged @@ -1060,7 +1060,7 @@ def _infer_Einsum(self, node): # noqa: N802 dim = shape[-i] if letter not in letter_to_dim: letter_to_dim[letter] = dim - elif type(dim) != sympy.Symbol: + elif type(dim) is not sympy.Symbol: letter_to_dim[letter] = dim num_operands = num_operands + 1 @@ -1127,8 +1127,8 @@ def _infer_Gather(self, node): # noqa: N802 idx = self._try_get_value(node, 1) if idx is not None: data = self.sympy_data_[node.input[0]] - if type(data) == list: # noqa: E721 - if type(idx) == 
np.ndarray and len(idx.shape) == 1: + if type(data) is list: + if type(idx) is np.ndarray and len(idx.shape) == 1: self.sympy_data_[node.output[0]] = [data[int(i)] for i in idx] else: self.sympy_data_[node.output[0]] = data[int(idx)] @@ -1530,7 +1530,7 @@ def _infer_aten_upsample(self, node): new_shape = input_shape[:2] output_size = self._try_get_value(node, 1) if output_size is not None: - new_shape += [dim_size.item() if type(dim_size) == np.int64 else dim_size for dim_size in output_size] + new_shape += [dim_size.item() if type(dim_size) is np.int64 else dim_size for dim_size in output_size] else: rank = len(input_shape) new_shape += [str(self._new_symbolic_dim_from_output(node, 0, i)) for i in range(2, rank)] @@ -1645,7 +1645,7 @@ def _infer_Reshape(self, node): # noqa: N802 deferred_dim_idx = -1 non_deferred_size = 1 for i, d in enumerate(shape_value): - if type(d) == sympy.Symbol: + if type(d) is sympy.Symbol: new_sympy_shape.append(d) elif d == 0: new_sympy_shape.append(input_sympy_shape[i]) @@ -1940,7 +1940,7 @@ def handle_negative_index(index, bound): # handle sympy_data if needed, for slice in shape computation if ( node.input[0] in self.sympy_data_ - and [0] == axes + and axes == [0] and starts is not None and len(starts) == 1 and ends is not None @@ -1949,8 +1949,8 @@ def handle_negative_index(index, bound): and len(steps) == 1 ): input_sympy_data = self.sympy_data_[node.input[0]] - if type(input_sympy_data) == list or ( # noqa: E721 - type(input_sympy_data) == np.array and len(input_sympy_data.shape) == 1 + if type(input_sympy_data) is list or ( + type(input_sympy_data) is np.array and len(input_sympy_data.shape) == 1 ): self.sympy_data_[node.output[0]] = input_sympy_data[starts[0] : ends[0] : steps[0]] @@ -2616,7 +2616,7 @@ def _infer_impl(self, start_sympy_data=None): # some models use None for symbolic dim in input, replace it with a string input_dims[i_dim].dim_param = str(self._new_symbolic_dim(i.name, i_dim)) - self.input_symbols_.update([d for 
d in input_shape if type(d) == str]) # noqa: E721 + self.input_symbols_.update([d for d in input_shape if type(d) is str]) for s in self.input_symbols_: if s in self.suggested_merge_: diff --git a/onnxruntime/python/tools/tensorrt/perf/benchmark.py b/onnxruntime/python/tools/tensorrt/perf/benchmark.py index 8af074f24acc9..4fa5d0c0ea034 100644 --- a/onnxruntime/python/tools/tensorrt/perf/benchmark.py +++ b/onnxruntime/python/tools/tensorrt/perf/benchmark.py @@ -925,8 +925,8 @@ def find_model_path(path): logger.info(target_model_path) if len(target_model_path) > 1: - logger.error("We expect to find only one model in " + path) # noqa: G003 - raise + logger.error("We expect to find only one model in %s", path) + raise RuntimeError return target_model_path[0] @@ -1007,7 +1007,7 @@ def parse_models_info_from_file(root_dir, path, models): models[row["model_name"]] = {} else: logger.error("Model name must be provided in models_info.json") - raise + raise RuntimeError model = models[row["model_name"]] @@ -1018,19 +1018,19 @@ def parse_models_info_from_file(root_dir, path, models): model["working_directory"] = os.path.join(root_working_directory, row["working_directory"]) else: logger.error("Model path must be provided in models_info.json") - raise + raise RuntimeError if "model_path" in row: model["model_path"] = row["model_path"] else: logger.error("Model path must be provided in models_info.json") - raise + raise RuntimeError if "test_data_path" in row: model["test_data_path"] = row["test_data_path"] else: logger.error("Test data path must be provided in models_info.json") - raise + raise RuntimeError if "model_path_fp16" in row: model["model_path_fp16"] = row["model_path_fp16"] diff --git a/onnxruntime/python/tools/tensorrt/perf/perf_utils.py b/onnxruntime/python/tools/tensorrt/perf/perf_utils.py index c447bf9cffe27..0d0f7cc48f361 100644 --- a/onnxruntime/python/tools/tensorrt/perf/perf_utils.py +++ b/onnxruntime/python/tools/tensorrt/perf/perf_utils.py @@ -234,7 +234,7 
@@ def calculate_trt_op_percentage(trt_op_map, cuda_op_map): if total_ops == 0: print("Error ...") - raise + raise RuntimeError if len(trt_op_map) == 0: total_cuda_and_cpu_ops = total_ops diff --git a/onnxruntime/python/tools/tensorrt/perf/setup_scripts/setup_onnx_zoo.py b/onnxruntime/python/tools/tensorrt/perf/setup_scripts/setup_onnx_zoo.py index 4f763ad84426d..0532dd7c72c1c 100644 --- a/onnxruntime/python/tools/tensorrt/perf/setup_scripts/setup_onnx_zoo.py +++ b/onnxruntime/python/tools/tensorrt/perf/setup_scripts/setup_onnx_zoo.py @@ -71,7 +71,7 @@ def write_json(models): def main(): links = [] with open("links.txt") as fh: - links = [link.rstrip() for link in fh.readlines()] + links = [link.rstrip() for link in fh] model_list = [] for link in links: diff --git a/onnxruntime/python/tools/transformers/benchmark.py b/onnxruntime/python/tools/transformers/benchmark.py index 5ec2ab4e50799..4800c48744236 100644 --- a/onnxruntime/python/tools/transformers/benchmark.py +++ b/onnxruntime/python/tools/transformers/benchmark.py @@ -802,7 +802,7 @@ def main(): try: os.mkdir(args.cache_dir) except OSError: - logger.error("Creation of the directory %s failed" % args.cache_dir) # noqa: G002 + logger.error("Creation of the directory %s failed", args.cache_dir) enable_torch = "torch" in args.engines enable_torch2 = "torch2" in args.engines diff --git a/onnxruntime/python/tools/transformers/bert_test_data.py b/onnxruntime/python/tools/transformers/bert_test_data.py index aa82e047df328..167fc8697ce06 100644 --- a/onnxruntime/python/tools/transformers/bert_test_data.py +++ b/onnxruntime/python/tools/transformers/bert_test_data.py @@ -168,11 +168,11 @@ def output_test_data(directory: str, inputs: Dict[str, np.ndarray]): try: os.mkdir(directory) except OSError: - print("Creation of the directory %s failed" % directory) + print(f"Creation of the directory {directory} failed") else: - print("Successfully created the directory %s " % directory) + print(f"Successfully created the 
directory {directory} ") else: - print("Warning: directory %s existed. Files will be overwritten." % directory) + print(f"Warning: directory {directory} existed. Files will be overwritten.") for index, (name, data) in enumerate(inputs.items()): tensor = numpy_helper.from_array(data, name) diff --git a/onnxruntime/python/tools/transformers/fusion_attention.py b/onnxruntime/python/tools/transformers/fusion_attention.py index f48cabd25fc5c..dc2b38f3928ac 100644 --- a/onnxruntime/python/tools/transformers/fusion_attention.py +++ b/onnxruntime/python/tools/transformers/fusion_attention.py @@ -672,7 +672,7 @@ def create_multihead_attention_node( q_matmul, k_matmul, v_matmul, q_add, k_add, v_add, num_heads ) mha_inputs.extend([q_slice.output[0], k_slice.output[0], v_slice.output[0]]) - elif type(k_matmul) == NodeProto and type(v_matmul) == NodeProto: + elif type(k_matmul) is NodeProto and type(v_matmul) is NodeProto: if self.disable_multi_head_attention_bias: mha_inputs.extend([q_add.output[0], k_matmul.output[0], v_add.output[0]]) else: diff --git a/onnxruntime/python/tools/transformers/fusion_utils.py b/onnxruntime/python/tools/transformers/fusion_utils.py index 726c587ff7043..dbd9e828198ca 100644 --- a/onnxruntime/python/tools/transformers/fusion_utils.py +++ b/onnxruntime/python/tools/transformers/fusion_utils.py @@ -159,7 +159,7 @@ def transpose_2d_int8_tensor(tensor: onnx_proto.TensorProto): tensor (TensorProto): transposed tensor """ if not isinstance(tensor, onnx_proto.TensorProto): - raise ValueError("Expected input type is an ONNX TensorProto but got %s" % type(tensor)) + raise ValueError(f"Expected input type is an ONNX TensorProto but got {type(tensor)}") if len(tensor.dims) != 2 or tensor.data_type != onnx_proto.TensorProto.INT8: raise ValueError("Only INT8 2-D tensors can be transposed") diff --git a/onnxruntime/python/tools/transformers/models/bart/utils/export_summarization_edinit.py 
b/onnxruntime/python/tools/transformers/models/bart/utils/export_summarization_edinit.py index 111520a6e3aeb..8a610fb17671b 100644 --- a/onnxruntime/python/tools/transformers/models/bart/utils/export_summarization_edinit.py +++ b/onnxruntime/python/tools/transformers/models/bart/utils/export_summarization_edinit.py @@ -205,5 +205,5 @@ def export_encoder(args): no_repeat_ngram_size=no_repeat_ngram_size, ) time_cost = time.time() - start_time - print("--- %s seconds ---" % (time_cost)) + print(f"--- {time_cost} seconds ---") print(tokenizer.decode(pred_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)) diff --git a/onnxruntime/python/tools/transformers/models/bart/utils/export_summarization_enc_dec_past.py b/onnxruntime/python/tools/transformers/models/bart/utils/export_summarization_enc_dec_past.py index 29c39730c79ef..afd01ae9d025f 100644 --- a/onnxruntime/python/tools/transformers/models/bart/utils/export_summarization_enc_dec_past.py +++ b/onnxruntime/python/tools/transformers/models/bart/utils/export_summarization_enc_dec_past.py @@ -266,5 +266,5 @@ def export_decoder(args): use_cache=True, ) time_cost = time.time() - start_time - print("--- %s seconds ---" % (time_cost)) + print(f"--- {time_cost} seconds ---") print(tokenizer.decode(pred_ids[0], skip_special_tokens=True, clean_up_tokenization_spaces=False)) diff --git a/onnxruntime/python/tools/transformers/models/bart/utils/onnx_inference.py b/onnxruntime/python/tools/transformers/models/bart/utils/onnx_inference.py index c4c8a2dcba697..7a5cfe42c7efe 100644 --- a/onnxruntime/python/tools/transformers/models/bart/utils/onnx_inference.py +++ b/onnxruntime/python/tools/transformers/models/bart/utils/onnx_inference.py @@ -49,7 +49,7 @@ def run_inference(args): no_repeat_ngram_size=no_repeat_ngram_size, ) time_cost = time.time() - start_time - print("--- %s seconds ---" % (time_cost)) + print(f"--- {time_cost} seconds ---") for j in range(batch_num): for i in range(beam): print( @@ -81,7 +81,7 
@@ def run_inference(args): start_time = time.time() out = sess.run(None, ort_inputs) time_cost = time.time() - start_time - print("--- %s seconds ---" % (time_cost)) + print(f"--- {time_cost} seconds ---") for j in range(batch_num): for i in range(beam): print( diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder.py index 26b9a2792e9e1..0b6d325803554 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/engine_builder.py @@ -117,7 +117,7 @@ def get_cached_model_name(self, model_name): model_name = model_name + "_" + "_".join(self.pipeline_info.controlnet) if hash_source: - model_name += "_" + hashlib.md5("\t".join(hash_source).encode("utf-8")).digest().hex()[:8] + model_name += "_" + hashlib.md5("\t".join(hash_source).encode("utf-8")).hexdigest()[:8] # TODO: When we support original VAE, we shall save custom VAE to another directory. 
diff --git a/onnxruntime/python/tools/transformers/models/stable_diffusion/pipeline_stable_diffusion.py b/onnxruntime/python/tools/transformers/models/stable_diffusion/pipeline_stable_diffusion.py index 1629537dc294f..522cc541c1e57 100644 --- a/onnxruntime/python/tools/transformers/models/stable_diffusion/pipeline_stable_diffusion.py +++ b/onnxruntime/python/tools/transformers/models/stable_diffusion/pipeline_stable_diffusion.py @@ -459,9 +459,9 @@ def denoise_latent( noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) noise_pred = noise_pred_uncond + guidance * (noise_pred_text - noise_pred_uncond) - if type(self.scheduler) == UniPCMultistepScheduler: + if type(self.scheduler) is UniPCMultistepScheduler: latents = self.scheduler.step(noise_pred, timestep, latents, return_dict=False)[0] - elif type(self.scheduler) == LCMScheduler: + elif type(self.scheduler) is LCMScheduler: latents = self.scheduler.step(noise_pred, timestep, latents, generator=self.generator)[0] else: latents = self.scheduler.step(noise_pred, latents, step_offset + step_index, timestep) diff --git a/onnxruntime/test/framework/allocation_planner_test.cc b/onnxruntime/test/framework/allocation_planner_test.cc index 26e40b25930c8..4e9e80b180e9c 100644 --- a/onnxruntime/test/framework/allocation_planner_test.cc +++ b/onnxruntime/test/framework/allocation_planner_test.cc @@ -1883,7 +1883,7 @@ TEST_F(PlannerTest, ParaPlanCreation) { ORT_ENFORCE(main_graph_ort_value_index_map.GetName(per_value_plan.reused_buffer, reused).IsOK()); reuse_pairs.erase(reused); } // if - } // for + } // for ASSERT_TRUE(reuse_pairs.empty()); } diff --git a/onnxruntime/test/onnx/OrtValueList.h b/onnxruntime/test/onnx/OrtValueList.h index 2929cdca428d9..921c1d3872111 100644 --- a/onnxruntime/test/onnx/OrtValueList.h +++ b/onnxruntime/test/onnx/OrtValueList.h @@ -14,7 +14,7 @@ class OrtValueArray { public: ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OrtValueArray); // n must be non-negative - OrtValueArray(int n) : 
values(static_cast(n), nullptr){}; + OrtValueArray(int n) : values(static_cast(n), nullptr) {}; ~OrtValueArray() { for (OrtValue* v : values) { if (v != nullptr) Ort::GetApi().ReleaseValue(v); diff --git a/onnxruntime/test/onnx/microbenchmark/activation.cc b/onnxruntime/test/onnx/microbenchmark/activation.cc index 69ee72996365e..df36135bd3017 100644 --- a/onnxruntime/test/onnx/microbenchmark/activation.cc +++ b/onnxruntime/test/onnx/microbenchmark/activation.cc @@ -27,7 +27,7 @@ class Allocs : public IExecutionProvider { std::shared_ptr alloc = std::make_shared(); public: - Allocs() : IExecutionProvider("fake"){}; + Allocs() : IExecutionProvider("fake") {}; AllocatorPtr GetAllocator(OrtMemType) const { return alloc; } diff --git a/onnxruntime/test/optimizer/qdq_transformer_fastmath_test.cc b/onnxruntime/test/optimizer/qdq_transformer_fastmath_test.cc index ec9f78da14a75..ccfa1f1159937 100644 --- a/onnxruntime/test/optimizer/qdq_transformer_fastmath_test.cc +++ b/onnxruntime/test/optimizer/qdq_transformer_fastmath_test.cc @@ -401,7 +401,7 @@ void QDQTransformerGemmTests(bool has_output_q, bool has_bias, bool beta_not_one auto check_binary_op_graph = [&](InferenceSessionWrapper& session) { auto op_to_count = CountOpsInGraph(session.GetGraph()); const QDQOpKeys qdq_keys = GetQDQOpKeys(use_contrib_qdq); - if ((!has_output_q || std::is_same_v)&&(!has_bias || (std::is_same_v && !beta_not_one)) && + if ((!has_output_q || std::is_same_v) && (!has_bias || (std::is_same_v && !beta_not_one)) && (std::is_same_v || std::is_same_v)) { EXPECT_EQ(op_to_count["com.microsoft.QGemm"], 1); EXPECT_EQ(op_to_count["Gemm"], 0); diff --git a/onnxruntime/test/optimizer/qdq_transformer_test.cc b/onnxruntime/test/optimizer/qdq_transformer_test.cc index 1638851daf65a..14c5b60d6e0bd 100644 --- a/onnxruntime/test/optimizer/qdq_transformer_test.cc +++ b/onnxruntime/test/optimizer/qdq_transformer_test.cc @@ -786,7 +786,7 @@ void QDQTransformerGemmTests(bool has_output_q, bool has_bias, bool 
beta_not_one auto check_binary_op_graph = [&](InferenceSessionWrapper& session) { auto op_to_count = CountOpsInGraph(session.GetGraph()); const QDQOpKeys qdq_keys = GetQDQOpKeys(use_contrib_qdq); - if ((!has_output_q || std::is_same_v)&&(!has_bias || (std::is_same_v && !beta_not_one)) && + if ((!has_output_q || std::is_same_v) && (!has_bias || (std::is_same_v && !beta_not_one)) && (std::is_same_v || std::is_same_v)) { EXPECT_EQ(op_to_count["com.microsoft.QGemm"], 1); EXPECT_EQ(op_to_count["Gemm"], 0); diff --git a/onnxruntime/test/providers/cpu/reduction/reduction_test_cases_generator.py b/onnxruntime/test/providers/cpu/reduction/reduction_test_cases_generator.py index 568a4649f3977..bd06ae9fe881a 100644 --- a/onnxruntime/test/providers/cpu/reduction/reduction_test_cases_generator.py +++ b/onnxruntime/test/providers/cpu/reduction/reduction_test_cases_generator.py @@ -40,13 +40,13 @@ def TestReduction(op, data, axes, keepdims): # noqa: N802 def PrintResult(op, axes, keepdims, res): # noqa: N802 - print(' {"%s",' % op) + print(f' {{"{op}",') print("OpAttributesResult(") print(" // ReductionAttribute") print(" {") print(" // axes_") print("{", end="") - print(*axes, sep=", ", end="") if axes else print("") + print(*axes, sep=", ", end="") if axes else print() print("},") print(" // keep_dims_") print(keepdims, ",") @@ -60,7 +60,7 @@ def PrintResult(op, axes, keepdims, res): # noqa: N802 print(" // expected values") print("{", end="") for i in range(res.size): - print("%5.6ff," % res.item(i)) + print(f"{res.item(i):5.6f}f,") print("})},") @@ -130,7 +130,7 @@ def PrintReenableOptimizations(): # noqa: N802 print("{") for i in range(input_data.size): print( - "%5.6ff," % input_data.item(i), + f"{input_data.item(i):5.6f}f,", ) print("},") print("// input_dims") diff --git a/onnxruntime/test/providers/cpu/tensor/pad_test.cc b/onnxruntime/test/providers/cpu/tensor/pad_test.cc index 5fc8ed417391e..1d9cd15f53327 100644 --- a/onnxruntime/test/providers/cpu/tensor/pad_test.cc 
+++ b/onnxruntime/test/providers/cpu/tensor/pad_test.cc @@ -66,13 +66,13 @@ static void RunAllOpsetAllDomainPadTests( bool pads_is_initializer; bool value_is_initializer; }; - const std::vector all_test_params { - {false, false}, + const std::vector all_test_params{ + {false, false}, #if (defined(USE_NNAPI) && defined(__ANDROID__)) || (defined(USE_COREML) && defined(__APPLE__)) - // only enable when building NNAPI EP on Android or building CoreML EP for Apple environment - // test runs out of memory in QEMU aarch64 environment, so don't enable otherwise - // TODO try to enable when we move from QEMU to arm64 CI machines - {true, true}, + // only enable when building NNAPI EP on Android or building CoreML EP for Apple environment + // test runs out of memory in QEMU aarch64 environment, so don't enable otherwise + // TODO try to enable when we move from QEMU to arm64 CI machines + {true, true}, #endif }; for (const auto& test_params : all_test_params) { diff --git a/onnxruntime/test/providers/qnn/qnn_basic_test.cc b/onnxruntime/test/providers/qnn/qnn_basic_test.cc index 9489d354755e4..9d19c36dc94b2 100644 --- a/onnxruntime/test/providers/qnn/qnn_basic_test.cc +++ b/onnxruntime/test/providers/qnn/qnn_basic_test.cc @@ -835,14 +835,14 @@ TEST_F(QnnHTPBackendTests, HTPGraphFinalizationOptimizationModes) { // Test that models run with various SoC model values TEST_F(QnnHTPBackendTests, HTPSocModels) { - constexpr std::array soc_models = { "", // No explicit SoC model specified - "0", // "Unknown" + constexpr std::array soc_models = {"", // No explicit SoC model specified + "0", // "Unknown" #if defined(_M_ARM64) - "37" }; // SC8280X + "37"}; // SC8280X #elif defined(__linux__) - "30" }; // SM8350 + "30"}; // SM8350 #else - "" }; + ""}; #endif for (auto soc_model : soc_models) { diff --git a/onnxruntime/test/python/onnx_backend_test_series.py b/onnxruntime/test/python/onnx_backend_test_series.py index 6eebc996fde9c..9b1e87f6ec02e 100644 --- 
a/onnxruntime/test/python/onnx_backend_test_series.py +++ b/onnxruntime/test/python/onnx_backend_test_series.py @@ -76,7 +76,7 @@ def apply_filters(filters, category): opset_version = f"opset{onnx.defs.onnx_opset_version()}" validated_filters = [] for f in filters[category]: - if type(f) is list: # noqa: E721 + if type(f) is list: opset_regex = f[0] filter_regex = f[1] opset_match = re.match(opset_regex, opset_version) diff --git a/onnxruntime/test/python/transformers/rotary_flash.py b/onnxruntime/test/python/transformers/rotary_flash.py index 42bff9c92b41b..4329b2c1a6057 100644 --- a/onnxruntime/test/python/transformers/rotary_flash.py +++ b/onnxruntime/test/python/transformers/rotary_flash.py @@ -486,9 +486,6 @@ def backward(ctx, dkv): return dkv, None, None, None, None -apply_rotary_emb_kv_ = ApplyRotaryEmbKV.apply - - def apply_rotary_emb_kv_( kv, cos, diff --git a/onnxruntime/test/python/transformers/test_data/bert_squad_tensorflow2.1_keras2onnx_opset11/generate_tiny_keras2onnx_bert_models.py b/onnxruntime/test/python/transformers/test_data/bert_squad_tensorflow2.1_keras2onnx_opset11/generate_tiny_keras2onnx_bert_models.py index 0086ce0d289c7..c1e95f35a633b 100644 --- a/onnxruntime/test/python/transformers/test_data/bert_squad_tensorflow2.1_keras2onnx_opset11/generate_tiny_keras2onnx_bert_models.py +++ b/onnxruntime/test/python/transformers/test_data/bert_squad_tensorflow2.1_keras2onnx_opset11/generate_tiny_keras2onnx_bert_models.py @@ -343,9 +343,9 @@ def generate_test_data( try: os.mkdir(path) except OSError: - print("Creation of the directory %s failed" % path) + print(f"Creation of the directory {path} failed") else: - print("Successfully created the directory %s " % path) + print(f"Successfully created the directory {path} ") if input_tensor_only: return diff --git a/onnxruntime/test/python/transformers/test_data/gpt2_pytorch1.5_opset11/generate_tiny_gpt2_model.py 
b/onnxruntime/test/python/transformers/test_data/gpt2_pytorch1.5_opset11/generate_tiny_gpt2_model.py index 065783d5812a8..4a4a0bc2c5098 100644 --- a/onnxruntime/test/python/transformers/test_data/gpt2_pytorch1.5_opset11/generate_tiny_gpt2_model.py +++ b/onnxruntime/test/python/transformers/test_data/gpt2_pytorch1.5_opset11/generate_tiny_gpt2_model.py @@ -452,9 +452,9 @@ def generate_test_data( try: os.mkdir(path) except OSError: - print("Creation of the directory %s failed" % path) + print(f"Creation of the directory {path} failed") else: - print("Successfully created the directory %s " % path) + print(f"Successfully created the directory {path} ") sess_options = onnxruntime.SessionOptions() sess_options.graph_optimization_level = onnxruntime.GraphOptimizationLevel.ORT_DISABLE_ALL diff --git a/onnxruntime/test/shared_lib/custom_op_utils.h b/onnxruntime/test/shared_lib/custom_op_utils.h index 8ead4056b1b54..e11540aaa5691 100644 --- a/onnxruntime/test/shared_lib/custom_op_utils.h +++ b/onnxruntime/test/shared_lib/custom_op_utils.h @@ -381,9 +381,9 @@ struct StandaloneCustomOp : Ort::CustomOpBase { @@ -397,9 +397,9 @@ struct MulTopOpFloat : Ort::CustomOpBase { }; struct MulTopKernelInt32 { - MulTopKernelInt32(const OrtKernelInfo*){}; + MulTopKernelInt32(const OrtKernelInfo*) {}; ~MulTopKernelInt32() = default; - void Compute(OrtKernelContext*){}; + void Compute(OrtKernelContext*) {}; }; struct MulTopOpInt32 : Ort::CustomOpBase { @@ -413,9 +413,9 @@ struct MulTopOpInt32 : Ort::CustomOpBase { }; struct MulTopKernelDouble { - MulTopKernelDouble(const OrtKernelInfo*){}; + MulTopKernelDouble(const OrtKernelInfo*) {}; ~MulTopKernelDouble() = default; - void Compute(OrtKernelContext*){}; + void Compute(OrtKernelContext*) {}; }; // MulTopOpDouble and MulTopOpFloat has input count mismatch @@ -430,9 +430,9 @@ struct MulTopOpDouble : Ort::CustomOpBase { }; struct MulTopKernelInt16 { - MulTopKernelInt16(const OrtKernelInfo*){}; + MulTopKernelInt16(const OrtKernelInfo*) {}; 
~MulTopKernelInt16() = default; - void Compute(OrtKernelContext*){}; + void Compute(OrtKernelContext*) {}; }; // MulTopOpInt16 and MulTopOpFloat has output count mismatch @@ -448,9 +448,9 @@ struct MulTopOpInt16 : Ort::CustomOpBase { // MulTopKernelFloat16 and MulTopOpFloat has input characteristic mismatch struct MulTopKernelFloat16 { - MulTopKernelFloat16(const OrtKernelInfo*){}; + MulTopKernelFloat16(const OrtKernelInfo*) {}; ~MulTopKernelFloat16() = default; - void Compute(OrtKernelContext*){}; + void Compute(OrtKernelContext*) {}; }; struct MulTopOpFloat16 : Ort::CustomOpBase { diff --git a/onnxruntime/test/testdata/CNTK/gen.py b/onnxruntime/test/testdata/CNTK/gen.py index 37241a46808b5..5a3ca461f471a 100644 --- a/onnxruntime/test/testdata/CNTK/gen.py +++ b/onnxruntime/test/testdata/CNTK/gen.py @@ -48,10 +48,10 @@ def Save(dir, func, feed, outputs): # noqa: N802 if actual_input_name.startswith(cntk_name): cntk_to_actual_names[cntk_name] = actual_input_name - if type(feed) is not dict: # noqa: E721 + if type(feed) is not dict: feed = {func.arguments[0]: feed} - if type(outputs) is not dict: # noqa: E721 + if type(outputs) is not dict: outputs = {func.outputs[0]: outputs} test_data_dir = os.path.join(dir, data_dir) diff --git a/orttraining/orttraining/core/framework/adasum/adasum_mpi.cc b/orttraining/orttraining/core/framework/adasum/adasum_mpi.cc index 805de812cfa65..dc812ee2aec3f 100644 --- a/orttraining/orttraining/core/framework/adasum/adasum_mpi.cc +++ b/orttraining/orttraining/core/framework/adasum/adasum_mpi.cc @@ -35,8 +35,7 @@ void AdasumMPI::InitializeVHDDReductionComms(WorkerGroupType worker_group) { int nearest_power_2 = 1; int log_size; for (nearest_power_2 = 1, log_size = 0; (nearest_power_2 << 1) <= size; - nearest_power_2 = (nearest_power_2 << 1), log_size++) - ; + nearest_power_2 = (nearest_power_2 << 1), log_size++); int shift_val; int level; reduction_comms_ = std::make_unique>(); diff --git a/orttraining/orttraining/core/framework/pipeline.h 
b/orttraining/orttraining/core/framework/pipeline.h index a93ba1081d7df..79701106c9c1d 100644 --- a/orttraining/orttraining/core/framework/pipeline.h +++ b/orttraining/orttraining/core/framework/pipeline.h @@ -247,7 +247,7 @@ struct PipelineWorkerState { struct PipelineWorkerPool { PipelineWorkerPool() = default; - PipelineWorkerPool(size_t num_workers) : workers(num_workers), worker_states(num_workers){}; + PipelineWorkerPool(size_t num_workers) : workers(num_workers), worker_states(num_workers) {}; void Join(size_t worker_id); void JoinAll(); diff --git a/orttraining/orttraining/core/framework/torch/custom_function_register.h b/orttraining/orttraining/core/framework/torch/custom_function_register.h index 762258a45221e..ddb838ba6475c 100644 --- a/orttraining/orttraining/core/framework/torch/custom_function_register.h +++ b/orttraining/orttraining/core/framework/torch/custom_function_register.h @@ -102,7 +102,7 @@ class OrtTorchFunctionPool final { void UnRegisterFunctions(); private: - OrtTorchFunctionPool(){}; + OrtTorchFunctionPool() {}; ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(OrtTorchFunctionPool); void UnRegisterGlobalFunctions(); diff --git a/orttraining/orttraining/core/framework/torch/gil.h b/orttraining/orttraining/core/framework/torch/gil.h index c928571d2024a..b14b062785eef 100644 --- a/orttraining/orttraining/core/framework/torch/gil.h +++ b/orttraining/orttraining/core/framework/torch/gil.h @@ -13,7 +13,7 @@ // See https://docs.python.org/3/c-api/init.html#non-python-created-threads for details. 
class GilGuard { public: - GilGuard() : state_(PyGILState_Ensure()){}; + GilGuard() : state_(PyGILState_Ensure()) {}; ~GilGuard() { PyGILState_Release(state_); }; private: diff --git a/orttraining/orttraining/core/framework/torch/torch_proxy.h b/orttraining/orttraining/core/framework/torch/torch_proxy.h index b80acd6c4791a..37766e67ef42f 100644 --- a/orttraining/orttraining/core/framework/torch/torch_proxy.h +++ b/orttraining/orttraining/core/framework/torch/torch_proxy.h @@ -95,8 +95,8 @@ class TorchProxy { std::vector& bw_output_to_input_alias_map); private: - TorchProxy(){}; - ~TorchProxy(){}; + TorchProxy() {}; + ~TorchProxy() {}; ORT_DISALLOW_COPY_ASSIGNMENT_AND_MOVE(TorchProxy); diff --git a/orttraining/orttraining/core/graph/graph_augmenter.h b/orttraining/orttraining/core/graph/graph_augmenter.h index eb146ca0e84f3..c3b6d227f01fd 100644 --- a/orttraining/orttraining/core/graph/graph_augmenter.h +++ b/orttraining/orttraining/core/graph/graph_augmenter.h @@ -33,7 +33,7 @@ struct OpDef { OpDef(const std::string& type, const std::string& domain = kOnnxDomain, const int opset_version = 9) : type(type), domain(domain), - opset_version(opset_version){}; + opset_version(opset_version) {}; std::string type; std::string domain; @@ -52,7 +52,7 @@ struct NodeDef { output_args(output_args), attributes(attributes), name(name), - priority(priority){}; + priority(priority) {}; NodeDef(const std::string& op_type, const std::vector& input_args, @@ -64,7 +64,7 @@ struct NodeDef { output_args(output_args), attributes(attributes), name(name), - priority(priority){}; + priority(priority) {}; NodeDef(const OpDef& op_def, const std::vector& input_args, diff --git a/orttraining/orttraining/core/graph/loss_func/loss_func_common.h b/orttraining/orttraining/core/graph/loss_func/loss_func_common.h index 2b60280e076aa..61bc0a094dac4 100644 --- a/orttraining/orttraining/core/graph/loss_func/loss_func_common.h +++ b/orttraining/orttraining/core/graph/loss_func/loss_func_common.h @@ -21,7 
+21,7 @@ struct LossFunctionInfo { struct ILossFunction { virtual GraphAugmenter::GraphDefs operator()(const Graph& graph, const LossFunctionInfo& loss_func_info) = 0; - virtual ~ILossFunction(){}; + virtual ~ILossFunction() {}; }; TypeProto* GetSparseTypeProto(const NodeArg* input_arg, diff --git a/orttraining/orttraining/core/graph/pipeline_transformer.cc b/orttraining/orttraining/core/graph/pipeline_transformer.cc index f989d53aa85d5..3495c3da72e3d 100644 --- a/orttraining/orttraining/core/graph/pipeline_transformer.cc +++ b/orttraining/orttraining/core/graph/pipeline_transformer.cc @@ -887,7 +887,7 @@ struct PipelineStageNodeGroup { // the consumer nodes of a particular initializer can be more than one, so we need a vector to store those // nodes. std::vector nodes; - PipelineStageNodeGroup(const size_t stage, std::vector& node_group) : stage_id(stage), nodes(std::move(node_group)){}; + PipelineStageNodeGroup(const size_t stage, std::vector& node_group) : stage_id(stage), nodes(std::move(node_group)) {}; }; // This function passes through the given initializer across stages specified in node_groups[i].stage_id. 
diff --git a/orttraining/orttraining/core/optimizer/megatron_transformer.cc b/orttraining/orttraining/core/optimizer/megatron_transformer.cc index 4ebea5cf386cc..25e16304789b6 100644 --- a/orttraining/orttraining/core/optimizer/megatron_transformer.cc +++ b/orttraining/orttraining/core/optimizer/megatron_transformer.cc @@ -21,7 +21,7 @@ struct OpInfo { const size_t output_count = 1) : op_type(op_type), supported_versions(supported_versions), domain(domain), - output_count(output_count){}; + output_count(output_count) {}; std::string op_type; std::initializer_list supported_versions; @@ -53,7 +53,7 @@ const OpInfo where_info = OpInfo("Where", opset_v9); struct NodeInfo { NodeInfo(const std::vector& op_infos, const bool required = true) : op_infos(op_infos), - required(required){}; + required(required) {}; std::vector op_infos; bool required; diff --git a/orttraining/orttraining/core/session/training_session.h b/orttraining/orttraining/core/session/training_session.h index 37b708fb7d1dd..765f88e1c992e 100644 --- a/orttraining/orttraining/core/session/training_session.h +++ b/orttraining/orttraining/core/session/training_session.h @@ -46,7 +46,7 @@ class TrainingSession : public InferenceSession { TrainingSession(const SessionOptions& session_options, const Environment& env) : InferenceSession(session_options, env), is_mixed_precision_enabled_(false) {} - virtual ~TrainingSession(){}; + virtual ~TrainingSession() {}; /** * The training configuration options. @@ -215,11 +215,11 @@ class TrainingSession : public InferenceSession { // If the edge is unique, i.e. only have one consumer node, or all the edges // with the same node_arg_name needs to be cut, specify the node_arg_name // suffices. - CutEdge(std::string edge) : node_arg_name(edge){}; + CutEdge(std::string edge) : node_arg_name(edge) {}; // If the edges with same node_arg_name belongs to different cut, i.e. 
some of its // consumer node belongs to one partition, and some belongs to another, specify // the consumer node names which you want to perform the cut on. - CutEdge(std::string edge, std::vector nodes) : node_arg_name(edge), consumer_nodes(nodes){}; + CutEdge(std::string edge, std::vector nodes) : node_arg_name(edge), consumer_nodes(nodes) {}; }; // CutInfo is a group of CutEdges that describes a specific cut that composed of splitting those edges. typedef std::vector CutInfo; diff --git a/orttraining/orttraining/lazy_tensor/flags.h b/orttraining/orttraining/lazy_tensor/flags.h index b849f9f9a0a3e..1812466d10346 100644 --- a/orttraining/orttraining/lazy_tensor/flags.h +++ b/orttraining/orttraining/lazy_tensor/flags.h @@ -60,7 +60,7 @@ class DynamicSettings { } private: - DynamicSettings() : onnx_fusion_status_(true){}; + DynamicSettings() : onnx_fusion_status_(true) {}; bool onnx_fusion_status_; }; diff --git a/orttraining/orttraining/models/bert/main.cc b/orttraining/orttraining/models/bert/main.cc index 22cdd9351a206..c4c7a98ba116a 100644 --- a/orttraining/orttraining/models/bert/main.cc +++ b/orttraining/orttraining/models/bert/main.cc @@ -861,8 +861,7 @@ int main(int argc, char* argv[]) { OrtParameters ort_params{}; RETURN_IF_FAIL(ParseArguments(argc, argv, params, ort_params)); bool keep_looping = params.debug_break; - while (keep_looping) - ; + while (keep_looping); // setup logger, be noted: LOGS_DEFAULT must be after logging manager initialization. 
string default_logger_id{"Default"}; diff --git a/orttraining/orttraining/models/pipeline_poc/main.cc b/orttraining/orttraining/models/pipeline_poc/main.cc index c461e4bbf3600..1b7d6b9ea26f6 100644 --- a/orttraining/orttraining/models/pipeline_poc/main.cc +++ b/orttraining/orttraining/models/pipeline_poc/main.cc @@ -86,36 +86,36 @@ int main(int argc, char* argv[]) { // setup onnxruntime env std::vector overrides = {}; SessionOptions so = { - ExecutionMode::ORT_SEQUENTIAL, // execution_mode - ExecutionOrder::DEFAULT, // execution_order - false, // enable_profiling - ORT_TSTR(""), // optimized_model_filepath - true, // enable_mem_pattern - true, // enable_mem_reuse - true, // enable_cpu_mem_arena - ORT_TSTR("onnxruntime_profile_"), // profile_file_prefix - "", // session_logid - -1, // session_log_severity_level - 0, // session_log_verbosity_level - 5, // max_num_graph_transformation_steps - TransformerLevel::Level1, // graph_optimization_level - {}, // intra_op_param - {}, // inter_op_param - overrides, // free_dimension_overrides - true, // use_per_session_threads - true, // thread_pool_allow_spinning - false, // use_deterministic_compute - {}, // session_configurations - {}, // initializers_to_share_map + ExecutionMode::ORT_SEQUENTIAL, // execution_mode + ExecutionOrder::DEFAULT, // execution_order + false, // enable_profiling + ORT_TSTR(""), // optimized_model_filepath + true, // enable_mem_pattern + true, // enable_mem_reuse + true, // enable_cpu_mem_arena + ORT_TSTR("onnxruntime_profile_"), // profile_file_prefix + "", // session_logid + -1, // session_log_severity_level + 0, // session_log_verbosity_level + 5, // max_num_graph_transformation_steps + TransformerLevel::Level1, // graph_optimization_level + {}, // intra_op_param + {}, // inter_op_param + overrides, // free_dimension_overrides + true, // use_per_session_threads + true, // thread_pool_allow_spinning + false, // use_deterministic_compute + {}, // session_configurations + {}, // 
initializers_to_share_map #if !defined(ORT_MINIMAL_BUILD) && !defined(DISABLE_EXTERNAL_INITIALIZERS) - {}, // external_initializers - {}, // external_initializer_files + {}, // external_initializers + {}, // external_initializer_files #endif - nullptr, // custom_create_thread_fn - nullptr, // custom_thread_creation_options - nullptr, // custom_join_thread_fn + nullptr, // custom_create_thread_fn + nullptr, // custom_thread_creation_options + nullptr, // custom_join_thread_fn #if !defined(ORT_MINIMAL_BUILD) || defined(ORT_MINIMAL_BUILD_CUSTOM_OPS) - {}, // custom_op_libs + {}, // custom_op_libs #endif }; diff --git a/orttraining/orttraining/models/runner/training_util.h b/orttraining/orttraining/models/runner/training_util.h index 8c76ce7e50dc9..1499b30180f61 100644 --- a/orttraining/orttraining/models/runner/training_util.h +++ b/orttraining/orttraining/models/runner/training_util.h @@ -98,7 +98,7 @@ class RandomDataSet : public DataSet { : DataSet(tensor_names), num_samples_(num_samples), tensor_shapes_(tensor_shapes), - tensor_types_(tensor_types){}; + tensor_types_(tensor_types) {}; virtual ~RandomDataSet() {} @@ -189,7 +189,7 @@ class LossScaler { min_loss_scale_(min_loss_scale), max_loss_scale_(max_loss_scale), loss_scale_(loss_scale), - stable_steps_(0){}; + stable_steps_(0) {}; std::string GetLossScaleInputName() const { return loss_scale_input_name_; } diff --git a/orttraining/orttraining/python/orttraining_pybind_state.cc b/orttraining/orttraining/python/orttraining_pybind_state.cc index a81ea76e807ca..b2392b68ac43e 100644 --- a/orttraining/orttraining/python/orttraining_pybind_state.cc +++ b/orttraining/orttraining/python/orttraining_pybind_state.cc @@ -319,7 +319,7 @@ void addObjectMethodsForTraining(py::module& m) { auto& pool = onnxruntime::language_interop_ops::torch::OrtTorchFunctionPool::GetInstance(); pool.RegisterForwardRunner(function_address); #else - ORT_UNUSED_PARAMETER(obj); + ORT_UNUSED_PARAMETER(obj); #endif }); 
m.def("register_backward_runner", [](py::object obj) -> void { @@ -328,7 +328,7 @@ void addObjectMethodsForTraining(py::module& m) { auto& pool = onnxruntime::language_interop_ops::torch::OrtTorchFunctionPool::GetInstance(); pool.RegisterBackwardRunner(function_address); #else - ORT_UNUSED_PARAMETER(obj); + ORT_UNUSED_PARAMETER(obj); #endif }); m.def("register_torch_autograd_function", [](std::string function_full_qual_name, py::object obj) -> void { @@ -336,8 +336,8 @@ void addObjectMethodsForTraining(py::module& m) { auto& pool = onnxruntime::language_interop_ops::torch::OrtTorchFunctionPool::GetInstance(); pool.RegisterTorchAutogradFunction(function_full_qual_name, obj.ptr()); #else - ORT_UNUSED_PARAMETER(function_full_qual_name); - ORT_UNUSED_PARAMETER(obj); + ORT_UNUSED_PARAMETER(function_full_qual_name); + ORT_UNUSED_PARAMETER(obj); #endif }); m.def("register_shape_inference_function", [](std::string function_full_qual_name, py::object obj) -> void { @@ -345,8 +345,8 @@ void addObjectMethodsForTraining(py::module& m) { auto& pool = onnxruntime::language_interop_ops::torch::OrtTorchFunctionPool::GetInstance(); pool.RegisterShapeInferenceFunction(function_full_qual_name, obj.ptr()); #else - ORT_UNUSED_PARAMETER(function_full_qual_name); - ORT_UNUSED_PARAMETER(obj); + ORT_UNUSED_PARAMETER(function_full_qual_name); + ORT_UNUSED_PARAMETER(obj); #endif }); m.def("get_shape_inference_function", [](std::string function_full_qual_name) -> py::object { @@ -368,8 +368,8 @@ void addObjectMethodsForTraining(py::module& m) { auto& pool = onnxruntime::language_interop_ops::torch::OrtTorchFunctionPool::GetInstance(); pool.RegisterInputAliasFunction(function_full_qual_name, obj.ptr()); #else - ORT_UNUSED_PARAMETER(function_full_qual_name); - ORT_UNUSED_PARAMETER(obj); + ORT_UNUSED_PARAMETER(function_full_qual_name); + ORT_UNUSED_PARAMETER(obj); #endif }); m.def("register_miscellaneous_const_input", [](py::object obj) -> void { @@ -377,7 +377,7 @@ void 
addObjectMethodsForTraining(py::module& m) { auto& pool = onnxruntime::language_interop_ops::torch::OrtTorchFunctionPool::GetInstance(); pool.RegisterMiscellaneousConstInput(obj.ptr()); #else - ORT_UNUSED_PARAMETER(obj); + ORT_UNUSED_PARAMETER(obj); #endif }); m.def("unregister_python_functions", []() -> void { @@ -391,14 +391,14 @@ void addObjectMethodsForTraining(py::module& m) { #ifdef ENABLE_TRAINING_TORCH_INTEROP return true; #else - return false; + return false; #endif }); m.def("is_triton_enabled", []() -> bool { #ifdef ENABLE_TRITON return true; #else - return false; + return false; #endif }); #ifdef ENABLE_TRITON @@ -1036,7 +1036,7 @@ void addObjectMethodsForTraining(py::module& m) { #ifdef __linux__ return true; #else - return false; + return false; #endif }); #endif diff --git a/orttraining/orttraining/python/training/ort_triton/kernel/_mm.py b/orttraining/orttraining/python/training/ort_triton/kernel/_mm.py index a3681a13699a0..1a944082fa4ba 100644 --- a/orttraining/orttraining/python/training/ort_triton/kernel/_mm.py +++ b/orttraining/orttraining/python/training/ort_triton/kernel/_mm.py @@ -372,7 +372,7 @@ def _gen_bmm_module( ) -> Tuple[str, ModuleType]: func_name = gen_unique_name("bmm") kwargs = _mm_configs(dtype, m, n, k, trans_a, trans_b, alpha, func_name) - batch = batch_a if batch_a >= batch_b else batch_b + batch = max(batch_a, batch_b) kwargs["stride_aq"] = m * k if batch_a == batch else 0 kwargs["stride_bq"] = k * n if batch_b == batch else 0 kwargs["batch"] = batch diff --git a/orttraining/orttraining/python/training/ortmodule/_utils.py b/orttraining/orttraining/python/training/ortmodule/_utils.py index c299d1c5db4e7..4787cb31a24fd 100644 --- a/orttraining/orttraining/python/training/ortmodule/_utils.py +++ b/orttraining/orttraining/python/training/ortmodule/_utils.py @@ -74,7 +74,7 @@ def _ortvalues_to_torch_tensor( return tuple(C.to_aten_ort_device_tensor(ov) for ov in ortvalues) if not isinstance(ortvalues, C.OrtValueVector): - raise 
TypeError("ortvalues must be an instance of OrtValueVector not %r." % type(ortvalues)) + raise TypeError(f"ortvalues must be an instance of OrtValueVector not {type(ortvalues)!r}.") res: List[torch.Tensor] = ortvalues.to_dlpacks(_from_dlpack) bool_indices = ortvalues.bool_tensor_indices() diff --git a/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cpu/torch_interop_utils/ctx_pool.h b/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cpu/torch_interop_utils/ctx_pool.h index e7b101d987d7a..b62c2c40c30ee 100644 --- a/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cpu/torch_interop_utils/ctx_pool.h +++ b/orttraining/orttraining/python/training/ortmodule/torch_cpp_extensions/cpu/torch_interop_utils/ctx_pool.h @@ -58,8 +58,8 @@ class PyNodeSharedPointerPool { } private: - PyNodeSharedPointerPool(){}; - ~PyNodeSharedPointerPool(){}; + PyNodeSharedPointerPool() {}; + ~PyNodeSharedPointerPool() {}; PyNodeSharedPointerPool(const PyNodeSharedPointerPool&) = delete; PyNodeSharedPointerPool& operator=(const PyNodeSharedPointerPool&) = delete; diff --git a/orttraining/orttraining/test/distributed/partition_utils.h b/orttraining/orttraining/test/distributed/partition_utils.h index c22d0a3eb2f93..787a001903cce 100644 --- a/orttraining/orttraining/test/distributed/partition_utils.h +++ b/orttraining/orttraining/test/distributed/partition_utils.h @@ -159,7 +159,7 @@ struct PipelineStageNodeGroup { // the consumer nodes of a particular initializer can be more than one, so we need a vector to store those // nodes. std::vector nodes; - PipelineStageNodeGroup(const size_t stage, std::vector& node_group) : stage_id(stage), nodes(std::move(node_group)){}; + PipelineStageNodeGroup(const size_t stage, std::vector& node_group) : stage_id(stage), nodes(std::move(node_group)) {}; }; // This function passes through the given initializer across stages specified in node_groups[i].stage_id. 
diff --git a/orttraining/orttraining/test/python/orttraining_test_hierarchical_ortmodule.py b/orttraining/orttraining/test/python/orttraining_test_hierarchical_ortmodule.py index 8afbafccb8241..655c9def2c66c 100644 --- a/orttraining/orttraining/test/python/orttraining_test_hierarchical_ortmodule.py +++ b/orttraining/orttraining/test/python/orttraining_test_hierarchical_ortmodule.py @@ -198,7 +198,7 @@ def call_backward(y): y.sum().backward() def call_allclose(y, y_ref): - assert type(y) == type(y_ref) + assert type(y) is type(y_ref) if isinstance(y, Iterable): for ele, ele_ref in zip(y, y_ref): torch.allclose(ele, ele_ref) diff --git a/orttraining/orttraining/test/python/orttraining_test_model_transform.py b/orttraining/orttraining/test/python/orttraining_test_model_transform.py index 095830cd54ab8..6ea81fc6aa089 100644 --- a/orttraining/orttraining/test/python/orttraining_test_model_transform.py +++ b/orttraining/orttraining/test/python/orttraining_test_model_transform.py @@ -77,7 +77,7 @@ def fix_transpose(model): weight = numpy_helper.to_array(t[1]) assert len(weight.shape) == 2 weight = weight.transpose(perm) - new_weight = numpy_helper.from_array(weight, "%s_transposed" % t[1].name) + new_weight = numpy_helper.from_array(weight, f"{t[1].name}_transposed") model.graph.initializer.extend([new_weight]) replace_input_arg(model, node.output[0], new_weight.name) diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py index fe59c398d7abb..3615a12705241 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_api.py @@ -3976,9 +3976,9 @@ def forward(self, input1, bool_argument, int_argument, float_argument): out = self.relu(out) return out - assert type(bool_argument) is bool # noqa: E721 - assert type(int_argument) is int # noqa: E721 - assert type(float_argument) is float # 
noqa: E721 + assert type(bool_argument) is bool + assert type(int_argument) is int + assert type(float_argument) is float device = "cuda" N, D_in, H, D_out = 32, 784, 500, 10 # noqa: N806 @@ -4014,8 +4014,8 @@ def forward(self, input1, bool_argument): out = self.relu(out) return out - assert type(bool_arguments[0]) is bool # noqa: E721 - assert type(bool_arguments[1]) is bool # noqa: E721 + assert type(bool_arguments[0]) is bool + assert type(bool_arguments[1]) is bool device = "cuda" N, D_in, H, D_out = 32, 784, 500, 10 # noqa: N806 @@ -5501,7 +5501,7 @@ def forward(self, x): return x[: self.dim, :] def random_state_equal(a, b): - assert type(a) == type(b) + assert type(a) is type(b) if isinstance(a, tuple): assert len(a) == len(b) return all([random_state_equal(a_i, b_i) for a_i, b_i in zip(a, b)]) diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier.py index a1a7d4660f266..41e1e0f5d0d57 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier.py @@ -385,7 +385,7 @@ def main(): # Set log level numeric_level = getattr(logging, args.log_level.upper(), None) if not isinstance(numeric_level, int): - raise ValueError("Invalid log level: %s" % args.log_level) + raise ValueError(f"Invalid log level: {args.log_level}") logging.basicConfig(level=numeric_level) # 2. 
Dataloader diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier_autocast.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier_autocast.py index 0d5aba1a1a5c4..801eb58727689 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier_autocast.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_bert_classifier_autocast.py @@ -385,7 +385,7 @@ def main(): # Set log level numeric_level = getattr(logging, args.log_level.upper(), None) if not isinstance(numeric_level, int): - raise ValueError("Invalid log level: %s" % args.log_level) + raise ValueError(f"Invalid log level: {args.log_level}") logging.basicConfig(level=numeric_level) # 2. Dataloader diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_deepspeed_zero_stage_1.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_deepspeed_zero_stage_1.py index 5b28e9c52b480..5e0a4d38b51d6 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_deepspeed_zero_stage_1.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_deepspeed_zero_stage_1.py @@ -219,7 +219,7 @@ def main(): } log_level = log_level_mapping.get(args.log_level.upper(), None) if not isinstance(log_level, LogLevel): - raise ValueError("Invalid log level: %s" % args.log_level) + raise ValueError(f"Invalid log level: {args.log_level}") debug_options = DebugOptions(log_level=log_level, save_onnx=args.export_onnx_graphs, onnx_prefix="MNIST") model = ORTModule(model, debug_options) diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_onnx_ops.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_onnx_ops.py index e1def2022d63f..537dcd2ccdb09 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_onnx_ops.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_onnx_ops.py @@ -74,11 +74,11 @@ def 
run_step(model, x): ) onnx_graph_train = ort_model._torch_module._execution_manager._training_manager._onnx_models.optimized_model if debug: - with open("debug_%s_ortmodule_infer.onnx" % name, "wb") as f: + with open(f"debug_{name}_ortmodule_infer.onnx", "wb") as f: f.write(onnx_graph_inf.SerializeToString()) - with open("debug_%s_ortmodule_train.onnx" % name, "wb") as f: + with open(f"debug_{name}_ortmodule_train.onnx", "wb") as f: f.write(onnx_graph_train.SerializeToString()) - self.assertIn('op_type: "%s"' % name, str(onnx_graph_inf)) + self.assertIn(f'op_type: "{name}"', str(onnx_graph_inf)) for onnx_model in [onnx_graph_inf, onnx_graph_train]: for oimp in onnx_model.opset_import: if oimp.domain == "": @@ -86,10 +86,10 @@ def run_step(model, x): if op_grad_type is not None: if isinstance(op_grad_type, tuple): text = str(onnx_graph_train) - if all(map(lambda op: ('op_type: "%s"' % op) not in text, op_grad_type)): + if all(map(lambda op: (f'op_type: "{op}"') not in text, op_grad_type)): raise AssertionError("Operator {} not found in {}.".format(" or ".join(op_grad_type), text)) else: - self.assertIn('op_type: "%s"' % op_grad_type, str(onnx_graph_train)) + self.assertIn(f'op_type: "{op_grad_type}"', str(onnx_graph_train)) def get_torch_model_name(self, name, device): def from_numpy(v, device=None, requires_grad=False): @@ -137,7 +137,7 @@ def forward(self, input1): return TestGatherElement, "GatherElementsGrad", dict(rtol=1e-04, atol=1e-05) - raise AssertionError("Unexpected name=%r." 
% name) + raise AssertionError(f"Unexpected name={name!r}.") def test_onnx_ops(self): for name in ["GatherElements", "Softmax"]: diff --git a/orttraining/orttraining/test/python/orttraining_test_ortmodule_poc.py b/orttraining/orttraining/test/python/orttraining_test_ortmodule_poc.py index d6f84d94c2838..5872a69dde876 100644 --- a/orttraining/orttraining/test/python/orttraining_test_ortmodule_poc.py +++ b/orttraining/orttraining/test/python/orttraining_test_ortmodule_poc.py @@ -201,7 +201,7 @@ def main(): # Set log level numeric_level = getattr(logging, args.log_level.upper(), None) if not isinstance(numeric_level, int): - raise ValueError("Invalid log level: %s" % args.log_level) + raise ValueError(f"Invalid log level: {args.log_level}") logging.basicConfig(level=numeric_level) else: print("Training MNIST on vanilla PyTorch....") diff --git a/orttraining/orttraining/test/python/orttraining_test_utilities.py b/orttraining/orttraining/test/python/orttraining_test_utilities.py index 0892bafcdb95d..faa04f327be7f 100644 --- a/orttraining/orttraining/test/python/orttraining_test_utilities.py +++ b/orttraining/orttraining/test/python/orttraining_test_utilities.py @@ -237,7 +237,7 @@ def test_data_flatten_and_unflatten(input_output_map, flag: int): flatten_schema = input_output_map[2] def _recursive_compare(real, expected): - assert type(real) == type(expected) + assert type(real) is type(expected) if isinstance(real, str): assert real == expected elif isinstance(real, abc.Sequence): @@ -258,7 +258,7 @@ def _recursive_compare(real, expected): out, schema = extract_data_and_schema(raw_data) assert all([torch.allclose(o, d) if isinstance(o, torch.Tensor) else o == d for o, d in zip(out, flatten_data)]) if not isinstance(raw_data, torch.Tensor): - assert type(schema) == type(raw_data) + assert type(schema) is type(raw_data) assert str(schema) == str(flatten_schema) diff --git a/orttraining/orttraining/test/training_ops/function_op_test_utils.cc 
b/orttraining/orttraining/test/training_ops/function_op_test_utils.cc index 9504ba2c1e69a..3daf6db96e31c 100644 --- a/orttraining/orttraining/test/training_ops/function_op_test_utils.cc +++ b/orttraining/orttraining/test/training_ops/function_op_test_utils.cc @@ -72,7 +72,7 @@ void OpFunctionTester::RunFunctionBodyGraphOnCPU(TwoDArray& results) { } } -OpFunctionTester::~OpFunctionTester(){}; +OpFunctionTester::~OpFunctionTester() {}; template std::unique_ptr CreateOpTester(const onnxruntime::training::OpDef& op_def, diff --git a/orttraining/orttraining/training_ops/cpu/torch/torch_custom_function_kernel.h b/orttraining/orttraining/training_ops/cpu/torch/torch_custom_function_kernel.h index f58cd3ecbaeca..850dc6de735f0 100644 --- a/orttraining/orttraining/training_ops/cpu/torch/torch_custom_function_kernel.h +++ b/orttraining/orttraining/training_ops/cpu/torch/torch_custom_function_kernel.h @@ -25,7 +25,7 @@ class PythonOp final : public OpKernel, public PythonOpBase { // Pytorch's torch.autograd.Function.backward(...) wrapper. 
class PythonOpGrad final : public OpKernel, public PythonOpGradBase { public: - PythonOpGrad(const OpKernelInfo& info) : OpKernel(info), PythonOpGradBase(info){}; + PythonOpGrad(const OpKernelInfo& info) : OpKernel(info), PythonOpGradBase(info) {}; Status Compute(OpKernelContext* context) const override; }; diff --git a/orttraining/orttraining/training_ops/cuda/cuda_training_kernels.cc b/orttraining/orttraining/training_ops/cuda/cuda_training_kernels.cc index bcc9a06f5a250..dac1d7a84b9d9 100644 --- a/orttraining/orttraining/training_ops/cuda/cuda_training_kernels.cc +++ b/orttraining/orttraining/training_ops/cuda/cuda_training_kernels.cc @@ -271,258 +271,258 @@ class ONNX_OPERATOR_KERNEL_CLASS_NAME(kCudaExecutionProvider, kMSDomain, 1, Mega Status RegisterCudaTrainingKernels(KernelRegistry& kernel_registry) { static const BuildKernelCreateInfoFn function_table[] = { - BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // Adam - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Lamb - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // BuildKernelCreateInfo, - BuildKernelCreateInfo, - // BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Adam - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // Lamb - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, // default entry to avoid the list become empty after ops-reducing + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, 
+ // Adam + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Lamb + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Adam + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // Lamb + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, 
+ BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, // the kernels within the following ifdef are not included in a build with // --enable_training_ops but without --enable_training #ifdef ENABLE_TRAINING // P2P communication operators. #if defined(ORT_USE_NCCL) || defined(USE_MPI) - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif #ifdef USE_MPI - BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #ifdef ENABLE_TRITON - BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #ifdef ENABLE_TRAINING_TORCH_INTEROP - BuildKernelCreateInfo, - BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif #ifdef ORT_USE_NCCL - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif #endif }; diff --git a/orttraining/orttraining/training_ops/rocm/rocm_training_kernels.cc b/orttraining/orttraining/training_ops/rocm/rocm_training_kernels.cc index 7824e98fe8a53..c570f727f2a92 100644 --- a/orttraining/orttraining/training_ops/rocm/rocm_training_kernels.cc +++ b/orttraining/orttraining/training_ops/rocm/rocm_training_kernels.cc @@ -222,207 +222,207 @@ class ONNX_OPERATOR_KERNEL_CLASS_NAME(kRocmExecutionProvider, kMSDomain, 1, Mega Status RegisterRocmTrainingKernels(KernelRegistry& kernel_registry) { static const BuildKernelCreateInfoFn function_table[] = { - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // Adam - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - // Lamb - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
// BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - 
BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - // BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // Adam + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + // Lamb + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + 
BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + // BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, // P2P communication operators. 
#if defined(ORT_USE_NCCL) || defined(USE_MPI) - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif #ifdef USE_MPI // BuildKernelCreateInfo, #endif - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #ifdef ENABLE_TRAINING_TORCH_INTEROP - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif #ifdef ORT_USE_NCCL - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, - BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, + BuildKernelCreateInfo, #endif }; diff --git a/orttraining/tools/scripts/gpt2_model_transform.py b/orttraining/tools/scripts/gpt2_model_transform.py index 294af13fe69b7..50bfda4b407af 100644 --- a/orttraining/tools/scripts/gpt2_model_transform.py +++ b/orttraining/tools/scripts/gpt2_model_transform.py @@ -192,7 +192,7 @@ def fix_transpose(model): weight = numpy_helper.to_array(t[1]) assert len(weight.shape) == 2 weight = weight.transpose(perm) - new_weight = numpy_helper.from_array(weight, "%s_transposed" % t[1].name) + new_weight = numpy_helper.from_array(weight, f"{t[1].name}_transposed") model.graph.initializer.extend([new_weight]) replace_input_arg(model, node.output[0], new_weight.name) diff --git a/orttraining/tools/scripts/model_transform.py b/orttraining/tools/scripts/model_transform.py index 2fb1936ff2184..e87429d10bf88 100644 --- a/orttraining/tools/scripts/model_transform.py +++ b/orttraining/tools/scripts/model_transform.py @@ -227,7 +227,7 @@ def fix_transpose(model): weight = numpy_helper.to_array(t[1]) assert len(weight.shape) == 2 weight = weight.transpose(perm) - new_weight = numpy_helper.from_array(weight, "%s_transposed" % t[1].name) + new_weight = numpy_helper.from_array(weight, f"{t[1].name}_transposed") 
model.graph.initializer.extend([new_weight]) replace_input_arg(model, node.output[0], new_weight.name) diff --git a/pyproject.toml b/pyproject.toml index 286e4f12721a2..1c3a719fb544a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -84,6 +84,7 @@ ignore = [ "PYI041", # May create confusion "PYI024", # May create confusion "SIM102", # We don't perfer always combining if branches + "SIM103", # Do not collapse if-else "SIM108", # We don't encourage ternary operators "SIM114", # Don't combine if branches for debugability "SIM116", # Don't use dict lookup to replace if-else diff --git a/requirements-lintrunner.txt b/requirements-lintrunner.txt index d19ebe379b50b..7d384f7b1df67 100644 --- a/requirements-lintrunner.txt +++ b/requirements-lintrunner.txt @@ -1,9 +1,9 @@ # This file is auto updated by dependabot -lintrunner-adapters>=0.11.0 +lintrunner-adapters>=0.12.4 # RUFF -ruff==0.3.2 +ruff==0.5.4 # BLACK-ISORT black==24.2.0 -isort==5.12.0 +isort==5.13.2 # CLANGFORMAT -clang-format==17.0.4 +clang-format==18.1.8 diff --git a/tools/ci_build/build.py b/tools/ci_build/build.py index 98d9ba22b7190..587d035541c45 100644 --- a/tools/ci_build/build.py +++ b/tools/ci_build/build.py @@ -65,7 +65,7 @@ def _check_python_version(): def _str_to_bool(s): """Convert string to bool (in argparse context).""" if s.lower() not in ["true", "false"]: - raise ValueError("Need bool; got %r" % s) + raise ValueError(f"Need bool; got {s!r}") return {"true": True, "false": False}[s.lower()] @@ -806,7 +806,7 @@ def resolve_executable_path(command_or_path): def get_linux_distro(): try: with open("/etc/os-release") as f: - dist_info = dict(line.strip().split("=", 1) for line in f.readlines()) + dist_info = dict(line.strip().split("=", 1) for line in f) return dist_info.get("NAME", "").strip('"'), dist_info.get("VERSION", "").strip('"') except (OSError, ValueError): return "", "" @@ -1236,7 +1236,7 @@ def generate_build_tree( cmake_args += ["-Donnxruntime_USE_FULL_PROTOBUF=ON", 
"-DProtobuf_USE_STATIC_LIBS=ON"] if args.use_tvm and args.llvm_path is not None: - cmake_args += ["-DLLVM_DIR=%s" % args.llvm_path] + cmake_args += [f"-DLLVM_DIR={args.llvm_path}"] if args.use_cuda and not is_windows(): nvml_stub_path = cuda_home + "/lib64/stubs" @@ -1452,7 +1452,7 @@ def generate_build_tree( if args.enable_lazy_tensor: import torch - cmake_args += ["-Donnxruntime_PREBUILT_PYTORCH_PATH=%s" % os.path.dirname(torch.__file__)] + cmake_args += [f"-Donnxruntime_PREBUILT_PYTORCH_PATH={os.path.dirname(torch.__file__)}"] cmake_args += ["-D_GLIBCXX_USE_CXX11_ABI=" + str(int(torch._C._GLIBCXX_USE_CXX11_ABI))] if args.use_azure: @@ -1582,7 +1582,7 @@ def generate_build_tree( else: cuda_compile_flags_str = cuda_compile_flags_str + " " + compile_flag if len(cuda_compile_flags_str) != 0: - cudaflags.append('-Xcompiler="%s"' % cuda_compile_flags_str) + cudaflags.append(f'-Xcompiler="{cuda_compile_flags_str}"') elif is_linux() or is_macOS(): if is_linux(): ldflags = ["-Wl,-Bsymbolic-functions", "-Wl,-z,relro", "-Wl,-z,now", "-Wl,-z,noexecstack"] @@ -1650,16 +1650,16 @@ def generate_build_tree( temp_cmake_args = cmake_args.copy() if cflags is not None and cxxflags is not None and len(cflags) != 0 and len(cxxflags) != 0: temp_cmake_args += [ - "-DCMAKE_C_FLAGS=%s" % (" ".join(cflags)), - "-DCMAKE_CXX_FLAGS=%s" % (" ".join(cxxflags)), + "-DCMAKE_C_FLAGS={}".format(" ".join(cflags)), + "-DCMAKE_CXX_FLAGS={}".format(" ".join(cxxflags)), ] if cudaflags is not None and len(cudaflags) != 0: - temp_cmake_args += ["-DCMAKE_CUDA_FLAGS_INIT=%s" % (" ".join(cudaflags))] + temp_cmake_args += ["-DCMAKE_CUDA_FLAGS_INIT={}".format(" ".join(cudaflags))] if ldflags is not None and len(ldflags) != 0: temp_cmake_args += [ - "-DCMAKE_EXE_LINKER_FLAGS_INIT=%s" % (" ".join(ldflags)), - "-DCMAKE_MODULE_LINKER_FLAGS_INIT=%s" % (" ".join(ldflags)), - "-DCMAKE_SHARED_LINKER_FLAGS_INIT=%s" % (" ".join(ldflags)), + "-DCMAKE_EXE_LINKER_FLAGS_INIT={}".format(" ".join(ldflags)), + 
"-DCMAKE_MODULE_LINKER_FLAGS_INIT={}".format(" ".join(ldflags)), + "-DCMAKE_SHARED_LINKER_FLAGS_INIT={}".format(" ".join(ldflags)), ] run_subprocess( [ diff --git a/tools/ci_build/gen_def.py b/tools/ci_build/gen_def.py index fe47d8dbe57fe..c4add6f0e8910 100755 --- a/tools/ci_build/gen_def.py +++ b/tools/ci_build/gen_def.py @@ -15,11 +15,11 @@ def parse_arguments(): args = parse_arguments() -print("Generating symbol file for %s" % str(args.config)) +print(f"Generating symbol file for {args.config!s}") with open(args.version_file) as f: VERSION_STRING = f.read().strip() -print("VERSION:%s" % VERSION_STRING) +print(f"VERSION:{VERSION_STRING}") symbols = set() for c in args.config: @@ -41,16 +41,16 @@ def parse_arguments(): elif args.style == "xcode": pass # xcode compile don't has any header. else: - file.write("VERS_%s {\n" % VERSION_STRING) + file.write(f"VERS_{VERSION_STRING} {{\n") file.write(" global:\n") for symbol in symbols: if args.style == "vc": file.write(" %s @%d\n" % (symbol, symbol_index)) elif args.style == "xcode": - file.write("_%s\n" % symbol) + file.write(f"_{symbol}\n") else: - file.write(" %s;\n" % symbol) + file.write(f" {symbol};\n") symbol_index += 1 if args.style == "gcc": diff --git a/tools/ci_build/reduce_op_kernels.py b/tools/ci_build/reduce_op_kernels.py index 6b73b1e063e58..df6bbf7a4058e 100755 --- a/tools/ci_build/reduce_op_kernels.py +++ b/tools/ci_build/reduce_op_kernels.py @@ -256,7 +256,7 @@ def _generate_type_control_overrides(ort_root: Path, build_dir: Path, cpp_lines: inserted = False with open(src) as input, open(target, "w") as output: inside_insertion_block = False - for line in input.readlines(): + for line in input: if "@@insertion_point_begin(allowed_types)@@" in line: inside_insertion_block = True output.write(line) diff --git a/tools/ci_build/replace_urls_in_deps.py b/tools/ci_build/replace_urls_in_deps.py index ac4f515d5482b..37dad358a6feb 100644 --- a/tools/ci_build/replace_urls_in_deps.py +++ 
b/tools/ci_build/replace_urls_in_deps.py @@ -53,10 +53,10 @@ def main(): csv_file_path = backup_csv_file_path else: # Make a copy before modifying it - print("Making a copy to %s" % str(backup_csv_file_path)) + print(f"Making a copy to {backup_csv_file_path!s}") shutil.copy(csv_file_path, backup_csv_file_path) - print("Reading from %s" % str(csv_file_path)) + print(f"Reading from {csv_file_path!s}") # Read the whole file into memory first with csv_file_path.open("r", encoding="utf-8") as f: depfile_reader = csv.reader(f, delimiter=";") @@ -69,7 +69,7 @@ def main(): deps.append(Dep(row[0], row[1], row[2])) csv_file_path = Path(REPO_DIR) / "cmake" / "deps.txt" - print("Writing to %s" % str(csv_file_path)) + print(f"Writing to {csv_file_path!s}") # Write updated content back with csv_file_path.open("w", newline="", encoding="utf-8") as f: depfile_writer = csv.writer(f, delimiter=";") diff --git a/tools/ci_build/upload_python_package_to_azure_storage.py b/tools/ci_build/upload_python_package_to_azure_storage.py index b7969f02e518e..16ff5d1f71611 100755 --- a/tools/ci_build/upload_python_package_to_azure_storage.py +++ b/tools/ci_build/upload_python_package_to_azure_storage.py @@ -62,7 +62,7 @@ def upload_whl(python_wheel_path, final_storage=False): with open(download_path_to_html, "w") as f: for item in lines: - f.write("%s\n" % item) + f.write(f"{item}\n") else: warnings.warn(f"'{new_line}' exists in {download_path_to_html}. 
The html file is not updated.") run_subprocess( diff --git a/tools/doc/rename_folders.py b/tools/doc/rename_folders.py index 90d800f2a4498..587755d101ce2 100644 --- a/tools/doc/rename_folders.py +++ b/tools/doc/rename_folders.py @@ -26,7 +26,7 @@ def rename_folder(root): full_into = os.path.join(r, into) if os.path.exists(full_into): raise RuntimeError("%r already exists, previous documentation should be removed.") - print("rename %r" % full_src) + print(f"rename {full_src!r}") os.rename(full_src, full_into) return renamed @@ -51,13 +51,13 @@ def replace_files(root, renamed): for k, v in subs.items(): if k == v: raise ValueError(f"{k!r} == {v!r}") - if ('"%s' % k) in f[0]: - repl.append((f[0], f[0].replace('"%s' % k, '"%s' % v))) - if ("/%s" % k) in f[0]: - repl.append((f[0], f[0].replace("/%s" % k, "/%s" % v))) + if (f'"{k}') in f[0]: + repl.append((f[0], f[0].replace(f'"{k}', f'"{v}'))) + if (f"/{k}") in f[0]: + repl.append((f[0], f[0].replace(f"/{k}", f"/{v}"))) if len(repl) == 0: continue - print("update %r" % full) + print(f"update {full!r}") for k, v in repl: content = content.replace(k, v) with open(full, "w", encoding="utf-8") as f: @@ -71,7 +71,7 @@ def replace_files(root, renamed): root = sys.argv[-1] else: root = "../../build/docs/html" - print("look into %r" % root) + print(f"look into {root!r}") ren = rename_folder(root) if len(ren) == 0: ren = [ diff --git a/tools/nuget/generate_nuspec_for_native_nuget.py b/tools/nuget/generate_nuspec_for_native_nuget.py index 60d1884a9591f..a005bd4c4b89d 100644 --- a/tools/nuget/generate_nuspec_for_native_nuget.py +++ b/tools/nuget/generate_nuspec_for_native_nuget.py @@ -67,7 +67,7 @@ def generate_file_list_for_ep(nuget_artifacts_dir, ep, files_list, include_pdbs, and package_name != "Microsoft.ML.OnnxRuntime.Gpu.Linux" ): files_list.append( - '' % cpu_arch + '' ) for cpu_arch in ["x86_64", "arm64"]: if child.name == get_package_name("osx", cpu_arch, ep, is_training_package): @@ -79,7 +79,7 @@ def 
generate_file_list_for_ep(nuget_artifacts_dir, ep, files_list, include_pdbs, is_versioned_dylib = re.match(r".*[\.\d+]+\.dylib$", child_file.name) if child_file.is_file() and child_file.suffix == ".dylib" and not is_versioned_dylib: files_list.append( - '' % cpu_arch + '' ) for cpu_arch in ["x64", "aarch64"]: if child.name == get_package_name("linux", cpu_arch, ep, is_training_package): @@ -97,7 +97,7 @@ def generate_file_list_for_ep(nuget_artifacts_dir, ep, files_list, include_pdbs, and package_name != "Microsoft.ML.OnnxRuntime.Gpu.Windows" ): files_list.append( - '' % cpu_arch + '' ) if child.name == "onnxruntime-android" or child.name == "onnxruntime-training-android": diff --git a/tools/python/onnx_test_data_utils.py b/tools/python/onnx_test_data_utils.py index 56485bb78abbd..d50d610a903b7 100644 --- a/tools/python/onnx_test_data_utils.py +++ b/tools/python/onnx_test_data_utils.py @@ -59,7 +59,7 @@ def image_to_numpy(filename, shape, channels_last, add_batch_dim): # target size. w_ratio = new_w / w h_ratio = new_h / h - ratio = w_ratio if w_ratio > h_ratio else h_ratio + ratio = max(h_ratio, w_ratio) interim_w = int(w * ratio) interim_h = int(h * ratio) img = img.resize((interim_w, interim_h), PIL.Image.ANTIALIAS) diff --git a/tools/python/util/mobile_helpers/usability_checker.py b/tools/python/util/mobile_helpers/usability_checker.py index 3d8042ad5412b..a8b5021f1387b 100644 --- a/tools/python/util/mobile_helpers/usability_checker.py +++ b/tools/python/util/mobile_helpers/usability_checker.py @@ -29,7 +29,7 @@ def __init__(self, filename): self._ops_seen = set() with open(filename) as f: - for line in f.readlines(): + for line in f: # we're looking for a markdown table with 2 columns. first is op name. 
second is caveats # op name is domain:op if line.startswith("|"): diff --git a/tools/python/util/reduced_build_config_parser.py b/tools/python/util/reduced_build_config_parser.py index cb90026808fde..be39562e2d60d 100644 --- a/tools/python/util/reduced_build_config_parser.py +++ b/tools/python/util/reduced_build_config_parser.py @@ -113,7 +113,7 @@ def process_non_op_line(line): return False with open(config_file) as config: - for line in [orig_line.strip() for orig_line in config.readlines()]: + for line in [orig_line.strip() for orig_line in config]: if process_non_op_line(line): continue diff --git a/winml/lib/Api.Image/CpuDetensorizer.h b/winml/lib/Api.Image/CpuDetensorizer.h index e175fbbb4b6a3..04d828097ff3b 100644 --- a/winml/lib/Api.Image/CpuDetensorizer.h +++ b/winml/lib/Api.Image/CpuDetensorizer.h @@ -36,7 +36,8 @@ class CpuDetensorizer { auto nominalRangeConverter = NominalRangeConverter(pixelRange); - if (formatFrom == formatTo && (formatFrom == kImageTensorChannelTypeBGR8 || formatFrom == kImageTensorChannelTypeRGB8)) { + if (formatFrom == formatTo && + (formatFrom == kImageTensorChannelTypeBGR8 || formatFrom == kImageTensorChannelTypeRGB8)) { for (uint32_t i = 0; i < tensorHeight; i++) { BYTE* pPixel = pData; @@ -52,7 +53,8 @@ class CpuDetensorizer { pData += bufferWidth; } - } else if ((formatFrom == kImageTensorChannelTypeRGB8 && formatTo == kImageTensorChannelTypeBGR8) || (formatFrom == kImageTensorChannelTypeBGR8 && formatTo == kImageTensorChannelTypeRGB8)) { + } else if ((formatFrom == kImageTensorChannelTypeRGB8 && formatTo == kImageTensorChannelTypeBGR8) || + (formatFrom == kImageTensorChannelTypeBGR8 && formatTo == kImageTensorChannelTypeRGB8)) { for (uint32_t i = 0; i < tensorHeight; i++) { BYTE* pPixel = pData; @@ -68,7 +70,8 @@ class CpuDetensorizer { pData += bufferWidth; } - } else if (formatFrom == kImageTensorChannelTypeGRAY8 && (formatTo == kImageTensorChannelTypeBGR8 || formatTo == kImageTensorChannelTypeRGB8)) { + } else if 
(formatFrom == kImageTensorChannelTypeGRAY8 && + (formatTo == kImageTensorChannelTypeBGR8 || formatTo == kImageTensorChannelTypeRGB8)) { // just replicate the gray data across each channel for (uint32_t i = 0; i < end; i += bufferWidth) { for (uint32_t j = i; j < i + bytesPerRow; j += 4) { diff --git a/winml/lib/Api.Image/CpuTensorizer.h b/winml/lib/Api.Image/CpuTensorizer.h index ca5773b28fce2..ed9006470fd0e 100644 --- a/winml/lib/Api.Image/CpuTensorizer.h +++ b/winml/lib/Api.Image/CpuTensorizer.h @@ -39,7 +39,8 @@ class CpuTensorizer { auto nominalRangeConverter = NominalRangeConverter(pixelRange); - if (formatFrom == kImageTensorChannelTypeBGR8 && formatTo == kImageTensorChannelTypeBGR8 || formatFrom == kImageTensorChannelTypeRGB8 && formatTo == kImageTensorChannelTypeRGB8) { + if (formatFrom == kImageTensorChannelTypeBGR8 && formatTo == kImageTensorChannelTypeBGR8 || + formatFrom == kImageTensorChannelTypeRGB8 && formatTo == kImageTensorChannelTypeRGB8) { // Convert BGR8 -> BGR8 or RGB8 -> RGB8 for (uint64_t y = 0; y < yElements; y++) { DeinterleaveRowByteToFloat( @@ -52,7 +53,8 @@ class CpuTensorizer { nominalRangeConverter ); } - } else if (formatFrom == kImageTensorChannelTypeBGR8 && formatTo == kImageTensorChannelTypeRGB8 || formatFrom == kImageTensorChannelTypeRGB8 && formatTo == kImageTensorChannelTypeBGR8) { + } else if (formatFrom == kImageTensorChannelTypeBGR8 && formatTo == kImageTensorChannelTypeRGB8 || + formatFrom == kImageTensorChannelTypeRGB8 && formatTo == kImageTensorChannelTypeBGR8) { // Convert RGB8 -> BGR8 or BGR8 -> RGB8 for (uint32_t y = 0; y < yElements; y++) { DeinterleaveRowByteToFloat( @@ -65,7 +67,8 @@ class CpuTensorizer { nominalRangeConverter ); } - } else if (formatTo == kImageTensorChannelTypeGRAY8 && (formatFrom == kImageTensorChannelTypeBGR8 || formatFrom == kImageTensorChannelTypeRGB8)) { + } else if (formatTo == kImageTensorChannelTypeGRAY8 && + (formatFrom == kImageTensorChannelTypeBGR8 || formatFrom == 
kImageTensorChannelTypeRGB8)) { // Convert BGR8 -> GRAY8 or RGB8 -> GRAY8 uint32_t blueIncrement = formatFrom == kImageTensorChannelTypeBGR8 ? 0 : 2; uint32_t redIncrement = formatFrom == kImageTensorChannelTypeBGR8 ? 2 : 0; @@ -80,7 +83,8 @@ class CpuTensorizer { pixelInd++; } } - } else if (formatFrom == kImageTensorChannelTypeGRAY8 && (formatTo == kImageTensorChannelTypeBGR8 || formatTo == kImageTensorChannelTypeRGB8)) { + } else if (formatFrom == kImageTensorChannelTypeGRAY8 && + (formatTo == kImageTensorChannelTypeBGR8 || formatTo == kImageTensorChannelTypeRGB8)) { // Convert GRAY8 -> BGR8 or GRAY8 -> RGB8 for (UINT32 i = start; i < end; i += bufferWidth) { for (UINT32 j = i; j < i + bytesPerRow; j += bytesPerPixel) { diff --git a/winml/lib/Api.Image/D3DDeviceCache.cpp b/winml/lib/Api.Image/D3DDeviceCache.cpp index 977f2ba75216a..549a7bba77ef6 100644 --- a/winml/lib/Api.Image/D3DDeviceCache.cpp +++ b/winml/lib/Api.Image/D3DDeviceCache.cpp @@ -349,7 +349,8 @@ ID3D12RootSignature* D3DDeviceCache::GetTensorizeRootSignature() { newRootSignature->SetName(L"Tensorize Rootsignature"); } - if (InterlockedCompareExchangePointer(tensorize_root_signature_.put_void(), newRootSignature.get(), nullptr) == nullptr) { + if (InterlockedCompareExchangePointer(tensorize_root_signature_.put_void(), newRootSignature.get(), nullptr) == + nullptr) { // This thread won the race and just cached the PSO newRootSignature.detach(); } @@ -401,7 +402,8 @@ ID3D12RootSignature* D3DDeviceCache::GetDetensorizeRootSignature() { newRootSignature->SetName(L"Detensorize Rootsignature"); } - if (InterlockedCompareExchangePointer(detensorize_root_signature_.put_void(), newRootSignature.get(), nullptr) == nullptr) { + if (InterlockedCompareExchangePointer(detensorize_root_signature_.put_void(), newRootSignature.get(), nullptr) == + nullptr) { // This thread won the race and just cached the PSO newRootSignature.detach(); } @@ -416,7 +418,8 @@ ID3D12PipelineState* 
D3DDeviceCache::GetCachedPipelineState( PipelineStateCacheFormat formatTo, PipelineStateCacheOperation operation ) { - if (cached_pipeline_state[static_cast(type)][static_cast(formatFrom)][static_cast(formatTo)][static_cast(operation)] == nullptr) { + if (cached_pipeline_state[static_cast(type)][static_cast(formatFrom)][static_cast(formatTo)] + [static_cast(operation)] == nullptr) { winrt::com_ptr newPSO; if (operation == PipelineStateCacheOperation::kTensorize) { newPSO.attach(CreateTensorizePipelineState(type, formatFrom, formatTo)); @@ -425,12 +428,12 @@ ID3D12PipelineState* D3DDeviceCache::GetCachedPipelineState( } if (InterlockedCompareExchangePointer( - cached_pipeline_state[static_cast(type)][static_cast(formatFrom)][static_cast(formatTo)] - [static_cast(operation)] - .put_void(), - newPSO.get(), - nullptr - ) == nullptr) { + cached_pipeline_state[static_cast(type)][static_cast(formatFrom)][static_cast(formatTo)] + [static_cast(operation)] + .put_void(), + newPSO.get(), + nullptr + ) == nullptr) { // This thread won the race and just cached the PSO newPSO.detach(); } @@ -653,7 +656,8 @@ ID3D12Resource* D3DDeviceCache::GetDetensorizeVertexBuffer(_Out_ UINT* vertexBuf memcpy(pVertexDataBegin, triangleVertices, sizeof(triangleVertices)); newResource->Unmap(0, nullptr); - if (InterlockedCompareExchangePointer(detensorize_vertex_buffer_.put_void(), newResource.get(), nullptr) == nullptr) { + if (InterlockedCompareExchangePointer(detensorize_vertex_buffer_.put_void(), newResource.get(), nullptr) == + nullptr) { // This thread won the race and just cached the PSO newResource.detach(); } diff --git a/winml/lib/Api.Image/EventTimer.h b/winml/lib/Api.Image/EventTimer.h index 3620a7a2c0ee1..590675646b70d 100644 --- a/winml/lib/Api.Image/EventTimer.h +++ b/winml/lib/Api.Image/EventTimer.h @@ -4,7 +4,9 @@ class EventTimer { public: bool Start() { auto now = std::chrono::high_resolution_clock::now(); - if (!_started || std::chrono::duration_cast(now - _startTime).count() 
> _kDurationBetweenSendingEvents) { + if (!_started || + std::chrono::duration_cast(now - _startTime).count() > + _kDurationBetweenSendingEvents) { _started = true; _startTime = std::chrono::high_resolution_clock::now(); return true; diff --git a/winml/lib/Api.Image/ImageConversionHelpers.cpp b/winml/lib/Api.Image/ImageConversionHelpers.cpp index 11434c5fffb8e..441413bface28 100644 --- a/winml/lib/Api.Image/ImageConversionHelpers.cpp +++ b/winml/lib/Api.Image/ImageConversionHelpers.cpp @@ -69,7 +69,8 @@ void _winmli::ConvertVideoFrameToVideoFrame( wgdx::Direct3D11::IDirect3DSurface spInputDirect3DSurface = inputVideoFrame.Direct3DSurface(); // only one of softwarebitmap or direct3Dsurface should be non-null - if ((spInputSoftwareBitmap == nullptr && spInputDirect3DSurface == nullptr) || (spInputSoftwareBitmap != nullptr && spInputDirect3DSurface != nullptr)) { + if ((spInputSoftwareBitmap == nullptr && spInputDirect3DSurface == nullptr) || + (spInputSoftwareBitmap != nullptr && spInputDirect3DSurface != nullptr)) { WINML_THROW_HR(E_INVALIDARG); } @@ -133,11 +134,9 @@ bool _winmli::NeedsVideoFrameConversion( if (FAILED((hr = GetVideoFrameInfo(inputVideoFrame, format, width, height, luid)))) { bNeedConversion = true; - } else if (((int)inputBounds.Width != outputWidth) || - (inputBounds.X != 0) || - ((int)inputBounds.Height != outputHeight) || - (inputBounds.Y != 0) || - (inputVideoFrame == nullptr)) // Check crop + } else if (((int)inputBounds.Width != outputWidth) || (inputBounds.X != 0) || + ((int)inputBounds.Height != outputHeight) || (inputBounds.Y != 0) || + (inputVideoFrame == nullptr)) // Check crop { bNeedConversion = true; } else if (luid.HighPart != outputLuid.HighPart || luid.LowPart != outputLuid.LowPart) { diff --git a/winml/lib/Api.Image/ImageConverter.cpp b/winml/lib/Api.Image/ImageConverter.cpp index bb97f0ec7ff34..84b6f5a3a4c5c 100644 --- a/winml/lib/Api.Image/ImageConverter.cpp +++ b/winml/lib/Api.Image/ImageConverter.cpp @@ -50,7 +50,8 @@ ComPtr 
ImageConverter::FetchOrCreateFenceOnDevice( ComPtr fence; UINT comPtrSize = static_cast(sizeof(fence.GetAddressOf())); - if (FAILED(pD3D11Device->GetPrivateData(device_cache.GetFenceGuid(), &comPtrSize, fence.GetAddressOf())) || fence.Get() == nullptr) { + if (FAILED(pD3D11Device->GetPrivateData(device_cache.GetFenceGuid(), &comPtrSize, fence.GetAddressOf())) || + fence.Get() == nullptr) { // There's no fence on the device, so create a new one ComPtr spD3D11Device5; WINML_THROW_IF_FAILED(pD3D11Device->QueryInterface(IID_PPV_ARGS(&spD3D11Device5))); diff --git a/winml/lib/Api.Image/TensorToVideoFrameConverter.cpp b/winml/lib/Api.Image/TensorToVideoFrameConverter.cpp index 76a5623c5b4a5..456931d21e0a8 100644 --- a/winml/lib/Api.Image/TensorToVideoFrameConverter.cpp +++ b/winml/lib/Api.Image/TensorToVideoFrameConverter.cpp @@ -196,8 +196,9 @@ void TensorToVideoFrameConverter::DX12TensorToVideoFrame( UINT comPtrSize = static_cast(sizeof(spSharedD3D11Texture.GetAddressOf())); UINT handleSize = static_cast(sizeof(sharedHandle)); - if ((FAILED(spVideoFrameTexture->GetPrivateData( - _d3d11TextureGUID, &comPtrSize, spSharedD3D11Texture.GetAddressOf())) || + if ((FAILED( + spVideoFrameTexture->GetPrivateData(_d3d11TextureGUID, &comPtrSize, spSharedD3D11Texture.GetAddressOf()) + ) || !spSharedD3D11Texture.Get()) || (FAILED(spVideoFrameTexture->GetPrivateData(_handleGUID, &handleSize, &sharedHandle)) || sharedHandle != shared_handle_)) { @@ -365,7 +366,8 @@ void TensorToVideoFrameConverter::SoftwareTensorToVideoFrame( wgdx::Direct3D11::IDirect3DSurface spOutputSurface = pDestVideoFrame.Direct3DSurface(); // only one of softwarebitmap or direct3Dsurface should be non-null - if ((spOutputSoftwareBitmap == nullptr && spOutputSurface == nullptr) || (spOutputSoftwareBitmap != nullptr && spOutputSurface != nullptr)) { + if ((spOutputSoftwareBitmap == nullptr && spOutputSurface == nullptr) || + (spOutputSoftwareBitmap != nullptr && spOutputSurface != nullptr)) { 
WINML_THROW_HR(E_INVALIDARG); } if (spOutputSoftwareBitmap) { @@ -381,7 +383,10 @@ void TensorToVideoFrameConverter::SoftwareTensorToVideoFrame( if (_winmli::NeedsVideoFrameConversion( pDestVideoFrame, {}, {0, 0, (UINT32)tensorWidth, (UINT32)tensorHeight}, tensorWidth, tensorHeight )) { - if (converted_video_frame_ == nullptr || _winmli::NeedsVideoFrameConversion(converted_video_frame_, {}, {0, 0, (UINT32)tensorWidth, (UINT32)tensorHeight}, tensorWidth, tensorHeight)) { + if (converted_video_frame_ == nullptr || + _winmli::NeedsVideoFrameConversion( + converted_video_frame_, {}, {0, 0, (UINT32)tensorWidth, (UINT32)tensorHeight}, tensorWidth, tensorHeight + )) { converted_video_frame_ = wm::VideoFrame::CreateWithSoftwareBitmap( wgi::SoftwareBitmap(wgi::BitmapPixelFormat::Bgra8, tensorWidth, tensorHeight) ); diff --git a/winml/lib/Api.Image/VideoFrameToTensorConverter.cpp b/winml/lib/Api.Image/VideoFrameToTensorConverter.cpp index 0a763c77c94f4..a9b507ae4e16f 100644 --- a/winml/lib/Api.Image/VideoFrameToTensorConverter.cpp +++ b/winml/lib/Api.Image/VideoFrameToTensorConverter.cpp @@ -138,14 +138,19 @@ void VideoFrameToTensorConverter::VideoFrameToSoftwareTensor( wgdx::Direct3D11::IDirect3DSurface spInputSurface = inputVideoFrame.Direct3DSurface(); // only one of softwarebitmap or direct3Dsurface should be non-null - if ((spInputSoftwareBitmap == nullptr && spInputSurface == nullptr) || (spInputSoftwareBitmap != nullptr && spInputSurface != nullptr)) { + if ((spInputSoftwareBitmap == nullptr && spInputSurface == nullptr) || + (spInputSoftwareBitmap != nullptr && spInputSurface != nullptr)) { WINML_THROW_IF_FAILED(E_INVALIDARG); } UINT32 tensorHeight = static_cast(tensorDesc.sizes[2]); UINT32 tensorWidth = static_cast(tensorDesc.sizes[3]); - if (spInputSurface || _winmli::NeedsVideoFrameConversion(inputVideoFrame, {}, inputBounds, tensorWidth, tensorHeight)) { - if (converted_video_frame_ == nullptr || _winmli::NeedsVideoFrameConversion(converted_video_frame_, {}, {0, 
0, (UINT32)tensorWidth, (UINT32)tensorHeight}, tensorWidth, tensorHeight)) { + if (spInputSurface || + _winmli::NeedsVideoFrameConversion(inputVideoFrame, {}, inputBounds, tensorWidth, tensorHeight)) { + if (converted_video_frame_ == nullptr || + _winmli::NeedsVideoFrameConversion( + converted_video_frame_, {}, {0, 0, (UINT32)tensorWidth, (UINT32)tensorHeight}, tensorWidth, tensorHeight + )) { converted_video_frame_ = wm::VideoFrame::CreateWithSoftwareBitmap( wgi::SoftwareBitmap(wgi::BitmapPixelFormat::Bgra8, tensorWidth, tensorHeight) ); @@ -236,8 +241,8 @@ void VideoFrameToTensorConverter::VideoFrameToDX12Tensor( // TODO: Scale during the tensorization phase instead of using the video frame pipeline when the input bounds are not the same size as the tensor if (!_winmli::DirectXPixelFormatSupported(spDirect3DSurface.Description().Format) || - static_cast(inputBounds.Width) != tensorDesc.sizes[3] || - static_cast(inputBounds.Height) != tensorDesc.sizes[2]) { + static_cast(inputBounds.Width) != tensorDesc.sizes[3] || + static_cast(inputBounds.Height) != tensorDesc.sizes[2]) { // Force the VideoFrame to not do a conversion if the format is supported since we do it during the tensorization anyway wgdx::DirectXPixelFormat newFormat = _winmli::DirectXPixelFormatSupported(spDirect3DSurface.Description().Format) ? 
spDirect3DSurface.Description().Format @@ -269,7 +274,7 @@ void VideoFrameToTensorConverter::VideoFrameToDX12Tensor( D3D11_cached_texture_->GetDesc(&cachedTextureDesc); if (cachedTextureDesc.Width != scaledBounds.Width || cachedTextureDesc.Height != scaledBounds.Height || - cachedTextureDesc.Format != videoFrameTextureDesc.Format) { + cachedTextureDesc.Format != videoFrameTextureDesc.Format) { // The dimensions or format don't match, so we need to re-create our texture WINML_THROW_IF_FAILED( pDeviceCache->GetD3D11Device()->CreateTexture2D(&videoFrameTextureDesc, nullptr, &D3D11_cached_texture_) @@ -289,12 +294,12 @@ void VideoFrameToTensorConverter::VideoFrameToDX12Tensor( UINT comPtrSize = static_cast(sizeof(spSharedD3D11Texture.GetAddressOf())); UINT handleSize = static_cast(sizeof(sharedHandle)); - if ((FAILED(spVideoFrameTexture->GetPrivateData( - d3d11_texture_GUID_, &comPtrSize, spSharedD3D11Texture.GetAddressOf() - )) || - !spSharedD3D11Texture.Get()) || - (FAILED(spVideoFrameTexture->GetPrivateData(handle_GUID_, &handleSize, &sharedHandle)) || - sharedHandle != shared_handle_)) { + if ((FAILED( + spVideoFrameTexture->GetPrivateData(d3d11_texture_GUID_, &comPtrSize, spSharedD3D11Texture.GetAddressOf()) + ) || + !spSharedD3D11Texture.Get()) || + (FAILED(spVideoFrameTexture->GetPrivateData(handle_GUID_, &handleSize, &sharedHandle)) || + sharedHandle != shared_handle_)) { // Create a new shared texture that we cache on the video frame texture WINML_THROW_IF_FAILED(spTextureDevice->CreateTexture2D(&videoFrameTextureDesc, nullptr, &spSharedD3D11Texture)); @@ -423,9 +428,9 @@ void VideoFrameToTensorConverter::ConvertDX12TextureToGPUTensor( WINML_THROW_IF_FAILED(ULongLongMult(ullNumElementsTensor, uiTensorElementSize, &ullTensorSize)); if (outputDesc.Width < ullTensorSize || outputDesc.Height != 1 || - outputDesc.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER || - !(outputDesc.Flags & D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS) || - outputHeapProperties.Type != 
D3D12_HEAP_TYPE_DEFAULT) { + outputDesc.Dimension != D3D12_RESOURCE_DIMENSION_BUFFER || + !(outputDesc.Flags & D3D12_RESOURCE_FLAG_ALLOW_UNORDERED_ACCESS) || + outputHeapProperties.Type != D3D12_HEAP_TYPE_DEFAULT) { WINML_THROW_IF_FAILED(E_INVALIDARG); } } @@ -565,7 +570,8 @@ void VideoFrameToTensorConverter::ConvertSoftwareBitmapToGPUTensor( wgi::BitmapBounds scaledBounds = inputBounds; // TODO: Scale during the tensorization phase instead of using the video frame pipeline when the input bounds are not the same size as the tensor - if (static_cast(inputBounds.Width) != tensorDesc.sizes[3] || static_cast(inputBounds.Height) != tensorDesc.sizes[2]) { + if (static_cast(inputBounds.Width) != tensorDesc.sizes[3] || + static_cast(inputBounds.Height) != tensorDesc.sizes[2]) { scaledBounds = {0, 0, static_cast(tensorDesc.sizes[3]), static_cast(tensorDesc.sizes[2])}; // Force the VideoFrame to not do a conversion if the format is supported since we do it during the tensorization anyway diff --git a/winml/lib/Api.Image/inc/ConverterResourceStore.h b/winml/lib/Api.Image/inc/ConverterResourceStore.h index ffb413e0b92f3..24406c9fdaaef 100644 --- a/winml/lib/Api.Image/inc/ConverterResourceStore.h +++ b/winml/lib/Api.Image/inc/ConverterResourceStore.h @@ -25,7 +25,7 @@ struct ConverterResourceDescription { // 2) the resources are on different devices // 3) the resources have different pixel formats if (desc.width != width || desc.height != height || desc.luid.HighPart != luid.HighPart || - desc.luid.LowPart != luid.LowPart || desc.pixel_format != pixel_format) { + desc.luid.LowPart != luid.LowPart || desc.pixel_format != pixel_format) { return false; } diff --git a/winml/lib/Api/FeatureValues.h b/winml/lib/Api/FeatureValues.h index a330b244d40fc..fe6429f0a421b 100644 --- a/winml/lib/Api/FeatureValues.h +++ b/winml/lib/Api/FeatureValues.h @@ -29,37 +29,37 @@ #include "ImageFeatureValue.h" // CREATE_TENSOR is used by data tensor types to implement common functionality -#define 
CREATE_TENSOR(type, element_type, element_view_type) \ - namespace WINMLP { \ - struct type : public _winml::TensorBase< \ - element_type, \ - element_view_type, \ - type, \ - I##type, \ - type##T> { \ - using Base = TensorBase< \ - element_type, \ - element_view_type, \ - type, \ - I##type, \ - type##T>; \ - \ - type() = default; \ - \ - type(wfc::IIterable const& shape) : Base(shape){}; \ - \ - type(std::vector const& shape) : Base(shape){}; \ - \ - type(std::vector const& shape, ID3D12Resource* pResource) : Base(shape, pResource){}; \ - }; \ - } \ - namespace WINML::factory_implementation { \ - struct type : type##T { \ - STDMETHOD(CreateFromD3D12Resource) \ - (ID3D12Resource * value, __int64* shape, int shapeSize, IUnknown** result) { \ - return winmlp::type::CreateFromD3D12Resource(value, shape, shapeSize, result); \ - } \ - }; \ +#define CREATE_TENSOR(type, element_type, element_view_type) \ + namespace WINMLP { \ + struct type : public _winml::TensorBase< \ + element_type, \ + element_view_type, \ + type, \ + I##type, \ + type##T> { \ + using Base = TensorBase< \ + element_type, \ + element_view_type, \ + type, \ + I##type, \ + type##T>; \ + \ + type() = default; \ + \ + type(wfc::IIterable const& shape) : Base(shape) {}; \ + \ + type(std::vector const& shape) : Base(shape) {}; \ + \ + type(std::vector const& shape, ID3D12Resource* pResource) : Base(shape, pResource) {}; \ + }; \ + } \ + namespace WINML::factory_implementation { \ + struct type : type##T { \ + STDMETHOD(CreateFromD3D12Resource) \ + (ID3D12Resource * value, __int64* shape, int shapeSize, IUnknown** result) { \ + return winmlp::type::CreateFromD3D12Resource(value, shape, shapeSize, result); \ + } \ + }; \ } CREATE_TENSOR(TensorBoolean, bool, bool) @@ -86,11 +86,11 @@ CREATE_TENSOR(TensorString, std::string, winrt::hstring) #pragma warning(pop) // CREATE_MAP is used by map types to implement common functionality -#define CREATE_MAP(type, key_type, value_type) \ - namespace WINMLP { \ - struct 
type : public _winml::MapBase { \ - type(wfc::IMap const& data) : MapBase(data){}; \ - }; \ +#define CREATE_MAP(type, key_type, value_type) \ + namespace WINMLP { \ + struct type : public _winml::MapBase { \ + type(wfc::IMap const& data) : MapBase(data) {}; \ + }; \ } CREATE_MAP(MapInt64BitToInt64Bit, int64_t, int64_t) @@ -103,11 +103,11 @@ CREATE_MAP(MapStringToDouble, hstring, double) CREATE_MAP(MapStringToString, hstring, hstring) // CREATE_SEQUENCE is used by sequence types to implement common functionality -#define CREATE_SEQUENCE(type, element_type, raw_type) \ - namespace WINMLP { \ - struct type : public _winml::SequenceBase { \ - type(wfc::IIterable const& data) : SequenceBase(data){}; \ - }; \ +#define CREATE_SEQUENCE(type, element_type, raw_type) \ + namespace WINMLP { \ + struct type : public _winml::SequenceBase { \ + type(wfc::IIterable const& data) : SequenceBase(data) {}; \ + }; \ } using AbiMapStringFloat = wfc::IMap; diff --git a/winml/lib/Api/ImageFeatureValue.cpp b/winml/lib/Api/ImageFeatureValue.cpp index 8628c578e5004..65f2e56180e19 100644 --- a/winml/lib/Api/ImageFeatureValue.cpp +++ b/winml/lib/Api/ImageFeatureValue.cpp @@ -221,7 +221,9 @@ static _winml::ImageTensorDescription CreateImageTensorDescriptor( THROW_HR(E_NOTIMPL); } - if (pixelRange != winml::LearningModelPixelRange::ZeroTo255 && pixelRange != winml::LearningModelPixelRange::ZeroToOne && pixelRange != winml::LearningModelPixelRange::MinusOneToOne) { + if (pixelRange != winml::LearningModelPixelRange::ZeroTo255 && + pixelRange != winml::LearningModelPixelRange::ZeroToOne && + pixelRange != winml::LearningModelPixelRange::MinusOneToOne) { THROW_HR(E_NOTIMPL); } @@ -331,12 +333,11 @@ std::optional ImageFeatureValue::GetIn // The the widths and heights of input data must be the same. Or the // tensorDescriptor cannot describ the shape of the inputs. 
if (spImageDescriptor->Width() == MAXUINT32 && - !(std::adjacent_find(m_widths.begin(), m_widths.end(), std::not_equal_to()) == m_widths.end())) { + !(std::adjacent_find(m_widths.begin(), m_widths.end(), std::not_equal_to()) == m_widths.end())) { THROW_HR(E_INVALIDARG); } if (spImageDescriptor->Height() == MAXUINT32 && - !(std::adjacent_find(m_heights.begin(), m_heights.end(), std::not_equal_to()) == m_heights.end() - )) { + !(std::adjacent_find(m_heights.begin(), m_heights.end(), std::not_equal_to()) == m_heights.end())) { THROW_HR(E_INVALIDARG); } descriptorWidth = (spImageDescriptor->Width() == MAXUINT32) ? m_widths[0] : spImageDescriptor->Width(); @@ -354,12 +355,11 @@ std::optional ImageFeatureValue::GetIn return {}; } if (-1 == shape.GetAt(3) && - !(std::adjacent_find(m_widths.begin(), m_widths.end(), std::not_equal_to()) == m_widths.end())) { + !(std::adjacent_find(m_widths.begin(), m_widths.end(), std::not_equal_to()) == m_widths.end())) { THROW_HR(E_INVALIDARG); } if (-1 == shape.GetAt(2) && - !(std::adjacent_find(m_heights.begin(), m_heights.end(), std::not_equal_to()) == m_heights.end() - )) { + !(std::adjacent_find(m_heights.begin(), m_heights.end(), std::not_equal_to()) == m_heights.end())) { THROW_HR(E_INVALIDARG); } descriptorWidth = (-1 == shape.GetAt(3)) ? 
m_widths[0] : static_cast(shape.GetAt(3)); diff --git a/winml/lib/Api/LearningModel.cpp b/winml/lib/Api/LearningModel.cpp index 6d7c8317ce5f9..8de14a5dfce10 100644 --- a/winml/lib/Api/LearningModel.cpp +++ b/winml/lib/Api/LearningModel.cpp @@ -64,7 +64,7 @@ LearningModel::LearningModel(const hstring& path, const winml::ILearningModelOpe WINML_THROW_IF_FAILED(CreateOnnxruntimeEngineFactory(engine_factory_.put())); - wil::unique_handle file_handle { + wil::unique_handle file_handle{ #if WINVER >= _WIN32_WINNT_WIN8 CreateFile2(path.c_str(), GENERIC_READ, FILE_SHARE_READ, OPEN_EXISTING, NULL) }; diff --git a/winml/lib/Api/LearningModelSession.cpp b/winml/lib/Api/LearningModelSession.cpp index 011a4a718f82a..57bafda57fe54 100644 --- a/winml/lib/Api/LearningModelSession.cpp +++ b/winml/lib/Api/LearningModelSession.cpp @@ -21,8 +21,8 @@ static const auto c_enable_debug_output = L"EnableDebugOutput"; namespace guid_details { // This GUID is to be used for delimiting ML-related categories of capturable work. 
// {D113B493-BBA2-4993-8608-D706A73B91CE} -struct __declspec(uuid("D113B493-BBA2-4993-8608-D706A73B91CE")) __declspec(novtable -) WINML_PIX_EVAL_CAPTURABLE_WORK_GUID {}; +struct __declspec(uuid("D113B493-BBA2-4993-8608-D706A73B91CE")) +__declspec(novtable) WINML_PIX_EVAL_CAPTURABLE_WORK_GUID {}; } // namespace guid_details static const GUID WINML_PIX_EVAL_CAPTURABLE_WORK_GUID = __uuidof(guid_details::WINML_PIX_EVAL_CAPTURABLE_WORK_GUID); diff --git a/winml/lib/Api/NumericData.cpp b/winml/lib/Api/NumericData.cpp index ae5f9155d425c..1e3ba5438c10a 100644 --- a/winml/lib/Api/NumericData.cpp +++ b/winml/lib/Api/NumericData.cpp @@ -68,9 +68,7 @@ gsl::span numeric_data::buffer(bool should_sync_buffer) { } auto span = combined_buffer(); if (should_sync_buffer) { - _winml::LoadSpanFromDisjointBuffers( - buffers_.size(), [this](size_t i) { return buffer_at(i); }, span - ); + _winml::LoadSpanFromDisjointBuffers(buffers_.size(), [this](size_t i) { return buffer_at(i); }, span); } return span; @@ -80,9 +78,7 @@ bool numeric_data::flush() { auto should_flush = buffers_.size() != 1; if (should_flush) { auto span = combined_buffer(); - _winml::StoreSpanIntoDisjointBuffers( - buffers_.size(), [this](size_t i) { return buffer_at(i); }, span - ); + _winml::StoreSpanIntoDisjointBuffers(buffers_.size(), [this](size_t i) { return buffer_at(i); }, span); } return should_flush; } @@ -97,9 +93,7 @@ void numeric_data::set(size_t data_size, const byte* data) { ); gsl::span span(const_cast(data), data_size); - _winml::StoreSpanIntoDisjointBuffers( - buffers_.size(), [this](size_t i) { return buffer_at(i); }, span - ); + _winml::StoreSpanIntoDisjointBuffers(buffers_.size(), [this](size_t i) { return buffer_at(i); }, span); } static gsl::span get_span_from_ibuffer(wss::IBuffer buffer) { diff --git a/winml/lib/Api/impl/FeatureCompatibility.h b/winml/lib/Api/impl/FeatureCompatibility.h index 3fff488be23f7..1b124097f3f80 100644 --- a/winml/lib/Api/impl/FeatureCompatibility.h +++ 
b/winml/lib/Api/impl/FeatureCompatibility.h @@ -375,11 +375,11 @@ static void (*FeatureKindCompatibilityMatrix[4][4])( ) = { // Tensor, Sequence, Map, Image /* Tensor */ {verify, not_compatible, not_compatible, verify}, - /* Sequence */ + /* Sequence */ {not_compatible, verify, not_compatible, not_compatible}, - /* Map */ + /* Map */ {not_compatible, not_compatible, verify, not_compatible}, - /* Image */ + /* Image */ {verify, not_compatible, not_compatible, verify} }; } // namespace compatibility_details diff --git a/winml/lib/Common/CommonDeviceHelpers.cpp b/winml/lib/Common/CommonDeviceHelpers.cpp index 01615005a8947..b4ada6c498212 100644 --- a/winml/lib/Common/CommonDeviceHelpers.cpp +++ b/winml/lib/Common/CommonDeviceHelpers.cpp @@ -65,8 +65,10 @@ HRESULT GetDXCoreAdapterMetadata( RETURN_IF_FAILED(spFactory->GetAdapterByLuid(device.GetAdapterLuid(), IID_PPV_ARGS(spAdapter.put()))); if (spAdapter->IsAttributeSupported(DXCORE_ADAPTER_ATTRIBUTE_D3D12_CORE_COMPUTE) && - (!(spAdapter->IsAttributeSupported(DXCORE_ADAPTER_ATTRIBUTE_D3D12_GRAPHICS) || - spAdapter->IsAttributeSupported(DXCORE_ADAPTER_ATTRIBUTE_D3D11_GRAPHICS)))) { + (!( + spAdapter->IsAttributeSupported(DXCORE_ADAPTER_ATTRIBUTE_D3D12_GRAPHICS) || + spAdapter->IsAttributeSupported(DXCORE_ADAPTER_ATTRIBUTE_D3D11_GRAPHICS) + ))) { isMcdmAdapter = true; } else { isMcdmAdapter = false; diff --git a/winml/test/api/raw/buffer_backed_random_access_stream_reference.h b/winml/test/api/raw/buffer_backed_random_access_stream_reference.h index e9539c188e45a..6f492bf8340c9 100644 --- a/winml/test/api/raw/buffer_backed_random_access_stream_reference.h +++ b/winml/test/api/raw/buffer_backed_random_access_stream_reference.h @@ -347,8 +347,9 @@ struct BufferBackedRandomAccessStreamReference } virtual HRESULT STDMETHODCALLTYPE OpenReadAsync( - /* [retval, out] */ __RPC__deref_out_opt - __FIAsyncOperation_1_Windows__CStorage__CStreams__CIRandomAccessStreamWithContentType** operation + /* [retval, out] */ + 
__RPC__deref_out_opt __FIAsyncOperation_1_Windows__CStorage__CStreams__CIRandomAccessStreamWithContentType** + operation ) override { auto open_read_async = Microsoft::WRL::Make(); open_read_async.CopyTo(operation); diff --git a/winml/test/api/raw/winml_microsoft.h b/winml/test/api/raw/winml_microsoft.h index 92094188793d5..60527b238d8cd 100644 --- a/winml/test/api/raw/winml_microsoft.h +++ b/winml/test/api/raw/winml_microsoft.h @@ -141,8 +141,8 @@ struct TensorRuntimeClassID { static const wchar_t* RuntimeClass_ID; }; -__declspec(selectany -) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Microsoft_AI_MachineLearning_TensorFloat; +__declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = + RuntimeClass_Microsoft_AI_MachineLearning_TensorFloat; __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Microsoft_AI_MachineLearning_TensorFloat16Bit; __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = @@ -161,10 +161,10 @@ __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClas RuntimeClass_Microsoft_AI_MachineLearning_TensorUInt64Bit; __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Microsoft_AI_MachineLearning_TensorInt64Bit; -__declspec(selectany -) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Microsoft_AI_MachineLearning_TensorBoolean; -__declspec(selectany -) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Microsoft_AI_MachineLearning_TensorDouble; +__declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = + RuntimeClass_Microsoft_AI_MachineLearning_TensorBoolean; +__declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = + RuntimeClass_Microsoft_AI_MachineLearning_TensorDouble; template struct TensorFactory {}; @@ -319,30 +319,30 @@ struct TensorFactoryIID { static const GUID IID; }; -__declspec(selectany -) const GUID 
TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorFloatStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorFloat16BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorInt8BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt8BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt16BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorInt16BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt32BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorInt32BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt64BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorInt64BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorBooleanStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorDoubleStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorFloatStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorFloat16BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorInt8BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt8BitStatics; +__declspec(selectany) const GUID 
TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt16BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorInt16BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt32BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorInt32BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt64BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorInt64BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorBooleanStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorDoubleStatics; template struct TensorFactory2IID {}; @@ -395,30 +395,30 @@ struct TensorFactory2IID { static const GUID IID; }; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorFloatStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorFloat16BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorInt8BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt8BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt16BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorInt16BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt32BitStatics2; -__declspec(selectany -) const 
GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorInt32BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt64BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorInt64BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorBooleanStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Microsoft::AI::MachineLearning::IID_ITensorDoubleStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorFloatStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorFloat16BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorInt8BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt8BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt16BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorInt16BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt32BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorInt32BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorUInt64BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorInt64BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorBooleanStatics2; 
+__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Microsoft::AI::MachineLearning::IID_ITensorDoubleStatics2; inline HRESULT GetActivationFactory(const wchar_t* p_class_id, const IID& iid, void** factory) noexcept { // Fallback to OS binary if the redistributable is not present! diff --git a/winml/test/api/raw/winml_windows.h b/winml/test/api/raw/winml_windows.h index 944daff6dd10a..8e72743f3d98b 100644 --- a/winml/test/api/raw/winml_windows.h +++ b/winml/test/api/raw/winml_windows.h @@ -141,12 +141,12 @@ struct TensorRuntimeClassID { static const wchar_t* RuntimeClass_ID; }; -__declspec(selectany -) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorFloat; +__declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = + RuntimeClass_Windows_AI_MachineLearning_TensorFloat; __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorFloat16Bit; -__declspec(selectany -) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorInt8Bit; +__declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = + RuntimeClass_Windows_AI_MachineLearning_TensorInt8Bit; __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorUInt8Bit; __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = @@ -161,10 +161,10 @@ __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClas RuntimeClass_Windows_AI_MachineLearning_TensorUInt64Bit; __declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorInt64Bit; -__declspec(selectany -) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = RuntimeClass_Windows_AI_MachineLearning_TensorBoolean; -__declspec(selectany -) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = 
RuntimeClass_Windows_AI_MachineLearning_TensorDouble; +__declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = + RuntimeClass_Windows_AI_MachineLearning_TensorBoolean; +__declspec(selectany) const wchar_t* TensorRuntimeClassID::RuntimeClass_ID = + RuntimeClass_Windows_AI_MachineLearning_TensorDouble; template struct TensorFactory {}; @@ -319,30 +319,30 @@ struct TensorFactoryIID { static const GUID IID; }; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorFloatStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorFloat16BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt8BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt8BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt16BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt16BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt32BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt32BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt64BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt64BitStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorBooleanStatics; -__declspec(selectany -) const GUID TensorFactoryIID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorDoubleStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + 
ABI::Windows::AI::MachineLearning::IID_ITensorFloatStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorFloat16BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorInt8BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorUInt8BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorUInt16BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorInt16BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorUInt32BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorInt32BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorUInt64BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorInt64BitStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorBooleanStatics; +__declspec(selectany) const GUID TensorFactoryIID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorDoubleStatics; template struct TensorFactory2IID {}; @@ -395,30 +395,30 @@ struct TensorFactory2IID { static const GUID IID; }; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorFloatStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorFloat16BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt8BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = 
ABI::Windows::AI::MachineLearning::IID_ITensorUInt8BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt16BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt16BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt32BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt32BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorUInt64BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorInt64BitStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorBooleanStatics2; -__declspec(selectany -) const GUID TensorFactory2IID::IID = ABI::Windows::AI::MachineLearning::IID_ITensorDoubleStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorFloatStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorFloat16BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorInt8BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorUInt8BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorUInt16BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorInt16BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorUInt32BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + 
ABI::Windows::AI::MachineLearning::IID_ITensorInt32BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorUInt64BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorInt64BitStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorBooleanStatics2; +__declspec(selectany) const GUID TensorFactory2IID::IID = + ABI::Windows::AI::MachineLearning::IID_ITensorDoubleStatics2; inline HRESULT GetActivationFactory(const wchar_t* p_class_id, const IID& iid, void** factory) noexcept { // Fallback to OS binary if the redistributable is not present! diff --git a/winml/test/image/imagetests.cpp b/winml/test/image/imagetests.cpp index b408c0315f94a..04717c75aa150 100644 --- a/winml/test/image/imagetests.cpp +++ b/winml/test/image/imagetests.cpp @@ -212,13 +212,16 @@ class ImageTests : public ::testing::Test { const std::wstring& model_file_name, const std::wstring& image_file_name, const InputImageSource input_image_source ) { // Case that the tensor's shape doesn't match model's shape should be skipped - if ((L"1080.jpg" == image_file_name || L"kitten_224.png" == image_file_name) && (InputImageSource::FromGPUResource == input_image_source || InputImageSource::FromCPUResource == input_image_source)) { + if ((L"1080.jpg" == image_file_name || L"kitten_224.png" == image_file_name) && + (InputImageSource::FromGPUResource == input_image_source || + InputImageSource::FromCPUResource == input_image_source)) { return true; } // Case that the images's shape doesn't match model's shape which expects free dimension should be skipped. 
// Because the fns-candy is not real model that can handle free dimensional input - if ((L"1080.jpg" == image_file_name || L"kitten_224.png" == image_file_name) && L"fns-candy_Bgr8_freeDimInput.onnx" == model_file_name) { + if ((L"1080.jpg" == image_file_name || L"kitten_224.png" == image_file_name) && + L"fns-candy_Bgr8_freeDimInput.onnx" == model_file_name) { return true; } @@ -385,7 +388,8 @@ TEST_P(ImageTest, ImageTest) { GTEST_SKIP() << "This test is disabled"; } - if (LearningModelDeviceKind::Cpu != param.device_kind || InputImageSource::FromGPUResource == param.input_image_source) { + if (LearningModelDeviceKind::Cpu != param.device_kind || + InputImageSource::FromGPUResource == param.input_image_source) { GPUTEST; } @@ -482,13 +486,14 @@ TEST_P(BatchTest, BatchSupport) { if (param.use_session_options) { optimized_batch_size = param.use_session_options; } - if (VideoFrameSource::FromDirect3DSurface == param.video_frame_source && LearningModelDeviceKind::Cpu == param.device_kind) { + if (VideoFrameSource::FromDirect3DSurface == param.video_frame_source && + LearningModelDeviceKind::Cpu == param.device_kind) { return; } if (LearningModelDeviceKind::Cpu != param.device_kind || - VideoFrameSource::FromDirect3DSurface == param.video_frame_source || - VideoFrameSource::FromDirect3DSurface == param.output_video_frame_source || - VideoFrameSource::FromUnsupportedD3DSurface == param.output_video_frame_source) { + VideoFrameSource::FromDirect3DSurface == param.video_frame_source || + VideoFrameSource::FromDirect3DSurface == param.output_video_frame_source || + VideoFrameSource::FromUnsupportedD3DSurface == param.output_video_frame_source) { GPUTEST; } @@ -556,7 +561,7 @@ TEST_P(BatchTest, BatchSupport) { for (int i = 0; i < param.batch_size; ++i) { std::wstring bm_image_path = FileHelpers::GetModulePath() + L"batchGroundTruth\\" + param.input_images[i]; if (VideoFrameSource::FromSoftwareBitmap != param.output_video_frame_source && - OutputBindingStrategy::Unbound != 
param.output_binding_strategy) { + OutputBindingStrategy::Unbound != param.output_binding_strategy) { VideoFrame D3D_video_frame = output_video_frames.GetAt(i); VideoFrame SB_video_frame(BitmapPixelFormat::Bgra8, 720, 720); D3D_video_frame.as().CopyToAsync(SB_video_frame).get(); diff --git a/winml/test/model/compare_feature_value.cpp b/winml/test/model/compare_feature_value.cpp index 30b16c4ad5f73..ac2553987f5ad 100644 --- a/winml/test/model/compare_feature_value.cpp +++ b/winml/test/model/compare_feature_value.cpp @@ -13,7 +13,8 @@ template bool IsResultCloselyMatch(const T& outvalue, const T& expected_value, const double diff, const double tol) { if (diff > tol) return false; - if (std::isnan(diff) && !(std::isnan(outvalue) && std::isnan(expected_value)) && !(std::isinf(outvalue) && std::isinf(expected_value))) + if (std::isnan(diff) && !(std::isnan(outvalue) && std::isnan(expected_value)) && + !(std::isinf(outvalue) && std::isinf(expected_value))) return false; return true; } diff --git a/winml/test/model/model_tests.cpp b/winml/test/model/model_tests.cpp index 859914014b8bb..4087bfd87caa7 100644 --- a/winml/test/model/model_tests.cpp +++ b/winml/test/model/model_tests.cpp @@ -150,7 +150,8 @@ std::string GetTestDataPath() { std::string testDataPath(MAX_PATH, '\0'); auto environmentVariableFetchSuceeded = GetEnvironmentVariableA("WINML_TEST_DATA_PATH", testDataPath.data(), MAX_PATH); - if (environmentVariableFetchSuceeded == 0 && GetLastError() == ERROR_ENVVAR_NOT_FOUND || environmentVariableFetchSuceeded > MAX_PATH) { + if (environmentVariableFetchSuceeded == 0 && GetLastError() == ERROR_ENVVAR_NOT_FOUND || + environmentVariableFetchSuceeded > MAX_PATH) { // if the WINML_TEST_DATA_PATH environment variable cannot be found, attempt to find the hardcoded models folder std::wstring modulePath = FileHelpers::GetModulePath(); std::filesystem::path currPath = modulePath.substr(0, modulePath.find_last_of(L"\\")); @@ -357,7 +358,8 @@ bool 
ModifyNameIfDisabledTest(/*inout*/ std::string& testName, winml::LearningMo if (SkipGpuTests()) { reason = "GPU tests are not enabled for this build."; shouldSkip = true; - } else if (disabledGpuAdapterTests.find(testName) != disabledGpuAdapterTests.end() && ShouldSkipTestOnGpuAdapter(testName)) { + } else if (disabledGpuAdapterTests.find(testName) != disabledGpuAdapterTests.end() && + ShouldSkipTestOnGpuAdapter(testName)) { reason = disabledGpuAdapterTests[testName].second; shouldSkip = true; } @@ -386,9 +388,7 @@ std::string GetFullNameOfTest(ITestCase* testCase, winml::LearningModelDeviceKin name += tokenizedModelPath[tokenizedModelPath.size() - 2] += "_"; // model name name += tokenizedModelPath[tokenizedModelPath.size() - 3]; // opset version - std::replace_if( - name.begin(), name.end(), [](char c) { return !absl::ascii_isalnum(c); }, '_' - ); + std::replace_if(name.begin(), name.end(), [](char c) { return !absl::ascii_isalnum(c); }, '_'); // Determine if test should be skipped, using the generic name (no CPU or GPU suffix yet). 
bool isDisabled = ModifyNameIfDisabledTest(/*inout*/ name, deviceKind); diff --git a/winml/test/model/skip_model_tests.h b/winml/test/model/skip_model_tests.h index cf55d8bcbae7e..349332c6ae0e3 100644 --- a/winml/test/model/skip_model_tests.h +++ b/winml/test/model/skip_model_tests.h @@ -114,14 +114,14 @@ std::unordered_map disabledTests({ { "coreml_DecisionTreeClassifier_OpenML_1464_blood_transfusion_opset7", disabledTestDefaultReason}, { "coreml_AgeNet_ImageNet_opset7", disabledTestDefaultReason}, - // GPU specific cases: + // GPU specific cases: // ONNX zoo models { "mask_rcnn_opset10_GPU", "Bug 31005388: mask_rcnn opset 10 onnx zoo model fails to evaluate on DirectML https://microsoft.visualstudio.com/OS/_workitems/edit/31005388" }, { "faster_rcnn_opset10_GPU", "Bug 31005511: Failed to extract tensor data from evaluate result of faster_rcnn opset 10 model in DirectML https://microsoft.visualstudio.com/OS/_workitems/edit/31005511" }, - // ONNX model zoo's int8/qdq models generally do not work on CPUs that lack 8-bit instructions. + // ONNX model zoo's int8/qdq models generally do not work on CPUs that lack 8-bit instructions. 
{ "YOLOv3_12_int8_opset12", disabledTestDefaultReason}, { "VGG_16_int8_opset12", disabledTestDefaultReason}, { "SSD_int8_opset12", disabledTestDefaultReason}, @@ -137,7 +137,7 @@ std::unordered_map disabledTests({ { "EfficientNet_Lite4_qdq_opset11", disabledTestDefaultReason}, { "EfficientNet_Lite4_int8_opset11", disabledTestDefaultReason}, - // Tier 2 models + // Tier 2 models { "fp16_test_tiny_yolov2_opset7_GPU", "Bug 31005780: Result of fp16_test_tiny_yolov2_opset7 and fp16_coreml_FNS_Candy_opset7 models on DirectML aren't as accurate as on CPU https://microsoft.visualstudio.com/OS/_workitems/edit/31005780"}, { "fp16_tiny_yolov2_opset8_GPU", diff --git a/winml/test/scenario/cppwinrt/NoisyReluCpu.h b/winml/test/scenario/cppwinrt/NoisyReluCpu.h index 5cccbae67407c..e419205fd52dc 100644 --- a/winml/test/scenario/cppwinrt/NoisyReluCpu.h +++ b/winml/test/scenario/cppwinrt/NoisyReluCpu.h @@ -65,12 +65,14 @@ struct NoisyReluOperator : winrt::implementsGetTensorDataType() == MLOperatorTensorDataType::Float && inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float) { + if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float && + inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float) { // For cpu data if (outputTensor->IsCpuData() && inputTensor->IsCpuData()) { ComputeInternal(inputTensor.get(), outputTensor.get(), inputDataSize); } - } else if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double && inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double) { + } else if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double && + inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double) { // For cpu data if (outputTensor->IsCpuData() && inputTensor->IsCpuData()) { ComputeInternal(inputTensor.get(), outputTensor.get(), inputDataSize); diff --git a/winml/test/scenario/cppwinrt/ReluCpu.h b/winml/test/scenario/cppwinrt/ReluCpu.h index 7bb275f7b399b..e8e91489fe872 100644 --- 
a/winml/test/scenario/cppwinrt/ReluCpu.h +++ b/winml/test/scenario/cppwinrt/ReluCpu.h @@ -60,12 +60,14 @@ struct ReluOperator : winrt::implements { } // If the tensor types are both float type - if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float && inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float) { + if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float && + inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Float) { // For cpu data if (outputTensor->IsCpuData() && inputTensor->IsCpuData()) { ComputeInternal(inputTensor.get(), outputTensor.get(), inputDataSize); } - } else if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double && inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double) { + } else if (outputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double && + inputTensor->GetTensorDataType() == MLOperatorTensorDataType::Double) { // For cpu data if (outputTensor->IsCpuData() && inputTensor->IsCpuData()) { ComputeInternal(inputTensor.get(), outputTensor.get(), inputDataSize);