diff --git a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/DmlGraphFusionHelper.cpp b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/DmlGraphFusionHelper.cpp index 8f67174157b8a..8ee31d4b84f2f 100644 --- a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/DmlGraphFusionHelper.cpp +++ b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/DmlGraphFusionHelper.cpp @@ -368,7 +368,7 @@ namespace DmlGraphFusionHelper DML_CONSTANT_DATA_GRAPH_NODE_DESC* constantNode = allocator.template Allocate<DML_CONSTANT_DATA_GRAPH_NODE_DESC>(); constantNode->Name = node.Name.data(); - constantNode->DataSize = constantData.dataSize; + constantNode->DataSize = gsl::narrow_cast<UINT>(constantData.dataSize); constantNode->Data = constantData.data; dmlGraphNodes.push_back(DML_GRAPH_NODE_DESC{DML_GRAPH_NODE_TYPE_CONSTANT, constantNode}); } @@ -845,11 +845,11 @@ namespace DmlGraphFusionHelper .Provider(onnxruntime::kDmlExecutionProvider); // Force the CPU inputs to be allocated on the CPU - for (int i = 0; i < subGraphInputArgNames.size(); ++i) + for (size_t i = 0; i < subGraphInputArgNames.size(); ++i) { if (dynamicCpuInputMap.find(subGraphInputArgNames[i]) != dynamicCpuInputMap.end()) { - builder.InputMemoryType(OrtMemTypeCPUInput, i); + builder.InputMemoryType(OrtMemTypeCPUInput, static_cast<int>(i)); } } diff --git a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/DmlRuntimeFusedGraphKernel.cpp b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/DmlRuntimeFusedGraphKernel.cpp index 1b4034f10eaa3..7cd23256214dd 100644 --- a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/DmlRuntimeFusedGraphKernel.cpp +++ b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/DmlRuntimeFusedGraphKernel.cpp @@ -122,7 +122,7 @@ namespace Dml { if (initializerIter->second.first->raw_data().length() == inputProto.raw_data().length()) { - for (int i = 0; i < inputProto.raw_data().length(); ++i) + for (size_t i = 0; i < inputProto.raw_data().length(); ++i) { if 
(initializerIter->second.first->raw_data()[i] != inputProto.raw_data()[i]) { diff --git a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/GraphDescBuilder.cpp b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/GraphDescBuilder.cpp index 387767f821b3e..22de743f6e718 100644 --- a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/GraphDescBuilder.cpp +++ b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/GraphDescBuilder.cpp @@ -333,7 +333,7 @@ namespace Dml::GraphDescBuilder EdgeShapes inputShapesOverrides(node.InputDefs().size()); // Override the input shapes with shapes that were previously inferred - for (int inputIndex = 0; inputIndex < node.InputDefs().size(); ++inputIndex) + for (size_t inputIndex = 0; inputIndex < node.InputDefs().size(); ++inputIndex) { auto inputDef = node.InputDefs()[inputIndex]; @@ -344,7 +344,8 @@ namespace Dml::GraphDescBuilder } else if (inputDef->HasTensorOrScalarShape()) { - for (int i = 0; i < inputDef->Shape()->dim_size(); ++i) + int dimSize = gsl::narrow_cast<int>(inputDef->Shape()->dim_size()); + for (int i = 0; i < dimSize; ++i) { ORT_THROW_HR_IF(E_INVALIDARG, !inputDef->Shape()->dim(i).has_dim_value()); inputShapesOverrides.GetMutableShape(inputIndex).push_back(gsl::narrow_cast<uint32_t>(inputDef->Shape()->dim(i).dim_value())); @@ -364,7 +365,7 @@ namespace Dml::GraphDescBuilder ); ORT_THROW_HR_IF(E_UNEXPECTED, outputShapes.EdgeCount() != node.OutputDefs().size()); - for (int i = 0; i < node.OutputDefs().size(); ++i) + for (size_t i = 0; i < node.OutputDefs().size(); ++i) { inferredOutputShapes[node.OutputDefs()[i]->Name()] = outputShapes.GetShape(i); } diff --git a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorResize.cpp b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorResize.cpp index d31203308aef7..3edc275e630ad 100644 --- a/onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorResize.cpp +++ 
b/onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorResize.cpp @@ -60,7 +60,7 @@ void ComputePixelOffsetsAndScales( // Fill in all the input/output pixel offset for each axis, // and recompute the scale for certain modes. - for (uint64_t i = 0; i < rank; ++i) + for (uint32_t i = 0; i < rank; ++i) { float inputPixelOffset = 0; float outputPixelOffset = 0; diff --git a/onnxruntime/core/providers/dml/OperatorAuthorHelper/OperatorHelper.cpp b/onnxruntime/core/providers/dml/OperatorAuthorHelper/OperatorHelper.cpp index deed62901dfb0..5e5d5f9f27cfc 100644 --- a/onnxruntime/core/providers/dml/OperatorAuthorHelper/OperatorHelper.cpp +++ b/onnxruntime/core/providers/dml/OperatorAuthorHelper/OperatorHelper.cpp @@ -1149,11 +1149,11 @@ namespace OperatorHelper HandleEmptyAxes(axes, inputShape, false); } - uint32_t numAxes = gsl::narrow_cast<uint32_t>(axes.size()); - for (int32_t i = 0; i < axes.size(); i++) + size_t numAxes = axes.size(); + for (size_t i = 0; i < numAxes; i++) { auto xi_begin = padding[i]; - auto xi_end = padding[i+axes.size()]; + auto xi_end = padding[i+numAxes]; m_startPadding[axes[i]] = xi_begin; m_endPadding[axes[i]] = xi_end; } @@ -1888,7 +1888,7 @@ namespace OperatorHelper m_outputShape.resize(2 + m_imageShape.size()); m_outputShape[0] = m_inputShape[0]; // N m_outputShape[1] = m_inputShape[1] / blockShapeProduct; // C - for (int i = 2; i < m_outputShape.size(); i++) + for (size_t i = 2; i < m_outputShape.size(); i++) { m_outputShape[i] = m_imageShape[i - 2]; };