Lint runner
raoanag committed Feb 22, 2024
1 parent 2e63289 commit 1d573c9
Showing 4 changed files with 45 additions and 10 deletions.
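
The commit title refers to the repository's lint tooling; as an illustrative, hedged aside (not part of this commit), fixes like these are typically produced by running ONNX Runtime's lintrunner setup locally. The commands wrapped below are assumptions about that CLI rather than anything taken from this change.

# Illustrative sketch only, not part of this commit. Assumes
# `pip install lintrunner lintrunner-adapters` and that the checkout carries
# ONNX Runtime's .lintrunner.toml; the flags are assumptions about the
# lintrunner CLI.
import subprocess

# One-time setup of the linter environments declared in .lintrunner.toml.
subprocess.run(["lintrunner", "init"], check=True)

# Lint the files touched by this commit and apply the suggested fixes (-a).
subprocess.run(
    [
        "lintrunner",
        "-a",
        "onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorMatMulIntegerToFloat.cpp",
        "onnxruntime/test/testdata/matmul_integer_to_float.py",
    ],
    check=True,
)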
onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorMatMulIntegerToFloat.cpp
@@ -75,7 +75,7 @@ class DmlOperatorMatMulIntegerToFloat : public DmlOperator
{

Check warning on line 76 in onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorMatMulIntegerToFloat.cpp (GitHub Actions / Lint C++)
[cpplint] reported by reviewdog 🐶 Redundant blank line at the start of a code block should be deleted. [whitespace/blank_line] [2]
m_inputTensorDescs[DmlInputIndex::dmlAZeroPoint] = CreateTensorDescFromInput(
kernelInfo,
kernelInfo,
OrtInputTensors::ortAZeroPoint,
TensorAxis::DoNotCoerce,

Check warning on line 80 in onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/DmlOperatorMatMulIntegerToFloat.cpp (GitHub Actions / Lint C++)
[cpplint] reported by reviewdog 🐶 Line ends in whitespace. Consider deleting these extra spaces. [whitespace/end_of_line] [4] (a sketch for reproducing both cpplint findings locally follows this file's diff)
TensorAxis::H,
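The two warnings above come from cpplint running inside the Lint C++ workflow via reviewdog. A minimal reproduction sketch follows, assuming cpplint is installed from PyPI; the --filter string (disable everything, re-enable the two whitespace checks) is an assumption about cpplint's filter syntax, not something this commit uses.

# Minimal reproduction sketch, assuming `pip install cpplint`.
import subprocess

target = (
    "onnxruntime/core/providers/dml/DmlExecutionProvider/src/Operators/"
    "DmlOperatorMatMulIntegerToFloat.cpp"
)

result = subprocess.run(
    ["cpplint", "--filter=-,+whitespace/blank_line,+whitespace/end_of_line", target],
    capture_output=True,
    text=True,
)

# cpplint prints one line per finding on stderr, matching the annotations above.
print(result.stderr)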
4 changes: 2 additions & 2 deletions onnxruntime/test/optimizer/graph_transform_test.cc
@@ -5680,15 +5680,15 @@ TEST_F(GraphTransformationTests, MatMulIntegerToFloatTest) {
}

#ifdef USE_DML
TEST_F(GraphTransformationTests, MatMulIntegerToFloat16Test) {
TEST_F(GraphTransformationTests, MatMulIntegerToFloat16Test) {
constexpr const ORTCHAR_T* model_uri = MODEL_FOLDER "fusion/matmul_integer_to_float16_int8.onnx";
std::shared_ptr<Model> p_model;
ASSERT_STATUS_OK(Model::Load(model_uri, p_model, nullptr, *logger_));
Graph& graph = p_model->MainGraph();

for (auto& node : graph.Nodes()) {
node.SetExecutionProviderType(kDmlExecutionProvider);
}
}
onnxruntime::GraphTransformerManager graph_transformation_mgr{5};
ASSERT_STATUS_OK(graph_transformation_mgr.Register(std::make_unique<MatMulIntegerToFloatFusion>(), TransformerLevel::Level2));
ASSERT_STATUS_OK(graph_transformation_mgr.ApplyTransformers(graph, TransformerLevel::Level2, *logger_));
47 changes: 41 additions & 6 deletions onnxruntime/test/testdata/matmul_integer_to_float.py
@@ -13,7 +13,13 @@ def GenerateModel(model_name, sign_i, sign_w, output_type_fp16, has_zp=True, bia
"MatMulInteger",
),
helper.make_node("Mul", ["a_scale", "b_scale"], ["multiplier"], "mul_right"),
helper.make_node("Cast", ["matmul_output_int32"], ["matmul_output_float"], "cast", to=TensorProto.FLOAT16 if output_type_fp16 else TensorProto.FLOAT),
helper.make_node(
"Cast",
["matmul_output_int32"],
["matmul_output_float"],
"cast",
to=TensorProto.FLOAT16 if output_type_fp16 else TensorProto.FLOAT,
),
helper.make_node(
"Mul",
["matmul_output_float", "multiplier"],
@@ -48,14 +54,22 @@ def GenerateModel(model_name, sign_i, sign_w, output_type_fp16, has_zp=True, bia
if bias:
nodes.extend([helper.make_node("Add", ["mul_bottom_output", "bias"], ["Y"], "add")])

inputs.extend([helper.make_tensor_value_info("bias", TensorProto.FLOAT16 if output_type_fp16 else TensorProto.FLOAT, ["N"])])
inputs.extend(
[
helper.make_tensor_value_info(
"bias", TensorProto.FLOAT16 if output_type_fp16 else TensorProto.FLOAT, ["N"]
)
]
)

graph = helper.make_graph(
nodes,
"DynamicQuantizeMatMul_fusion", # name
inputs,
[ # outputs
helper.make_tensor_value_info("Y", TensorProto.FLOAT16 if output_type_fp16 else TensorProto.FLOAT, ["M", "N"]),
helper.make_tensor_value_info(
"Y", TensorProto.FLOAT16 if output_type_fp16 else TensorProto.FLOAT, ["M", "N"]
),
],
)

@@ -67,8 +81,29 @@ def GenerateModel(model_name, sign_i, sign_w, output_type_fp16, has_zp=True, bia
GenerateModel("matmul_integer_to_float16_int8.onnx", sign_i=False, sign_w=True, output_type_fp16=True)
GenerateModel("matmul_integer_to_float_int8.onnx", sign_i=False, sign_w=True, output_type_fp16=False)
GenerateModel("matmul_integer_to_float_uint8.onnx", sign_i=False, sign_w=False, output_type_fp16=False)
GenerateModel("matmul_integer_to_float_int8_bias.onnx", sign_i=False, sign_w=True, output_type_fp16=False, has_zp=False, bias=True)
GenerateModel("matmul_integer_to_float_uint8_bias.onnx", sign_i=False, sign_w=False, output_type_fp16=False, has_zp=False, bias=True)
GenerateModel(
"matmul_integer_to_float_int8_bias.onnx",
sign_i=False,
sign_w=True,
output_type_fp16=False,
has_zp=False,
bias=True,
)
GenerateModel(
"matmul_integer_to_float_uint8_bias.onnx",
sign_i=False,
sign_w=False,
output_type_fp16=False,
has_zp=False,
bias=True,
)

GenerateModel("matmul_integer_to_float_int8_int8.onnx", sign_i=True, sign_w=True, output_type_fp16=False)
GenerateModel("matmul_integer_to_float_int8_int8_bias.onnx", sign_i=True, sign_w=True, output_type_fp16=False, has_zp=False, bias=True)
GenerateModel(
"matmul_integer_to_float_int8_int8_bias.onnx",
sign_i=True,
sign_w=True,
output_type_fp16=False,
has_zp=False,
bias=True,
)
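
The calls above only reformat how the test models are generated; the graphs themselves are unchanged. As a hypothetical sanity check (not part of this commit), one of the generated files can be loaded with the onnx package to confirm the node pattern that MatMulIntegerToFloatFusion looks for.

# Hypothetical check, assuming `pip install onnx` and that the script above
# has been run so matmul_integer_to_float16_int8.onnx exists locally.
import onnx

model = onnx.load("matmul_integer_to_float16_int8.onnx")
print([node.op_type for node in model.graph.node])
# Expected, per the generator above: ['MatMulInteger', 'Mul', 'Cast', 'Mul']
# (an 'Add' node is appended when GenerateModel is called with bias=True).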
@@ -104,4 +104,4 @@ def GenerateModel(model_name):  # noqa: N802


if __name__ == "__main__":
GenerateModel("matmul_integer_to_float.onnx")
GenerateModel("matmul_integer_to_float.onnx")
