Introduce new optimizer MatMul + BatchNormalization #17915

Merged: 27 commits, Oct 25, 2023

Commits (27)
de5bc5e  Add new fusion Matmul + BN (sumitsays, Oct 12, 2023)
4cb3d7e  Update comments (sumitsays, Oct 12, 2023)
c797f40  Remove redundant code (sumitsays, Oct 12, 2023)
2024d64  Remove extra method scale_to_axis (sumitsays, Oct 12, 2023)
6ea436f  Refactored the code as per ORT style (sumitsays, Oct 12, 2023)
f63bd11  Added testcase (sumitsays, Oct 13, 2023)
7cc2013  Added test file (sumitsays, Oct 13, 2023)
c92ed58  Added extra assertion (sumitsays, Oct 13, 2023)
8bf29cf  Merge branch 'main' into user/sumita/matmulbn (sumitsays, Oct 16, 2023)
7ddeecf  Use inlinedVector instead of initializer_list (sumitsays, Oct 16, 2023)
d1842c9  Add override specifier (sumitsays, Oct 16, 2023)
2ef8343  Merge branch 'main' into user/sumita/matmulbn (sumitsays, Oct 17, 2023)
57ea97f  Merge branch 'main' into user/sumita/matmulbn (sumitsays, Oct 17, 2023)
f367a36  Addressed bot PR feedback (sumitsays, Oct 17, 2023)
e604ea4  Update the pattern as mentioned by Jeff (sumitsays, Oct 18, 2023)
96d0137  Apply LintRunner formatting changes (sumitsays, Oct 18, 2023)
79984f1  Addressed PR comment (sumitsays, Oct 20, 2023)
b306623  Modified pattern matching to incorporate any combination (sumitsays, Oct 20, 2023)
0d7f524  Updated comment (sumitsays, Oct 20, 2023)
23c23da  Apply lintrunner changes (sumitsays, Oct 20, 2023)
1a26722  Replaced recursion with iteration (sumitsays, Oct 20, 2023)
95e3efb  Updated test model (sumitsays, Oct 20, 2023)
009b86c  Addressed PR comment (sumitsays, Oct 21, 2023)
490dec8  Added comments (sumitsays, Oct 21, 2023)
65e067d  Updated comment (sumitsays, Oct 21, 2023)
018cdfb  Add test case without batchnormalization (sumitsays, Oct 23, 2023)
d79a607  Apply lintrunner (sumitsays, Oct 23, 2023)
2 changes: 2 additions & 0 deletions onnxruntime/core/optimizer/graph_transformer_utils.cc
@@ -50,6 +50,7 @@
#include "core/optimizer/matmul_integer_to_float.h"
#include "core/optimizer/matmul_scale_fusion.h"
#include "core/optimizer/matmul_transpose_fusion.h"
#include "core/optimizer/matmul_bn_fusion.h"
#include "core/optimizer/nchwc_transformer.h"
#include "core/optimizer/noop_elimination.h"
#include "core/optimizer/not_where_fusion.h"
@@ -127,6 +128,7 @@ InlinedVector<std::unique_ptr<RewriteRule>> GenerateRewriteRules(
rules.push_back(std::make_unique<ConvAddFusion>());
rules.push_back(std::make_unique<ConvMulFusion>());
rules.push_back(std::make_unique<ConvBNFusion>());
rules.push_back(std::make_unique<MatmulBNFusion>());
rules.push_back(std::make_unique<ClipQuantFusion>());
rules.push_back(std::make_unique<ReluQuantFusion>());
break;
28 changes: 20 additions & 8 deletions onnxruntime/core/optimizer/initializer.cc
@@ -291,7 +291,11 @@ Initializer& Initializer::sqrt() {
namespace {
template <typename T>
struct ScaleByAxis {
void operator()(Tensor& data, const Tensor& scalers, const size_t block_size, const size_t num_blocks) const {
void operator()(Tensor& data,
const Tensor& scalers,
const size_t block_size,
const size_t num_blocks,
const bool column_major) const {
ToNumeric<T> to_numeric;
const auto scaler_size = scalers.Shape().Size();
T* dst = data.MutableData<T>();
@@ -303,24 +307,32 @@
}
} else {
for (size_t block_offset = 0, i = 0; i < num_blocks; i++) {
const auto numeric_scaler = to_numeric(scalers_data[i]);
for (size_t j = 0; j < block_size; ++j, ++block_offset) {
dst[block_offset] = T(to_numeric(dst[block_offset]) * numeric_scaler);
if (column_major) {
for (size_t j = 0; j < block_size; ++j, ++block_offset) {
const auto numeric_scaler = to_numeric(scalers_data[j]);
dst[block_offset] = T(to_numeric(dst[block_offset]) * numeric_scaler);
}
} else {
const auto numeric_scaler = to_numeric(scalers_data[i]);
for (size_t j = 0; j < block_size; ++j, ++block_offset) {
dst[block_offset] = T(to_numeric(dst[block_offset]) * numeric_scaler);
}
}
}
}
}
};

} // namespace

void Initializer::scale_by_axis(const Initializer& scalers, int axis) {
void Initializer::scale_by_axis(const Initializer& scalers, int axis, bool column_major) {
ORT_ENFORCE(axis >= 0, "Axis must be non-negative");
const size_t block_size = narrow<size_t>(data_.Shape().SizeFromDimension(gsl::narrow_cast<size_t>(axis)));
const size_t num_blocks = size() / block_size;
ORT_ENFORCE(scalers.size() == 1 || scalers.size() == num_blocks, "Invalid other(scalers) size");
ORT_ENFORCE(scalers.size() == 1 ||
(column_major ? scalers.size() == block_size : scalers.size() == num_blocks),
"Invalid other(scalers) size");
utils::MLTypeCallDispatcher<MLFloat16, BFloat16, float, double, int32_t, int64_t> t_disp(data_.GetElementType());
t_disp.Invoke<ScaleByAxis>(data_, scalers.data_, block_size, num_blocks);
t_disp.Invoke<ScaleByAxis>(data_, scalers.data_, block_size, num_blocks, column_major);
}
#endif // ORT_EXTENDED_MINIMAL_BUILD
} // namespace onnxruntime
2 changes: 1 addition & 1 deletion onnxruntime/core/optimizer/initializer.h
@@ -86,7 +86,7 @@ class Initializer final {

Initializer& sqrt();

void scale_by_axis(const Initializer& other, int axis);
void scale_by_axis(const Initializer& other, int axis, bool column_major = false);
#endif // ORT_EXTENDED_MINIMAL_BUILD
private:
std::string name_;
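As context for reviewers: a minimal standalone sketch of the semantics of the new column_major flag (plain C++ on std::vector, hypothetical names, not the ORT Initializer/Tensor API). Treating the data as a num_blocks x block_size row-major matrix, column_major == false multiplies every element of block i by scalers[i] (the existing behavior), while column_major == true multiplies each element by the scaler of its column j. The fusion below relies on the latter to scale each output channel (column) of MatMul's B tensor by BatchNormalization's per-channel factor.

#include <cstddef>
#include <iostream>
#include <vector>

// In-place scaling of a (num_blocks x block_size) row-major matrix.
// column_major == false: scalers holds num_blocks entries, one per block (row).
// column_major == true:  scalers holds block_size entries, one per column.
void ScaleByAxisSketch(std::vector<float>& data, const std::vector<float>& scalers,
                       std::size_t block_size, std::size_t num_blocks, bool column_major) {
  for (std::size_t block_offset = 0, i = 0; i < num_blocks; ++i) {
    for (std::size_t j = 0; j < block_size; ++j, ++block_offset) {
      data[block_offset] *= column_major ? scalers[j] : scalers[i];
    }
  }
}

int main() {
  std::vector<float> b = {1, 2, 3, 4};          // 2x2 matrix: rows {1, 2} and {3, 4}
  ScaleByAxisSketch(b, {10, 100}, 2, 2, true);  // scale column 0 by 10, column 1 by 100
  for (float v : b) std::cout << v << ' ';      // prints: 10 200 30 400
}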
230 changes: 230 additions & 0 deletions onnxruntime/core/optimizer/matmul_bn_fusion.cc
@@ -0,0 +1,230 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#include "core/optimizer/matmul_bn_fusion.h"

#include <string>
#include <utility>
#include <vector>

#include "core/graph/graph_utils.h"
#include "core/optimizer/initializer.h"
#include "core/optimizer/utils.h"

namespace onnxruntime {

namespace {
const std::vector<std::pair<std::string, InlinedVector<ONNX_NAMESPACE::OperatorSetVersion>>> ignorable_nodes{
    {"Reshape", {1, 5, 13, 14, 19}},
    {"Transpose", {1, 13}}};
const std::pair<std::string, InlinedVector<ONNX_NAMESPACE::OperatorSetVersion>> dest =
    {"BatchNormalization", {1, 6, 7, 9, 14, 15}};
}  // namespace

bool NodeIsIgnorable(const Graph& graph, const Node& root_node, NodeIndex curr_node_index) {
const Node* curr_node = graph.GetNode(curr_node_index);

// curr_node has a different execution provider than its parent, or
// does not have exactly 1 output edge (this condition handles the case where an
// ignorable node is a graph output, i.e. a graph like "MatMul->Transpose")
if (curr_node->GetExecutionProviderType() != root_node.GetExecutionProviderType() ||
curr_node->GetOutputEdgesCount() != 1) {
return false;
}

// curr_node can be any of the ignorable_nodes.
for (size_t index = 0; index < ignorable_nodes.size(); index++) {
  if (graph_utils::IsSupportedOptypeVersionAndDomain(*curr_node, ignorable_nodes[index].first,
                                                     ignorable_nodes[index].second)) {
    return true;
  }
}

return false;
}

std::optional<NodeIndex> MatchPath(const Graph& graph, const Node& root_node, NodeIndex curr_node_index) {
while (NodeIsIgnorable(graph, root_node, curr_node_index)) {
curr_node_index = graph.GetNode(curr_node_index)->OutputNodesBegin()->Index();
}

// curr_node is not ignorable; if it is not dest (BatchNormalization) either, the pattern does not match
const Node* curr_node = graph.GetNode(curr_node_index);
if (curr_node->OpType() != dest.first) {
return std::nullopt;
}

if (curr_node->GetExecutionProviderType() == root_node.GetExecutionProviderType() &&
graph_utils::IsSupportedOptypeVersionAndDomain(*curr_node, dest.first, dest.second)) {
return curr_node_index;
}

// either curr_node has a different execution provider or
// an unsupported opset.
return std::nullopt;
}

/*
 * Given a MatMul node, this rule verifies the following pattern:
 *
 *        MatMul                        GEMM
 *          |                             |
 *       Reshape ^        --->        Reshape ^
 *          |                             |
 *      Transpose ^                  Transpose ^
 *          |
 *  BatchNormalization
 *
 * Note: ^ means there can be 0 or more occurrences of that node.
 * A few examples of fusable patterns:
 * - MatMul -> Reshape -> Transpose -> BatchNormalization ---> GEMM -> Reshape -> Transpose
 * - MatMul -> Reshape -> BatchNormalization ---> GEMM -> Reshape
 * - MatMul -> Transpose -> BatchNormalization ---> GEMM -> Transpose
 * - MatMul -> Reshape -> Reshape -> BatchNormalization ---> GEMM -> Reshape -> Reshape
 * - MatMul -> Reshape -> Transpose -> Reshape -> BatchNormalization ---> GEMM -> Reshape -> Transpose -> Reshape
 * - MatMul -> BatchNormalization ---> GEMM
 * Other conditions:
 * - The B tensor of MatMul must be constant.
 * - The scale, B, mean, and var tensors of BatchNormalization must be constant.
 * - Every node in the path, except the BatchNormalization, must have exactly 1 output edge.
 */
bool MatmulBNFusion::SatisfyCondition(const Graph& graph, const Node& node, const logging::Logger&) const {
if (!graph_utils::IsSupportedOptypeVersionAndDomain(node, "MatMul", {1, 9, 13}) ||
node.GetOutputEdgesCount() != 1) {
return false;
}

if (graph.NodeProducesGraphOutput(node)) {
return false;
}

// because <node> does not produce a graph output, it must have a child node
NodeIndex child_node_index = node.OutputNodesBegin()->Index();
std::optional<NodeIndex> batch_norm_index = MatchPath(graph, node, child_node_index);
if (!batch_norm_index.has_value()) {
return false;
}

const Node* batch_norm_node = graph.GetNode(*batch_norm_index);

// Check that the appropriate inputs to the Matmul and BN nodes are constants.
if (!graph_utils::NodeArgIsConstant(graph, *node.InputDefs()[1]) ||
!graph_utils::NodeArgIsConstant(graph, *batch_norm_node->InputDefs()[1]) ||
!graph_utils::NodeArgIsConstant(graph, *batch_norm_node->InputDefs()[2]) ||
!graph_utils::NodeArgIsConstant(graph, *batch_norm_node->InputDefs()[3]) ||
!graph_utils::NodeArgIsConstant(graph, *batch_norm_node->InputDefs()[4])) {
return false;
}

// First output from BN is required. Others are optional. If any optional outputs exist we can't fuse.
const auto& output_defs = batch_norm_node->OutputDefs();
if (output_defs.size() > 1) {
for (size_t i = 1, end = output_defs.size(); i < end; ++i) {
if (output_defs[i] != nullptr && output_defs[i]->Exists()) {
return false;
}
}
}

return true;
}

/*
 * BatchNormalization: [https://learn.microsoft.com/en-us/windows/win32/api/directml/ns-directml-dml_batch_normalization_operator_desc]
 * Output = Scale * ((Input - Mean) / sqrt(Variance + Epsilon)) + Bias
 * (ignore the FusedActivation in the above definition; that is specific to DML)
 * Expanding out the terms:
 * Output = (Scale / sqrt(Variance + Epsilon)) * Input + (Scale / sqrt(Variance + Epsilon)) * -Mean + Bias
 * Here,
 * [Scale / sqrt(Variance + Epsilon)] is constant; let's call it `alpha`
 * [(Scale / sqrt(Variance + Epsilon)) * -Mean + Bias] is also constant; let's call it `beta`
 * So Output = alpha * Input + beta, where Input is the B tensor of MatMul.
 */
Status MatmulBNFusion::Apply(Graph& graph, Node& matmul_node, RewriteRuleEffect& rule_effect,
                             const logging::Logger&) const {
NodeIndex child_node_index = matmul_node.OutputNodesBegin()->Index();
NodeIndex batch_norm_node_index = MatchPath(graph, matmul_node, child_node_index).value();

Node& batch_norm_node = *graph.GetNode(batch_norm_node_index);  // need a mutable node, hence fetched from the graph

// only perform the fusion if epsilon is present and is of float32 type
auto epsilon_attribute = batch_norm_node.GetAttributes().find("epsilon");
if (epsilon_attribute == batch_norm_node.GetAttributes().end() ||
epsilon_attribute->second.type() != ONNX_NAMESPACE::AttributeProto_AttributeType_FLOAT) {
return Status::OK();
}
const float epsilon = epsilon_attribute->second.f();

const onnx::TensorProto* scale_tensor =
    graph_utils::GetConstantInitializer(graph, batch_norm_node.InputDefs()[1]->Name());
ORT_ENFORCE(scale_tensor);
const onnx::TensorProto* bias_tensor =
    graph_utils::GetConstantInitializer(graph, batch_norm_node.InputDefs()[2]->Name());
ORT_ENFORCE(bias_tensor);
const onnx::TensorProto* mean_tensor =
    graph_utils::GetConstantInitializer(graph, batch_norm_node.InputDefs()[3]->Name());
ORT_ENFORCE(mean_tensor);
const onnx::TensorProto* var_tensor =
    graph_utils::GetConstantInitializer(graph, batch_norm_node.InputDefs()[4]->Name());
ORT_ENFORCE(var_tensor);
const onnx::TensorProto* matmul_b_tensor =
    graph_utils::GetConstantInitializer(graph, matmul_node.InputDefs()[1]->Name());
ORT_ENFORCE(matmul_b_tensor);

if (!optimizer_utils::IsFloatingPointDataType(*matmul_b_tensor) ||
!optimizer_utils::IsFloatingPointDataType(*scale_tensor) ||
!optimizer_utils::IsFloatingPointDataType(*bias_tensor) ||
!optimizer_utils::IsFloatingPointDataType(*mean_tensor) ||
!optimizer_utils::IsFloatingPointDataType(*var_tensor) ||
scale_tensor->dims_size() != 1 ||
bias_tensor->dims_size() != 1 ||
mean_tensor->dims_size() != 1 ||
var_tensor->dims_size() != 1 ||
scale_tensor->dims(0) != matmul_b_tensor->dims(1) ||
bias_tensor->dims(0) != matmul_b_tensor->dims(1) ||
mean_tensor->dims(0) != matmul_b_tensor->dims(1) ||
var_tensor->dims(0) != matmul_b_tensor->dims(1)) {
return Status::OK();
}

/*
 * temp = scale / sqrt(var + epsilon)
 * output = (temp * Input) - (temp * mean) + bias
 */
Initializer scale(*scale_tensor, graph.ModelPath());
Initializer bias(*bias_tensor, graph.ModelPath());
Initializer mean(*mean_tensor, graph.ModelPath());
Initializer var(*var_tensor, graph.ModelPath());
Initializer matmul_b(*matmul_b_tensor, graph.ModelPath());

var.add(epsilon);
var.sqrt();
scale.div(var); // this is the temp
matmul_b.scale_by_axis(scale, 1, true);

mean.mul(scale);
bias.sub(mean);

// create the B TensorProto for the new Gemm node from the <matmul_b> initializer.
ONNX_NAMESPACE::TensorProto new_gemm_b_tensor(*matmul_b_tensor);
matmul_b.ToProto(new_gemm_b_tensor);
const std::string new_gemm_b_name = graph.GenerateNodeArgName("MatMulBnFusion_GemmB_" + matmul_b_tensor->name());
new_gemm_b_tensor.set_name(new_gemm_b_name);
NodeArg& new_gemm_b_node_arg = graph_utils::AddInitializer(graph, new_gemm_b_tensor);

// create the bias TensorProto for the new Gemm node from the <bias> initializer.
ONNX_NAMESPACE::TensorProto new_gemm_bias_tensor(*bias_tensor);
bias.ToProto(new_gemm_bias_tensor);
const std::string new_gemm_bias_name = graph.GenerateNodeArgName("MatMulBnFusion_GemmBias");
new_gemm_bias_tensor.set_name(new_gemm_bias_name);
NodeArg& new_gemm_bias_node_arg = graph_utils::AddInitializer(graph, new_gemm_bias_tensor);

Node& gemm_node = graph.AddNode(
graph.GenerateNodeArgName("MatMulBnFusion_Gemm"),
"Gemm",
"Generated from Matmul BatchNormalization fusion",
{matmul_node.MutableInputDefs()[0], &new_gemm_b_node_arg, &new_gemm_bias_node_arg},
matmul_node.MutableOutputDefs(),
nullptr,
kOnnxDomain);

// Remove MatMul node.
Node* node = graph.GetNode(matmul_node.Index());
graph_utils::RemoveNodeOutputEdges(graph, *node);
graph.RemoveNode(matmul_node.Index());

// Delete optional empty output defs.
// Delete BatchNormalization node and update the input of the child of BatchNormalization.
batch_norm_node.MutableOutputDefs().resize(1);
NodeIndex batch_norm_parent_index = graph.GetNode(child_node_index)->OpType() == "BatchNormalization"
                                        ? gemm_node.Index()
                                        : batch_norm_node.InputNodesBegin()->Index();
graph_utils::FinalizeNodeFusion(graph, *graph.GetNode(batch_norm_parent_index), batch_norm_node);

rule_effect = RewriteRuleEffect::kRemovedCurrentNode;
return Status::OK();
}
} // namespace onnxruntime
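As a sanity check on the alpha/beta folding derived in the comment inside Apply, here is a self-contained numeric sketch (plain C++ with made-up values, not ORT code): it evaluates BatchNormalization applied to the output of a 1x2 · 2x2 MatMul, then evaluates the fused Gemm form in which alpha is folded column-wise into the B tensor and beta becomes the Gemm bias. The two paths agree up to floating-point rounding.

#include <cmath>
#include <cstdio>

int main() {
  const float eps = 1e-5f;
  const float x[2] = {1.5f, -2.0f};     // X: 1x2 input to MatMul
  const float b[2][2] = {{0.5f, 1.0f},  // B: 2x2 constant weight of MatMul
                         {2.0f, -1.0f}};
  const float scale[2] = {0.7f, 1.3f};  // per-channel BN parameters (made-up values)
  const float bn_bias[2] = {0.1f, -0.2f};
  const float mean[2] = {0.4f, 0.6f};
  const float var[2] = {0.25f, 0.09f};

  for (int c = 0; c < 2; ++c) {
    // Reference path: MatMul followed by BatchNormalization, channel c.
    float y = x[0] * b[0][c] + x[1] * b[1][c];
    float bn = scale[c] * (y - mean[c]) / std::sqrt(var[c] + eps) + bn_bias[c];

    // Fused path: Gemm with B scaled column-wise by alpha, and bias beta.
    float alpha = scale[c] / std::sqrt(var[c] + eps);
    float beta = bn_bias[c] - alpha * mean[c];
    float gemm = x[0] * (b[0][c] * alpha) + x[1] * (b[1][c] * alpha) + beta;

    std::printf("channel %d: BN = %f, fused Gemm = %f\n", c, bn, gemm);  // the two values match
  }
}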
27 changes: 27 additions & 0 deletions onnxruntime/core/optimizer/matmul_bn_fusion.h
@@ -0,0 +1,27 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

#pragma once

#include <string>
#include <vector>

#include "core/optimizer/rewrite_rule.h"

namespace onnxruntime {
/*
 * This fusion folds a BatchNormalization operator into its preceding
 * MatMul operator, if and only if MatmulBNFusion::SatisfyCondition()
 * returns true.
 */
class MatmulBNFusion : public RewriteRule {
public:
MatmulBNFusion() : RewriteRule("MatMul_BatchNormalization_Fusion") {}

std::vector<std::string> TargetOpTypes() const noexcept override {
  return {"MatMul"};
}

private:
bool SatisfyCondition(const Graph& graph, const Node& node, const logging::Logger& logger) const override;

Status Apply(Graph& graph, Node& matmul_node, RewriteRuleEffect& rule_effect,
             const logging::Logger& logger) const override;
};
} // namespace onnxruntime