Introduce new optimizer MatMul + BatchNormalization #17915

Merged · 27 commits · Oct 25, 2023

Changes from 3 commits

Commits
de5bc5e
Add new fusion Matmul + BN
sumitsays Oct 12, 2023
4cb3d7e
Update comments
sumitsays Oct 12, 2023
c797f40
Remove redundant code
sumitsays Oct 12, 2023
2024d64
Remove extra method scale_to_axis
sumitsays Oct 12, 2023
6ea436f
Refactored the code as per ORT style
sumitsays Oct 12, 2023
f63bd11
Added testcase
sumitsays Oct 13, 2023
7cc2013
Added test file
sumitsays Oct 13, 2023
c92ed58
Added extra assertion
sumitsays Oct 13, 2023
8bf29cf
Merge branch 'main' into user/sumita/matmulbn
sumitsays Oct 16, 2023
7ddeecf
Use inlinedVector instead of initializer_list
sumitsays Oct 16, 2023
d1842c9
Add override specifier
sumitsays Oct 16, 2023
2ef8343
Merge branch 'main' into user/sumita/matmulbn
sumitsays Oct 17, 2023
57ea97f
Merge branch 'main' into user/sumita/matmulbn
sumitsays Oct 17, 2023
f367a36
Addressed bot PR feedback
sumitsays Oct 17, 2023
e604ea4
Update the pattern as mentioned by Jeff
sumitsays Oct 18, 2023
96d0137
Apply LintRunner formatting changes
sumitsays Oct 18, 2023
79984f1
Addressed PR comment
sumitsays Oct 20, 2023
b306623
Modified pattern matching to incorporate any combination
sumitsays Oct 20, 2023
0d7f524
updated comment
sumitsays Oct 20, 2023
23c23da
Apply lintrunner changes
sumitsays Oct 20, 2023
1a26722
Replaced recursion with iteration
sumitsays Oct 20, 2023
95e3efb
updated test model
sumitsays Oct 20, 2023
009b86c
Addressed PR comment
sumitsays Oct 21, 2023
490dec8
Added comments
sumitsays Oct 21, 2023
65e067d
Updated comment
sumitsays Oct 21, 2023
018cdfb
Add test case without batchnormalization
sumitsays Oct 23, 2023
d79a607
Apply lintrunner
sumitsays Oct 23, 2023
2 changes: 2 additions & 0 deletions onnxruntime/core/optimizer/graph_transformer_utils.cc
@@ -50,6 +50,7 @@
#include "core/optimizer/matmul_integer_to_float.h"
#include "core/optimizer/matmul_scale_fusion.h"
#include "core/optimizer/matmul_transpose_fusion.h"
#include "core/optimizer/matmul_bn_fusion.h"
#include "core/optimizer/nchwc_transformer.h"
#include "core/optimizer/noop_elimination.h"
#include "core/optimizer/not_where_fusion.h"
@@ -127,6 +128,7 @@ InlinedVector<std::unique_ptr<RewriteRule>> GenerateRewriteRules(
rules.push_back(std::make_unique<ConvAddFusion>());
rules.push_back(std::make_unique<ConvMulFusion>());
rules.push_back(std::make_unique<ConvBNFusion>());
rules.push_back(std::make_unique<MatmulBNFusion>());
rules.push_back(std::make_unique<ClipQuantFusion>());
rules.push_back(std::make_unique<ReluQuantFusion>());
break;
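For reference, rewrite rules registered in GenerateRewriteRules run as part of ONNX Runtime's normal graph-optimization pipeline, so no new API is needed to use this fusion. Below is a minimal sketch (not part of this PR) of exercising the rule through the public C++ API; the file names are placeholders, and it assumes ORT_ENABLE_BASIC is a high enough optimization level to run these rewrite rules.

#include <onnxruntime_cxx_api.h>

int main() {
  Ort::Env env(ORT_LOGGING_LEVEL_WARNING, "matmul_bn_fusion_demo");
  Ort::SessionOptions session_options;
  // Rewrite rules such as MatmulBNFusion run during graph optimization.
  session_options.SetGraphOptimizationLevel(ORT_ENABLE_BASIC);
  // Dump the optimized model so the fused Gemm node can be inspected.
  session_options.SetOptimizedModelFilePath(ORT_TSTR("model_optimized.onnx"));
  Ort::Session session(env, ORT_TSTR("model.onnx"), session_options);
  return 0;
}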
31 changes: 31 additions & 0 deletions onnxruntime/core/optimizer/initializer.cc
@@ -310,6 +310,28 @@ struct ScaleByAxis {
}
};

template <typename T>
struct ScaleToAxis {
void operator()(Tensor& data, const Tensor& scalers, const size_t block_size, const size_t num_blocks) const {
ToNumeric<T> to_numeric;
const auto scaler_size = scalers.Shape().Size();
T* dst = data.MutableData<T>();
const T* scalers_data = scalers.Data<T>();
if (scaler_size == 1) {
const auto numeric_scaler = to_numeric(scalers_data[0]);
for (size_t block_offset = 0, limit = block_size * num_blocks; block_offset < limit; ++block_offset) {
dst[block_offset] = T(to_numeric(dst[block_offset]) * numeric_scaler);
}
} else {
for (size_t block_offset = 0, i = 0; i < num_blocks; i++) {
for (size_t j = 0; j < block_size; ++j, ++block_offset) {
const auto numeric_scaler = to_numeric(scalers_data[j]);
dst[block_offset] = T(to_numeric(dst[block_offset]) * numeric_scaler);
}
}
}
}
};
} // namespace

@@ -320,5 +342,14 @@ void Initializer::scale_by_axis(const Initializer& scalers, int axis) {
utils::MLTypeCallDispatcher<MLFloat16, BFloat16, float, double, int32_t, int64_t> t_disp(data_.GetElementType());
t_disp.Invoke<ScaleByAxis>(data_, scalers.data_, block_size, num_blocks);
}

void Initializer::scale_to_axis(const Initializer& scalers, int axis) {
ORT_ENFORCE(axis >= 0, "Axis must be non-negative");
const size_t block_size = narrow<size_t>(data_.Shape().SizeFromDimension(gsl::narrow_cast<size_t>(axis)));
const size_t num_blocks = size() / block_size;
ORT_ENFORCE(scalers.size() == 1 || scalers.size() == block_size, "Invalid other(scalers) size");
utils::MLTypeCallDispatcher<MLFloat16, BFloat16, float, double, int32_t, int64_t> t_disp(data_.GetElementType());
t_disp.Invoke<ScaleToAxis>(data_, scalers.data_, block_size, num_blocks);
}
#endif // ORT_EXTENDED_MINIMAL_BUILD
} // namespace onnxruntime
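To make the difference between the existing scale_by_axis and the new scale_to_axis concrete: both carve the data into num_blocks contiguous blocks of block_size elements, but scale_by_axis applies one scaler per block (ScaleByAxis indexes the scalers by the block index), while the new scale_to_axis applies one scaler per position within each block (ScaleToAxis above indexes them by j). A standalone sketch with made-up values, using plain arrays rather than ORT tensors:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
  const size_t num_blocks = 2, block_size = 3;
  const std::vector<float> data = {1, 2, 3, 4, 5, 6};  // shape [2, 3], row-major

  const std::vector<float> per_block = {10, 100};           // scale_by_axis: size == num_blocks
  const std::vector<float> per_position = {10, 100, 1000};  // scale_to_axis: size == block_size

  std::vector<float> by_axis = data, to_axis = data;
  for (size_t i = 0, off = 0; i < num_blocks; ++i) {
    for (size_t j = 0; j < block_size; ++j, ++off) {
      by_axis[off] *= per_block[i];     // scaler chosen by block index i
      to_axis[off] *= per_position[j];  // scaler chosen by position j within the block
    }
  }

  for (float v : by_axis) std::cout << v << ' ';  // 10 20 30 400 500 600
  std::cout << '\n';
  for (float v : to_axis) std::cout << v << ' ';  // 10 200 3000 40 500 6000
  std::cout << '\n';
  return 0;
}

In the fusion below, matmulB.scale_to_axis(scale, 1) relies on exactly this: for a [K, N] B tensor, axis 1 gives block_size == N, so each of the N output columns is scaled by its own BatchNormalization factor.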
2 changes: 2 additions & 0 deletions onnxruntime/core/optimizer/initializer.h
@@ -87,6 +87,8 @@ class Initializer final {
Initializer& sqrt();

void scale_by_axis(const Initializer& other, int axis);

void scale_to_axis(const Initializer& other, int axis);
#endif // ORT_EXTENDED_MINIMAL_BUILD
private:
std::string name_;
294 changes: 294 additions & 0 deletions onnxruntime/core/optimizer/matmul_bn_fusion.cc
@@ -0,0 +1,294 @@
#include "core/optimizer/matmul_bn_fusion.h"
Fixed Show fixed Hide fixed
sumitsays marked this conversation as resolved.
Show resolved Hide resolved
#include "core/graph/graph_utils.h"
#include "core/optimizer/initializer.h"
#include "core/optimizer/utils.h"


namespace onnxruntime {
void AddNodesToRemove(
Node::NodeConstIterator currItr,
const NodeIndex& destNodeIndex,
std::vector<NodeIndex>& nodesToRemove) {
while (currItr->Index() != destNodeIndex) {
nodesToRemove.push_back(currItr->Index());
currItr = currItr->OutputNodesBegin();
}
}

NodeIndex GetOtherParentOfNode(
const Node& node,
NodeIndex firstParentIndex) {
NodeIndex otherParentIndex = std::numeric_limits<size_t>::max();
if (node.GetInputEdgesCount() != 2) {
return otherParentIndex;
}

auto parentNodeItr = node.InputNodesBegin();
if (parentNodeItr->Index() != firstParentIndex) {
otherParentIndex = parentNodeItr->Index();
}
++parentNodeItr;
if (parentNodeItr->Index() != firstParentIndex) {
otherParentIndex = parentNodeItr->Index();
}
return otherParentIndex;
}

bool MatmulBNFusion::MatchPath(
const Node& parentNode,
const gsl::span<std::pair<std::string, std::initializer_list<int>>>& path,
const Node& childNode) const {
if (path.size() == 0) {
return true;
}

if (!graph_utils::IsSupportedOptypeVersionAndDomain(childNode, path[0].first, path[0].second) ||
childNode.GetExecutionProviderType() != parentNode.GetExecutionProviderType()) {
return false;
}

// The last node in the path may have more than one output edge because all of its
// outputs will be preserved by the newly added Gemm node.
if (path.size() > 1 && childNode.GetOutputEdgesCount() != 1) {
return false;
}

return MatchPath(childNode, path.subspan(1), *childNode.OutputNodesBegin());
}

/*
* Given a MatMul node, verify that the graph matches the following pattern:
*
*                  MatMul
*                  /    \
*            Reshape    Shape
*               |         |
*           Transpose    Cast
*               |         |
*    BatchNormalization  Cast
*               |         |
*           Transpose     |
*                 \      /
*                  \    /
*                  Reshape
*
* As of writing this fusion, we are being conservative in the pattern because the customer
* model we are targeting has this exact pattern. The pattern will evolve in the future as we
* add a separate fusion to eliminate the Transposes around the BatchNormalization, update
* the model optimizer script to eliminate adjacent Cast operators, etc.
*
* We have to match the path (MatMul->Shape->Cast->Cast->Reshape) because merging the
* BatchNormalization into the MatMul changes MatMul's output, so we must make sure that
* MatMul's output is not consumed by any operator that depends on its exact value.
* Other conditions:
*   - The B tensor of MatMul must be constant.
*   - The scale, B, mean, and var tensors of BatchNormalization must be constant.
*   - Every node in the path except the first and the last must have exactly one output edge.
*/
bool MatmulBNFusion::SatisfyCondition(
const Graph& graph,
const Node& node,
const logging::Logger&) const {
if (!graph_utils::IsSupportedOptypeVersionAndDomain(node, "MatMul", { 1, 9, 13 }) ||
node.GetOutputEdgesCount() != 2) {
return false;
}

auto childNodeIterator = node.OutputNodesBegin();
const Node& firstChildNode = *childNodeIterator;
++childNodeIterator;
const Node& secondChildNode = *childNodeIterator;

std::vector<std::pair<std::string, std::initializer_list<int>>> firstPath =
{{"Reshape", {1, 5}},
{"Transpose", {1}},
{"BatchNormalization", {1, 6, 7}},
{"Transpose", {1}},
{"Reshape", {1, 5}}};

std::vector<std::pair<std::string, std::initializer_list<int>>> secondPath =
{{"Shape", {1}},
{"Cast", {1, 6}},
{"Cast", {1, 6}},
{"Reshape", {1, 5}}};

if (!(MatchPath(node, firstPath, firstChildNode) ^ MatchPath(node, secondPath, firstChildNode))) {
return false;
}

if (!(MatchPath(node, firstPath, secondChildNode) ^ MatchPath(node, secondPath, secondChildNode))) {
return false;
}

const auto& batchNormNode = firstChildNode.OpType() == "Reshape" ?
*firstChildNode.OutputNodesBegin()->OutputNodesBegin() :
*secondChildNode.OutputNodesBegin()->OutputNodesBegin();

// Check that the appropriate inputs to the Matmul and BN nodes are constants.
if (!graph_utils::NodeArgIsConstant(graph, *node.InputDefs()[1]) ||
!graph_utils::NodeArgIsConstant(graph, *batchNormNode.InputDefs()[1]) ||
!graph_utils::NodeArgIsConstant(graph, *batchNormNode.InputDefs()[2]) ||
!graph_utils::NodeArgIsConstant(graph, *batchNormNode.InputDefs()[3]) ||
!graph_utils::NodeArgIsConstant(graph, *batchNormNode.InputDefs()[4])) {
return false;
}

// First output from BN is required. Others are optional. If any optional outputs exist we can't fuse.
const auto& output_defs = batchNormNode.OutputDefs();
if (output_defs.size() > 1) {
for (size_t i = 1, end = output_defs.size(); i < end; ++i) {
if (output_defs[i] != nullptr && output_defs[i]->Exists())
return false;
}
}

if (graph.NodeProducesGraphOutput(node)) {
return false;
}

return true;
}

Status MatmulBNFusion::Apply(
Graph& graph,
Node& matmulNode,
RewriteRuleEffect& ruleEffect,
const logging::Logger&) const {
auto childNodeIterator = matmulNode.OutputNodesBegin();
const Node& firstChildNode = *childNodeIterator;
++childNodeIterator;
const Node& secondChildNode = *childNodeIterator;

const Node& firstReshape = firstChildNode.OpType() == "Reshape" ? firstChildNode : secondChildNode;

NodeIndex batchNormNodeIndex = firstReshape.OutputNodesBegin()->OutputNodesBegin()->Index();
Node& batchNormNode = *graph.GetNode(batchNormNodeIndex);

// Only perform the fusion if epsilon is present and is of float (float32) type.
auto epsilonAttr = batchNormNode.GetAttributes().find("epsilon");
if (epsilonAttr == batchNormNode.GetAttributes().end() ||
epsilonAttr->second.type() != ONNX_NAMESPACE::AttributeProto_AttributeType_FLOAT) {
return Status::OK();
}
const float epsilon = epsilonAttr->second.f();

const onnx::TensorProto* scaleTensor =
    graph_utils::GetConstantInitializer(graph, batchNormNode.InputDefs()[1]->Name());
ORT_ENFORCE(scaleTensor);
const onnx::TensorProto* biasTensor =
    graph_utils::GetConstantInitializer(graph, batchNormNode.InputDefs()[2]->Name());
ORT_ENFORCE(biasTensor);
const onnx::TensorProto* meanTensor =
    graph_utils::GetConstantInitializer(graph, batchNormNode.InputDefs()[3]->Name());
ORT_ENFORCE(meanTensor);
const onnx::TensorProto* varTensor =
    graph_utils::GetConstantInitializer(graph, batchNormNode.InputDefs()[4]->Name());
ORT_ENFORCE(varTensor);
const onnx::TensorProto* matmulBTensor =
    graph_utils::GetConstantInitializer(graph, matmulNode.InputDefs()[1]->Name());
ORT_ENFORCE(matmulBTensor);

if (!optimizer_utils::IsFloatingPointDataType(*matmulBTensor) ||
!optimizer_utils::IsFloatingPointDataType(*scaleTensor) ||
!optimizer_utils::IsFloatingPointDataType(*biasTensor) ||
!optimizer_utils::IsFloatingPointDataType(*meanTensor) ||
!optimizer_utils::IsFloatingPointDataType(*varTensor) ||
scaleTensor->dims_size() != 1 ||
biasTensor->dims_size() != 1 ||
meanTensor->dims_size() != 1 ||
varTensor->dims_size() != 1 ||
scaleTensor->dims(0) != matmulBTensor->dims(1) ||
biasTensor->dims(0) != matmulBTensor->dims(1) ||
meanTensor->dims(0) != matmulBTensor->dims(1) ||
varTensor->dims(0) != matmulBTensor->dims(1)) {
return Status::OK();
}

/*
* temp = scale / sqrt(var + epsilon)
* output = (temp * input) + (bias - (temp * mean))
*/
Initializer scale(*scaleTensor, graph.ModelPath());
Initializer bias(*biasTensor, graph.ModelPath());
Initializer mean(*meanTensor, graph.ModelPath());
Initializer var(*varTensor, graph.ModelPath());
Initializer matmulB(*matmulBTensor, graph.ModelPath());

var.add(epsilon);
var.sqrt();
scale.div(var);  // this is the "temp" from the formula above
matmulB.scale_to_axis(scale, 1);

mean.mul(scale);
bias.sub(mean);

// create B tensorProto for new Gemm node from <matmulB> initializer.
ONNX_NAMESPACE::TensorProto newGemmBTensor(*matmulBTensor);
matmulB.ToProto(newGemmBTensor);
const std::string newGemmBName = graph.GenerateNodeArgName("MatMulBnFusion_GemmB_" + matmulBTensor->name());
newGemmBTensor.set_name(newGemmBName);
NodeArg& newGemmBNodeArg = graph_utils::AddInitializer(graph, newGemmBTensor);

// create bias tensorProto for new Gemm node from <bias> initializer.
ONNX_NAMESPACE::TensorProto newGemmBiasTensor(*biasTensor);
bias.ToProto(newGemmBiasTensor);
const std::string newGemmBiasName = graph.GenerateNodeArgName("MatMulBnFusion_GemmBias");

newGemmBiasTensor.set_name(newGemmBiasName);
NodeArg& newGemmBiasNodeArg = graph_utils::AddInitializer(graph, newGemmBiasTensor);

NodeIndex lastReshapeNodeIndex = firstReshape.OutputNodesBegin()->OutputNodesBegin()->
OutputNodesBegin()->OutputNodesBegin()->Index();
graph.AddNode(
graph.GenerateNodeArgName("MatMulBnFusion_Gemm"),
"Gemm",
"Generated from Matmul BatchNormalization fusion",
{matmulNode.MutableInputDefs()[0], &newGemmBNodeArg, &newGemmBiasNodeArg},
graph.GetNode(lastReshapeNodeIndex)->MutableOutputDefs(),
nullptr,
kOnnxDomain);

std::vector<NodeIndex> nodesToRemove;

nodesToRemove.push_back(matmulNode.Index());

// Remove non-Matmul parent of Reshape if and only if
// that parent has only 1 output.
NodeIndex nonMatmulParentOfFirstReshape = GetOtherParentOfNode(firstReshape, matmulNode.Index());
if (nonMatmulParentOfFirstReshape != std::numeric_limits<size_t>::max() &&
graph.GetNode(nonMatmulParentOfFirstReshape)->GetOutputEdgesCount() == 1) {
nodesToRemove.push_back(nonMatmulParentOfFirstReshape);
}

auto currItr = matmulNode.OutputNodesBegin();
AddNodesToRemove(currItr, lastReshapeNodeIndex, nodesToRemove);
++currItr;
AddNodesToRemove(currItr, lastReshapeNodeIndex, nodesToRemove);
nodesToRemove.push_back(lastReshapeNodeIndex);

for (const auto& nodeIndex : nodesToRemove) {
Node* node = graph.GetNode(nodeIndex);
graph_utils::RemoveNodeOutputEdges(graph, *node);
graph.RemoveNode(nodeIndex);
}

ruleEffect = RewriteRuleEffect::kRemovedCurrentNode;
return Status::OK();
}
}  // namespace onnxruntime