Skip to content

Commit

Permalink
Fix formatting
Browse files · Browse the repository at this point in the history
adamreeve committed on Jul 28, 2024
1 parent 77d403a · commit f4a6d48
Show file tree
Hide file tree
Showing 2 changed files with 14 additions and 8 deletions.
18 changes: 12 additions & 6 deletions onnxruntime/core/providers/cpu/math/element_wise_ops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -729,7 +729,8 @@ struct Min_8::ComputeImpl {
},
[](BroadcastHelper& per_iter_bh) {
per_iter_bh.OutputEigen<T>() =
per_iter_bh.EigenInput0<T>().array().template min<Eigen::PropagateNaN>(per_iter_bh.EigenInput1<T>().array());
per_iter_bh.EigenInput0<T>().array().template min<Eigen::PropagateNaN>(
per_iter_bh.EigenInput1<T>().array());
}};

int input_count = inst.Node().InputArgCount().front();
Expand All @@ -756,9 +757,11 @@ static Status MinMaxMLFloat16(const OpKernel& inst, OpKernelContext* context) {
EigenVectorArrayMap<Eigen::half> output_vec_map(output, num_elements);

if (is_min) {
output_vec_map = input_1_vec_map.template min<Eigen::PropagateNaN>(static_cast<Eigen::half>(per_iter_bh.ScalarInput0<MLFloat16>()));
output_vec_map = input_1_vec_map.template min<Eigen::PropagateNaN>(
static_cast<Eigen::half>(per_iter_bh.ScalarInput0<MLFloat16>()));
} else {
output_vec_map = input_1_vec_map.template max<Eigen::PropagateNaN>(static_cast<Eigen::half>(per_iter_bh.ScalarInput0<MLFloat16>()));
output_vec_map = input_1_vec_map.template max<Eigen::PropagateNaN>(
static_cast<Eigen::half>(per_iter_bh.ScalarInput0<MLFloat16>()));
}
},
[](BroadcastHelper& per_iter_bh) {
Expand All @@ -771,9 +774,11 @@ static Status MinMaxMLFloat16(const OpKernel& inst, OpKernelContext* context) {
EigenVectorArrayMap<Eigen::half> output_vec_map(output, num_elements);

if (is_min) {
output_vec_map = input_0_vec_map.template min<Eigen::PropagateNaN>(static_cast<Eigen::half>(per_iter_bh.ScalarInput1<MLFloat16>()));
output_vec_map = input_0_vec_map.template min<Eigen::PropagateNaN>(
static_cast<Eigen::half>(per_iter_bh.ScalarInput1<MLFloat16>()));
} else {
output_vec_map = input_0_vec_map.template max<Eigen::PropagateNaN>(static_cast<Eigen::half>(per_iter_bh.ScalarInput1<MLFloat16>()));
output_vec_map = input_0_vec_map.template max<Eigen::PropagateNaN>(
static_cast<Eigen::half>(per_iter_bh.ScalarInput1<MLFloat16>()));
}
},
[](BroadcastHelper& per_iter_bh) {
Expand Down Expand Up @@ -851,7 +856,8 @@ struct Max_8::ComputeImpl {
},
[](BroadcastHelper& per_iter_bh) {
per_iter_bh.OutputEigen<T>() =
per_iter_bh.EigenInput0<T>().array().template max<Eigen::PropagateNaN>(per_iter_bh.EigenInput1<T>().array());
per_iter_bh.EigenInput0<T>().array().template max<Eigen::PropagateNaN>(
per_iter_bh.EigenInput1<T>().array());
}};

int input_count = inst.Node().InputArgCount().front();
Expand Down
4 changes: 2 additions & 2 deletions onnxruntime/test/providers/cpu/math/element_wise_ops_test.cc
Original file line number Diff line number Diff line change
Expand Up @@ -1752,7 +1752,7 @@ TEST(MathOpTest, Min_12_MLFloat16_Nan) {
-0.5f, 0.0f, -2.0f,
0.5f, 0.0f, 2.0f}));
test.AddInput<MLFloat16>("data_1", {3, 1},
MakeMLFloat16({0.0f, -1.0f, 1.0f}));
MakeMLFloat16({0.0f, -1.0f, 1.0f}));
test.AddOutput<MLFloat16>("min", {3, 3},
MakeMLFloat16({std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(),
Expand Down Expand Up @@ -2093,7 +2093,7 @@ TEST(MathOpTest, Max_12_MLFloat16_Nan) {
std::numeric_limits<float>::quiet_NaN(),
std::numeric_limits<float>::quiet_NaN(),
-0.5f, 0.0f, -2.0f,
0.5f, 0.0f, 2.0f}));
0.5f, 0.0f, 2.0f}));
test.AddInput<MLFloat16>("data_1", {3, 1},
MakeMLFloat16({0.0f, -1.0f, 1.0f}));
test.AddOutput<MLFloat16>("max", {3, 3},
Expand Down

0 comments on commit f4a6d48

Please sign in to comment.