ToBFloat16 -> MakeBFloat16
Prathik Rao committed Jan 8, 2024
1 parent 1b7927e commit e49b859
Showing 2 changed files with 3 additions and 19 deletions.
9 changes: 0 additions & 9 deletions onnxruntime/test/common/tensor_op_test_utils.h
@@ -156,15 +156,6 @@ inline std::vector<MLFloat16> ToFloat16(const std::vector<float>& data) {
   return result;
 }
 
-inline std::vector<BFloat16> ToBFloat16(const std::vector<float>& data) {
-  std::vector<BFloat16> result;
-  result.reserve(data.size());
-  for (size_t i = 0; i < data.size(); i++) {
-    result.push_back(BFloat16(data[i]));
-  }
-  return result;
-}
-
 inline void CheckTensor(const Tensor& expected_tensor, const Tensor& output_tensor, double rtol, double atol) {
   ORT_ENFORCE(expected_tensor.Shape() == output_tensor.Shape(),
               "Expected output shape [" + expected_tensor.Shape().ToString() +
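For context, the deleted ToBFloat16 was a plain float32-to-bfloat16 conversion loop, and the call sites below now use MakeBFloat16, which this diff does not add, so it presumably already exists in the test utilities. A minimal sketch of what such a helper could look like; the exact signature of the real MakeBFloat16 is an assumption here, inferred from the brace-init call sites:

// Hedged sketch only: the real MakeBFloat16 lives elsewhere in the ONNX
// Runtime test utilities; taking an initializer_list (rather than a vector)
// is assumed from the MakeBFloat16({...}) call sites in the test below.
inline std::vector<BFloat16> MakeBFloat16(const std::initializer_list<float>& input) {
  std::vector<BFloat16> output;
  output.reserve(input.size());
  for (float f : input) {
    output.push_back(BFloat16(f));  // narrow each float32 to bfloat16
  }
  return output;
}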
13 changes: 3 additions & 10 deletions onnxruntime/test/contrib_ops/layer_norm_op_test.cc
@@ -76,20 +76,13 @@ TEST(LayerNormTest, LayerNorm) {
 }
 
 TEST(LayerNormTest, LayerNorm_BFloat16Input) {
-#ifdef USE_CUDA
-  int min_cuda_architecture = 530;
-  if (!HasCudaEnvironment(min_cuda_architecture)) {
-    LOGS_DEFAULT(WARNING) << "Hardware NOT support BFP16";
-    return;
-  }
-#endif
   OpTester test("LayerNormalization");
   test.AddAttribute<float>("epsilon", 1e-05f);
 
   std::vector<int64_t> dims{1, 2, 3};
-  test.AddInput<BFloat16>("x", dims, ToBFloat16({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
-  test.AddInput<BFloat16>("gamma", {3}, ToBFloat16({1.0f, 1.0f, 1.0f}));
-  test.AddOutput<BFloat16>("output", dims, ToBFloat16({-1.2247f, 0.0f, 1.2247f, -1.2247f, 0.0f, 1.2247f}));
+  test.AddInput<BFloat16>("x", dims, MakeBFloat16({1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}));
+  test.AddInput<BFloat16>("gamma", {3}, MakeBFloat16({1.0f, 1.0f, 1.0f}));
+  test.AddOutput<BFloat16>("output", dims, MakeBFloat16({-1.2247f, 0.0f, 1.2247f, -1.2247f, 0.0f, 1.2247f}));
   test.Run(OpTester::ExpectResult::kExpectSuccess, "", {kCudaExecutionProvider});
 }

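As a sanity check on the expected values in the test: LayerNormalization with all-ones gamma normalizes each row of x over the last axis, so for the row [1, 2, 3] the mean is 2 and the biased variance is 2/3, giving (x - 2) / sqrt(2/3 + 1e-5) ≈ [-1.2247, 0, 1.2247]. A small standalone sketch (plain C++, independent of OpTester) reproduces those numbers:

// Standalone check of the test's expected output; not ONNX Runtime code.
#include <cmath>
#include <cstdio>

int main() {
  const float x[2][3] = {{1.0f, 2.0f, 3.0f}, {4.0f, 5.0f, 6.0f}};
  const float epsilon = 1e-05f;
  for (int r = 0; r < 2; ++r) {
    float mean = (x[r][0] + x[r][1] + x[r][2]) / 3.0f;
    float var = 0.0f;
    for (int c = 0; c < 3; ++c) {
      var += (x[r][c] - mean) * (x[r][c] - mean);
    }
    var /= 3.0f;  // biased variance, per the LayerNormalization definition
    for (int c = 0; c < 3; ++c) {
      // gamma is all ones in the test, so scaling is omitted here
      std::printf("%.4f ", (x[r][c] - mean) / std::sqrt(var + epsilon));
    }
    std::printf("\n");  // prints -1.2247 0.0000 1.2247 for each row
  }
  return 0;
}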
