add float16 softmax test
zhijxu-MS committed Mar 25, 2024
1 parent 81ef0ce commit 0d21659
Showing 1 changed file with 17 additions and 0 deletions.
17 changes: 17 additions & 0 deletions orttraining/orttraining/test/training_ops/cuda/softmax_test.cc
@@ -70,6 +70,23 @@ TEST(CudaKernelTest, Softmax_LargeTensor_LastAxis_Float16_NoPowerOfTwo) {
TestSoftmax<MLFloat16>(X_dims, Y_dims, 2, false, 1e-3, 1e-3);
}

TEST(CudaKernelTest, Softmax_LargeTensor_LastAxis_Float16_NoPowerOfTwo2) {
// In the fp16 case, when every input element is -65504 (the most negative finite
// fp16 value), the output must not be inf: a naive exp without max subtraction
// would underflow every term to 0 and then divide by a zero sum.
std::vector<int64_t> X_dims{8192, 1, 1050};
std::vector<int64_t> Y_dims{8192, 1, 1050};
TestSoftmax<MLFloat16>(X_dims, Y_dims, 2, false, 1e-3, 1e-3);
CompareOpTester test("Softmax");
test.AddAttribute<int64_t>("axis", 1);

std::vector<MLFloat16> X_data(detail::SizeFromDims(X_dims), (MLFloat16)-65504.0f);
test.AddInput<MLFloat16>("X", X_dims, X_data);

std::vector<MLFloat16> Y_data = FillZeros<MLFloat16>(Y_dims);
test.AddOutput<MLFloat16>("Y", Y_dims, Y_data);

// Run on the CUDA EP and check the result against the CPU EP within tolerance.
test.CompareWithCPU(kGpuExecutionProvider, 1e-4, 1e-4);
}
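
For context, here is a minimal standalone sketch (not part of the commit; plain `float` stands in for `MLFloat16`) of the max-subtraction trick this test exercises, showing why an all `-65504` input yields `1/N` per element rather than inf:

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

// Numerically stable softmax: shifting by the row max bounds every exponent,
// since exp(x - max) <= exp(0) = 1, so the sum can never overflow or vanish.
std::vector<float> StableSoftmax(const std::vector<float>& x) {
  float max_val = x[0];
  for (float v : x) max_val = std::max(max_val, v);
  std::vector<float> y(x.size());
  float sum = 0.0f;
  for (size_t i = 0; i < x.size(); ++i) {
    y[i] = std::exp(x[i] - max_val);  // exp(0) = 1 when all inputs are equal
    sum += y[i];
  }
  for (float& v : y) v /= sum;  // each element becomes 1/N, not inf
  return y;
}

int main() {
  // All elements at the fp16 minimum, matching the test's input of -65504.
  std::vector<float> x(1050, -65504.0f);
  std::vector<float> y = StableSoftmax(x);
  std::printf("y[0] = %g (expected 1/1050 ~= %g)\n", y[0], 1.0 / 1050);
  // A naive implementation would compute exp(-65504) = 0 for every element
  // and then divide by a zero sum, producing inf or NaN.
}
```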

TEST(CudaKernelTest, Softmax_LargeTensor_AllAxis) {
std::vector<int64_t> X_dims{8, 16, 512};
std::vector<int64_t> Y_dims{8, 16, 512};
