diff --git a/docs/OperatorKernels.md b/docs/OperatorKernels.md
index e2d500006b05f..de1eba558367c 100644
--- a/docs/OperatorKernels.md
+++ b/docs/OperatorKernels.md
@@ -156,8 +156,10 @@ Do not modify directly.*
|||[1, 10]|**B** = tensor(bool)<br/> **V** = tensor(bfloat16), tensor(bool), tensor(double), tensor(float), tensor(float16), tensor(int16), tensor(int32), tensor(int64), tensor(int8), tensor(string), tensor(uint16), tensor(uint32), tensor(uint64), tensor(uint8)|
|ImageScaler|*in* input:**T**<br> *out* output:**T**|1+|**T** = tensor(float)|
|InstanceNormalization|*in* input:**T**<br> *in* scale:**T**<br> *in* B:**T**<br> *out* output:**T**|6+|**T** = tensor(float)|
-|IsInf|*in* X:**T1**<br> *out* Y:**T2**|10+|**T1** = tensor(double), tensor(float)<br/> **T2** = tensor(bool)|
-|IsNaN|*in* X:**T1**<br> *out* Y:**T2**|13+|**T1** = tensor(double), tensor(float), tensor(float16)<br/> **T2** = tensor(bool)|
+|IsInf|*in* X:**T1**<br> *out* Y:**T2**|20+|**T1** = tensor(double), tensor(float), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz)<br/> **T2** = tensor(bool)|
+|||[10, 19]|**T1** = tensor(double), tensor(float)<br/> **T2** = tensor(bool)|
+|IsNaN|*in* X:**T1**<br> *out* Y:**T2**|20+|**T1** = tensor(double), tensor(float), tensor(float16), tensor(float8e4m3fn), tensor(float8e4m3fnuz), tensor(float8e5m2), tensor(float8e5m2fnuz)<br/> **T2** = tensor(bool)|
+|||[13, 19]|**T1** = tensor(double), tensor(float), tensor(float16)<br/> **T2** = tensor(bool)|
|||[9, 12]|**T1** = tensor(double), tensor(float), tensor(float16)<br/> **T2** = tensor(bool)|
|LRN|*in* X:**T**<br> *out* Y:**T**|13+|**T** = tensor(float)|
|||[1, 12]|**T** = tensor(float)|
diff --git a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
index 7b57058f0ca49..8bc8ddfc5772c 100644
--- a/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
+++ b/onnxruntime/core/providers/cpu/cpu_execution_provider.cc
@@ -1990,11 +1990,11 @@ Status RegisterOnnxOperatorKernels(KernelRegistry& kernel_registry) {
BuildKernelCreateInfo,
BuildKernelCreateInfo,
BuildKernelCreateInfo,
+ IsNaN)>,
BuildKernelCreateInfo,
+ IsNaN)>,
BuildKernelCreateInfo,
+ IsNaN)>,
BuildKernelCreateInfo,
BuildKernelCreateInfo;
ORT_SPECIFY_OP_KERNEL_ARG_DEFAULT_TYPE_LIST(
- kCpuExecutionProvider,
- kOnnxDomain,
- IsInf,
- 20,
- Input,
- 0,
- IsInfTypesOpset20);
+ kCpuExecutionProvider,
+ kOnnxDomain,
+ IsInf,
+ 20,
+ Input,
+ 0,
+ IsInfTypesOpset20);
} // namespace op_kernel_type_control
-
-
class IsInf final : public OpKernel {
public:
using EnabledDataTypes10 = ORT_OP_KERNEL_ARG_ENABLED_TYPE_LIST(kCpuExecutionProvider, kOnnxDomain,
- IsInf, 10, Input, 0);
+ IsInf, 10, Input, 0);
using EnabledDataTypes20 = ORT_OP_KERNEL_ARG_ENABLED_TYPE_LIST(kCpuExecutionProvider, kOnnxDomain,
IsInf, 20, Input, 0);
@@ -76,7 +75,6 @@ ONNX_CPU_OPERATOR_KERNEL(
.TypeConstraint("T2", DataTypeImpl::GetTensorType()),
IsInf);
-
IsInf::IsInf(const OpKernelInfo& info) : OpKernel(info) {
Status status = info.GetAttr("detect_positive", &detect_positive_);
ORT_ENFORCE(status.IsOK(), "Failed to obtain detect_positive");
@@ -116,6 +114,7 @@ struct ComputeDispatchTarget {
}
};
+#if !defined(DISABLE_FLOAT8_TYPES)
template <>
struct ComputeDispatchTarget {
void operator()(const Tensor&, Tensor& Y, bool, bool) const {
@@ -156,6 +155,7 @@ struct ComputeDispatchTarget {
EigenMap(Y).array() = false;
}
};
+#endif
} // namespace isinf_internal
Status IsInf::Compute(OpKernelContext* context) const {
diff --git a/onnxruntime/core/providers/cpu/tensor/isnan.cc b/onnxruntime/core/providers/cpu/tensor/isnan.cc
index bcdd11adad895..34495e382278a 100644
--- a/onnxruntime/core/providers/cpu/tensor/isnan.cc
+++ b/onnxruntime/core/providers/cpu/tensor/isnan.cc
@@ -49,10 +49,13 @@ ADD_TYPED_ISNAN_OP_13(MLFloat16);
ADD_TYPED_ISNAN_OP(float);
ADD_TYPED_ISNAN_OP(double);
ADD_TYPED_ISNAN_OP(MLFloat16);
+
+#if !defined(DISABLE_FLOAT8_TYPES)
ADD_TYPED_ISNAN_OP(Float8E4M3FN);
ADD_TYPED_ISNAN_OP(Float8E4M3FNUZ);
ADD_TYPED_ISNAN_OP(Float8E5M2);
ADD_TYPED_ISNAN_OP(Float8E5M2FNUZ);
+#endif
template
Status IsNaN::Compute(OpKernelContext* context) const {
@@ -88,17 +91,18 @@ Status IsNaN::Compute(OpKernelContext* context) const {
return Status::OK();
}
+#if !defined(DISABLE_FLOAT8_TYPES)
template <>
Status IsNaN::Compute(OpKernelContext* context) const {
const auto* X = context->Input(0);
auto& dims = X->Shape();
- auto& Y = *context->Output(0, dims);
+ auto& Y = *context->Output(0, dims);
auto input = ConstEigenVectorMap(static_cast(static_cast(X->Data())), onnxruntime::narrow(dims.Size()));
auto output = EigenMap(Y);
// S.1111.111
- std::transform(input.begin(), input.end(), output.begin(), [](uint8_t c) { return (c & 0x7f) == 0x7f;});
+ std::transform(input.begin(), input.end(), output.begin(), [](uint8_t c) { return (c & 0x7f) == 0x7f; });
return Status::OK();
}
@@ -126,9 +130,8 @@ Status IsNaN::Compute(OpKernelContext* context) const {
auto input = ConstEigenVectorMap(static_cast(static_cast(X->Data())), onnxruntime::narrow(dims.Size()));
auto output = EigenMap(Y);
- // S.11111.{01, 10, 11}
- std::transform(input.begin(), input.end(), output.begin(), [](uint8_t c) {
- return ((c & 0x7c) == 0x7c) && ((c & 0x03) != 0x00); });
+ // S.11111.{01, 10, 11}
+ std::transform(input.begin(), input.end(), output.begin(), [](uint8_t c) { return ((c & 0x7c) == 0x7c) && ((c & 0x03) != 0x00); });
return Status::OK();
}
@@ -145,4 +148,5 @@ Status IsNaN::Compute(OpKernelContext* context) const {
return Status::OK();
}
+#endif
} // namespace onnxruntime
diff --git a/onnxruntime/test/providers/cpu/tensor/isinf_test.cc b/onnxruntime/test/providers/cpu/tensor/isinf_test.cc
index e2c809846d020..2e583c5d2547b 100644
--- a/onnxruntime/test/providers/cpu/tensor/isinf_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/isinf_test.cc
@@ -17,7 +17,7 @@ constexpr double DOUBLE_INF = std::numeric_limits::infinity();
constexpr double DOUBLE_NINF = -std::numeric_limits::infinity();
constexpr double DOUBLE_NAN = std::numeric_limits::quiet_NaN();
-template
+template
void run_is_inf_test(int opset, int64_t detect_positive, int64_t detect_negative, const std::initializer_list& input, const std::initializer_list& output) {
OpTester test("IsInf", opset);
test.AddAttribute("detect_positive", detect_positive);
@@ -99,10 +99,10 @@ TEST(IsInfTest, test_isinf_negative_double20) {
run_is_inf_test(20, 0, 1, input, output);
}
+#if !defined(DISABLE_FLOAT8_TYPES)
TEST(IsInfTest, test_Float8E4M3FN) {
std::initializer_list input = {
- Float8E4M3FN(-1.0f), Float8E4M3FN(FLOAT_NAN, false), Float8E4M3FN(1.0f), Float8E4M3FN(FLOAT_NINF, false), Float8E4M3FN(FLOAT_NINF, false), Float8E4M3FN(FLOAT_INF, false)
- };
+ Float8E4M3FN(-1.0f), Float8E4M3FN(FLOAT_NAN, false), Float8E4M3FN(1.0f), Float8E4M3FN(FLOAT_NINF, false), Float8E4M3FN(FLOAT_NINF, false), Float8E4M3FN(FLOAT_INF, false)};
std::initializer_list output = {false, false, false, false, false, false};
run_is_inf_test(20, 1, 1, input, output);
}
@@ -148,5 +148,6 @@ TEST(IsInfTest, test_Float8E5M2FNUZ) {
std::initializer_list output = {false, false, false, false, false, false};
run_is_inf_test(20, 1, 1, input, output);
}
+#endif
} // namespace test
} // namespace onnxruntime
diff --git a/onnxruntime/test/providers/cpu/tensor/isnan_test.cc b/onnxruntime/test/providers/cpu/tensor/isnan_test.cc
index 03019aa08a3e5..0f1e5c07cdd9b 100644
--- a/onnxruntime/test/providers/cpu/tensor/isnan_test.cc
+++ b/onnxruntime/test/providers/cpu/tensor/isnan_test.cc
@@ -59,6 +59,7 @@ TEST(IsNaNOpTest, IsNaNDouble20) {
run_is_nan_test(20, dims, input, output);
}
+#if !defined(DISABLE_FLOAT8_TYPES)
TEST(IsNaNOpTest, IsNaNFloat8E4M3FN) {
std::vector dims{2, 2};
std::initializer_list input = {Float8E4M3FN(1.0f), Float8E4M3FN(-NAN), Float8E4M3FN(2.0f), Float8E4M3FN(NAN)};
@@ -86,5 +87,6 @@ TEST(IsNaNOpTest, IsNaN_Float8E5M2FNUZ) {
std::initializer_list output = {false, true, false, true};
run_is_nan_test(20, dims, input, output);
}
+#endif
} // namespace test
} // namespace onnxruntime