From bbb231fa81f7a42fe295d1c4777c884a4be721f0 Mon Sep 17 00:00:00 2001
From: dmitrygo
Date: Wed, 21 Feb 2024 17:09:16 +0400
Subject: [PATCH] [CPU] Fixed BF16 Matmul inference precision

---
 .../src/nodes/executors/fullyconnected_implementations.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp
index cae4a605f65964..2d031040c1be50 100644
--- a/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp
+++ b/src/plugins/intel_cpu/src/nodes/executors/fullyconnected_implementations.cpp
@@ -40,7 +40,7 @@ static const LayoutConfig dnnlFCLayoutConfig{LayoutType::ncsp, LayoutType::ncsp,
 // clang-format off
 static const TypeMapping dnnlFCTypeMapping {
     // {src, wei, bia, dst}                         pt
-    {{_bf16, _bf16, _any, _bf16 | _f32},            pt(bypass(), bypass(), use<3>(), use<3>())},
+    {{_bf16, _bf16 | _f32, _any, _bf16 | _f32},     pt(bypass(), bypass(), use<3>(), use<3>())},
     {{_f16, _f16, _any, _f16 | _f32},               pt(bypass(), bypass(), use<3>(), use<3>())},
     // integer precision outputs are not supported for float precision inputs
     {{_f32 | _bf16 | _f16, _any, _any, _i8 | _u8},  pt(bypass(), bypass(), use<0>(), use<0>())},
@@ -63,7 +63,7 @@ static const MappingNotation dnnlConvolutionMappingNotation {
 
 static const TypeMapping dnnlConvolutionTypeMapping {
     // {src, wei, bia, dst}                         pt
-    {{_bf16, _bf16, _any, _bf16 | _f32},            pt(bypass(), bypass(), use<3>(), use<3>())},
+    {{_bf16, _bf16 | _f32, _any, _bf16 | _f32},     pt(bypass(), bypass(), use<3>(), use<3>())},
     {{_f16, _f16, _any, _f16 | _f32},               pt(bypass(), bypass(), use<3>(), use<3>())},
     // integer precision outputs are not supported for float precision inputs
     {{_f32 | _bf16 | _f16, _any, _any, _i8 | _u8},  pt(bypass(), bypass(), use<0>(), use<0>())},
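
Note (reviewer sketch, not part of the patch): in the mapping tables above, an entry such as _bf16 | _f32 is a mask of precisions accepted for that tensor, so the change lets a bf16-activation FullyConnected/Convolution match a row while its weights stay in f32 instead of requiring bf16 weights. The standalone C++ sketch below illustrates only that mask-matching idea; TypeMaskRow, matches() and the enum values are hypothetical names for illustration and are not the OpenVINO TypeMapping/pt machinery.

// Minimal sketch of precision-mask matching, assuming masks are plain bit sets.
#include <cstdint>
#include <iostream>

enum Precision : uint32_t {
    f32  = 1u << 0,
    bf16 = 1u << 1,
    f16  = 1u << 2,
    i8   = 1u << 3,
    u8   = 1u << 4,
    any  = 0xFFFFFFFFu,  // accepts every precision
};

struct TypeMaskRow {
    uint32_t src, wei, bia, dst;  // accepted precisions per tensor
};

// A row matches when every requested precision is covered by its mask.
static bool matches(const TypeMaskRow& row,
                    Precision src, Precision wei, Precision bia, Precision dst) {
    return (row.src & src) && (row.wei & wei) && (row.bia & bia) && (row.dst & dst);
}

int main() {
    // Before the patch the bf16 row accepted only bf16 weights; after it,
    // f32 weights are also accepted (the wei mask becomes bf16 | f32).
    const TypeMaskRow rowBefore{bf16, bf16,       any, bf16 | f32};
    const TypeMaskRow rowAfter {bf16, bf16 | f32, any, bf16 | f32};

    std::cout << matches(rowBefore, bf16, f32, f32, bf16) << "\n";  // 0: f32 weights rejected
    std::cout << matches(rowAfter,  bf16, f32, f32, bf16) << "\n";  // 1: f32 weights accepted
    return 0;
}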