
#0: fix matmul dram sharded validation
yugaoTT authored and arakhmati committed Jun 4, 2024
1 parent 58e7370 commit 798628c
Showing 1 changed file with 1 addition and 1 deletion.
tt_eager/tt_dnn/op_library/bmm/bmm_op.cpp (2 changes: 1 addition & 1 deletion)

@@ -1041,7 +1041,7 @@ void Matmul::validate(
                 // subbblock constraint
                 TT_FATAL(program_config.out_subblock_w == per_core_N || program_config.out_subblock_h == 1);
                 // tensor in1
-                TT_FATAL(input_tensor_b.memory_config().memory_layout == TensorMemoryLayout::INTERLEAVED);
+                TT_FATAL(input_tensor_b.memory_config().memory_layout == TensorMemoryLayout::WIDTH_SHARDED);
             } else if constexpr (std::is_same_v<ProgramConfigType, MatmulMultiCoreReuseMultiCastProgramConfig>) {
                 if (input_tensor_a.memory_config().is_sharded()) {
                     auto tensor_a_memory_layout = input_tensor_a.memory_config().memory_layout;
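For context: the one-token change inverts which in1 layouts this DRAM-sharded matmul branch accepts, so a width-sharded in1 (the layout this program config is built for) now passes Matmul::validate, while an interleaved in1 fails fast at the TT_FATAL instead of reaching the kernels. Below is a minimal sketch, assuming tt_eager's tensor types as of this commit, of a memory config that would satisfy the corrected check; the header path, helper name, core ranges, and shard shape are illustrative assumptions, not taken from this change.

// Sketch (not part of this commit): an in1 memory config that passes the
// corrected validation. MemoryConfig, TensorMemoryLayout, BufferType,
// ShardSpec, and ShardOrientation are the tt_eager types referenced in the
// diff; everything marked "assumed" below is an assumption.
#include <array>

#include "tensor/types.hpp"  // assumed tt_eager header for MemoryConfig/ShardSpec

using namespace tt::tt_metal;

// Hypothetical helper: shard in1 width-wise across DRAM banks.
MemoryConfig make_dram_sharded_in1_config(const CoreRangeSet& dram_cores,
                                          const std::array<uint32_t, 2>& shard_shape) {
    MemoryConfig config;
    config.memory_layout = TensorMemoryLayout::WIDTH_SHARDED;  // required by the new TT_FATAL
    config.buffer_type = BufferType::DRAM;                     // in1 shards live in DRAM
    config.shard_spec = ShardSpec(dram_cores, shard_shape, ShardOrientation::ROW_MAJOR);
    return config;
}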
