Update bfloat16 conversion property (#1945)
This mirrors the corresponding change in pytorch/pytorch@08860b2.

Blocked on intel/intel-extension-for-pytorch#694.

Signed-off-by: Anatoly Myachev <[email protected]>
anmyachev authored Aug 20, 2024
1 parent e5ca533 commit c451d56
Showing 1 changed file with 2 additions and 2 deletions.
third_party/intel/backend/compiler.py (2 additions, 2 deletions)
@@ -137,7 +137,7 @@ def parse_target(self, tgt_prop) -> dict:
         dev_prop['has_subgroup_matrix_multiply_accumulate_tensor_float32'] = tgt_prop.get(
             'has_subgroup_matrix_multiply_accumulate_tensor_float32', False)
         dev_prop['has_subgroup_2d_block_io'] = tgt_prop.get('has_subgroup_2d_block_io', False)
-        dev_prop['has_bf16_conversion'] = tgt_prop.get('has_bf16_conversion', True)
+        dev_prop['has_bfloat16_conversions'] = tgt_prop.get('has_bfloat16_conversions', True)
         return dev_prop
 
     def parse_options(self, opts) -> Any:
@@ -186,7 +186,7 @@ def make_ttgir(mod, metadata, opt, properties):
         intel.passes.ttgpuir.add_triton_annotate_module(pm, min(properties["sub_group_sizes"]),
                                                         properties["has_subgroup_2d_block_io"],
                                                         properties["has_subgroup_matrix_multiply_accumulate"],
-                                                        properties["has_bf16_conversion"], opt.threads_per_warp)
+                                                        properties["has_bfloat16_conversions"], opt.threads_per_warp)
         pm.run(mod)
 
         # Overwrite the threads_per_warp option with the module annotation.
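For context, here is a minimal sketch of how downstream code could read the renamed property while tolerating the old key during a transition. The helper name and the fallback behavior are illustrative assumptions, not part of this commit; only the two key names and the default of True come from the diff above.

```python
# Hypothetical helper, not part of this commit: resolve the bfloat16
# conversion capability from a target-properties dict, accepting both
# the new key ('has_bfloat16_conversions') and the old one
# ('has_bf16_conversion'), defaulting to True as compiler.py does.
def has_bf16_conversions(tgt_prop: dict) -> bool:
    if 'has_bfloat16_conversions' in tgt_prop:
        return tgt_prop['has_bfloat16_conversions']
    return tgt_prop.get('has_bf16_conversion', True)


# Usage example with a properties dict that still uses the old key.
print(has_bf16_conversions({'has_bf16_conversion': False}))  # False
print(has_bf16_conversions({}))                              # True (default)
```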
