#9889: Remove old versions of sliding window code and pybind cpp functions instead
mywoodstock committed Oct 18, 2024
1 parent 4e1fef9 commit a828f89
Showing 16 changed files with 152 additions and 1,966 deletions.
models/experimental/functional_unet/tt/unet_shallow_ttnn.py (65 changes: 36 additions & 29 deletions)
```diff
@@ -8,8 +8,6 @@

 from typing import List

-from ttnn.operations.conv2d import determine_parallel_config, create_sharded_memory_config_from_parallel_config
-
 from models.utility_functions import nearest_32
 from ttnn.model_preprocessing import fold_batch_norm2d_into_conv2d, ParameterDict
```
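The import hunk above removes the Python-level conv2d helpers; every call site below reaches the same functionality through the pybound C++ module instead. A minimal sketch of the access pattern, using only the module path that appears in this diff:

```python
import ttnn

# Before this commit the helpers were plain Python imports:
#   from ttnn.operations.conv2d import determine_parallel_config, \
#       create_sharded_memory_config_from_parallel_config
# After it, call sites reference the pybound C++ functions directly:
determine_parallel_config = ttnn._ttnn.operations.conv2d.determine_parallel_config
create_sharded_memory_config_from_parallel_config = (
    ttnn._ttnn.operations.conv2d.create_sharded_memory_config_from_parallel_config
)
```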

```diff
@@ -261,23 +259,26 @@ def __init__(

         self.should_reshard = should_reshard
         if self.should_reshard:
-            parallel_config = determine_parallel_config(
-                is_1d_systolic=True,
+            parallel_config = ttnn._ttnn.operations.conv2d.determine_parallel_config(
+                shard_layout=ttnn.TensorMemoryLayout.HEIGHT_SHARDED,
                 batch_size=self.conv1.batch_size,
                 input_channels=self.conv1.in_channels,
                 output_height=self.conv2.input_height,
                 output_width=self.conv2.input_width,
                 output_channels=self.conv1.out_channels,
                 device=device,
+                block_shard_orientation=ttnn.ShardOrientation.ROW_MAJOR,
                 is_out_tiled=True,
             )
-            self.sharded_memory_config = create_sharded_memory_config_from_parallel_config(
-                tensor_shape=[
-                    1,
-                    1,
-                    self.conv1.input_width * self.conv1.input_height * self.conv1.batch_size,
-                    nearest_32(self.conv1.in_channels),
-                ],
+            self.sharded_memory_config = ttnn._ttnn.operations.conv2d.create_sharded_memory_config_from_parallel_config(
+                tensor_shape=ttnn.Shape(
+                    [
+                        1,
+                        1,
+                        self.conv1.input_width * self.conv1.input_height * self.conv1.batch_size,
+                        nearest_32(self.conv1.in_channels),
+                    ]
+                ),
                 parallel_config=parallel_config,
                 tile_size=32 if conv1.dtype == ttnn.bfloat8_b else 1,
             )
```
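Beyond the module path, the same signature changes recur in every hunk of this file: the `is_1d_systolic=True` flag becomes an explicit `shard_layout=ttnn.TensorMemoryLayout.HEIGHT_SHARDED`, a `block_shard_orientation` argument is added, and `tensor_shape` is now a `ttnn.Shape` rather than a bare list. A sketch of that kwarg mapping as a migration helper (the helper itself is hypothetical, not part of ttnn):

```python
import ttnn

def migrate_parallel_config_kwargs(old_kwargs: dict) -> dict:
    """Hypothetical adapter: rewrite kwargs for the removed Python helper
    into the shape used by the pybound determine_parallel_config."""
    new_kwargs = dict(old_kwargs)
    # In this commit, is_1d_systolic=True is replaced by height sharding.
    if new_kwargs.pop("is_1d_systolic", False):
        new_kwargs["shard_layout"] = ttnn.TensorMemoryLayout.HEIGHT_SHARDED
    # The pybound signature takes the shard orientation explicitly.
    new_kwargs.setdefault("block_shard_orientation", ttnn.ShardOrientation.ROW_MAJOR)
    return new_kwargs
```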
```diff
@@ -312,23 +313,26 @@ def __init__(

         self.should_reshard = should_reshard
         if self.should_reshard:
-            parallel_config = determine_parallel_config(
-                is_1d_systolic=True,
+            parallel_config = ttnn._ttnn.operations.conv2d.determine_parallel_config(
+                shard_layout=ttnn.TensorMemoryLayout.HEIGHT_SHARDED,
                 batch_size=self.conv1.batch_size,
                 input_channels=self.conv1.in_channels,
                 output_height=self.conv2.input_height,
                 output_width=self.conv2.input_width,
                 output_channels=self.conv1.out_channels,
                 device=device,
+                block_shard_orientation=ttnn.ShardOrientation.ROW_MAJOR,
                 is_out_tiled=True,
             )
-            self.sharded_memory_config = create_sharded_memory_config_from_parallel_config(
-                tensor_shape=[
-                    1,
-                    1,
-                    self.conv1.input_width * self.conv1.input_height * self.conv1.batch_size,
-                    self.conv1.in_channels,
-                ],
+            self.sharded_memory_config = ttnn._ttnn.operations.conv2d.create_sharded_memory_config_from_parallel_config(
+                tensor_shape=ttnn.Shape(
+                    [
+                        1,
+                        1,
+                        self.conv1.input_width * self.conv1.input_height * self.conv1.batch_size,
+                        self.conv1.in_channels,
+                    ]
+                ),
                 parallel_config=parallel_config,
                 tile_size=32 if conv1.dtype == ttnn.bfloat8_b else 1,
             )
```
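Note that this hunk passes `self.conv1.in_channels` directly, while the previous one pads it with `nearest_32`. The padding helper is imported from models.utility_functions; an assumed equivalent, shown here only to make the shard-width arithmetic concrete:

```python
# Assumed equivalent of models.utility_functions.nearest_32:
# round a channel count up to the next multiple of 32, the tile edge.
def nearest_32(x: int) -> int:
    return ((x + 31) // 32) * 32

# e.g. a 70-channel input is padded to 96 so shards stay tile-aligned
assert nearest_32(70) == 96
assert nearest_32(64) == 64
```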
```diff
@@ -437,23 +441,26 @@ def __init__(self, parameters: ParameterDict, device, mesh_mapper=None) -> None:
         self.bnc2 = UNetConv2D(
             parameters.bnc_2, parameters.bnb_2, device, cache=self.conv_cache, mesh_mapper=mesh_mapper
         )
-        bnc_parallel_config = determine_parallel_config(
-            is_1d_systolic=True,
+        bnc_parallel_config = ttnn._ttnn.operations.conv2d.determine_parallel_config(
+            shard_layout=ttnn.TensorMemoryLayout.HEIGHT_SHARDED,
             batch_size=self.bnc.batch_size,
             input_channels=self.bnc.in_channels,
             output_height=self.bnc2.input_height,
             output_width=self.bnc2.input_width,
             output_channels=self.bnc.out_channels,
             device=device,
+            block_shard_orientation=ttnn.ShardOrientation.ROW_MAJOR,
             is_out_tiled=True,
         )
-        self.bnc_sharded_memory_config = create_sharded_memory_config_from_parallel_config(
-            tensor_shape=[
-                1,
-                1,
-                self.bnc.input_width * self.bnc.input_height * self.bnc.batch_size,
-                self.bnc.in_channels,
-            ],
+        self.bnc_sharded_memory_config = ttnn._ttnn.operations.conv2d.create_sharded_memory_config_from_parallel_config(
+            tensor_shape=ttnn.Shape(
+                [
+                    1,
+                    1,
+                    self.bnc.input_width * self.bnc.input_height * self.bnc.batch_size,
+                    self.bnc.in_channels,
+                ]
+            ),
             parallel_config=bnc_parallel_config,
             tile_size=(32 if self.bnc.conv_config.dtype == ttnn.bfloat8_b else 1),
         )
```
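All three call sites compute `tile_size` the same way: 32 when the conv dtype is `ttnn.bfloat8_b`, else 1. The likely rationale (my reading, not stated in the diff) is that bfloat8_b is a block-float format stored in tiles, so the sharded memory config must be padded to tile boundaries:

```python
import ttnn

def tile_size_for(dtype) -> int:
    # Expression lifted from this diff: bfloat8_b shards are padded to
    # the 32-element tile edge; other dtypes here use a granularity of 1.
    return 32 if dtype == ttnn.bfloat8_b else 1
```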
