[Transform] Make fpga part required argument for SpecializeLayers
auphelia committed May 29, 2024
1 parent b36c5b1 commit 4012378
Showing 41 changed files with 103 additions and 92 deletions.
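The change in a nutshell: `SpecializeLayers` previously defaulted to `fpgapart=""`, so callers could omit the target device even though the choice between HLS and RTL variants depends on it. The part string is now a required constructor argument. A minimal sketch of the call-site change ("model.onnx" is an illustrative checkpoint path; the part string is the Pynq-Z1 device used throughout this commit):

```python
from qonnx.core.modelwrapper import ModelWrapper
from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers

model = ModelWrapper("model.onnx")  # illustrative checkpoint path
# before: model = model.transform(SpecializeLayers())
# after: the FPGA part must be passed explicitly
model = model.transform(SpecializeLayers("xc7z020clg400-1"))
```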
2 changes: 1 addition & 1 deletion notebooks/advanced/3_folding.ipynb
@@ -567,7 +567,7 @@
"from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers\n",
"\n",
"model_updated = model_updated.transform(InsertDWC())\n",
"model_updated = model_updated.transform(SpecializeLayers())\n",
"model_updated = model_updated.transform(SpecializeLayers(\"xc7z020clg400-1\"))\n",
"model_updated = model_updated.transform(GiveUniqueNodeNames())"
]
},
13 changes: 8 additions & 5 deletions notebooks/end2end_example/bnn-pynq/cnv_end2end_example.ipynb
@@ -282,6 +282,12 @@
"metadata": {},
"outputs": [],
"source": [
"from finn.util.basic import pynq_part_map\n",
"# change this if you have a different PYNQ board, see list above\n",
"pynq_board = \"Pynq-Z1\"\n",
"fpga_part = pynq_part_map[pynq_board]\n",
"target_clk_ns = 10\n",
"\n",
"import finn.transformation.fpgadataflow.convert_to_hw_layers as to_hw\n",
"from finn.transformation.fpgadataflow.create_dataflow_partition import (\n",
" CreateDataflowPartition,\n",
@@ -314,7 +320,7 @@
"# save the dataflow partition with a different name for easier access\n",
"# and specialize the layers to HLS variants\n",
"dataflow_model = ModelWrapper(dataflow_model_filename)\n",
"dataflow_model = dataflow_model.transform(SpecializeLayers())\n",
"dataflow_model = dataflow_model.transform(SpecializeLayers(fpga_part))\n",
"dataflow_model.save(build_dir + \"/end2end_cnv_w1a1_dataflow_model.onnx\")"
]
},
@@ -432,12 +438,9 @@
"metadata": {},
"outputs": [],
"source": [
"test_pynq_board = \"Pynq-Z1\"\n",
"target_clk_ns = 10\n",
"\n",
"from finn.transformation.fpgadataflow.make_zynq_proj import ZynqBuild\n",
"model = ModelWrapper(build_dir+\"/end2end_cnv_w1a1_folded.onnx\")\n",
"model = model.transform(ZynqBuild(platform = test_pynq_board, period_ns = target_clk_ns))"
"model = model.transform(ZynqBuild(platform = pynq_board, period_ns = target_clk_ns))"
]
},
{
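In the notebooks, the part string is now looked up from the board name before any `SpecializeLayers` call. A short sketch of that lookup (values per `finn.util.basic.pynq_part_map`):

```python
from finn.util.basic import pynq_part_map

pynq_board = "Pynq-Z1"  # change this if you have a different PYNQ board
fpga_part = pynq_part_map[pynq_board]  # "xc7z020clg400-1" for the Pynq-Z1
target_clk_ns = 10  # target clock period in nanoseconds
```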
59 changes: 32 additions & 27 deletions notebooks/end2end_example/bnn-pynq/tfc_end2end_example.ipynb
@@ -547,6 +547,36 @@
"thresh_node_inst.set_nodeattr(\"preferred_impl_style\", \"hls\")"
]
},
+{
+"cell_type": "markdown",
+"metadata": {},
+"source": [
+"We'll define two helper variables that describe the Xilinx FPGA part name and the PYNQ board name that we are targeting."
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# print the names of the supported PYNQ boards\n",
+"from finn.util.basic import pynq_part_map\n",
+"print(pynq_part_map.keys())"
+]
+},
+{
+"cell_type": "code",
+"execution_count": null,
+"metadata": {},
+"outputs": [],
+"source": [
+"# change this if you have a different PYNQ board, see list above\n",
+"pynq_board = \"Pynq-Z1\"\n",
+"fpga_part = pynq_part_map[pynq_board]\n",
+"target_clk_ns = 10"
+]
+},
{
"cell_type": "markdown",
"metadata": {},
@@ -561,7 +591,7 @@
"outputs": [],
"source": [
"from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers\n",
"model = model.transform(SpecializeLayers())\n",
"model = model.transform(SpecializeLayers(fpga_part))\n",
"\n",
"model.save(build_dir+\"/tfc_w1_a1_specialize_layers.onnx\")\n",
"showInNetron(build_dir+\"/tfc_w1_a1_specialize_layers.onnx\")"
@@ -687,32 +717,7 @@
"source": [
"## 3. Hardware Build <a id='vivado'></a>\n",
"\n",
"We're finally ready to start generating hardware from our network. Depending on whether you want to target a Zynq or Alveo platform, FINN offers two transformations to build the accelerator, integrate into an appropriate shell and build a bitfile. These are `ZynqBuild` and `VitisBuild` for Zynq and Alveo, respectively. In this notebook we'll demonstrate the `ZynqBuild` as these boards are more common and it's much faster to complete bitfile generation for the smaller FPGAs found on them.\n",
"\n",
"As we will be dealing with FPGA synthesis tools in these tasks, we'll define two helper variables that describe the Xilinx FPGA part name and the PYNQ board name that we are targeting."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# print the names of the supported PYNQ boards\n",
"from finn.util.basic import pynq_part_map\n",
"print(pynq_part_map.keys())"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# change this if you have a different PYNQ board, see list above\n",
"pynq_board = \"Pynq-Z1\"\n",
"fpga_part = pynq_part_map[pynq_board]\n",
"target_clk_ns = 10"
"We're finally ready to start generating hardware from our network. Depending on whether you want to target a Zynq or Alveo platform, FINN offers two transformations to build the accelerator, integrate into an appropriate shell and build a bitfile. These are `ZynqBuild` and `VitisBuild` for Zynq and Alveo, respectively. In this notebook we'll demonstrate the `ZynqBuild` as these boards are more common and it's much faster to complete bitfile generation for the smaller FPGAs found on them."
]
},
{
@@ -396,7 +396,7 @@
"child_model = child_model.transform(InsertDWC()) \n",
"child_model = child_model.transform(InsertFIFO(create_shallow_fifos=True))\n",
"# DWC and FIFOs need to be specialized to either HLS or RTL variants\n",
"child_model = child_model.transform(SpecializeLayers())\n",
"child_model = child_model.transform(SpecializeLayers(test_fpga_part))\n",
"child_model.save(build_dir + \"/test.onnx\");\n",
"child_model = child_model.transform(GiveUniqueNodeNames())\n",
"child_model = child_model.transform(PrepareIP(test_fpga_part, target_clk_ns))\n",
6 changes: 3 additions & 3 deletions src/finn/builder/build_dataflow_steps.py
@@ -541,7 +541,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig):
if cfg.auto_fifo_depths:
if cfg.auto_fifo_strategy == "characterize":
model = model.transform(InsertDWC())
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers(cfg._resolve_fpga_part()))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(
PrepareIP(cfg._resolve_fpga_part(), cfg._resolve_hls_clk_period())
@@ -559,7 +559,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig):
create_shallow_fifos=True,
)
)
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers(cfg._resolve_fpga_part()))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(GiveReadableTensorNames())
elif cfg.auto_fifo_strategy == "largefifo_rtlsim":
@@ -591,7 +591,7 @@ def step_set_fifo_depths(model: ModelWrapper, cfg: DataflowBuildConfig):
# need to make sure all FIFOs are created so that their depth can be
# set by ApplyConfig, so create_shallow_fifos=True
model = model.transform(InsertFIFO(create_shallow_fifos=True))
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers(cfg._resolve_fpga_part()))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(GiveReadableTensorNames())
if cfg.folding_config_file is not None:
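In the builder flow the part is resolved from the build config via `cfg._resolve_fpga_part()`. A hedged sketch of a config that makes that resolution possible (field names follow `DataflowBuildConfig`; the output settings are illustrative, not taken from this commit):

```python
from finn.builder.build_dataflow_config import DataflowBuildConfig

cfg = DataflowBuildConfig(
    output_dir="build_output",    # illustrative output directory
    synth_clk_period_ns=10.0,     # target clock period
    fpga_part="xc7z020clg400-1",  # what _resolve_fpga_part() returns
    generate_outputs=[],          # illustrative; normally lists desired outputs
)
assert cfg._resolve_fpga_part() == "xc7z020clg400-1"
```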
4 changes: 2 additions & 2 deletions src/finn/transformation/fpgadataflow/make_zynq_proj.py
@@ -322,7 +322,7 @@ def apply(self, model):
prep_transforms = [
InsertIODMA(self.axi_port_width),
InsertDWC(),
-SpecializeLayers(),
+SpecializeLayers(self.fpga_part),
Floorplan(),
CreateDataflowPartition(partition_model_dir=self.partition_model_dir),
]
Expand All @@ -338,7 +338,7 @@ def apply(self, model):
dataflow_model_filename = sdp_node.get_nodeattr("model")
kernel_model = ModelWrapper(dataflow_model_filename)
kernel_model = kernel_model.transform(InsertFIFO())
-kernel_model = kernel_model.transform(SpecializeLayers())
+kernel_model = kernel_model.transform(SpecializeLayers(self.fpga_part))
kernel_model = kernel_model.transform(GiveUniqueNodeNames(prefix))
kernel_model.save(dataflow_model_filename)
kernel_model = kernel_model.transform(PrepareIP(self.fpga_part, self.period_ns))
2 changes: 1 addition & 1 deletion src/finn/transformation/fpgadataflow/specialize_layers.py
@@ -290,7 +290,7 @@ def _vvu_rtl_possible(n, fpgapart):
class SpecializeLayers(Transformation):
"""Specialize all layers to either HLS or RTL variants"""

-def __init__(self, fpgapart=""):
+def __init__(self, fpgapart):
super().__init__()
self.fpgapart = fpgapart

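With the empty-string default removed, constructing the transformation without a part now fails loudly instead of silently specializing without device information. A small sketch of the resulting behaviour (standard Python semantics for a missing positional argument):

```python
from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers

try:
    SpecializeLayers()  # no FPGA part given
except TypeError as err:
    print(err)  # missing 1 required positional argument: 'fpgapart'

# the keyword form used elsewhere in this commit still works,
# since the parameter is named fpgapart
tfm = SpecializeLayers(fpgapart="xc7z020clg400-1")
```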
4 changes: 2 additions & 2 deletions src/finn/transformation/fpgadataflow/vitis_build.py
@@ -383,7 +383,7 @@ def __init__(
def apply(self, model):
_check_vitis_envvars()
# prepare at global level, then break up into kernels
-prep_transforms = [InsertIODMA(512), InsertDWC(), SpecializeLayers()]
+prep_transforms = [InsertIODMA(512), InsertDWC(), SpecializeLayers(self.fpga_part)]
for trn in prep_transforms:
model = model.transform(trn)
model = model.transform(GiveUniqueNodeNames())
@@ -405,7 +405,7 @@ def apply(self, model):
dataflow_model_filename = sdp_node.get_nodeattr("model")
kernel_model = ModelWrapper(dataflow_model_filename)
kernel_model = kernel_model.transform(InsertFIFO())
-kernel_model = kernel_model.transform(SpecializeLayers())
+kernel_model = kernel_model.transform(SpecializeLayers(self.fpga_part))
kernel_model = kernel_model.transform(RemoveUnusedTensors())
kernel_model = kernel_model.transform(GiveUniqueNodeNames(prefix))
kernel_model.save(dataflow_model_filename)
5 changes: 3 additions & 2 deletions tests/end2end/test_end2end_bnn_pynq.py
@@ -596,6 +596,7 @@ def test_convert_to_hw_layers(self, topology, wbits, abits, board):
assert len(model.get_nodes_by_op_type(op_type)) == exp_count

def test_specialize_layers(self, topology, wbits, abits, board):
+build_data = get_build_env(board, target_clk_ns)
prev_chkpt_name = get_checkpoint_name(topology, wbits, abits, "convert_to_hw_layers")
model = load_test_checkpoint_or_skip(prev_chkpt_name)
# set preferred impl style to hls for all layers
@@ -605,7 +606,7 @@ def test_specialize_layers(self, topology, wbits, abits, board):
if is_fpgadataflow_node(node):
inst = getCustomOp(node)
inst.set_nodeattr("preferred_impl_style", "hls")
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers(build_data["part"]))
model = model.transform(GiveUniqueNodeNames())
model.save(get_checkpoint_name(topology, wbits, abits, "specialize_layers"))
exp_layer_counts = {
@@ -739,7 +740,7 @@ def test_ipstitch_rtlsim(self, topology, wbits, abits, board):
model = load_test_checkpoint_or_skip(prev_chkpt_name)
test_fpga_part = get_build_env(board, target_clk_ns)["part"]
model = model.transform(InsertDWC())
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers(test_fpga_part))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(AnnotateCycles())
perf = model.analysis(dataflow_performance)
2 changes: 1 addition & 1 deletion tests/end2end/test_end2end_mobilenet_v1.py
@@ -246,7 +246,7 @@ def test_end2end_mobilenet_convert_to_hw_layers():
@pytest.mark.end2end
def test_end2end_mobilenet_specialize_layers():
model = load_test_checkpoint_or_skip(build_dir + "/end2end_mobilenet_hw_layers.onnx")
-model = model.transform(SpecializeLayers(fpgapart=fpga_part))
+model = model.transform(SpecializeLayers(fpga_part))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(GiveReadableTensorNames())
model.save(build_dir + "/end2end_mobilenet_specialize_layers.onnx")
4 changes: 2 additions & 2 deletions tests/fpgadataflow/test_convert_to_hw_1d_conv_layer.py
@@ -143,10 +143,10 @@ def test_convert_to_hw_1d_conv_layer(conv_config, depthwise, use_rtl_swg, exec_m
inst.set_nodeattr("preferred_impl_style", "hls")
if depthwise is True:
new_model = new_model.transform(to_hw.InferVectorVectorActivation())
-new_model = new_model.transform(SpecializeLayers())
+new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1"))
else:
new_model = new_model.transform(to_hw.InferQuantizedMatrixVectorActivation())
-new_model = new_model.transform(SpecializeLayers())
+new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1"))
# set folding parameters for MVAU
if new_model.get_nodes_by_op_type("MVAU_hls"):
fc_node = new_model.get_nodes_by_op_type("MVAU_hls")[0]
2 changes: 1 addition & 1 deletion tests/fpgadataflow/test_convert_to_hw_channelwise_layer.py
@@ -121,7 +121,7 @@ def test_convert_to_hw_channelwise_layer(pdt, idt, onnx_op_name, scalar_param, e
assert (y_produced == y_expected).all()
assert model.graph.node[1].op_type == "ChannelwiseOp"

-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers("xc7z020clg400-1"))

if exec_mode == "cppsim":
model = model.transform(PrepareCppSim())
@@ -204,7 +204,7 @@ def test_convert_to_hw_conv_fc_transition(conv_config, depthwise, use_reshape):
if is_fpgadataflow_node(node):
inst = getCustomOp(node)
inst.set_nodeattr("preferred_impl_style", "hls")
-new_model = new_model.transform(SpecializeLayers())
+new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1"))
new_model = new_model.transform(GiveUniqueNodeNames())
new_model = new_model.transform(InferDataLayouts())

4 changes: 2 additions & 2 deletions tests/fpgadataflow/test_convert_to_hw_conv_layer.py
@@ -131,10 +131,10 @@ def test_convert_to_hw_conv_layer(conv_config, depthwise, use_rtl_swg, exec_mode
inst.set_nodeattr("preferred_impl_style", "hls")
if depthwise is True:
new_model = new_model.transform(to_hw.InferVectorVectorActivation())
-new_model = new_model.transform(SpecializeLayers())
+new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1"))
else:
new_model = new_model.transform(to_hw.InferQuantizedMatrixVectorActivation())
-new_model = new_model.transform(SpecializeLayers())
+new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1"))
# set folding parameters for MVAU
if new_model.get_nodes_by_op_type("MVAU_hls"):
fc_node = new_model.get_nodes_by_op_type("MVAU_hls")[0]
2 changes: 1 addition & 1 deletion tests/fpgadataflow/test_convert_to_hw_layers_cnv.py
@@ -111,7 +111,7 @@ def test_convert_to_hw_layers_cnv_w1a1(fused_activation):
if is_fpgadataflow_node(node):
inst = getCustomOp(node)
inst.set_nodeattr("preferred_impl_style", "hls")
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers("xc7z020clg400-1"))
for node in model.graph.node:
if node.op_type == "MVAU_hls":
inst = getCustomOp(node)
4 changes: 2 additions & 2 deletions tests/fpgadataflow/test_convert_to_hw_layers_fc.py
@@ -82,7 +82,7 @@ def test_convert_to_hw_layers_tfc_w1a1():
model = model.transform(absorb.AbsorbMulIntoMultiThreshold())
model = model.transform(RoundAndClipThresholds())
model = model.transform(to_hw.InferBinaryMatrixVectorActivation())
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers("xc7z020clg400-1"))
fc0 = model.graph.node[2]
assert fc0.op_type.startswith("MVAU")
assert model.get_tensor_shape(fc0.input[0]) == [1, 784]
@@ -154,7 +154,7 @@ def test_convert_to_hw_layers_tfc_w1a2():
model = model.transform(GiveReadableTensorNames())
model = model.transform(Streamline())
model = model.transform(to_hw.InferQuantizedMatrixVectorActivation())
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers("xc7z020clg400-1"))

fc0 = model.graph.node[2]
assert fc0.op_type.startswith("MVAU")
2 changes: 1 addition & 1 deletion tests/fpgadataflow/test_convert_to_hw_layers_synthetic.py
@@ -210,7 +210,7 @@ def test_convert_to_hw_layers_synthetic(ch, ifmdim, idt):

output_hw = oxe.execute_onnx(model, input_dict, True)

-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers("xc7z020clg400-1"))

# check topology status

2 changes: 1 addition & 1 deletion tests/fpgadataflow/test_convert_to_hw_pool_batch.py
@@ -186,7 +186,7 @@ def test_convert_to_hw_pool(idt, odt, pool_config, ifm_ch, pe, op_type, exec_mod
inst.set_nodeattr("preferred_impl_style", "hls")
y_produced = oxe.execute_onnx(new_model, input_dict)["outp"]
assert (y_produced == y_expected).all()
-new_model = new_model.transform(SpecializeLayers())
+new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1"))

# Folding
for n in new_model.graph.node:
4 changes: 2 additions & 2 deletions tests/fpgadataflow/test_depthwise_convolution.py
@@ -182,7 +182,7 @@ def test_depthwise_conv_hw_cppsim(act, pe, k, stride, padding):
new_model = model.transform(InferConvInpGen())
new_model = new_model.transform(InferVectorVectorActivation())

-new_model = new_model.transform(SpecializeLayers())
+new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1"))

# set SIMD in ConvInputGen node and PE in VVAU node
for n in new_model.graph.node:
@@ -226,7 +226,7 @@ def test_depthwise_conv_hw_rtlsim(act, pe, k, stride, padding):
new_model = model.transform(InferConvInpGen())
new_model = new_model.transform(InferVectorVectorActivation())

-new_model = new_model.transform(SpecializeLayers())
+new_model = new_model.transform(SpecializeLayers("xc7z020clg400-1"))

# set SIMD in ConvInputGen node and PE in VVAU node
for n in new_model.graph.node:
2 changes: 1 addition & 1 deletion tests/fpgadataflow/test_fpgadataflow_addstreams.py
@@ -116,7 +116,7 @@ def test_fpgadataflow_addstreams(idt, ch, fold, exec_mode):
y_produced = oxe.execute_onnx(model, input_dict)["outp"]
assert (y_produced == y_expected).all(), "Execution of hw layer failed"

-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers("xc7z020clg400-1"))

if exec_mode == "cppsim":
model = model.transform(PrepareCppSim())
2 changes: 1 addition & 1 deletion tests/fpgadataflow/test_fpgadataflow_channelwise_ops.py
@@ -139,7 +139,7 @@ def test_fpgadataflow_channelwise_ops(idt, act, pdt, nf, ich, func, vecs, exec_m

assert (y_produced == y_expected).all(), "HW layer execution failed"

-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers("xc7z020clg400-1"))

if exec_mode == "cppsim":
model = model.transform(PrepareCppSim())
2 changes: 1 addition & 1 deletion tests/fpgadataflow/test_fpgadataflow_checksum.py
@@ -176,7 +176,7 @@ def test_fpgadataflow_checksum():

# rtlsim
model = model.transform(InsertFIFO(True))
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers(test_fpga_part))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(PrepareIP(test_fpga_part, target_clk_ns))
model = model.transform(HLSSynthIP())
6 changes: 3 additions & 3 deletions tests/fpgadataflow/test_fpgadataflow_concat.py
@@ -98,7 +98,7 @@ def test_fpgadataflow_concat(exec_mode, idt):
assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow"
ret = execute_onnx(model, inp_dict)
assert (ret[oname] == exp_out).all()
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers("xc7z020clg400-1"))
assert model.graph.node[0].op_type == "StreamingConcat_hls"
assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow.hls"
if exec_mode == "cppsim":
@@ -141,11 +141,11 @@ def test_fpgadataflow_concat_stitchedip():
model = model.transform(InferConcatLayer())
assert model.graph.node[0].op_type == "StreamingConcat"
assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow"
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers(fpga_part))
assert model.graph.node[0].op_type == "StreamingConcat_hls"
assert model.graph.node[0].domain == "finn.custom_op.fpgadataflow.hls"
model = model.transform(InsertFIFO(create_shallow_fifos=True))
-model = model.transform(SpecializeLayers())
+model = model.transform(SpecializeLayers(fpga_part))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(PrepareIP(fpga_part, clk_ns))
model = model.transform(HLSSynthIP())
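Across the tests, the updated pattern is the same: every transformation that can introduce new fpgadataflow nodes (`InsertDWC`, `InsertFIFO`) is followed by `SpecializeLayers` with the same part string that is later handed to `PrepareIP`. A condensed sketch of that flow (checkpoint path and clock period are illustrative):

```python
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.general import GiveUniqueNodeNames
from finn.transformation.fpgadataflow.insert_fifo import InsertFIFO
from finn.transformation.fpgadataflow.prepare_ip import PrepareIP
from finn.transformation.fpgadataflow.hlssynth_ip import HLSSynthIP
from finn.transformation.fpgadataflow.specialize_layers import SpecializeLayers

test_fpga_part = "xc7z020clg400-1"
target_clk_ns = 10

model = ModelWrapper("checkpoint.onnx")  # illustrative checkpoint
model = model.transform(InsertFIFO(create_shallow_fifos=True))
# newly inserted FIFOs are generic and need an HLS/RTL variant too
model = model.transform(SpecializeLayers(test_fpga_part))
model = model.transform(GiveUniqueNodeNames())
model = model.transform(PrepareIP(test_fpga_part, target_clk_ns))
model = model.transform(HLSSynthIP())
```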