Merge branch 'fastmachinelearning:main' into dev
iksnagreb authored Oct 4, 2023
2 parents f831be0 + 47e4357 commit 1754562
Showing 34 changed files with 2,152 additions and 205 deletions.
2 changes: 2 additions & 0 deletions README.md
@@ -5,6 +5,8 @@
![Tests](https://github.com/fastmachinelearning/qonnx/actions/workflows/test.yml/badge.svg)
![Code style](https://img.shields.io/badge/code%20style-black-000000.svg)
[![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.7622236.svg)](https://doi.org/10.5281/zenodo.7622236)
+[![PyPI version](https://badge.fury.io/py/qonnx.svg)](https://badge.fury.io/py/qonnx)
+[![Downloads](https://static.pepy.tech/personalized-badge/qonnx?period=total&units=international_system&left_color=grey&right_color=orange&left_text=Downloads)](https://pepy.tech/project/qonnx)

<img align="left" src="https://xilinx.github.io/finn/img/TFC_1W2A.onnx.png" alt="QONNX example" style="margin-right: 20px" width="200"/>

17 changes: 11 additions & 6 deletions setup.cfg
@@ -45,14 +45,15 @@ package_dir =
# For more information, check out https://semver.org/.
install_requires =
importlib-metadata; python_version<"3.8"
-    clize==4.1.1
+    attrs>=22.2.0
+    clize>=5.0.1
    protobuf==3.20.3
    bitstring>=3.1.7
-    numpy==1.24.1
-    onnx==1.13.0
-    onnxruntime==1.15.0
-    sigtools==2.0.3
-    toposort==1.7.0
+    numpy>=1.24.1
+    onnx>=1.13.0
+    onnxruntime>=1.15.0
+    sigtools>=4.0.1
+    toposort>=1.7.0


[options.packages.find]
@@ -91,6 +92,10 @@ console_scripts =
qonnx-to-channels-last = qonnx.util.to_channels_last:main
qonnx-inference-cost = qonnx.util.inference_cost:main
qonnx-convert = qonnx.util.convert:main
+    qonnx-range-analysis = qonnx.util.range_analysis:main
+    qonnx-prune-channels = qonnx.util.prune_channels:main
+    qonnx-download-model = qonnx.util.test:qonnx_download_model
+    qonnx-tensor-stats = qonnx.analysis.tensor_stats:main
pytest_randomly.random_seeder =
qonnx = qonnx.util.random_reseed:reseed
# Add here console scripts like:
5 changes: 5 additions & 0 deletions src/qonnx/analysis/inference_cost.py
@@ -209,6 +209,8 @@ def inference_cost(model, discount_sparsity=True):
"MaxPool",
"AveragePool",
"Quant",
"QuantizeLinear",
"DequantizeLinear",
"Reshape",
"Concat",
"Transpose",
@@ -223,6 +225,9 @@
"Sigmoid",
"Identity",
"Flatten",
"Pad",
"Clip",
"Trunc",
]
unsupported_ops = set()
inference_cost_fxn_map = {
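The two hunks above add QuantizeLinear/DequantizeLinear, Pad, Clip and Trunc to the set of ops the analysis treats as zero-cost. A minimal sketch of invoking the analysis directly, based on the signature visible in the hunk header; the model filename and the preceding InferShapes call are assumptions, not part of this commit.

from qonnx.analysis.inference_cost import inference_cost
from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.infer_shapes import InferShapes

model = ModelWrapper("model.onnx")  # hypothetical model file
model = model.transform(InferShapes())  # assumed: shapes are needed for the per-node cost estimates
# discount_sparsity=True skips zero-valued weights when counting MACs and params
costs = inference_cost(model, discount_sparsity=True)
print(costs)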
151 changes: 151 additions & 0 deletions src/qonnx/analysis/tensor_stats.py
@@ -0,0 +1,151 @@
# Copyright (c) 2023 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Xilinx nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import clize
import numpy as np
import os
from matplotlib import pyplot as plt
from tqdm import tqdm

from qonnx.core.modelwrapper import ModelWrapper
from qonnx.util.range_analysis import (
REPORT_MODE_RANGE,
REPORT_MODE_STUCKCHANNEL,
REPORT_MODE_ZEROSTUCKCHANNEL,
report_mode_options,
report_modes,
)


def update_tensor_stats(tensor, axes, ret_dict=None):
    # use None instead of a mutable default argument so state cannot leak between calls
    if ret_dict is None:
        ret_dict = {}
    shp = tensor.shape
    if ret_dict == {}:
        ret_dict["shape"] = shp
    else:
        assert ret_dict["shape"] == shp
for axis in axes:
tensor_new = np.moveaxis(tensor, axis, 0).reshape(shp[axis], -1)
ret_axis = {
"min": np.min(tensor_new, axis=1),
"max": np.max(tensor_new, axis=1),
}
axis_name = "axis%d" % axis
        if axis_name in ret_dict:
            # merge with the previously accumulated stats before overwriting the entry
            ret_axis["min"] = np.minimum(ret_axis["min"], ret_dict[axis_name]["min"])
            ret_axis["max"] = np.maximum(ret_axis["max"], ret_dict[axis_name]["max"])
        ret_dict[axis_name] = ret_axis
return ret_dict


def tensor_stats(
modelwrapper_or_filename,
act_dump_dir: str,
output_stats_dir: str,
*,
axes="1",
plot=True,
report_mode: report_mode_options = REPORT_MODE_RANGE
):
assert report_mode in report_modes, "Unknown report_mode"
if not isinstance(modelwrapper_or_filename, ModelWrapper):
model = ModelWrapper(modelwrapper_or_filename)
else:
model = modelwrapper_or_filename
if not isinstance(axes, list):
axes = [int(x.strip()) for x in axes.split(",")]
if not os.path.isdir(output_stats_dir):
os.makedirs(output_stats_dir)

    all_tensor_dump_files = [
        f for f in os.listdir(act_dump_dir)
        if os.path.isfile(os.path.join(act_dump_dir, f)) and f.endswith(".npy")
    ]
tensorwise_stats = {}
stuck_chans = {}
for outp in tqdm(model.graph.output, "Tensors"):
tname = outp.name
t_files = [f for f in all_tensor_dump_files if f.startswith(tname)]
tensorwise_stats[tname] = {}
for f in tqdm(t_files, "Batches"):
t_file = np.load(os.path.join(act_dump_dir, f))
tensorwise_stats[tname] = update_tensor_stats(t_file, axes=axes, ret_dict=tensorwise_stats[tname])
tstats = tensorwise_stats[tname]
for axis in axes:
axis_name = "axis%d" % axis
axis_data = tstats[axis_name]
axis_min = axis_data["min"]
axis_max = axis_data["max"]
tensor_stuck_chans = np.nonzero(axis_min == axis_max)[0]
if report_mode in [REPORT_MODE_STUCKCHANNEL, REPORT_MODE_ZEROSTUCKCHANNEL]:
if len(tensor_stuck_chans) > 0:
list_stuck_chans = list(tensor_stuck_chans)
list_stuck_values = list(axis_min[tensor_stuck_chans])
stuck_chans[tname] = list(zip(list_stuck_chans, list_stuck_values))
if plot:
for axis in axes:
axis_name = "axis%d" % axis
data = tensorwise_stats[tname][axis_name]
axis_min = data["min"]
axis_max = data["max"]
axis_range = axis_max - axis_min
chans = [i for i in range(len(axis_min))]
plt.clf()
plt.figure(constrained_layout=True, figsize=(5, len(axis_min) / 3))
bars = plt.barh(chans, axis_range, left=axis_min)
bar_labels = [str((axis_min[i], axis_max[i])) for i in range(len(axis_min))]
plt.bar_label(bars, bar_labels)
plt.yticks([x for x in range(len(axis_range))])
plt.xlabel("Channel number")
plt.ylabel("Channel range")
plt.title("Observed range for %s_%s" % (tname, axis_name))

                    plt.savefig(os.path.join(output_stats_dir, "%s_%s.png" % (tname, axis_name)))
                    plt.close()  # avoid accumulating one open figure per tensor/axis

if report_mode == REPORT_MODE_RANGE:
ret = tensorwise_stats
elif report_mode == REPORT_MODE_STUCKCHANNEL:
ret = stuck_chans
elif report_mode == REPORT_MODE_ZEROSTUCKCHANNEL:
# only leave channels that are stuck at zero
# value info removed since implicitly 0
ret = stuck_chans
new_ret = {}
for tname, schans in ret.items():
schans_only_zero = set([x[0] for x in schans if x[1] == 0])
if len(schans_only_zero) > 0:
new_ret[tname] = schans_only_zero
ret = new_ret
return ret


def main():
clize.run(tensor_stats)


if __name__ == "__main__":
main()
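The new tensor_stats utility (also exposed as the qonnx-tensor-stats console script above) aggregates per-axis min/max statistics over a directory of .npy activation dumps whose filenames start with the corresponding graph output tensor name. A minimal usage sketch based on the signature above; the paths and the axis choice are assumptions, and the dumps are assumed to have been produced beforehand, e.g. for tensors exposed via ExposeIntermediateTensorsPatternList further below.

from qonnx.analysis.tensor_stats import tensor_stats

# hypothetical paths: act_dump/ holds <tensor_name>*.npy per-batch dumps
stats = tensor_stats(
    "model.onnx",        # or a ModelWrapper instance
    "act_dump",          # directory of per-batch activation dumps
    "tensor_stats_out",  # per-tensor range plots and stats are written here
    axes="1",            # per-channel statistics for NCHW tensors
    plot=True,
)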
59 changes: 59 additions & 0 deletions src/qonnx/transformation/change_batchsize.py
@@ -0,0 +1,59 @@
# Copyright (c) 2023 Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of Xilinx nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.base import Transformation


class ChangeBatchSize(Transformation):
"""Change the batch size dimension to the given value for the entire graph
by changing it for the global input/output and removing all intermediate
shapes (will need a call to shape inference to restore shapes).
Will attempt to handle any Reshape nodes with constant shape parameters by
changing the batch size dimension value in the parameter."""

def __init__(self, bsize):
super().__init__()
self.bsize = int(bsize)

def apply(self, model: ModelWrapper):
onnx_model = model.model
bsize = self.bsize
onnx_model.graph.input[0].type.tensor_type.shape.dim[0].dim_value = bsize
onnx_model.graph.output[0].type.tensor_type.shape.dim[0].dim_value = bsize
while len(onnx_model.graph.value_info) > 0:
onnx_model.graph.value_info.remove(onnx_model.graph.value_info[0])
reshape_nodes = model.get_nodes_by_op_type("Reshape")
for reshape_node in reshape_nodes:
rs_param_name = reshape_node.input[1]
rs_param = model.get_initializer(rs_param_name)
if rs_param is not None:
rs_param = rs_param.copy()
rs_param[0] = bsize
model.set_initializer(rs_param_name, rs_param)
return (model, False)
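A minimal sketch of applying the new ChangeBatchSize transformation; the filenames are placeholders, and the follow-up InferShapes call reflects the docstring's note that intermediate shapes must be re-inferred.

from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.change_batchsize import ChangeBatchSize
from qonnx.transformation.infer_shapes import InferShapes

model = ModelWrapper("model.onnx")  # hypothetical filename
# set the batch dimension of the global input/output (and of constant Reshape
# parameters) to 1, then restore the intermediate tensor shapes
model = model.transform(ChangeBatchSize(1))
model = model.transform(InferShapes())
model.save("model_bs1.onnx")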
66 changes: 66 additions & 0 deletions src/qonnx/transformation/expose_intermediate.py
@@ -0,0 +1,66 @@
# Copyright (c) 2023, Advanced Micro Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of QONNX nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.base import Transformation


class ExposeIntermediateTensorsLambda(Transformation):
def __init__(self, tensor_filter=lambda tname, model: True):
super().__init__()
self.tensor_filter = tensor_filter

def apply(self, model: ModelWrapper):
all_tensor_names = model.get_all_tensor_names()
for tname in all_tensor_names:
if self.tensor_filter(tname, model):
# check whether this tensor is already in the outputs
if tname in [x.name for x in model.graph.output]:
# already part of outputs, skip
continue
else:
# append ValueInfo to outputs
tensor_vi = model.get_tensor_valueinfo(tname)
model.graph.output.append(tensor_vi)
# remove existing ValueInfo to avoid duplicate
model.graph.value_info.remove(tensor_vi)

return (model, False)


class ExposeIntermediateTensorsPatternList(ExposeIntermediateTensorsLambda):
def pattern_filter(self, tname, model):
if self.dynamic_only:
return any([(pat in tname) and (model.get_initializer(tname) is None) for pat in self.pattern_list])
else:
return any([(pat in tname) for pat in self.pattern_list])

def __init__(self, pattern_list, dynamic_only=True):
self.pattern_list = pattern_list
self.dynamic_only = dynamic_only
super().__init__(tensor_filter=self.pattern_filter)
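A minimal sketch of exposing intermediate tensors as extra graph outputs, e.g. so that executing the model dumps quantizer outputs for the tensor_stats utility above; the name pattern and filename are assumptions.

from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.expose_intermediate import ExposeIntermediateTensorsPatternList

model = ModelWrapper("model.onnx")  # hypothetical filename
# promote every dynamic tensor whose name contains "Quant" to a graph output
model = model.transform(ExposeIntermediateTensorsPatternList(["Quant"], dynamic_only=True))
print([o.name for o in model.graph.output])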
12 changes: 8 additions & 4 deletions src/qonnx/transformation/fold_constants.py
@@ -92,11 +92,15 @@ def apply(self, model):
node_inp_dyn = list(filter(lambda x: x is None, node_inp_inits))
node_out = n.output[0]
is_all_constant_inputs = len(node_inp_dyn) == 0
-            ishape = model.get_tensor_shape(n.input[0])
-            is_const_shape = (n.op_type == "Shape") and (ishape is not None)
+            if len(n.input) > 0:
+                ishape = model.get_tensor_shape(n.input[0])
+                is_const_shape = (n.op_type == "Shape") and (ishape is not None)
+                is_no_input = False
+            else:
+                is_no_input = True
            exclude = n.op_type in self.exclude_op_types
-            if (is_all_constant_inputs or is_const_shape) and not exclude:
-                # this node has no dynamic inputs, only constant ones -- so we can
+            if (is_all_constant_inputs or is_const_shape or is_no_input) and not exclude:
+                # this node has no (dynamic) inputs, only constant ones -- so we can
# do constant folding. to ensure any missing ValueInfos from initializers
# are populated, we 'touch' the shape of all inputs first below.
for inp in n.input:
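With this change FoldConstants can also fold nodes that have no inputs at all (e.g. Constant producers), not only nodes whose inputs are all initializers. A minimal sketch of applying the transformation, assuming the usual ModelWrapper.transform flow; the filename and the preceding InferShapes call are assumptions.

from qonnx.core.modelwrapper import ModelWrapper
from qonnx.transformation.fold_constants import FoldConstants
from qonnx.transformation.infer_shapes import InferShapes

model = ModelWrapper("model.onnx")  # hypothetical filename
model = model.transform(InferShapes())
# constant subgraphs, now including input-less nodes, are replaced by initializers
model = model.transform(FoldConstants())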