Skip to content

Commit

Permalink
Merge branch 'fastmachinelearning:main' into dev
Browse files Browse the repository at this point in the history
  • Loading branch information
iksnagreb authored Jul 7, 2023
2 parents f1fa779 + 0aec35a commit f831be0
Show file tree
Hide file tree
Showing 8 changed files with 32 additions and 11 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,7 @@ jobs:
strategy:
fail-fast: false
matrix:
python-version: ['3.8', '3.9', '3.10', '3.11']
python-version: ['3.8', '3.9', '3.10']

steps:
- uses: actions/checkout@v2
Expand Down
2 changes: 1 addition & 1 deletion docs/qonnx-custom-ops/quant_op.md
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ This operator is not part of the ONNX standard and is not currently versioned.
<dt><tt>narrow</tt> : int (default is 0)</dt>
<dd>Defines if the value range should be interpreted as narrow, when signed=1. E.g. at 8b regular=[-128, 127] vs narrow=[-127, 127].</dd>
<dt><tt>rounding_mode</tt> : string (default is "ROUND")</dt>
<dd>Defines how rounding should be applied during quantization. Currently available modes are: "ROUND", "CEIL" and "FLOOR". Here "ROUND" implies a round-to-even operation.</dd>
<dd>Defines how rounding should be applied during quantization. Currently available modes are: "ROUND", "CEIL" and "FLOOR". Here "ROUND" implies a round-to-even operation. Lowercase variants for the rounding mode string are also supported: "round", "ceil", "floor".</dd>
</dl>

#### Inputs
Expand Down
2 changes: 1 addition & 1 deletion docs/qonnx-custom-ops/trunc_op.md
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ This operator is not part of the ONNX standard and is not currently versioned.

<dl>
<dt><tt>rounding_mode</tt> : string (default is "FLOOR")</dt>
<dd>Defines how rounding should be applied during truncation. Currently available modes are: "ROUND", "CEIL" and "FLOOR". Here "ROUND" implies a round-to-even operation.</dd>
<dd>Defines how rounding should be applied during truncation. Currently available modes are: "ROUND", "CEIL" and "FLOOR". Here "ROUND" implies a round-to-even operation. Lowercase variants for the rounding mode string are also supported: "round", "ceil", "floor".</dd>
</dl>

#### Inputs
Expand Down
5 changes: 4 additions & 1 deletion src/qonnx/custom_op/general/maxpoolnhwc.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,7 +97,10 @@ def execute_node(self, context, graph):
inp_vi = helper.make_tensor_value_info(inp_name, TensorProto.FLOAT, inp.shape)
out_vi = helper.make_tensor_value_info(out_name, TensorProto.FLOAT, dummy_out.shape)
tmp_graph = helper.make_graph(nodes=[node], name="tmp_graph", inputs=[inp_vi], outputs=[out_vi])
tmp_model = qonnx_make_model(tmp_graph, producer_name="finn")
opset_version = self.onnx_opset_version
opset_imports = [helper.make_opsetid("", opset_version)]
onnx_kwargs = {"opset_imports": opset_imports}
tmp_model = qonnx_make_model(tmp_graph, producer_name="finn", **onnx_kwargs)
tmp_model = ModelWrapper(tmp_model)
new_ctx = {inp_name: inp}
from qonnx.core.onnx_exec import execute_onnx
Expand Down
9 changes: 5 additions & 4 deletions src/qonnx/custom_op/general/quant.py
Original file line number Diff line number Diff line change
Expand Up @@ -134,14 +134,15 @@ def quant(inp_tensor, scale, zeropt, bitwidth, signed, narrow, rounding_mode):
def resolve_rounding_mode(mode_string):
    """Resolve the rounding mode string of Quant and Trunc ops
    to the corresponding numpy functions.

    The lookup is case-insensitive, so both "ROUND" and "round" (etc.)
    are accepted. Note that np.round implements round-half-to-even
    (banker's rounding), matching the Quant/Trunc op specification.

    :param mode_string: Rounding mode name; one of "ROUND", "CEIL",
        "FLOOR" in any letter case.
    :return: The numpy ufunc implementing the requested rounding.
    :raises ValueError: If the mode string does not match a known mode.
    """
    # Normalize once so lowercase variants ("round", "ceil", "floor")
    # resolve to the same functions as their uppercase counterparts.
    normalized_mode_string = mode_string.upper()
    if normalized_mode_string == "ROUND":
        return np.round
    elif normalized_mode_string == "CEIL":
        return np.ceil
    elif normalized_mode_string == "FLOOR":
        return np.floor
    else:
        # Report the caller's original string so the error message
        # reflects exactly what was passed in.
        raise ValueError(f"Could not resolve rounding mode called: {mode_string}")


class Quant(CustomOp):
Expand Down
6 changes: 5 additions & 1 deletion src/qonnx/custom_op/general/quantavgpool2d.py
Original file line number Diff line number Diff line change
Expand Up @@ -131,7 +131,11 @@ def execute_node(self, context, graph):
inputs=[inp],
outputs=[outp],
)
model_avgpool = qonnx_make_model(graph_avgpool)

opset_version = self.onnx_opset_version
opset_imports = [helper.make_opsetid("", opset_version)]
onnx_kwargs = {"opset_imports": opset_imports}
model_avgpool = qonnx_make_model(graph_avgpool, **onnx_kwargs)
idict = {node.input[0]: inp_values}
sess = rt.InferenceSession(model_avgpool.SerializeToString())
result_temp = sess.run(None, idict)
Expand Down
12 changes: 11 additions & 1 deletion src/qonnx/transformation/merge_onnx_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,14 @@ def apply(self, model):
# to avoid mix-ups, start by giving all tensors random names
pre_model = pre_model.transform(GiveRandomTensorNames())
post_model = post_model.transform(GiveRandomTensorNames())
pre_model_opset = pre_model.model.opset_import[0].version
post_model_opset = post_model.model.opset_import[0].version
merged_model_opset = max(pre_model_opset, post_model_opset)
if pre_model_opset != post_model_opset:
warnings.warn(
"[MergeONNXModels] opsets for models to merge differ: %d vs %d, output model will use opset %d"
% (pre_model_opset, post_model_opset, merged_model_opset)
)

# check for dynamic outputs of pre model
dyn_outp = []
Expand Down Expand Up @@ -143,7 +151,9 @@ def apply(self, model):
value_info=vi_new,
)

new_model = qonnx_make_model(new_graph, producer_name="fuse_model")
new_model = qonnx_make_model(
new_graph, producer_name="fuse_model", opset_imports=[helper.make_opsetid("", merged_model_opset)]
)
new_model = ModelWrapper(new_model)

for i in init_new:
Expand Down
5 changes: 4 additions & 1 deletion tests/transformation/test_merge_onnx_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -73,7 +73,8 @@ def test_merge_onnx_models():
value_info=[a0, a1],
)

model2 = qonnx_make_model(graph, producer_name="model2")
exp_opset_id = 13
model2 = qonnx_make_model(graph, producer_name="model2", opset_imports=[helper.make_opsetid("", exp_opset_id)])
model2 = ModelWrapper(model2)
# initialize model2
a0_value = np.random.uniform(low=0, high=1, size=(1)).astype(np.float32)
Expand Down Expand Up @@ -122,3 +123,5 @@ def test_merge_onnx_models():

# check if finn datatype of graph.input[0] is still set to UINT8
assert model_transformed.get_tensor_datatype("global_in") == DataType["UINT8"]
# check that the merged model uses the greater of the two input opsets
assert model_transformed.model.opset_import[0].version == exp_opset_id

0 comments on commit f831be0

Please sign in to comment.