Do not treat initializer inputs to Add/Mul as weights
adrianlizarraga committed Nov 20, 2023
1 parent 476f10a · commit c452352
Showing 1 changed file with 1 addition and 2 deletions.
@@ -14,7 +14,6 @@
 Q16_TYPES = {QuantType.QInt16, QuantType.QUInt16}
 Q8_TYPES = {QuantType.QInt8, QuantType.QUInt8}
 OP_TYPES_TO_EXCLUDE = {"Cast"}
-OP_TYPES_INITIALIZER_AS_WEIGHTS = {"MatMul", "Add", "Sub", "Mul", "Div"}
 
 
 def get_qnn_qdq_config(
@@ -35,7 +34,7 @@ def get_qnn_qdq_config(
     for node in model.graph.node:
         op_types.add(node.op_type)
 
-        if node.op_type in OP_TYPES_INITIALIZER_AS_WEIGHTS and activation_type in Q16_TYPES and weight_type in Q8_TYPES:
+        if node.op_type == "MatMul" and activation_type in Q16_TYPES and weight_type in Q8_TYPES:
             weight_symmetric = weight_type == QuantType.QInt8
 
             # Override initializers to use the weight_type
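
For context, a minimal usage sketch of the mixed-precision path this change affects. It assumes the get_qnn_qdq_config and quantize entry points from onnxruntime.quantization, as documented for the QNN execution provider around this release; ToyDataReader and its input name/shape are hypothetical stand-ins for real calibration data. With 16-bit activations and 8-bit weights, only MatMul initializer inputs are overridden to the weight type after this commit; initializer inputs to Add/Sub/Mul/Div are quantized as activations.

import numpy as np
from onnxruntime.quantization import CalibrationDataReader, QuantType, quantize
from onnxruntime.quantization.execution_providers.qnn import get_qnn_qdq_config

class ToyDataReader(CalibrationDataReader):
    """Hypothetical reader feeding a few random samples; replace with real calibration data."""
    def __init__(self, input_name, shape, count=8):
        self._batches = iter(
            {input_name: np.random.rand(*shape).astype(np.float32)}
            for _ in range(count)
        )

    def get_next(self):
        return next(self._batches, None)

# Mixed precision: 16-bit activations, 8-bit weights. Only MatMul initializer
# inputs now get the 8-bit weight_type override; initializers feeding
# Add/Sub/Mul/Div are treated as activations instead.
qdq_config = get_qnn_qdq_config(
    "model.onnx",
    ToyDataReader("input", (1, 3, 224, 224)),
    activation_type=QuantType.QUInt16,
    weight_type=QuantType.QUInt8,
)
quantize("model.onnx", "model.qdq.onnx", qdq_config)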
