Skip annotate boolean input (#2957)
Summary:
Pull Request resolved: #2957

ghstack-source-id: 222200589
exported-using-ghexport

It only makes sense to quantize floating-point tensors, not boolean ones. Add a check to ensure that only fp tensors are annotated in the quantizer.

Reviewed By: jerryzh168

Differential Revision: D55946526

fbshipit-source-id: d94bfee38ab2d29fc9672ab631b4d5d0c5239d25
cccclai authored and facebook-github-bot committed Apr 11, 2024
1 parent 62a4dd3 commit ce344bc
Showing 1 changed file with 15 additions and 2 deletions.
17 changes: 15 additions & 2 deletions backends/qualcomm/quantizer/utils.py
@@ -9,6 +9,7 @@
 import torch
 
 from torch._ops import OpOverload
+from torch._subclasses import FakeTensor
 
 from torch.ao.quantization.quantizer import (
     QuantizationAnnotation,
@@ -41,6 +42,18 @@ def decorator(annotator: Callable):
 
     return decorator
 
+
+def _is_input_float_tensor(node: Node):
+    """Check whether the input is a float tensor; if it is not, quantization is
+    skipped for the node, since observers only work with float Tensors.
+    """
+    if (
+        not isinstance(node, Node)
+        or "val" not in node.meta
+        or not isinstance(node.meta["val"], FakeTensor)
+    ):
+        return False
+    return node.meta["val"].dtype == torch.float32
 
 
 def _is_annotated(nodes: List[Node]):
     """
@@ -123,11 +136,11 @@ def annotate_binary(node: Node, quantization_config: QuantizationConfig) -> None
 
     input_qspec_map = {}
     input_act0 = node.args[0]
-    if isinstance(input_act0, Node):
+    if _is_input_float_tensor(input_act0):
         input_qspec_map[input_act0] = input_act_qspec
 
     input_act1 = node.args[1]
-    if isinstance(input_act1, Node):
+    if _is_input_float_tensor(input_act1):
         input_qspec_map[input_act1] = input_act_qspec
 
     node.meta[QUANT_ANNOTATION_KEY] = QuantizationAnnotation(
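To illustrate the effect of the change, here is a minimal sketch (not part of the commit) that exercises the new helper on hand-built FX nodes. The graph construction, the node names, and the use of FakeTensorMode.from_tensor to populate node.meta["val"] are assumptions for the demo, standing in for what the export pipeline records on each node:

    import torch
    from torch._subclasses import FakeTensor, FakeTensorMode
    from torch.fx import Graph, Node


    # Copied from the diff above so the sketch is self-contained.
    def _is_input_float_tensor(node: Node):
        if (
            not isinstance(node, Node)
            or "val" not in node.meta
            or not isinstance(node.meta["val"], FakeTensor)
        ):
            return False
        return node.meta["val"].dtype == torch.float32


    # Hypothetical placeholder nodes; attach FakeTensor "val" metadata by hand,
    # mimicking the metadata an exported graph carries.
    graph = Graph()
    float_node = graph.placeholder("x")
    bool_node = graph.placeholder("mask")

    mode = FakeTensorMode()
    float_node.meta["val"] = mode.from_tensor(torch.randn(2, 2))        # float32
    bool_node.meta["val"] = mode.from_tensor(torch.zeros(2, 2).bool())  # bool

    print(_is_input_float_tensor(float_node))  # True  -> receives an input qspec
    print(_is_input_float_tensor(bool_node))   # False -> annotation is skipped
    print(_is_input_float_tensor(1.0))         # False -> non-Node args are skipped too

Note that returning False for anything that is not a Node also subsumes the old isinstance(input_act, Node) guard in annotate_binary, which is why the call sites can drop it in favor of the single helper.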
