diff --git a/tests/torch/fx/test_min_max.py b/tests/torch/fx/test_min_max.py
index bfd19e8e75..5efce2ec01 100644
--- a/tests/torch/fx/test_min_max.py
+++ b/tests/torch/fx/test_min_max.py
@@ -14,6 +14,7 @@
 import pytest
 
 from nncf.common.graph.graph import NNCFGraph
+from nncf.common.graph.layer_attributes import BaseLayerAttributes
 from nncf.common.graph.transformations.commands import TargetType
 from nncf.quantization.algorithms.min_max.backend import MinMaxAlgoBackend
 from nncf.quantization.algorithms.min_max.torch_fx_backend import FXMinMaxAlgoBackend
@@ -61,19 +62,21 @@ def matmul_metatype(self):
         return PTLinearMetatype
 
     @staticmethod
-    def get_conv_node_attrs(weight_port_id: int, weight_shape: Tuple[int]):
+    def get_conv_node_attrs(weight_port_id: int, weight_shape: Tuple[int]) -> BaseLayerAttributes:
         # This method isn't needed for Torch FX backend
-        pass
+        return None
 
     @staticmethod
-    def get_depthwiseconv_node_attrs(weight_port_id: int, weight_shape: Tuple[int]):
+    def get_depthwiseconv_node_attrs(weight_port_id: int, weight_shape: Tuple[int]) -> BaseLayerAttributes:
         # This method isn't needed for Torch FX backend
-        pass
+        return None
 
     @staticmethod
-    def get_matmul_node_attrs(weight_port_id: int, transpose_weight: Tuple[int], weight_shape: Tuple[int]):
+    def get_matmul_node_attrs(
+        weight_port_id: int, transpose_weight: Tuple[int], weight_shape: Tuple[int]
+    ) -> BaseLayerAttributes:
         # This method isn't needed for Torch FX backend
-        pass
+        return None
 
     def test_get_channel_axes_matmul_node_ov_onnx(self):
         pytest.skip("Test is not applied for Torch FX backend.")
diff --git a/tests/torch/ptq/test_min_max.py b/tests/torch/ptq/test_min_max.py
index 20c91ff2d1..d099fd587c 100644
--- a/tests/torch/ptq/test_min_max.py
+++ b/tests/torch/ptq/test_min_max.py
@@ -13,6 +13,7 @@
 import pytest
 
 from nncf.common.graph.graph import NNCFGraph
+from nncf.common.graph.layer_attributes import BaseLayerAttributes
 from nncf.common.graph.transformations.commands import TargetType
 from nncf.quantization.algorithms.min_max.backend import MinMaxAlgoBackend
 from nncf.quantization.algorithms.min_max.torch_backend import PTMinMaxAlgoBackend
@@ -60,19 +61,21 @@ def matmul_metatype(self):
         return PTLinearMetatype
 
     @staticmethod
-    def get_conv_node_attrs(weight_port_id: int, weight_shape: Tuple[int]):
+    def get_conv_node_attrs(weight_port_id: int, weight_shape: Tuple[int]) -> BaseLayerAttributes:
         # This method isn't needed for Torch backend
-        pass
+        return None
 
     @staticmethod
-    def get_depthwiseconv_node_attrs(weight_port_id: int, weight_shape: Tuple[int]):
+    def get_depthwiseconv_node_attrs(weight_port_id: int, weight_shape: Tuple[int]) -> BaseLayerAttributes:
         # This method isn't needed for Torch backend
-        pass
+        return None
 
     @staticmethod
-    def get_matmul_node_attrs(weight_port_id: int, transpose_weight: Tuple[int], weight_shape: Tuple[int]):
+    def get_matmul_node_attrs(
+        weight_port_id: int, transpose_weight: Tuple[int], weight_shape: Tuple[int]
+    ) -> BaseLayerAttributes:
         # This method isn't needed for Torch backend
-        pass
+        return None
 
     def test_get_channel_axes_matmul_node_ov_onnx(self):
         pytest.skip("Test is not applied for Torch backend.")