Remove checks on quant_min/quant_max
Differential Revision: D66912431

Pull Request resolved: #7256
zonglinpeng authored Dec 10, 2024
1 parent 579d958 · commit c5848b2
Showing 1 changed file with 2 additions and 2 deletions.
backends/cadence/fusion_g3/operators/op_quantize.cpp (+2, −2)
```diff
@@ -570,7 +570,7 @@ Tensor& quantize_per_tensor_out(
       err == torch::executor::Error::Ok,
       "Failed to resize out Tensor in quantize_per_tensor_out");
 
-  check_quantize_per_tensor_args(input, quant_min, quant_max, dtype, out);
+  // check_quantize_per_tensor_args(input, quant_min, quant_max, dtype, out);
 
   float scale_data = (float)scale;
   int zero_point_data = (int)zero_point;
@@ -696,7 +696,7 @@ Tensor& quantize_per_channel_out(
       zero_point.numel(),
       input.size(axis));
 
-  check_quantize_per_tensor_args(input, quant_min, quant_max, dtype, out);
+  // check_quantize_per_tensor_args(input, quant_min, quant_max, dtype, out);
 
   const double* scale_dt = scale.const_data_ptr<double>();
   const int64_t* zero_point_dt = zero_point.const_data_ptr<int64_t>();
```
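For context on what the disabled call guards: `check_quantize_per_tensor_args` validates, among other things, that the requested `quant_min`/`quant_max` window fits inside the representable range of the output dtype. Below is a minimal C++ sketch of that style of bounds check; the name `check_quant_bounds_sketch`, the `ScalarDtype` enum, and the exception-based error handling are illustrative assumptions, not the fusion_g3 source.

```cpp
// Minimal sketch (an assumption, not the exact ExecuTorch source) of the kind
// of validation that check_quantize_per_tensor_args performs: verify that the
// requested [quant_min, quant_max] window fits inside the representable range
// of the output dtype before quantizing.
#include <cstdint>
#include <limits>
#include <stdexcept>

// Illustrative subset of integer output dtypes.
enum class ScalarDtype { Byte, Char, Short, Int };

inline void check_quant_bounds_sketch(
    int64_t quant_min,
    int64_t quant_max,
    ScalarDtype dtype) {
  int64_t lo = 0, hi = 0;
  switch (dtype) {
    case ScalarDtype::Byte: // uint8
      lo = std::numeric_limits<uint8_t>::min();
      hi = std::numeric_limits<uint8_t>::max();
      break;
    case ScalarDtype::Char: // int8
      lo = std::numeric_limits<int8_t>::min();
      hi = std::numeric_limits<int8_t>::max();
      break;
    case ScalarDtype::Short: // int16
      lo = std::numeric_limits<int16_t>::min();
      hi = std::numeric_limits<int16_t>::max();
      break;
    case ScalarDtype::Int: // int32
      lo = std::numeric_limits<int32_t>::min();
      hi = std::numeric_limits<int32_t>::max();
      break;
  }
  // Reject windows that are inverted or exceed the dtype's range.
  if (quant_min > quant_max || quant_min < lo || quant_max > hi) {
    throw std::invalid_argument(
        "quant_min/quant_max out of range for the output dtype");
  }
}
```

With the call commented out, out-of-range bounds are no longer rejected inside these kernels, so correctness depends on the caller (for example, ahead-of-time export checks) supplying `quant_min`/`quant_max` values that already match the output dtype.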
