From 662975dbfd0ea2358e6fb2c1588d4c7e2fbc3959 Mon Sep 17 00:00:00 2001
From: evelynmitchell
Date: Fri, 29 Dec 2023 18:49:12 -0700
Subject: [PATCH] quantize default bits tolerance

---
 tests/utils/test_absmax.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tests/utils/test_absmax.py b/tests/utils/test_absmax.py
index 45dd5a87..6ac9e394 100644
--- a/tests/utils/test_absmax.py
+++ b/tests/utils/test_absmax.py
@@ -7,7 +7,7 @@ def test_absmax_quantize_default_bits():
     quant, dequant = absmax_quantize(x)
     assert quant.dtype == torch.int8
     assert dequant.dtype == torch.float32
-    assert torch.allclose(dequant, x, atol=1e-4)
+    assert torch.allclose(dequant, x, atol=1e-3)
 
 
 def test_absmax_quantize_custom_bits():
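
For context on why the tolerance is loosened, below is a minimal sketch of an absmax round trip. It assumes the conventional formula (scale by the largest representable integer over the tensor's absolute maximum, round, cast to int8, then invert the scale); the repository's actual absmax_quantize and the test's input x are not shown in this patch, so the function name absmax_quantize_sketch and the sample input here are illustrative assumptions, not the library's implementation.

import torch

def absmax_quantize_sketch(x: torch.Tensor, bits: int = 8):
    # Hypothetical stand-in for the library's absmax_quantize (default 8 bits).
    qmax = 2 ** (bits - 1) - 1          # 127 for int8
    scale = qmax / x.abs().max()
    quant = (x * scale).round().to(torch.int8)
    dequant = quant.to(torch.float32) / scale
    return quant, dequant

x = torch.randn(128)
quant, dequant = absmax_quantize_sketch(x)
# Worst-case round-trip error is half a quantization step,
# x.abs().max() / (2 * qmax); for unit-scale inputs that is well above 1e-4.
print((dequant - x).abs().max())

Under these assumptions the reconstruction error scales with the input's absolute maximum divided by 2 * (2^(bits-1) - 1), so an atol of 1e-4 is tighter than int8 quantization can generally guarantee, while 1e-3 leaves some headroom for small-magnitude test inputs.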