diff --git a/tests/torch/test_hybrid_converter.py b/tests/torch/test_hybrid_converter.py
index cd1440417..2d2cdec07 100644
--- a/tests/torch/test_hybrid_converter.py
+++ b/tests/torch/test_hybrid_converter.py
@@ -313,7 +313,7 @@ def prepare_data(x, y, test_size=0.1, random_state=42):
     assert numpy.all(numpy.allclose(y_torch, y_hybrid_torch, rtol=1, atol=0.001))

     # The clear quantization vs fp32 test has more tolerance
-    threshold_fhe = 0.01
+    threshold_fhe = 0.1

     diff = numpy.abs(y_torch - y_glwe) > threshold_fhe
     if numpy.any(diff):
@@ -334,4 +334,4 @@ def prepare_data(x, y, test_size=0.1, random_state=42):
     else:
         # For non-GLWE cases, just verify the torch outputs match
         assert numpy.all(numpy.allclose(y_torch, y_hybrid_torch, rtol=1, atol=0.001))
-        assert numpy.all(numpy.allclose(y_qm, y_hybrid_torch, rtol=1, atol=0.01))
+        assert numpy.all(numpy.allclose(y_qm, y_hybrid_torch, rtol=1, atol=0.1))