diff --git a/tests/neural_compressor/test_optimization.py b/tests/neural_compressor/test_optimization.py
index a5f39460fd..bd7b6519c6 100644
--- a/tests/neural_compressor/test_optimization.py
+++ b/tests/neural_compressor/test_optimization.py
@@ -88,7 +88,7 @@ def test_dynamic_quantization(self, task, model_name, expected_quantized_matmuls
         tokenizer = AutoTokenizer.from_pretrained(model_name)
         save_onnx_model = False
         quantized_model = None
-        model_kwargs = {"use_cache" : False, "use_io_binding": False} if task == "text-generation" else {}
+        model_kwargs = {"use_cache": False, "use_io_binding": False} if task == "text-generation" else {}
         with tempfile.TemporaryDirectory() as tmp_dir:
             for backend in ["torch", "ort"]:
                 if backend == "torch":
diff --git a/tests/neural_compressor/utils_tests.py b/tests/neural_compressor/utils_tests.py
index c1f0086e53..d08ad636e6 100644
--- a/tests/neural_compressor/utils_tests.py
+++ b/tests/neural_compressor/utils_tests.py
@@ -96,7 +96,7 @@ def check_model_outputs(
         file_name=None,
     ):
         tokens = tokenizer("This is a sample input", return_tensors="pt")
-        file_name = ONNX_WEIGHTS_NAME if task!="text-generation" else "decoder_model.onnx"
+        file_name = ONNX_WEIGHTS_NAME if task != "text-generation" else "decoder_model.onnx"
         model_kwargs = (
             {"decoder_file_name": file_name, "use_cache": False, "use_io_binding": False}
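
Both hunks are pure whitespace cleanups with no behavioral change: PEP 8 forbids a space before the ':' in a dict literal and requires spaces around comparison operators such as '!='. A minimal sketch of the two spellings being corrected, assuming a pycodestyle/flake8-style checker (the variable names below are illustrative stand-ins for the parametrized test arguments, not part of the patch):

    # E203: no whitespace before ':' in a dict literal.
    task = "text-generation"
    model_kwargs = {"use_cache": False, "use_io_binding": False} if task == "text-generation" else {}

    # E225: put single spaces around binary operators such as '!='.
    file_name = "model.onnx" if task != "text-generation" else "decoder_model.onnx"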