diff --git a/conftest.py b/conftest.py
index c4fa713c72..f54e5b9efc 100644
--- a/conftest.py
+++ b/conftest.py
@@ -145,21 +145,25 @@ def pytest_sessionfinish(session: pytest.Session, exitstatus):  # pylint: disabl
 
 @pytest.fixture
 def default_configuration():
-    """Return the default test compilation configuration."""
+    """Return the default test compilation configuration factory."""
+    def default_configuration_impl(mode: Optional[str] = "MULTI"):
+        if mode == "MULTI":
+            parameter_selection_strategy = ParameterSelectionStrategy.MULTI
+            single_precision = False
+        else:
+            parameter_selection_strategy = ParameterSelectionStrategy.MONO
+            single_precision = True
+
+        return Configuration(
+            dump_artifacts_on_unexpected_failures=False,
+            enable_unsafe_features=True,
+            use_insecure_key_cache=True,
+            insecure_key_cache_location="ConcreteNumpyKeyCache",
+            parameter_selection_strategy=parameter_selection_strategy,
+            single_precision=single_precision,
+        )
 
-    # Remove parameter_selection_strategy once it is set to multi-parameter in Concrete Python
-    # by default
-    # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3860
-    # Parameter `enable_unsafe_features` and `use_insecure_key_cache` are needed in order to be
-    # able to cache generated keys through `insecure_key_cache_location`. As the name suggests,
-    # these parameters are unsafe and should only be used for debugging in development
-    return Configuration(
-        dump_artifacts_on_unexpected_failures=False,
-        enable_unsafe_features=True,
-        use_insecure_key_cache=True,
-        insecure_key_cache_location="ConcreteNumpyKeyCache",
-        parameter_selection_strategy=ParameterSelectionStrategy.MULTI,
-    )
+    return default_configuration_impl
 
 
 REMOVE_COLOR_CODES_RE = re.compile(r"\x1b[^m]*m")
diff --git a/src/concrete/ml/pytest/utils.py b/src/concrete/ml/pytest/utils.py
index 3bef4b8a13..ba36f4cf66 100644
--- a/src/concrete/ml/pytest/utils.py
+++ b/src/concrete/ml/pytest/utils.py
@@ -103,8 +103,8 @@
     pytest.param(
         model,
         {
-            "n_samples": 10,
-            "n_features": 3,
+            "n_samples": 4,
+            "n_features": 2,
             "n_classes": n_classes,
             "n_informative": 2,
             "n_redundant": 0,
diff --git a/tests/common/test_pbs_error_probability_settings.py b/tests/common/test_pbs_error_probability_settings.py
index 4066119eb9..507b570afa 100644
--- a/tests/common/test_pbs_error_probability_settings.py
+++ b/tests/common/test_pbs_error_probability_settings.py
@@ -41,24 +41,17 @@ def test_config_sklearn(model_class, parameters, kwargs, load_data, default_conf
     # Fit the model
     model.fit(x, y)
 
-    if get_model_name(model_class) == "KNeighborsClassifier":
-
-        default_configuration = Configuration(
-            dump_artifacts_on_unexpected_failures=False,
-            enable_unsafe_features=True,
-            use_insecure_key_cache=True,
-            insecure_key_cache_location="ConcreteNumpyKeyCache",
-            parameter_selection_strategy=fhe.ParameterSelectionStrategy.MONO,
-            single_precision=True,
-        )
+    # KNN works only for MONO in the latest Concrete Python version
+    # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3978
+    mode = "MONO" if get_model_name(model) == "KNeighborsClassifier" else "MULTI"
 
     if kwargs.get("p_error", None) is not None and kwargs.get("global_p_error", None) is not None:
         with pytest.raises(ValueError) as excinfo:
-            model.compile(x, default_configuration, verbose=True, **kwargs)
+            model.compile(x, default_configuration(mode), verbose=True, **kwargs)
 
         assert "Please only set one of (p_error, global_p_error) values" in str(excinfo.value)
     else:
-        model.compile(x, default_configuration, verbose=True, **kwargs)
+        model.compile(x, default_configuration(mode), verbose=True, **kwargs)
 
     # We still need to check that we have the expected probabilities
     # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/2206
diff --git a/tests/deployment/test_client_server.py b/tests/deployment/test_client_server.py
index f5e4a8e438..d748d39fac 100644
--- a/tests/deployment/test_client_server.py
+++ b/tests/deployment/test_client_server.py
@@ -97,24 +97,17 @@ def test_client_server_sklearn(
 
     # Compile
    extra_params = {"global_p_error": 1 / 100_000}
-
-    if get_model_name(model_class) == "KNeighborsClassifier":
-
-        default_configuration = Configuration(
-            dump_artifacts_on_unexpected_failures=False,
-            enable_unsafe_features=True,
-            use_insecure_key_cache=True,
-            insecure_key_cache_location="ConcreteNumpyKeyCache",
-            parameter_selection_strategy=fhe.ParameterSelectionStrategy.MONO,
-            single_precision=True,
-        )
+    # KNN works only for MONO in the latest Concrete Python version
+    # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3978
+    mode = "MONO" if get_model_name(model) == "KNeighborsClassifier" else "MULTI"
 
     # Running the simulation using a model that is not compiled should not be possible
     with pytest.raises(AttributeError, match=".* model is not compiled.*"):
-        client_server_simulation(x_train, x_test, model, default_configuration)
+        client_server_simulation(x_train, x_test, model, default_configuration(mode))
 
+    # With n_bits = 3, KNN is not compilable
     fhe_circuit = model.compile(
-        x_train, default_configuration, **extra_params, show_mlir=(n_bits <= 8)
+        x_train, default_configuration(mode), **extra_params, show_mlir=(n_bits <= 8)
     )
     max_bit_width = fhe_circuit.graph.maximum_integer_bit_width()
     print(f"Max width {max_bit_width}")
@@ -122,7 +115,7 @@ def test_client_server_sklearn(
     # Check that the FHE execution is correct.
     # With a global_p_error of 1/100_000 we only allow one run.
     check_is_good_execution_for_cml_vs_circuit(x_test, model, simulate=False, n_allowed_runs=1)
-    client_server_simulation(x_train, x_test, model, default_configuration)
+    client_server_simulation(x_train, x_test, model, default_configuration(mode))
 
 
 def test_client_server_custom_model(
@@ -138,7 +131,7 @@ def test_client_server_custom_model(
     # Instantiate an empty QuantizedModule object
     quantized_module = QuantizedModule()
 
-    client_server_simulation(x_train, x_test, quantized_module, default_configuration)
+    client_server_simulation(x_train, x_test, quantized_module, default_configuration("MULTI"))
 
     torch_model = FCSmall(2, nn.ReLU)
     n_bits = 2
@@ -147,7 +140,7 @@ def test_client_server_custom_model(
     quantized_numpy_module = compile_torch_model(
         torch_model,
         x_train,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
         n_bits=n_bits,
         global_p_error=1 / 100_000,
     )
@@ -158,7 +151,7 @@ def test_client_server_custom_model(
         x_test, quantized_numpy_module, simulate=False, n_allowed_runs=1
     )
 
-    client_server_simulation(x_train, x_test, quantized_numpy_module, default_configuration)
+    client_server_simulation(x_train, x_test, quantized_numpy_module, default_configuration())
 
 
 def client_server_simulation(x_train, x_test, model, default_configuration):
diff --git a/tests/parameter_search/test_p_error_binary_search.py b/tests/parameter_search/test_p_error_binary_search.py
index 5ab3ffee61..a24479ce44 100644
--- a/tests/parameter_search/test_p_error_binary_search.py
+++ b/tests/parameter_search/test_p_error_binary_search.py
@@ -312,6 +312,7 @@ def test_binary_search_for_built_in_models(model_class, parameters, threshold, p
     # Skorch but since Scikit-Learn does not, we don't as well. This issue could be fixed by making
     # neural networks not inherit from Skorch.
     # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3373
+    # Skipping predict_proba for KNN, doesn't work for now.
     # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3962
diff --git a/tests/quantization/test_compilation.py b/tests/quantization/test_compilation.py
index 20be11e4f9..664dd2fc64 100644
--- a/tests/quantization/test_compilation.py
+++ b/tests/quantization/test_compilation.py
@@ -78,7 +78,7 @@ def test_quantized_module_compilation(
     # Compile
     quantized_model.compile(
         numpy_input,
-        default_configuration,
+        default_configuration(),
         verbose=verbose,
     )
 
@@ -138,7 +138,7 @@ def test_quantized_cnn_compilation(
     # Compile
     quantized_model.compile(
         numpy_input,
-        default_configuration,
+        default_configuration(),
         verbose=verbose,
     )
     check_is_good_execution_for_cml_vs_circuit(numpy_input, quantized_model, simulate=simulate)
@@ -317,7 +317,7 @@ def test_compile_multi_input_nn_with_input_tlus(
     # Compile
     quantized_model.compile(
         numpy_input,
-        default_configuration,
+        default_configuration(),
     )
 
     check_is_good_execution_for_cml_vs_circuit(numpy_input, quantized_model, simulate=True)
diff --git a/tests/quantization/test_quantized_module.py b/tests/quantization/test_quantized_module.py
index 49072a2f50..c1ae8fa1d1 100644
--- a/tests/quantization/test_quantized_module.py
+++ b/tests/quantization/test_quantized_module.py
@@ -246,7 +246,7 @@ def test_bitwidth_report(model_class, input_shape, activation_function, default_
         torch_fc_model,
         torch_input,
         False,
-        default_configuration,
+        default_configuration(),
         n_bits=2,
         p_error=0.01,
     )
@@ -336,7 +336,7 @@ def test_quantized_module_rounding_fhe(model_class, input_shape, default_configu
         torch_fc_model,
         torch_input,
         False,
-        default_configuration,
+        default_configuration(),
         n_bits=2,
         p_error=0.01,
         rounding_threshold_bits=6,
diff --git a/tests/seeding/test_seeding.py b/tests/seeding/test_seeding.py
index 27af16c06e..7d54f962c4 100644
--- a/tests/seeding/test_seeding.py
+++ b/tests/seeding/test_seeding.py
@@ -106,7 +106,7 @@ def test_seed_sklearn(model_class, parameters, load_data, default_configuration)
 
     # Test the determinism of our package (even if the bit-width may be too large)
     try:
-        model.compile(x, configuration=default_configuration, show_mlir=True)
+        model.compile(x, configuration=default_configuration("MULTI"), show_mlir=True)
     except RuntimeError as err:
         print(err)
     except AssertionError as err:
diff --git a/tests/sklearn/test_dump_onnx.py b/tests/sklearn/test_dump_onnx.py
index f1949a6ca3..4967dea155 100644
--- a/tests/sklearn/test_dump_onnx.py
+++ b/tests/sklearn/test_dump_onnx.py
@@ -37,9 +37,15 @@ def check_onnx_file_dump(model_class, parameters, load_data, str_expected, defau
     model.set_params(**model_params)
 
     if get_model_name(model) == "KNeighborsClassifier":
-        model.n_bits = 4
-        default_configuration.parameter_selection_strategy = fhe.ParameterSelectionStrategy.MONO
-        default_configuration.single_precision = True
+        # KNN works only for MONO in the latest Concrete Python version
+        # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3978
+        mode = "MONO"
+        # KNN works only for small quantization bits
+        # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3979
+        model.n_bits = 2
+
+    else:
+        mode = "MULTI"
 
     with warnings.catch_warnings():
         # Sometimes, we miss convergence, which is not a problem for our test
@@ -49,7 +55,7 @@ def check_onnx_file_dump(model_class, parameters, load_data, str_expected, defau
 
     with warnings.catch_warnings():
         # Use FHE simulation to not have issues with precision
-        model.compile(x, default_configuration)
+        model.compile(x, default_configuration(mode))
 
     # Get ONNX model
     onnx_model = model.onnx_model
@@ -423,7 +429,7 @@ def test_dump(
   return %variable
 }""",
         "KNeighborsClassifier": """graph torch_jit (
-  %input_0[DOUBLE, symx3]
+  %input_0[DOUBLE, symx2]
 ) {
   %/_operators.0/Constant_output_0 = Constant[value = <Tensor>]()
   %/_operators.0/Unsqueeze_output_0 = Unsqueeze(%input_0, %/_operators.0/Constant_output_0)
diff --git a/tests/sklearn/test_qnn.py b/tests/sklearn/test_qnn.py
index f164acb388..884fd65e7b 100644
--- a/tests/sklearn/test_qnn.py
+++ b/tests/sklearn/test_qnn.py
@@ -224,7 +224,7 @@ def test_compile_and_calib(
     # Compile the model
     model.compile(
         x_train,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
     )
 
     # Execute in FHE, but don't check the value.
@@ -406,7 +406,7 @@ def _get_number_of_neurons(module: SparseQuantNeuralNetwork):
     # Compile the model
     model.compile(
         x_train,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
     )
 
     pruned_model = model.prune(x_train, y_train, 0.5)
@@ -415,7 +415,7 @@ def _get_number_of_neurons(module: SparseQuantNeuralNetwork):
     # Compile the pruned model, this will also perform ONNX export and calibration
     pruned_model.compile(
         x_train,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
     )
 
     with pytest.raises(
@@ -592,7 +592,7 @@ def test_power_of_two_scaling(
     # in compilation
     model.compile(
         x_train,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
     )
 
     # Compute the results with simulation, which uses the actual
diff --git a/tests/sklearn/test_sklearn_models.py b/tests/sklearn/test_sklearn_models.py
index 931d0c3222..76a395db1d 100644
--- a/tests/sklearn/test_sklearn_models.py
+++ b/tests/sklearn/test_sklearn_models.py
@@ -656,8 +656,8 @@ def check_pipeline(model_class, x, y):
     param_grid = {
         "model__n_bits": [2, 3],
     }
-
-    grid_search = GridSearchCV(pipe_cv, param_grid, error_score="raise", cv=3)
+    # Since the data-set is really small for KNN, we have to decrease the number of splits
+    grid_search = GridSearchCV(pipe_cv, param_grid, error_score="raise", cv=2)
 
     # Sometimes, we miss convergence, which is not a problem for our test
     with warnings.catch_warnings():
@@ -686,9 +686,7 @@ def check_grid_search(model_class, x, y, scoring):
             "n_jobs": [1],
         }
     elif model_class in get_sklearn_neighbors_models():
-        param_grid = {
-            "n_bits": [3],
-        }
+        param_grid = {"n_bits": [2], "n_neighbors": [2]}
     else:
         param_grid = {
             "n_bits": [20],
@@ -706,8 +704,11 @@ def check_grid_search(model_class, x, y, scoring):
         # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3962
        pytest.skip("Skipping predict_proba for KNN, doesn't work for now")
 
+    # pylint: disable=invalid-name
+    cv = 2 if get_model_name(model_class) == "KNeighborsClassifier" else 5
+
     _ = GridSearchCV(
-        model_class(), param_grid, cv=5, scoring=scoring, error_score="raise", n_jobs=1
+        model_class(), param_grid, cv=cv, scoring=scoring, error_score="raise", n_jobs=1
     ).fit(x, y)
 
 
@@ -1074,14 +1075,12 @@ def check_exposition_structural_methods_decision_trees(model, x, y):
 def check_mono_parameter_warnings(model, x, default_configuration):
     """Check that setting voluntarily a mono-parameter strategy properly raises a warning."""
 
-    # Set the parameter strategy to mono-parameter
-    default_configuration.parameter_selection_strategy = ParameterSelectionStrategy.MONO
-
     with pytest.warns(
         UserWarning,
         match="Setting the parameter_selection_strategy to mono-parameter is not recommended.*",
     ):
-        model.compile(x, default_configuration)
+        # Set the parameter strategy to mono-parameter
+        model.compile(x, default_configuration(mode="MONO"))
 
 
 @pytest.mark.parametrize("model_class, parameters", sklearn_models_and_datasets)
@@ -1270,7 +1269,7 @@ def test_serialization(
     model, x = preamble(model_class, parameters, n_bits, load_data, is_weekly_option)
 
     # Compile the model to make sure we consider all possible attributes during the serialization
-    model.compile(x, default_configuration)
+    model.compile(x, default_configuration("MULTI"))
 
     if verbose:
         print("Run check_serialization")
@@ -1353,7 +1352,7 @@ def test_input_support(
     if verbose:
         print("Run input_support")
 
-    check_input_support(model_class, n_bits, default_configuration, x, y, input_type)
+    check_input_support(model_class, n_bits, default_configuration("MULTI"), x, y, input_type)
 
 
 @pytest.mark.parametrize("model_class, parameters", sklearn_models_and_datasets)
@@ -1475,14 +1474,13 @@ def test_predict_correctness(
         print("Compile the model")
 
     with warnings.catch_warnings():
+        # KNN works only for MONO in the latest Concrete Python version
+        # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3978
+        mode = "MONO" if get_model_name(model) == "KNeighborsClassifier" else "MULTI"
 
-        if get_model_name(model) == "KNeighborsClassifier":
-            default_configuration.parameter_selection_strategy = (
-                ParameterSelectionStrategy.MONO
-            )
         fhe_circuit = model.compile(
             x,
-            default_configuration,
+            default_configuration(mode),
             show_mlir=verbose and (n_bits <= 8),
         )
 
@@ -1567,13 +1565,18 @@ def test_p_error_global_p_error_simulation(
     if "global_p_error" in error_param:
         pytest.skip("global_p_error behave very differently depending on the type of model.")
 
-    # Get data-set
-    n_bits = min(N_BITS_REGULAR_BUILDS)
     if get_model_name(model_class) == "KNeighborsClassifier":
-        n_bits = min(n_bits, 2)
-        default_configuration.parameter_selection_strategy = ParameterSelectionStrategy.MONO
+        # KNN works only for MONO in the latest Concrete Python version
+        # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3978
+        mode = "MONO"
+        # KNN works only for smaller quantization bits
+        # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3979
+        n_bits = min([2] + N_BITS_REGULAR_BUILDS)
+    else:
+        mode = "MULTI"
+        n_bits = min(N_BITS_REGULAR_BUILDS)
 
-    # Initialize and fit the model
+    # Get data-set, initialize and fit the model
     model, x = preamble(model_class, parameters, n_bits, load_data, is_weekly_option)
 
     # Check if model is linear
@@ -1583,7 +1586,7 @@ def test_p_error_global_p_error_simulation(
     is_knn_model = is_model_class_in_a_list(model_class, get_sklearn_neighbors_models())
 
     # Compile with a large p_error to be sure the result is random.
-    model.compile(x, default_configuration, **error_param)
+    model.compile(x, default_configuration(mode), **error_param)
 
     def check_for_divergent_predictions(x, model, fhe, max_iterations=N_ALLOWED_FHE_RUN):
         """Detect divergence between simulated/FHE execution and clear run."""
diff --git a/tests/torch/test_brevitas_qat.py b/tests/torch/test_brevitas_qat.py
index 72e46d09ab..4db09ec875 100644
--- a/tests/torch/test_brevitas_qat.py
+++ b/tests/torch/test_brevitas_qat.py
@@ -222,7 +222,7 @@ def test_with_concrete(quantized_module, test_loader, use_fhe_simulation):
     q_module_simulated = compile_brevitas_qat_model(
         net,
         x_all,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
     )
 
     fhe_s_correct = test_with_concrete(
@@ -491,7 +491,7 @@ def test_brevitas_constant_folding(default_configuration):
     compile_brevitas_qat_model(
         model.to("cpu"),
         torch_inputset=data,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
     )
 
 
@@ -522,7 +522,7 @@ def test_brevitas_power_of_two(
     quantized_module = compile_brevitas_qat_model(
         net.to("cpu"),
         torch_inputset=x_all,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
         rounding_threshold_bits=manual_rounding,
     )
 
diff --git a/tests/torch/test_compile_keras.py b/tests/torch/test_compile_keras.py
index 8bf4b065b3..6eafdbd06a 100644
--- a/tests/torch/test_compile_keras.py
+++ b/tests/torch/test_compile_keras.py
@@ -119,7 +119,7 @@ def test_compile_keras_networks(
         input_output_feature,
         model,
         OPSET_VERSION_FOR_ONNX_EXPORT,
-        default_configuration,
+        default_configuration("MULTI"),
         simulate,
         check_is_good_execution_for_cml_vs_circuit,
     )
diff --git a/tests/torch/test_compile_torch.py b/tests/torch/test_compile_torch.py
index be5f50af61..d4568be017 100644
--- a/tests/torch/test_compile_torch.py
+++ b/tests/torch/test_compile_torch.py
@@ -479,7 +479,7 @@ def test_compile_torch_or_onnx_networks(
         model_class=model,
         activation_function=activation_function,
         qat_bits=qat_bits,
-        default_configuration=default_configuration,
+        default_configuration=default_configuration("MULTI"),
         simulate=simulate,
         is_onnx=is_onnx,
         check_is_good_execution_for_cml_vs_circuit=check_is_good_execution_for_cml_vs_circuit,
@@ -529,7 +529,7 @@ def test_compile_torch_or_onnx_conv_networks(  # pylint: disable=unused-argument
         model_class=model,
         activation_function=activation_function,
         qat_bits=qat_bits,
-        default_configuration=default_configuration,
+        default_configuration=default_configuration("MULTI"),
         simulate=simulate,
         is_onnx=is_onnx,
         check_is_good_execution_for_cml_vs_circuit=check_is_good_execution_for_cml_vs_circuit,
@@ -605,7 +605,7 @@ def test_compile_torch_or_onnx_activations(
         model,
         activation_function,
         qat_bits,
-        default_configuration,
+        default_configuration("MULTI"),
         simulate,
         is_onnx,
         check_is_good_execution_for_cml_vs_circuit,
@@ -649,7 +649,7 @@ def test_compile_torch_qat(
         model,
         nn.Sigmoid,
         qat_bits,
-        default_configuration,
+        default_configuration("MULTI"),
         simulate,
         is_onnx,
         check_is_good_execution_for_cml_vs_circuit,
@@ -692,7 +692,7 @@ def test_compile_brevitas_qat(
         model_class=model_class,
         activation_function=None,
         qat_bits=qat_bits,
-        default_configuration=default_configuration,
+        default_configuration=default_configuration("MULTI"),
         simulate=simulate,
         is_onnx=False,
         check_is_good_execution_for_cml_vs_circuit=check_is_good_execution_for_cml_vs_circuit,
@@ -764,7 +764,7 @@ def test_dump_torch_network(
         model_class,
         activation_function,
         qat_bits,
-        default_configuration,
+        default_configuration("MULTI"),
         simulate,
         is_onnx,
         check_is_good_execution_for_cml_vs_circuit,
@@ -907,7 +907,7 @@ def test_qat_import_bits_check(default_configuration):
     quantized_numpy_module = compile_brevitas_qat_model(
         model,
         inputset,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
     )
 
     n_percent_inputset_examples_test = 0.1
@@ -925,7 +925,7 @@ def test_qat_import_bits_check(default_configuration):
         model,
         inputset,
         n_bits=n_bits,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
     )
 
     new_predictions = quantized_numpy_module.forward(*x_test, fhe="disable")
@@ -968,7 +968,7 @@ def test_qat_import_check(default_configuration, check_is_good_execution_for_cml
         partial(SimpleQAT, n_bits=6, disable_bit_check=True),
         nn.ReLU,
         qat_bits,
-        default_configuration,
+        default_configuration("MULTI"),
         simulate,
         False,
         check_is_good_execution_for_cml_vs_circuit,
@@ -984,7 +984,7 @@ def test_qat_import_check(default_configuration, check_is_good_execution_for_cml
         CNNOther,
         nn.ReLU,
         qat_bits,
-        default_configuration,
+        default_configuration("MULTI"),
         simulate,
         False,
         check_is_good_execution_for_cml_vs_circuit,
@@ -1016,7 +1016,7 @@ def __init__(self, input_output, activation_function):
         AllZeroCNN,
         nn.ReLU,
         qat_bits,
-        default_configuration,
+        default_configuration("MULTI"),
         simulate,
         False,
         check_is_good_execution_for_cml_vs_circuit,
@@ -1095,7 +1095,7 @@ def decorate_name(self):
         quantized_numpy_module = compile_brevitas_qat_model(
             net,
             inputset,
-            configuration=default_configuration,
+            configuration=default_configuration("MULTI"),
         )
     else:
         # Compile with PTQ. Note that this will have zero-point>0
@@ -1103,7 +1103,7 @@ def decorate_name(self):
             net,
             inputset,
             import_qat=False,
-            configuration=default_configuration,
+            configuration=default_configuration("MULTI"),
             n_bits=n_bits,
         )
 
@@ -1136,14 +1136,14 @@ def test_shape_operations_net(
         quantized_module = compile_brevitas_qat_model(
             net,
             inputset,
-            configuration=default_configuration,
+            configuration=default_configuration("MULTI"),
             p_error=0.01,
         )
     else:
         quantized_module = compile_torch_model(
             net,
             inputset,
-            configuration=default_configuration,
+            configuration=default_configuration("MULTI"),
             n_bits=3,
             p_error=0.01,
         )
@@ -1184,7 +1184,7 @@ def test_torch_padding(default_configuration, check_circuit_has_no_tlu):
     quant_model = compile_brevitas_qat_model(
         net,
         inputset,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
         p_error=0.01,
     )
 
@@ -1245,7 +1245,7 @@ def test_fancy_indexing_torch(model_object, default_configuration):
     """Test fancy indexing torch."""
     model = model_object(10, 10, 2, 4, 3)
     x = numpy.random.randint(0, 2, size=(100, 3, 10)).astype(numpy.float64)
-    compile_brevitas_qat_model(model, x, n_bits=4, configuration=default_configuration)
+    compile_brevitas_qat_model(model, x, n_bits=4, configuration=default_configuration("MULTI"))
 
 
 @pytest.mark.parametrize(
@@ -1267,9 +1267,6 @@ def test_mono_parameter_rounding_warning(
     # The QAT bits is set to 0 in order to signal that the network is not using QAT
     qat_bits = 0
 
-    # Set the parameter strategy to mono-parameter
-    default_configuration.parameter_selection_strategy = ParameterSelectionStrategy.MONO
-
     with pytest.warns(
         UserWarning,
         match=".* set the optimization strategy to multi-parameter when using rounding.*",
@@ -1279,7 +1276,8 @@ def test_mono_parameter_rounding_warning(
             model_class=model,
             activation_function=nn.ReLU,
             qat_bits=qat_bits,
-            default_configuration=default_configuration,
+            # Set the parameter strategy to mono-parameter
+            default_configuration=default_configuration("MONO"),
             simulate=True,
             is_onnx=is_onnx,
             check_is_good_execution_for_cml_vs_circuit=check_is_good_execution_for_cml_vs_circuit,
diff --git a/tests/torch/test_reduce_sum.py b/tests/torch/test_reduce_sum.py
index dca1f3b16d..de528fb6c6 100644
--- a/tests/torch/test_reduce_sum.py
+++ b/tests/torch/test_reduce_sum.py
@@ -75,7 +75,7 @@ def test_sum(
     quantized_module = compile_torch_model(
         torch_model,
         inputset,
-        configuration=default_configuration,
+        configuration=default_configuration("MULTI"),
         n_bits=n_bits,
     )
 
diff --git a/tests/virtual_lib/test_virtual_lib.py b/tests/virtual_lib/test_virtual_lib.py
index 47d73ebb55..c1aa92603e 100644
--- a/tests/virtual_lib/test_virtual_lib.py
+++ b/tests/virtual_lib/test_virtual_lib.py
@@ -18,7 +18,7 @@ def f(x, weights):
     inputset = [thousand_ones]
     fhe_simulation_circuit = matmul_thousand_ones_compiler.compile(
         inputset,
-        default_configuration,
+        default_configuration(),
     )
 
     assert isinstance(fhe_simulation_circuit, Circuit)
@@ -40,7 +40,7 @@ def f(x, weights):
     inputset = [numpy.ones((3000,), dtype=numpy.int64)]
     fhe_simulation_circuit = matmul_three_thousand_plus_minus_ones_compiler.compile(
         inputset,
-        default_configuration,
+        default_configuration(),
     )
 
     assert isinstance(fhe_simulation_circuit, Circuit)
@@ -62,7 +62,7 @@ def g(x, weights):
     inputset = [numpy.ones((3000,), dtype=numpy.int64)]
     fhe_simulation_circuit = sin_matmul_three_thousand_plus_minus_ones_compiler.compile(
         inputset,
-        default_configuration,
+        default_configuration(),
     )
 
     assert isinstance(fhe_simulation_circuit, Circuit)
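
Usage sketch (illustrative only, not part of the patch): after this change the `default_configuration` fixture returns a factory instead of a ready-made `Configuration`, so each test builds its configuration by calling the fixture with the desired parameter-selection mode ("MULTI" by default, "MONO" for models such as KNN). A minimal, hypothetical test consuming the reworked fixture could look like the following; the test name, model choice and data are placeholders picked only for illustration:

    import numpy
    from concrete.ml.sklearn import LogisticRegression

    def test_compile_small_logistic_regression(default_configuration):
        # Build a tiny data-set and a small quantized model
        x = numpy.random.rand(20, 2)
        y = (x[:, 0] > 0.5).astype(numpy.int64)
        model = LogisticRegression(n_bits=2)
        model.fit(x, y)
        # Call the fixture to get a Configuration: "MULTI" is the default mode,
        # pass "MONO" to force the mono-parameter strategy instead
        model.compile(x, default_configuration("MULTI"))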