
chore: activate input compression for encrypted data-frames
RomanBredehoft committed Jun 13, 2024
1 parent 847d31c commit 7c8f05e
Showing 5 changed files with 10 additions and 18 deletions.
4 changes: 2 additions & 2 deletions src/concrete/ml/pandas/_client_server_files/client.zip
Git LFS file not shown
4 changes: 2 additions & 2 deletions src/concrete/ml/pandas/_client_server_files/server.zip
Git LFS file not shown
8 changes: 4 additions & 4 deletions src/concrete/ml/pandas/_development.py
@@ -164,12 +164,12 @@ def save_client_server(client_path: Path = CLIENT_PATH, server_path: Path = SERV
     # Get the input-set and circuit generating functions
     inputset = config["get_inputset"]()
     cp_func = config["to_compile"]
-    compilation_configuration = Configuration(compress_evaluation_keys=True)
+    configuration = Configuration(
+        composable=True, compress_evaluation_keys=True, compress_input_ciphertexts=True
+    )
 
     # Compile the circuit and allow it to be composable with itself
-    merge_circuit = cp_func.compile(
-        inputset, composable=True, configuration=compilation_configuration
-    )
+    merge_circuit = cp_func.compile(inputset, configuration=configuration)
 
     # Save the client and server files using the MLIR
     merge_circuit.client.save(client_path)
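For reference, here is a minimal standalone sketch (not the repository's code) of how these Configuration flags are used with Concrete Python; the toy increment function, its input-set and the output paths are illustrative assumptions, only the flags mirror the hunk above.

    from concrete import fhe

    # Same options as in save_client_server: the circuit can be composed with
    # itself, and both evaluation keys and input ciphertexts are compressed
    configuration = fhe.Configuration(
        composable=True,
        compress_evaluation_keys=True,
        compress_input_ciphertexts=True,
    )

    @fhe.compiler({"x": "encrypted"})
    def increment(x):
        # The modulo keeps the output on the same 4-bit range as the input,
        # which is needed for the circuit to be composable with itself
        return (x + 1) % 16

    circuit = increment.compile(range(16), configuration=configuration)

    # The client and server artifacts can then be serialized, as done for the
    # encrypted data-frame circuit in this file (paths are illustrative)
    circuit.client.save("client.zip")
    circuit.server.save("server.zip")

With compress_input_ciphertexts enabled, the encrypted inputs produced on the client side are compressed before being sent, which reduces the size of the serialized encrypted data-frames.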
2 changes: 1 addition & 1 deletion src/concrete/ml/sklearn/linear_model.py
@@ -347,7 +347,7 @@ def _get_training_quantized_module(
     # Enable the underlying FHE circuit to be composed with itself
     # This feature is used in order to be able to iterate in the clear n times without having
     # to encrypt/decrypt the weight/bias values between each loop
-    configuration = Configuration(composable=True, compress_evaluation_keys=True)
+    configuration = Configuration(composable=True)
 
     composition_mapping = {0: 2, 1: 3}
 
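The composability the comments refer to can be illustrated with a simplified single-value circuit (a sketch, not the training circuit itself, which wires several outputs back to inputs through composition_mapping): the encrypted state is fed back into the circuit repeatedly without any decryption in between.

    from concrete import fhe

    configuration = fhe.Configuration(composable=True)

    @fhe.compiler({"state": "encrypted"})
    def step(state):
        # Stand-in for one training iteration over the encrypted state
        return (state + 1) % 16

    circuit = step.compile(range(16), configuration=configuration)
    circuit.keygen()

    # Iterate in the clear n times without decrypting the state between loops
    encrypted_state = circuit.encrypt(0)
    for _ in range(5):
        encrypted_state = circuit.run(encrypted_state)

    print(circuit.decrypt(encrypted_state))  # 5, up to the circuit's error probability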
10 changes: 1 addition & 9 deletions tests/torch/test_hybrid_converter.py
@@ -37,17 +37,9 @@ def run_hybrid_llm_test(
 ):
     """Run the test for any model with its private module names."""
 
-    # Multi-parameter strategy is used in order to speed-up the FHE executions
-    configuration = Configuration(
-        single_precision=False,
-        compress_input_ciphertexts=True,
-    )
-
     # Create a hybrid model
     hybrid_model = HybridFHEModel(model, module_names)
-    hybrid_model.compile_model(
-        inputs, p_error=0.01, n_bits=8, rounding_threshold_bits=8, configuration=configuration
-    )
+    hybrid_model.compile_model(inputs, p_error=0.01, n_bits=8, rounding_threshold_bits=8)
 
     # Check we can run the simulate locally
     logits_simulate = hybrid_model(inputs, fhe="simulate").logits
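For completeness, a hypothetical usage sketch of the calls exercised by this test, on a small torch model rather than an LLM; the import path and the "fc1" module name are assumptions, the compile_model arguments match the diff above.

    import torch
    from concrete.ml.torch.hybrid_model import HybridFHEModel  # assumed import path

    class TinyModel(torch.nn.Module):
        def __init__(self):
            super().__init__()
            self.fc1 = torch.nn.Linear(10, 32)
            self.fc2 = torch.nn.Linear(32, 2)

        def forward(self, x):
            return self.fc2(torch.relu(self.fc1(x)))

    model = TinyModel()
    inputs = torch.randn(4, 10)

    # Only the "fc1" sub-module is executed in FHE; the rest stays in clear torch
    hybrid_model = HybridFHEModel(model, ["fc1"])
    hybrid_model.compile_model(inputs, p_error=0.01, n_bits=8, rounding_threshold_bits=8)

    # FHE simulation of the wrapped sub-module, as checked by the test
    output = hybrid_model(inputs, fhe="simulate")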
