chore: integrate Concrete Python 2.5.0rc1 #367

Merged · 7 commits · Oct 18, 2023
Changes from 1 commit
chore: put back old VL as default
RomanBredehoft committed Oct 17, 2023
commit 2dec63713598d8f763e975b2591c595da5ea25b4
4 changes: 3 additions & 1 deletion src/concrete/ml/common/utils.py
@@ -36,7 +36,9 @@

 # Indicate if the old virtual library method should be used instead of the compiler simulation
 # when simulating FHE executions
-USE_OLD_VL = False
+# Set 'USE_OLD_VL' to False by default once the new simulation is fixed
+# FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/4091
+USE_OLD_VL = True

 # Debug option for testing round PBS optimization
 # Setting this option to true will make quantizers "round half up"
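
For context, the two entry points this flag toggles between can be compared on a toy circuit. A minimal sketch using Concrete Python's documented compiler API; the function `f` and the name `old_vl_method` are illustrative, not from the repository:

    from functools import partial

    import numpy
    from concrete import fhe

    @fhe.compiler({"x": "encrypted"})
    def f(x):
        return x + 42

    circuit = f.compile([numpy.random.randint(0, 16) for _ in range(10)])

    # Old VL: evaluate the computation graph directly in the clear,
    # injecting the circuit's probability of error by hand
    old_vl_method = partial(circuit.graph, p_error=circuit.p_error)

    # New path: the compiler's official simulation entry point
    new_method = circuit.simulate

    # With no table lookup involved, both should agree exactly
    assert old_vl_method(5) == new_method(5) == 47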
4 changes: 2 additions & 2 deletions src/concrete/ml/quantization/quantized_module.py
@@ -486,13 +486,13 @@ def _fhe_forward(self, *q_x: numpy.ndarray, simulate: bool = True) -> numpy.ndarray:

             # If the old simulation method should be used
             if USE_OLD_VL:
-                predict_method = partial(  # pragma: no cover
+                predict_method = partial(
                     self.fhe_circuit.graph, p_error=self.fhe_circuit.p_error
                 )

             # Else, use the official simulation method
             else:
-                predict_method = self.fhe_circuit.simulate
+                predict_method = self.fhe_circuit.simulate  # pragma: no cover

         # Else, use the FHE execution method
         else:
4 changes: 2 additions & 2 deletions src/concrete/ml/sklearn/base.py
@@ -634,13 +634,13 @@ def predict(self, X: Data, fhe: Union[FheMode, str] = FheMode.DISABLE) -> numpy.ndarray:

             # If the old simulation method should be used
             if USE_OLD_VL:
-                predict_method = partial(  # pragma: no cover
+                predict_method = partial(
                     self.fhe_circuit.graph, p_error=self.fhe_circuit.p_error
                 )

             # Else, use the official simulation method
             else:
-                predict_method = self.fhe_circuit.simulate
+                predict_method = self.fhe_circuit.simulate  # pragma: no cover

         # Else, use the FHE execution method
         else:
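
These branches are only reached when a prediction is requested in simulation mode. A hedged usage sketch of the affected code path, based on Concrete ML's documented scikit-learn API; the data and model choice are illustrative:

    import numpy
    from concrete.ml.sklearn import LogisticRegression

    X = numpy.random.uniform(size=(100, 4))
    y = (X.sum(axis=1) > 2).astype(int)

    model = LogisticRegression()
    model.fit(X, y)
    model.compile(X)

    # fhe="simulate" routes through predict() above, which now picks the
    # old VL (fhe_circuit.graph) instead of fhe_circuit.simulate
    y_sim = model.predict(X, fhe="simulate")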
16 changes: 6 additions & 10 deletions tests/torch/test_compile_torch.py
@@ -409,16 +409,12 @@ def accuracy_test_rounding(

     # Check modules predictions FHE simulation vs Concrete ML.
     check_is_good_execution_for_cml_vs_circuit(x_test, quantized_numpy_module, simulate=simulate)
-
-    # Enable back the rounding tests (simulate and FHE) once Concrete Python fixes the simulation
-    # issue with rounding
-    # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/4048
-    # check_is_good_execution_for_cml_vs_circuit(
-    #     x_test, quantized_numpy_module_round_high_precision, simulate=simulate
-    # )
-    # check_is_good_execution_for_cml_vs_circuit(
-    #     x_test, quantized_numpy_module_round_low_precision, simulate=simulate
-    # )
+    check_is_good_execution_for_cml_vs_circuit(
+        x_test, quantized_numpy_module_round_high_precision, simulate=simulate
+    )
+    check_is_good_execution_for_cml_vs_circuit(
+        x_test, quantized_numpy_module_round_low_precision, simulate=simulate
+    )

     # Check that high precision gives a better match than low precision
     # MSE is preferred over MAE here to spot a lack of diversity in the 2 bits rounded model
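
The MSE-over-MAE comment in the surrounding context can be illustrated with a quick numeric check (values are illustrative, not from the test suite): a model that collapses to a single output value produces a few large errors, which squaring amplifies far more than averaging absolute values does.

    import numpy

    y_true = numpy.array([0.0, 1.0, 2.0, 3.0])
    y_flat = numpy.full(4, 1.5)                    # low-diversity, e.g. a 2-bit rounding collapse
    y_diverse = numpy.array([0.2, 1.2, 1.8, 2.8])  # diverse, small per-sample errors

    for name, y_pred in [("flat", y_flat), ("diverse", y_diverse)]:
        mae = numpy.abs(y_true - y_pred).mean()
        mse = ((y_true - y_pred) ** 2).mean()
        print(f"{name}: MAE={mae:.2f} MSE={mse:.2f}")

    # flat:    MAE=1.00 MSE=1.25  -> the collapse is penalized quadratically
    # diverse: MAE=0.20 MSE=0.04  -> MSE separates the two models much more sharply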
@@ -1,5 +1,6 @@
 import os
 import time
+from functools import partial
 from pathlib import Path

 import torch
@@ -133,16 +134,17 @@ def wrapper(*args, **kwargs):
 print(f"Quantization of a single input (image) took {quantization_execution_time} seconds")
 print(f"Size of CLEAR input is {q_x_numpy.nbytes} bytes\n")

+# Use new VL with .simulate() once CP's multi-parameter/precision bug is fixed
+# FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3856
+p_error = quantized_numpy_module.fhe_circuit.p_error
 expected_quantized_prediction, clear_inference_time = measure_execution_time(
-    quantized_numpy_module.fhe_circuit.simulate
+    partial(quantized_numpy_module.fhe_circuit.graph, p_error=p_error)
 )(q_x_numpy)

 # Encrypt the input
 encrypted_q_x_numpy, encryption_execution_time = measure_execution_time(
     quantized_numpy_module.fhe_circuit.encrypt
 )(q_x_numpy)

 print(f"Encryption of a single input (image) took {encryption_execution_time} seconds\n")

 print(f"Size of ENCRYPTED input is {quantized_numpy_module.fhe_circuit.size_of_inputs} bytes")
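
`measure_execution_time` is used here but defined outside the hunk; the call pattern `result, elapsed = measure_execution_time(fn)(args)` suggests a timing wrapper along these lines (an assumed reconstruction, not the repository's actual code):

    import time

    def measure_execution_time(func):
        """Wrap func so each call returns (result, elapsed_seconds)."""
        def wrapper(*args, **kwargs):
            start = time.time()
            result = func(*args, **kwargs)
            return result, time.time() - start
        return wrapper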
4 changes: 3 additions & 1 deletion use_case_examples/llm/QuantFrameworkExample.ipynb
@@ -363,7 +363,9 @@
     "clear_evaluation = f(input_0)\n",
     "\n",
     "# Compute the result in the clear using FHE simulation\n",
-    "simulated_evaluation = circuit.simulate(input_0)\n",
+    "# Use new VL with .simulate() once CP's multi-parameter/precision bug is fixed\n",
+    "# FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3856\n",
+    "simulated_evaluation = circuit.graph(input_0, p_error=circuit.p_error)\n",
     "\n",
     "# Compute the result in FHE\n",
     "fhe_evaluation = circuit.encrypt_run_decrypt(input_0)\n",
4 changes: 3 additions & 1 deletion use_case_examples/llm/qgpt2_class.py
@@ -129,7 +129,9 @@ def run_torch(self, inputs: torch.Tensor, fhe: str = "disable", true_float: bool
         q_x = np.expand_dims(q_x, axis=0)

         if fhe == "simulate":
-            q_y = self.circuit.simulate(q_x)
+            # Use new VL with .simulate() once CP's multi-parameter/precision bug is fixed
+            # FIXME: https://github.com/zama-ai/concrete-ml-internal/issues/3856
+            q_y = self.circuit.graph(q_x, p_error=self.circuit.p_error)

         elif fhe == "execute":
             q_y = self.circuit.encrypt_run_decrypt(q_x)