chore: improve notebook for training, less verbose
andrei-stoian-zama committed Jan 12, 2024
1 parent f0871d4 commit 10419c2
Showing 2 changed files with 164 additions and 230 deletions.
389 changes: 162 additions & 227 deletions docs/advanced_examples/LogisticRegressionTraining.ipynb

Large diffs are not rendered by default.

5 changes: 2 additions & 3 deletions src/concrete/ml/sklearn/linear_model.py
@@ -508,7 +508,8 @@ def _fit_encrypted(
         X_indexes = numpy.arange(0, len(X))
 
         if self.verbose:
-            print("Training starts")
+            mode_string = " (simulation)" if fhe == "simulate" else ""
+            print(f"Training on encrypted data{mode_string}...")
 
         # Iterate on the training quantized module in the clear
         for iteration_step in range(self.max_iter):
@@ -533,8 +534,6 @@
 
             to = time.time()
             # Train the model over one iteration
-            if self.verbose:
-                print("Starting iteration ...")
             weights, bias = self.training_quantized_module.forward(  # type: ignore[assignment]
                 X_batch, y_batch, weights, bias, fhe=fhe
            )
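The change drops the per-iteration "Starting iteration ..." message and replaces the start-of-training message with one that indicates whether training runs in FHE simulation. Below is a minimal, hypothetical Python sketch of how the new verbose output would be triggered. Only verbose, max_iter, and fhe="simulate" appear in the diff above; the SGDClassifier class name, the fit_encrypted and parameters_range constructor arguments, and the dataset setup are assumptions about Concrete ML's sklearn-style API, not part of this commit.

# Hypothetical usage sketch; class name and constructor arguments are assumptions.
import numpy
from sklearn.datasets import make_classification

# Assumed import path; encrypted training is implemented in
# src/concrete/ml/sklearn/linear_model.py (the file changed in this commit).
from concrete.ml.sklearn import SGDClassifier

X, y = make_classification(n_samples=100, n_features=8, random_state=42)
X = X.astype(numpy.float32)

# fit_encrypted=True is assumed to route fit() through _fit_encrypted();
# verbose=True enables the message changed in this commit.
model = SGDClassifier(
    max_iter=10,
    fit_encrypted=True,
    parameters_range=(-1.0, 1.0),
    verbose=True,
)

# With fhe="simulate", the new message would read
# "Training on encrypted data (simulation)..." and is printed once,
# instead of an additional "Starting iteration ..." line per iteration.
model.fit(X, y, fhe="simulate")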
