From 4922992b8c29c1019076844206dbe5b5aa946eca Mon Sep 17 00:00:00 2001
From: jfrery
Date: Tue, 17 Dec 2024 08:52:48 +0100
Subject: [PATCH] chore: Concrete-ML -> Concrete ML

---
 .github/workflows/ci_timing.yaml | 2 +-
 .github/workflows/release.yaml | 4 +--
 .../DecisionTreeRegressor.ipynb | 18 +++++-----
 docs/advanced_examples/LinearSVR.ipynb | 4 +--
 .../RegressorComparison.ipynb | 8 ++---
 docs/advanced_examples/SVMClassifier.ipynb | 36 +++++++++----------
 docs/advanced_examples/aggregated_code.txt | 14 ++++----
 docs/conventions.md | 2 +-
 ...oncrete.ml.common.serialization.encoder.md | 2 +-
 .../ml/common/serialization/encoder.py | 2 +-
 src/concrete/ml/sklearn/glm.py | 8 ++---
 src/concrete/ml/sklearn/neighbors.py | 2 +-
 src/concrete/ml/sklearn/qnn.py | 8 ++---
 src/concrete/ml/sklearn/rf.py | 8 ++---
 src/concrete/ml/sklearn/svm.py | 8 ++---
 src/concrete/ml/sklearn/tree.py | 8 ++---
 src/concrete/ml/sklearn/xgb.py | 8 ++---
 .../lora_finetuning/GPT2FineTuneHybrid.ipynb | 4 +--
 18 files changed, 73 insertions(+), 73 deletions(-)

diff --git a/.github/workflows/ci_timing.yaml b/.github/workflows/ci_timing.yaml
index 0e20cbf70..a48a6dad7 100644
--- a/.github/workflows/ci_timing.yaml
+++ b/.github/workflows/ci_timing.yaml
@@ -1,4 +1,4 @@
-# This workflow uses GitHub CLI to get timings of last 50 runs of Concrete-ML main CI
-# and send it to slack and add it as an artifact on the workflow
+# This workflow uses GitHub CLI to get timings of the last 50 runs of the Concrete ML main CI
+# and send it to Slack and add it as an artifact on the workflow
 name: CML build time
 on:
diff --git a/.github/workflows/release.yaml b/.github/workflows/release.yaml
index 90c3b7c07..c96c543ea 100644
--- a/.github/workflows/release.yaml
+++ b/.github/workflows/release.yaml
@@ -288,7 +288,7 @@ jobs:
 tags: true

 # This action creates docker and pypi images directly on the AWS EC2 instance
- # The 'PRIVATE_RELEASE_IMAGE_BASE' variable is kept here in case Concrete-ML starts to publish
+ # The 'PRIVATE_RELEASE_IMAGE_BASE' variable is kept here in case Concrete ML starts to publish
 # private nightly releases one day. Currently, release candidates and actual releases are all
 # done through the 'PUBLIC_RELEASE_IMAGE_BASE' image.
The private image is also used to list all
 # tags easily
@@ -471,7 +471,7 @@ jobs:
 echo "" >> "${SECRETS_FILE}"
 echo "SECRETS_FILE=${SECRETS_FILE}" >> "$GITHUB_ENV"

- - name: Build Docker Concrete-ML Image
+ - name: Build Docker Concrete ML Image
 if: ${{ success() && !cancelled() }}
 uses: docker/build-push-action@48aba3b46d1b1fec4febb7c5d0c644b249a11355
 with:
diff --git a/docs/advanced_examples/DecisionTreeRegressor.ipynb b/docs/advanced_examples/DecisionTreeRegressor.ipynb
index 82aa6dd3c..29b1371e1 100644
--- a/docs/advanced_examples/DecisionTreeRegressor.ipynb
+++ b/docs/advanced_examples/DecisionTreeRegressor.ipynb
@@ -6,9 +6,9 @@
 "id": "5755bc04",
 "metadata": {},
 "source": [
- "# Decision Tree Regression Using Concrete-ML\n",
+ "# Decision Tree Regression Using Concrete ML\n",
 "\n",
- "In this tutorial, we show how to create, train and evaluate a decision tree regression model using Concrete-ML library.\n",
+ "In this tutorial, we show how to create, train, and evaluate a decision tree regression model using the Concrete ML library.\n",
 "\n"
 ]
 },
 {
@@ -18,16 +18,16 @@
 "id": "2c256087-c16a-4249-9c90-3f4863938385",
 "metadata": {},
 "source": [
- "### Introducing Concrete-ML\n",
+ "### Introducing Concrete ML\n",
 "\n",
- "> Concrete-ML is an open-source, privacy-preserving, machine learning inference framework based on fully homomorphic encryption (FHE).\n",
- "> It enables data scientists without any prior knowledge of cryptography to automatically turn machine learning models into their FHE equivalent,using familiar APIs from Scikit-learn and PyTorch.\n",
+ "> Concrete ML is an open-source, privacy-preserving, machine learning inference framework based on fully homomorphic encryption (FHE).\n",
+ "> It enables data scientists without any prior knowledge of cryptography to automatically turn machine learning models into their FHE equivalent, using familiar APIs from Scikit-learn and PyTorch.\n",
 "> — [Zama documentation](../README.md)\n",
 "\n",
- "This tutorial does not require a deep understanding of the technology behind concrete-ML.\n",
+ "This tutorial does not require a deep understanding of the technology behind Concrete ML.\n",
 "Nonetheless, newcomers might be interested in reading introductory sections of the official documentation such as:\n",
 "\n",
- "- [What is Concrete-ML](../README.md)\n",
+ "- [What is Concrete ML](../README.md)\n",
 "- [Key Concepts](../getting-started/concepts.md)\n",
 "\n",
 "In the tutorial, we will be using the following terminology:\n",
@@ -233,10 +233,10 @@
 "source": [
 "## Training A Decision Tree\n",
 "\n",
- "ConcreteDecisionTreeRegressor is the Concrete-ML equivalent of scikit-learn's DecisionTreeRegressor.\n",
- "It supports the same parameters and a similar interface, with the extra capability of predicting directly on ciphertext without the need to decipher it, thus preservacy privacy.\n",
+ "ConcreteDecisionTreeRegressor is the Concrete ML equivalent of scikit-learn's DecisionTreeRegressor.\n",
+ "It supports the same parameters and a similar interface, with the extra capability of predicting directly on ciphertext without the need to decipher it, thus preserving privacy.\n",
 "\n",
- "Currently, Concrete-ML models must be trained on plaintext. To see how it works, we train a DecisionTreeRegressor with default parameters and estimate its accuracy on test data. Note here that predictions are done on plaintext too, but soon, we will predict on ciphertext."
+ "Currently, Concrete ML models must be trained on plaintext. To see how it works, we train a DecisionTreeRegressor with default parameters and estimate its accuracy on test data. Note here that predictions are done on plaintext too, but soon, we will predict on ciphertext."
 ]
 },
 {
@@ -479,7 +479,7 @@
 "source": [
 "## Predicting on Ciphertext\n",
- "If the predictions are similar although slightly less accurate, the real advantage of ConcreteML is privacy.\n",
- "We now show how we can perform prediction on ciphertext with Concrete-ML, so that the model does not need to decipher the data at all to compute its estimate."
+ "The predictions are similar, although slightly less accurate; the real advantage of Concrete ML is privacy.\n",
+ "We now show how we can perform prediction on ciphertext with Concrete ML, so that the model does not need to decipher the data at all to compute its estimate."
 ]
 },
 {
@@ -798,7 +798,7 @@
 "Once the model is carefully trained and quantized, it is ready to be deployed and used in production. Here are some useful links on the subject:\n",
 " \n",
- " - [Inference in the Cloud](../getting-started/cloud.md) summarize the steps for cloud deployment\n",
- " - [Production Deployment](../guides/client_server.md) offers a high-level view of how to deploy a Concrete-ML model in a client/server setting.\n",
+ " - [Inference in the Cloud](../getting-started/cloud.md) summarizes the steps for cloud deployment\n",
+ " - [Production Deployment](../guides/client_server.md) offers a high-level view of how to deploy a Concrete ML model in a client/server setting.\n",
 " - [Client Server in Concrete ML](./ClientServer.ipynb) provides a more hands-on approach as another tutorial."
 ]
 }
diff --git a/docs/advanced_examples/LinearSVR.ipynb b/docs/advanced_examples/LinearSVR.ipynb
index 7be591052..00b91e8f3 100644
--- a/docs/advanced_examples/LinearSVR.ipynb
+++ b/docs/advanced_examples/LinearSVR.ipynb
@@ -88,7 +88,7 @@
 "\n",
 "\n",
 "def get_concrete_plot_config(mse_score=None):\n",
- " label = \"Concrete-ML\"\n",
+ " label = \"Concrete ML\"\n",
 " if mse_score is not None:\n",
 " label += f\", {'$MSE$'}={mse_score:.4f}\"\n",
 " return {\"c\": \"orange\", \"linewidth\": 2.5, \"label\": label}"
@@ -646,7 +646,7 @@
 "y_pred_sklearn = sklearn_rgs.predict(X_test)\n",
 "print(f\"Execution time: {(time.time() - time_begin) / len(X_test):.4f} seconds per sample\")\n",
 "\n",
- "# Now predict using clear quantized Concrete-ML model on testing set\n",
+ "# Now predict using the clear quantized Concrete ML model on the testing set\n",
 "time_begin = time.time()\n",
 "y_preds_quantized = concrete_rgs.predict(X_test)\n",
 "print(f\"Execution time: {(time.time() - time_begin) / len(X_test):.4f} seconds per sample\")"
diff --git a/docs/advanced_examples/RegressorComparison.ipynb b/docs/advanced_examples/RegressorComparison.ipynb
index b003b0d87..0d5cef665 100644
--- a/docs/advanced_examples/RegressorComparison.ipynb
+++ b/docs/advanced_examples/RegressorComparison.ipynb
@@ -210,7 +210,7 @@
 " # Instantiate the model\n",
 " model = regressor()\n",
 "\n",
- " # Train the model and retrieve both the Concrete-ML model and its equivalent one from\n",
+ " # Train the model and retrieve both the Concrete ML model and its equivalent one from\n",
 " # scikit-learn\n",
 " # If the model is a NeuralNetClassifier, instantiate a scikit-learn MLPClassifier\n",
- " # separately in order to be able to be able to compare the results with a float model\n",
+ " # separately in order to be able to compare the results with a float model\n",
@@ -249,7 +249,7 @@
 " time_end = time.time()\n",
 " print(f\"Key generation time: {time_end - time_begin:.2f} seconds\")\n",
 "\n",
- " # Compute the predictions in FHE using the Concrete-ML model\n",
+ " # Compute the predictions in FHE using the Concrete ML model\n",
 " time_begin = time.time()\n",
 " concrete_y_pred = concrete_model.predict(X_poly_test[:1], fhe=\"execute\")\n",
 " time_end = time.time()\n",
@@ -276,7 +276,7 @@
 " bitwidth = circuit.graph.maximum_integer_bit_width()\n",
 "\n",
 " # Plot the predictions\n",
- " ax.plot(X_test, concrete_y_pred, c=\"blue\", linewidth=2.5, label=\"Concrete-ML\")\n",
+ " ax.plot(X_test, concrete_y_pred, c=\"blue\", linewidth=2.5, label=\"Concrete ML\")\n",
 "\n",
 " # Plot the predictions\n",
 " ax.plot(X_test, sklearn_y_pred, c=\"red\", linewidth=2.5, label=\"scikit-learn\")\n",
@@ -284,7 +284,7 @@
 " ax.text(\n",
 " 0.5,\n",
 " 0.80,\n",
- " f\"Concrete-ML R2: {concrete_score:.2f}\\n scikit-learn R2: {sklearn_score:.2f}\\n\",\n",
+ " f\"Concrete ML R2: {concrete_score:.2f}\\n scikit-learn R2: {sklearn_score:.2f}\\n\",\n",
 " transform=ax.transAxes,\n",
 " fontsize=12,\n",
 " va=\"top\",\n",
diff --git a/docs/advanced_examples/SVMClassifier.ipynb b/docs/advanced_examples/SVMClassifier.ipynb
index b68037489..c6bbfc02b 100644
--- a/docs/advanced_examples/SVMClassifier.ipynb
+++ b/docs/advanced_examples/SVMClassifier.ipynb
@@ -6,12 +6,12 @@
 "id": "d07c3896",
 "metadata": {},
 "source": [
- "# Support Vector Machine (SVM) classification using Concrete-ML\n",
+ "# Support Vector Machine (SVM) classification using Concrete ML\n",
 "\n",
- " In this tutorial, we show how to create, train, and evaluate a Support Vector Machine (SVM) model using Concrete-ML library for a classification task.\n",
+ " In this tutorial, we show how to create, train, and evaluate a Support Vector Machine (SVM) model using the Concrete ML library for a classification task.\n",
 "\n",
- "It is cut in 2 parts:\n",
- "1. a quick setup of a LinearSVC model with Concrete-ML\n",
- "2. a more in-depth approach taking a closer look to the concrete-ml specifics\n"
+ "It is split into two parts:\n",
+ "1. a quick setup of a LinearSVC model with Concrete ML\n",
+ "2. a more in-depth approach taking a closer look at the Concrete ML specifics\n"
 ]
 },
 {
@@ -30,15 +30,15 @@
 "id": "d3654d52",
 "metadata": {},
 "source": [
- "### Concrete-ML and useful links\n",
+ "### Concrete ML and useful links\n",
 "\n",
- "> Concrete-ML is an open-source, privacy-preserving, machine learning inference framework based on fully homomorphic encryption (FHE). It enables data scientists without any prior knowledge of cryptography to automatically turn machine learning models into their FHE equivalent, using familiar APIs from Scikit-learn and PyTorch.\n",
+ "> Concrete ML is an open-source, privacy-preserving, machine learning inference framework based on fully homomorphic encryption (FHE). It enables data scientists without any prior knowledge of cryptography to automatically turn machine learning models into their FHE equivalent, using familiar APIs from Scikit-learn and PyTorch.\n",
 "> \n",
 "> — [Zama documentation](../README.md)\n",
 "\n",
- "This tutorial does not require any knowledge of Concrete-ML. Newcomers might nonetheless be interested in reading some of the introductory sections of the official documentation, such as:\n",
+ "This tutorial does not require any knowledge of Concrete ML. Newcomers might nonetheless be interested in reading some of the introductory sections of the official documentation, such as:\n",
 "\n",
- "- [What is Concrete-ML](../README.md)\n",
+ "- [What is Concrete ML](../README.md)\n",
 "- [Key Concepts](../getting-started/concepts.md)\n",
 "\n",
 "### Support Vector Machine\n",
@@ -46,7 +46,7 @@
 "SVM is a machine learning algorithm for classification and regression. LinearSVC is an efficient implementation of SVM\n",
 "that works best when the data is linearly separable.
In this tutorial, we use the [pulsar star dataset](https://www.kaggle.com/datasets/colearninglounge/predicting-pulsar-starintermediate) to determine whether a neutron star can be classified as a pulsar star.\n",
 "\n",
- "Concrete-ML exposes a LinearSVC class which implements the\n",
+ "Concrete ML exposes a LinearSVC class which implements the\n",
 "[scikit-learn LinearSVC](https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html) interface, so you should feel right at home.\n",
 "\n",
 "### Setup code\n",
@@ -342,9 +342,9 @@
 "id": "12e827d0",
 "metadata": {},
 "source": [
- "## Part 1: Train a simple model with Concrete-ML\n",
+ "## Part 1: Train a simple model with Concrete ML\n",
 "\n",
- "The following code quickly scaffolds a Concrete-ML LinearSVC code, which should sound familiar.\n"
+ "The following code quickly scaffolds a Concrete ML LinearSVC model, which should look familiar.\n"
 ]
 },
 {
@@ -403,7 +403,7 @@
 }
 ],
 "source": [
- "# Perform the same steps with the Concrete-ML LinearSVC implementation\n",
+ "# Perform the same steps with the Concrete ML LinearSVC implementation\n",
 "svm_concrete = ConcreteLinearSVC(max_iter=100, n_bits=8)\n",
 "svm_concrete.fit(X_train, y_train)\n",
 "# plot the boundary\n",
@@ -468,15 +468,15 @@
 "\n",
 "#### Simplicity of execution\n",
 "\n",
- "For a high-level use-case, Concrete-ML offers a very similar interface to scikit-learn. The main difference is *a model needs to be compiled to allow execution in FHE*.\n",
+ "For a high-level use-case, Concrete ML offers a very similar interface to scikit-learn. The main difference is that *a model needs to be compiled to allow execution in FHE*.\n",
 "\n",
 "#### Model Accuracy\n",
 "\n",
- "Concrete-ML prediction accuracy can be slightly worse than a regular scikit-learn implementation. This is because of [quantization](../explanations/quantization.md): number precision needs to be fixed-size for the model to be evaluated in FHE. This can be alleviated down to where the accuracy difference is none or negligible (which is the case here with a 8 bit size).\n",
+ "Concrete ML prediction accuracy can be slightly worse than that of a regular scikit-learn implementation. This is because of [quantization](../explanations/quantization.md): number precision needs to be fixed-size for the model to be evaluated in FHE. This can be mitigated to the point where the accuracy difference is zero or negligible (which is the case here with an 8-bit size).\n",
 "\n",
 "#### Execution time\n",
 "\n",
- "The execution speed can be slower in Concrete-ML, especially during compilation and FHE inference phases, because enabling FHE operations uses more resources than regular inference on plain data. However, the speed can be improved by decreasing the precision of the data and model's weights thanks to the n_bits parameter. But, depending on the project, there is a trade-off between a slower but more accurate model and a faster but less accurate model."
+ "The execution speed can be slower in Concrete ML, especially during the compilation and FHE inference phases, because enabling FHE operations uses more resources than regular inference on plain data. However, the speed can be improved by decreasing the precision of the data and the model's weights via the n_bits parameter. Depending on the project, there is thus a trade-off between a slower but more accurate model and a faster but less accurate model."
 ]
 },
 {
@@ -536,7 +536,7 @@
 "\n",
 "### Step b: quantize the model\n",
 "\n",
- "So far most of Concrete-ML specificities have conveniently been avoided for the sake of simplicity. The first Concrete-ML specific step of developping a model is to quantize it, which soberly means to turn the model into an integer equivalent.\n",
+ "So far, most of Concrete ML's specificities have conveniently been avoided for the sake of simplicity. The first Concrete ML-specific step in developing a model is to quantize it, which simply means turning the model into an integer equivalent.\n",
 "\n",
 "Although it is strongly encouraged to read the [Zama introduction to quantization](../explanations/quantization.md), the key takeaway is **a model needs to be reduced to a *discrete*, smaller set in order for the encryption to happen**. Otherwise the data becomes too large to be manipulated in FHE. \n",
 "\n",
@@ -764,7 +764,7 @@
 "- the model itself\n",
 "- the hardware executing the model\n",
 "\n",
- "Setting up a model in Concrete-ML requires some additional work compared to standard models. For instance, users must select the quantization bit-width for both the model's weight and input data, which can be complex and time-consuming while using real FHE inference. However, Concrete-ML provides an FHE simulation mode that allows users to identify optimal hyper-parameters with the best trade-off between latency and performance.\n",
+ "Setting up a model in Concrete ML requires some additional work compared to standard models. For instance, users must select the quantization bit-width for both the model's weights and input data, which can be complex and time-consuming when using real FHE inference. However, Concrete ML provides an FHE simulation mode that allows users to identify optimal hyper-parameters with the best trade-off between latency and performance.\n",
 "\n",
 "> Testing FHE models on very large data-sets can take a long time. Furthermore, not all models are compatible with FHE constraints out-of-the-box. Simulation using the FHE simulation allows you to execute a model that was quantized, to measure the accuracy it would have in FHE, but also to determine the modifications required to make it FHE compatible.\n",
 ">\n",
@@ -849,13 +849,13 @@
 "source": [
 "## Conclusion\n",
 "\n",
- "Setting up FHE with Concrete-ML on a LinearSVC model is very simple, in the regard that Concrete-ML provides an implementation of the [scikit-learn LinearSVC interface](https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html). As a matter of fact, a working FHE model can be setup with just a few lines of code.\n",
+ "Setting up FHE with Concrete ML on a LinearSVC model is very simple, in that Concrete ML provides an implementation of the [scikit-learn LinearSVC interface](https://scikit-learn.org/stable/modules/generated/sklearn.svm.LinearSVC.html). In fact, a working FHE model can be set up with just a few lines of code.\n",
 "\n",
- "Setting up a model with FHE benefits nonetheless from some additional work. For LinearSVC models, the main point is to select a relevant bit-size for [quantizing](../explanations/quantization.md) the model. Some additional tools can smooth up the development workflow, such as alleviating the [compilation](../explanations/compilation.md) time by making use of the [FHE simulation](../explanations/compilation.md#fhe-simulation) \n",
+ "Nonetheless, setting up a model with FHE benefits from some additional work. For LinearSVC models, the main point is to select a relevant bit-size for [quantizing](../explanations/quantization.md) the model. Some additional tools can smooth out the development workflow, such as reducing the [compilation](../explanations/compilation.md) time by making use of the [FHE simulation](../explanations/compilation.md#fhe-simulation).\n",
 "\n",
 "Once the model is carefully trained and quantized, it is ready to be deployed and used in production.
Here are some useful links that cover this subject:\n",
- "- [Inference in the Cloud](../getting-started/cloud.md) summarize the steps for cloud deployment\n",
- "- [Production Deployment](../guides/client_server.md) offers a high-level view of how to deploy a Concrete-ML model in a client/server setting.\n",
+ "- [Inference in the Cloud](../getting-started/cloud.md) summarizes the steps for cloud deployment\n",
+ "- [Production Deployment](../guides/client_server.md) offers a high-level view of how to deploy a Concrete ML model in a client/server setting.\n",
 "- [Client Server in Concrete ML](ClientServer.ipynb) provides a more hands-on approach as another tutorial."
 ]
 }
diff --git a/docs/advanced_examples/aggregated_code.txt b/docs/advanced_examples/aggregated_code.txt
index b142c7788..c3501acad 100644
--- a/docs/advanced_examples/aggregated_code.txt
+++ b/docs/advanced_examples/aggregated_code.txt
@@ -1428,7 +1428,7 @@
 svm_sklearn.fit(X_train, y_train)
 # plot the boundary
 plot_decision_boundary(svm_sklearn, X_test, y_test)

-# Perform the same steps with the Concrete-ML LinearSVC implementation
+# Perform the same steps with the Concrete ML LinearSVC implementation
 svm_concrete = ConcreteLinearSVC(max_iter=100, n_bits=8)
 svm_concrete.fit(X_train, y_train)
 # plot the boundary
@@ -1544,7 +1544,7 @@ def get_sklearn_plot_config(mse_score=None):

 def get_concrete_plot_config(mse_score=None):
-    label = "Concrete-ML"
+    label = "Concrete ML"
     if mse_score is not None:
         label += f", {'$MSE$'}={mse_score:.4f}"
     return {"c": "orange", "linewidth": 2.5, "label": label}
@@ -1671,7 +1671,7 @@
 time_begin = time.time()
 y_pred_sklearn = sklearn_rgs.predict(X_test)
 print(f"Execution time: {(time.time() - time_begin) / len(X_test):.4f} seconds per sample")

-# Now predict using clear quantized Concrete-ML model on testing set
+# Now predict using the clear quantized Concrete ML model on the testing set
 time_begin = time.time()
 y_preds_quantized = concrete_rgs.predict(X_test)
 print(f"Execution time: {(time.time() - time_begin) / len(X_test):.4f} seconds per sample")
@@ -4567,7 +4567,7 @@ def make_regressor_comparison(title, regressors, **kwargs):
         # Instantiate the model
         model = regressor()

-        # Train the model and retrieve both the Concrete-ML model and its equivalent one from
+        # Train the model and retrieve both the Concrete ML model and its equivalent one from
         # scikit-learn
         # If the model is a NeuralNetClassifier, instantiate a scikit-learn MLPClassifier
-        # separately in order to be able to be able to compare the results with a float model
+        # separately in order to be able to compare the results with a float model
@@ -4606,7 +4606,7 @@ def make_regressor_comparison(title, regressors, **kwargs):
         time_end = time.time()
         print(f"Key generation time: {time_end - time_begin:.2f} seconds")

-        # Compute the predictions in FHE using the Concrete-ML model
+        # Compute the predictions in FHE using the Concrete ML model
         time_begin = time.time()
         concrete_y_pred = concrete_model.predict(X_poly_test[:1], fhe="execute")
         time_end = time.time()
@@ -4633,7 +4633,7 @@ def make_regressor_comparison(title, regressors, **kwargs):
         bitwidth = circuit.graph.maximum_integer_bit_width()

         # Plot the predictions
-        ax.plot(X_test, concrete_y_pred, c="blue", linewidth=2.5, label="Concrete-ML")
+        ax.plot(X_test, concrete_y_pred, c="blue", linewidth=2.5, label="Concrete ML")

         # Plot the predictions
         ax.plot(X_test, sklearn_y_pred, c="red", linewidth=2.5, label="scikit-learn")
@@ -4641,7 +4641,7 @@ def make_regressor_comparison(title, regressors, **kwargs):
         ax.text(
             0.5,
             0.80,
-            f"Concrete-ML R2: {concrete_score:.2f}\n scikit-learn R2: {sklearn_score:.2f}\n",
+            f"Concrete ML R2: {concrete_score:.2f}\n scikit-learn R2: {sklearn_score:.2f}\n",
             transform=ax.transAxes,
             fontsize=12,
             va="top",
diff --git a/docs/conventions.md b/docs/conventions.md
index b230b8813..72b8fc801 100644
--- a/docs/conventions.md
+++ b/docs/conventions.md
@@ -23,7 +23,7 @@ Let's use following conventions for the docs. If a new convention needs to be de
-1. google is a verb ("you can google" but not "you can Google") : but try to avoid this
+1. google is a verb ("you can google" but not "you can Google"): but try to avoid this
 1. Programs:
    - Jupyter
-   - Concrete ML (no Concrete-ML)
+   - Concrete ML (not "Concrete-ML")
    - pytest except when title where it is capitalized
    - Python
    - torch (for the code) and PyTorch (for the product)
diff --git a/docs/references/api/concrete.ml.common.serialization.encoder.md b/docs/references/api/concrete.ml.common.serialization.encoder.md
index 031e6f15d..763fd11fe 100644
--- a/docs/references/api/concrete.ml.common.serialization.encoder.md
+++ b/docs/references/api/concrete.ml.common.serialization.encoder.md
@@ -45,7 +45,7 @@ Non-native types are serialized manually and dumped in a custom dict format that
 The name should be unique for each type, as it is used in the ConcreteDecoder class to detect the initial type and apply the proper load method to the serialized object. The serialized value is the value that was serialized manually in a native type. Additional arguments such as a numpy array's dtype are also properly serialized. If an object has an unexpected type or is not serializable, an error is thrown.

-The ConcreteEncoder is only meant to encode Concrete-ML's built-in models and therefore only supports the necessary types. For example, torch.Tensor objects are not serializable using this encoder as built-in models only use numpy arrays. However, the list of supported types might expand in future releases if new models are added and need new types.
+The ConcreteEncoder is only meant to encode Concrete ML's built-in models and therefore only supports the necessary types. For example, torch.Tensor objects are not serializable using this encoder as built-in models only use numpy arrays. However, the list of supported types might expand in future releases if new models are added and need new types.

______________________________________________________________________
diff --git a/src/concrete/ml/common/serialization/encoder.py b/src/concrete/ml/common/serialization/encoder.py
index d18b14227..1e6dcbaf4 100644
--- a/src/concrete/ml/common/serialization/encoder.py
+++ b/src/concrete/ml/common/serialization/encoder.py
@@ -68,7 +68,7 @@ class ConcreteEncoder(JSONEncoder):
     as a numpy array's dtype are also properly serialized. If an object has an unexpected type
     or is not serializable, an error is thrown.

-    The ConcreteEncoder is only meant to encode Concrete-ML's built-in models and therefore only
+    The ConcreteEncoder is only meant to encode Concrete ML's built-in models and therefore only
     supports the necessary types. For example, torch.Tensor objects are not serializable using
     this encoder as built-in models only use numpy arrays. However, the list of supported types
     might expand in future releases if new models are added and need new types.
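The two encoder hunks above describe, in prose, a tagged-dict serialization pattern. As a reading aid only, here is a minimal, hypothetical sketch of that pattern; the class names, the exact dict keys, and the numpy-only dispatch are illustrative assumptions, not Concrete ML's actual ConcreteEncoder/ConcreteDecoder implementation.

    # A reading aid for the docstring above -- NOT Concrete ML's actual code.
    # Names and the dict layout ("type_name" / "serialized_value") are assumptions.
    import json
    from json import JSONEncoder

    import numpy


    class SketchEncoder(JSONEncoder):
        def default(self, o):
            if isinstance(o, numpy.ndarray):
                # A unique type name lets the decoder pick the proper load method;
                # the dtype is stored alongside so the array can be restored exactly.
                return {
                    "type_name": "numpy_array",
                    "serialized_value": o.tolist(),
                    "dtype": str(o.dtype),
                }
            # Unexpected types fall through to JSONEncoder, which raises TypeError
            return super().default(o)


    def sketch_object_hook(dct):
        # Decoder counterpart: dispatch on the stored type name
        if dct.get("type_name") == "numpy_array":
            return numpy.array(dct["serialized_value"], dtype=dct["dtype"])
        return dct


    dumped = json.dumps({"weights": numpy.arange(4, dtype=numpy.int64)}, cls=SketchEncoder)
    restored = json.loads(dumped, object_hook=sketch_object_hook)
    assert restored["weights"].dtype == numpy.int64

The key property, as the docstring notes, is that the type name uniquely identifies the load method and that unsupported types fail loudly rather than being silently dropped.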
diff --git a/src/concrete/ml/sklearn/glm.py b/src/concrete/ml/sklearn/glm.py index ecdbcf56a..4c7d0b6c8 100644 --- a/src/concrete/ml/sklearn/glm.py +++ b/src/concrete/ml/sklearn/glm.py @@ -83,7 +83,7 @@ def dump_dict(self) -> Dict: metadata: Dict[str, Any] = {} - # Concrete-ML + # Concrete ML metadata["n_bits"] = self.n_bits metadata["sklearn_model"] = self.sklearn_model metadata["_is_fitted"] = self._is_fitted @@ -113,7 +113,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = cls(n_bits=metadata["n_bits"]) - # Concrete-ML + # Concrete ML obj.n_bits = metadata["n_bits"] obj.sklearn_model = metadata["sklearn_model"] obj.onnx_model_ = metadata["onnx_model_"] @@ -327,7 +327,7 @@ def dump_dict(self) -> Dict: metadata: Dict[str, Any] = {} - # Concrete-ML + # Concrete ML metadata["n_bits"] = self.n_bits metadata["sklearn_model"] = self.sklearn_model metadata["_is_fitted"] = self._is_fitted @@ -358,7 +358,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = cls(n_bits=metadata["n_bits"]) - # Concrete-ML + # Concrete ML obj.sklearn_model = metadata["sklearn_model"] obj.onnx_model_ = metadata["onnx_model_"] obj._is_fitted = metadata["_is_fitted"] diff --git a/src/concrete/ml/sklearn/neighbors.py b/src/concrete/ml/sklearn/neighbors.py index 727529419..9737d624f 100644 --- a/src/concrete/ml/sklearn/neighbors.py +++ b/src/concrete/ml/sklearn/neighbors.py @@ -97,7 +97,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = cls(n_bits=metadata["n_bits"]) - # Concrete-ML + # Concrete ML obj.sklearn_model = metadata["sklearn_model"] obj._is_fitted = metadata["_is_fitted"] obj._is_compiled = metadata["_is_compiled"] diff --git a/src/concrete/ml/sklearn/qnn.py b/src/concrete/ml/sklearn/qnn.py index 2981a66de..95c4914ad 100644 --- a/src/concrete/ml/sklearn/qnn.py +++ b/src/concrete/ml/sklearn/qnn.py @@ -228,7 +228,7 @@ def dump_dict(self) -> Dict[str, Any]: metadata["optimizer"] = optimizer.getvalue().hex() metadata["criterion"] = criterion.getvalue().hex() - # Concrete-ML + # Concrete ML metadata["_is_fitted"] = self._is_fitted metadata["_is_compiled"] = self._is_compiled metadata["input_quantizers"] = self.input_quantizers @@ -314,7 +314,7 @@ def load_dict(cls, metadata: Dict): module__n_layers=metadata["module__n_layers"], ) - # Concrete-ML + # Concrete ML obj._is_fitted = metadata["_is_fitted"] obj._is_compiled = metadata["_is_compiled"] obj.input_quantizers = metadata["input_quantizers"] @@ -540,7 +540,7 @@ def dump_dict(self) -> Dict[str, Any]: metadata["optimizer"] = optimizer.getvalue().hex() metadata["criterion"] = criterion.getvalue().hex() - # Concrete-ML + # Concrete ML metadata["_is_fitted"] = self._is_fitted metadata["_is_compiled"] = self._is_compiled metadata["input_quantizers"] = self.input_quantizers @@ -628,7 +628,7 @@ def load_dict(cls, metadata: Dict): classes=metadata["classes_"], ) - # Concrete-ML + # Concrete ML obj._is_fitted = metadata["_is_fitted"] obj._is_compiled = metadata["_is_compiled"] obj.input_quantizers = metadata["input_quantizers"] diff --git a/src/concrete/ml/sklearn/rf.py b/src/concrete/ml/sklearn/rf.py index a3181d12a..c0673b36b 100644 --- a/src/concrete/ml/sklearn/rf.py +++ b/src/concrete/ml/sklearn/rf.py @@ -77,7 +77,7 @@ def post_processing(self, y_preds: numpy.ndarray) -> numpy.ndarray: def dump_dict(self) -> Dict[str, Any]: metadata: Dict[str, Any] = {} - # Concrete-ML + # Concrete ML metadata["n_bits"] = self.n_bits metadata["sklearn_model"] = self.sklearn_model metadata["_is_fitted"] = self._is_fitted @@ 
-117,7 +117,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = RandomForestClassifier(n_bits=metadata["n_bits"]) - # Concrete-ML + # Concrete ML obj.sklearn_model = metadata["sklearn_model"] obj._is_fitted = metadata["_is_fitted"] obj._is_compiled = metadata["_is_compiled"] @@ -219,7 +219,7 @@ def __init__( def dump_dict(self) -> Dict[str, Any]: metadata: Dict[str, Any] = {} - # Concrete-ML + # Concrete ML metadata["n_bits"] = self.n_bits metadata["sklearn_model"] = self.sklearn_model metadata["_is_fitted"] = self._is_fitted @@ -259,7 +259,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = RandomForestRegressor(n_bits=metadata["n_bits"]) - # Concrete-ML + # Concrete ML obj.sklearn_model = metadata["sklearn_model"] obj._is_fitted = metadata["_is_fitted"] obj._is_compiled = metadata["_is_compiled"] diff --git a/src/concrete/ml/sklearn/svm.py b/src/concrete/ml/sklearn/svm.py index 509500b3f..093d7e141 100644 --- a/src/concrete/ml/sklearn/svm.py +++ b/src/concrete/ml/sklearn/svm.py @@ -61,7 +61,7 @@ def dump_dict(self) -> Dict[str, Any]: metadata: Dict[str, Any] = {} - # Concrete-ML + # Concrete ML metadata["n_bits"] = self.n_bits metadata["sklearn_model"] = self.sklearn_model metadata["_is_fitted"] = self._is_fitted @@ -94,7 +94,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = LinearSVR() - # Concrete-ML + # Concrete ML obj.n_bits = metadata["n_bits"] obj.sklearn_model = metadata["sklearn_model"] obj._is_fitted = metadata["_is_fitted"] @@ -180,7 +180,7 @@ def dump_dict(self) -> Dict[str, Any]: metadata: Dict[str, Any] = {} - # Concrete-ML + # Concrete ML metadata["n_bits"] = self.n_bits metadata["sklearn_model"] = self.sklearn_model metadata["_is_fitted"] = self._is_fitted @@ -215,7 +215,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = LinearSVC() - # Concrete-ML + # Concrete ML obj.n_bits = metadata["n_bits"] obj.sklearn_model = metadata["sklearn_model"] obj._is_fitted = metadata["_is_fitted"] diff --git a/src/concrete/ml/sklearn/tree.py b/src/concrete/ml/sklearn/tree.py index fba10ca3f..048bd6046 100644 --- a/src/concrete/ml/sklearn/tree.py +++ b/src/concrete/ml/sklearn/tree.py @@ -77,7 +77,7 @@ def post_processing(self, y_preds: numpy.ndarray) -> numpy.ndarray: def dump_dict(self) -> Dict[str, Any]: metadata: Dict[str, Any] = {} - # Concrete-ML + # Concrete ML metadata["n_bits"] = self.n_bits metadata["sklearn_model"] = self.sklearn_model metadata["_is_fitted"] = self._is_fitted @@ -112,7 +112,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = cls(n_bits=metadata["n_bits"]) - # Concrete-ML + # Concrete ML obj.sklearn_model = metadata["sklearn_model"] obj._is_fitted = metadata["_is_fitted"] obj._is_compiled = metadata["_is_compiled"] @@ -208,7 +208,7 @@ def __getattr__(self, attr: str): def dump_dict(self) -> Dict[str, Any]: metadata: Dict[str, Any] = {} - # Concrete-ML + # Concrete ML metadata["n_bits"] = self.n_bits metadata["sklearn_model"] = self.sklearn_model metadata["_is_fitted"] = self._is_fitted @@ -242,7 +242,7 @@ def load_dict(cls, metadata: Dict): # Instantiate the model obj = cls(n_bits=metadata["n_bits"]) - # Concrete-ML + # Concrete ML obj.sklearn_model = metadata["sklearn_model"] obj._is_fitted = metadata["_is_fitted"] obj._fhe_ensembling = metadata["_fhe_ensembling"] diff --git a/src/concrete/ml/sklearn/xgb.py b/src/concrete/ml/sklearn/xgb.py index e0687da78..366a3ae58 100644 --- a/src/concrete/ml/sklearn/xgb.py +++ b/src/concrete/ml/sklearn/xgb.py @@ -137,7 +137,7 @@ def 
__init__(
 def dump_dict(self) -> Dict[str, Any]:
     metadata: Dict[str, Any] = {}

-        # Concrete-ML
+        # Concrete ML
         metadata["n_bits"] = self.n_bits
         metadata["sklearn_model"] = self.sklearn_model
         metadata["_is_fitted"] = self._is_fitted
@@ -208,7 +208,7 @@ def load_dict(cls, metadata: Dict):
         # Instantiate the model
         obj = XGBClassifier(n_bits=metadata["n_bits"])

-        # Concrete-ML
+        # Concrete ML
         obj.sklearn_model = metadata["sklearn_model"]
         obj._is_fitted = metadata["_is_fitted"]
         obj._is_compiled = metadata["_is_compiled"]
@@ -417,7 +417,7 @@ def post_processing(self, y_preds: numpy.ndarray) -> numpy.ndarray:
     def dump_dict(self) -> Dict[str, Any]:
         metadata: Dict[str, Any] = {}

-        # Concrete-ML
+        # Concrete ML
         metadata["n_bits"] = self.n_bits
         metadata["sklearn_model"] = self.sklearn_model
         metadata["_is_fitted"] = self._is_fitted
@@ -487,7 +487,7 @@ def load_dict(cls, metadata: Dict):
         # Instantiate the model
         obj = XGBRegressor(n_bits=metadata["n_bits"])

-        # Concrete-ML
+        # Concrete ML
         obj.sklearn_model = metadata["sklearn_model"]
         obj._is_fitted = metadata["_is_fitted"]
         obj._is_compiled = metadata["_is_compiled"]
diff --git a/use_case_examples/lora_finetuning/GPT2FineTuneHybrid.ipynb b/use_case_examples/lora_finetuning/GPT2FineTuneHybrid.ipynb
index d102766fa..c229016a0 100644
--- a/use_case_examples/lora_finetuning/GPT2FineTuneHybrid.ipynb
+++ b/use_case_examples/lora_finetuning/GPT2FineTuneHybrid.ipynb
@@ -5,9 +5,9 @@
 "id": "dfccd8e6",
 "metadata": {},
 "source": [
- "# Fine-Tuning GPT-2 on Encrypted Data with LoRA and Concrete-ML\n",
+ "# Fine-Tuning GPT-2 on Encrypted Data with LoRA and Concrete ML\n",
 "\n",
- "In this notebook, we perform fine-tuning of a GPT-2 model using LoRA and Concrete-ML."
+ "In this notebook, we fine-tune a GPT-2 model using LoRA and Concrete ML."
 ]
 },
 {
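A closing note on verifying a rename of this kind: a literal search for `Concrete-ML` misses variant spellings such as `concrete-ML`, `ConcreteML`, or `concrete-ml` that appear in the notebook prose. Below is a hedged sketch of a case-insensitive sweep; the file-extension list and the regex are assumptions, and hits such as the deliberately quoted forbidden spelling in docs/conventions.md or the `concrete-ml` repository name inside URLs should be reviewed by hand and left as-is.

    # Hypothetical post-rename sweep -- not part of this patch. It prints every
    # remaining variant spelling for manual review; the extension list and the
    # regex are assumptions. Deliberate occurrences (the forbidden spelling
    # quoted in docs/conventions.md, the concrete-ml repository name in URLs)
    # should simply be left alone.
    import re
    from pathlib import Path

    # Matches Concrete-ML, concrete-ML, ConcreteML, concrete_ml, ... but not
    # the desired "Concrete ML" spelling or the concrete.ml package path.
    VARIANT = re.compile(r"concrete[-_]?ml", re.IGNORECASE)

    for path in Path(".").rglob("*"):
        if not path.is_file() or path.suffix not in {".py", ".md", ".ipynb", ".yaml", ".txt"}:
            continue
        for lineno, line in enumerate(path.read_text(errors="ignore").splitlines(), start=1):
            if VARIANT.search(line):
                print(f"{path}:{lineno}: {line.strip()}")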