diff --git a/.acrolinx-config.edn b/.acrolinx-config.edn index 919922c789..2020cbb0f8 100644 --- a/.acrolinx-config.edn +++ b/.acrolinx-config.edn @@ -1,2 +1,2 @@ {:allowed-branchname-matches ["master" "release-.*"] - :allowed-filename-matches ["notebooks" "website"]} + :allowed-filename-matches ["docs" "website"]} diff --git a/.github/workflows/check-dead-links.yml b/.github/workflows/check-dead-links.yml new file mode 100644 index 0000000000..10d1fe4320 --- /dev/null +++ b/.github/workflows/check-dead-links.yml @@ -0,0 +1,26 @@ +name: "Check Dead Links" + +on: + workflow_dispatch: + push: + branches: [ "master" ] + pull_request: + # The branches below must be a subset of the branches above + branches: [ "master" ] + +jobs: + scan_links: + name: Scan Website for Dead Links + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Install dependencies + run: | + sudo apt-get update + sudo apt-get install -y wget + - name: Scan for dead links + run: | + wget --spider --recursive --no-verbose --tries=3 --retry-connrefused --no-clobber --directory-prefix=site-check https://microsoft.github.io/SynapseML/ diff --git a/.github/workflows/clean-acr.yml b/.github/workflows/clean-acr.yml index e1e133e9f8..eb82134c75 100644 --- a/.github/workflows/clean-acr.yml +++ b/.github/workflows/clean-acr.yml @@ -31,7 +31,7 @@ jobs: with: creds: ${{ secrets.clean_acr }} - name: checkout repo content - uses: actions/checkout@v3 # checkout the repo + uses: actions/checkout@v4 # checkout the repo - name: setup python uses: actions/setup-python@v4 with: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 5c5a6a6e7c..524b7024fd 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -42,7 +42,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 # Initializes the CodeQL tools for scanning. - name: Initialize CodeQL diff --git a/.github/workflows/on-pull-request-target-review.yml b/.github/workflows/on-pull-request-target-review.yml index d901d9d326..47e569c66e 100644 --- a/.github/workflows/on-pull-request-target-review.yml +++ b/.github/workflows/on-pull-request-target-review.yml @@ -10,7 +10,7 @@ jobs: name: Azure OpenAI PR Comment steps: - id: review - uses: microsoft/gpt-review@v0.9.4 + uses: microsoft/gpt-review@v0.9.5 with: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} AZURE_OPENAI_API: ${{ secrets.AZURE_OPENAI_API }} diff --git a/.github/workflows/scorecards.yml b/.github/workflows/scorecards.yml index 9aa466912d..83ddf41ea2 100644 --- a/.github/workflows/scorecards.yml +++ b/.github/workflows/scorecards.yml @@ -32,7 +32,7 @@ jobs: steps: - name: "Checkout code" - uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # v3.1.0 + uses: actions/checkout@v4 # v3.1.0 with: persist-credentials: false diff --git a/.gitignore b/.gitignore index d6858515cb..94c129677f 100644 --- a/.gitignore +++ b/.gitignore @@ -86,3 +86,4 @@ metastore_db/ **/build/* **/dist/* **/*.egg-info/* + diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index a1188611b9..12eee07c4a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -45,14 +45,14 @@ this process: #### Implement tests -- Set up build environment using the [developer guide](https://microsoft.github.io/SynapseML/docs/reference/developer-readme/) +- Set up build environment using the [developer guide](https://microsoft.github.io/SynapseML/docs/Reference/Developer%20Setup/) - Test your code locally. 
- Add tests using ScalaTests — unit tests are required. - A sample notebook is required as an end-to-end test. #### Implement documentation -- Add a [sample Jupyter notebook](notebooks/) that shows the intended use +- Add a [sample Jupyter notebook](docs/) that shows the intended use case of your algorithm, with instructions in step-by-step manner. (The same notebook could be used for testing the code.) - Add in-line ScalaDoc comments to your source code, to generate the [API diff --git a/README.md b/README.md index 691ca43aff..c5e78d9124 100644 --- a/README.md +++ b/README.md @@ -6,15 +6,16 @@ SynapseML (previously known as MMLSpark), is an open-source library that simplif With SynapseML, you can build scalable and intelligent systems to solve challenges in domains such as anomaly detection, computer vision, deep learning, text analytics, and others. SynapseML can train and evaluate models on single-node, multi-node, and elastically resizable clusters of computers. This lets you scale your work without wasting resources. SynapseML is usable across Python, R, Scala, Java, and .NET. Furthermore, its API abstracts over a wide variety of databases, file systems, and cloud data stores to simplify experiments no matter where data is located. -SynapseML requires Scala 2.12, Spark 3.2+, and Python 3.8+. +SynapseML requires Scala 2.12, Spark 3.2+, and Python 3.8+. | Topics | Links | | :------ | :------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | Build | [![Build Status](https://msdata.visualstudio.com/A365/_apis/build/status/microsoft.SynapseML?branchName=master)](https://msdata.visualstudio.com/A365/_build/latest?definitionId=17563&branchName=master) [![codecov](https://codecov.io/gh/Microsoft/SynapseML/branch/master/graph/badge.svg)](https://codecov.io/gh/Microsoft/SynapseML) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) | -| Version | [![Version](https://img.shields.io/badge/version-0.11.1-blue)](https://github.com/Microsoft/SynapseML/releases) [![Release Notes](https://img.shields.io/badge/release-notes-blue)](https://github.com/Microsoft/SynapseML/releases) [![Snapshot Version](https://mmlspark.blob.core.windows.net/icons/badges/master_version3.svg)](#sbt) | -| Docs | [![Scala Docs](https://img.shields.io/static/v1?label=api%20docs&message=scala&color=blue&logo=scala)](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/index.html#package) [![PySpark Docs](https://img.shields.io/static/v1?label=api%20docs&message=python&color=blue&logo=python)](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/index.html) [![Academic Paper](https://img.shields.io/badge/academic-paper-7fdcf7)](https://arxiv.org/abs/1810.08744) | +| Version | [![Version](https://img.shields.io/badge/version-0.11.2-blue)](https://github.com/Microsoft/SynapseML/releases) [![Release Notes](https://img.shields.io/badge/release-notes-blue)](https://github.com/Microsoft/SynapseML/releases) [![Snapshot Version](https://mmlspark.blob.core.windows.net/icons/badges/master_version3.svg)](#sbt) | +| Docs | 
[![Website](https://img.shields.io/badge/SynapseML-Website-blue)](https://aka.ms/spark) [![Scala Docs](https://img.shields.io/static/v1?label=api%20docs&message=scala&color=blue&logo=scala)](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/index.html#package) [![PySpark Docs](https://img.shields.io/static/v1?label=api%20docs&message=python&color=blue&logo=python)](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/index.html) [![Academic Paper](https://img.shields.io/badge/academic-paper-7fdcf7)](https://arxiv.org/abs/1810.08744) | | Support | [![Gitter](https://badges.gitter.im/Microsoft/MMLSpark.svg)](https://gitter.im/Microsoft/MMLSpark?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) [![Mail](https://img.shields.io/badge/mail-synapseml--support-brightgreen)](mailto:synapseml-support@microsoft.com) | -| Binder | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/microsoft/SynapseML/v0.11.1?labpath=notebooks%2Ffeatures) | +| Binder | [![Binder](https://mybinder.org/badge_logo.svg)](https://mybinder.org/v2/gh/microsoft/SynapseML/v0.11.2?labpath=notebooks%2Ffeatures) | +| Usage | [![Downloads](https://static.pepy.tech/badge/synapseml)](https://pepy.tech/project/synapseml) |
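The badge and Binder rows above now track the 0.11.2 release. As a quick sketch of what that coordinate bump means for users (it assumes a Spark 3.2+/Scala 2.12 cluster and simply mirrors the installation snippets updated further down in this diff):

```scala
import org.apache.spark.sql.SparkSession

// Sketch only: the coordinates and resolver below are the ones bumped throughout this PR.
val spark = SparkSession.builder()
  .appName("MyApp")
  .config("spark.jars.packages", "com.microsoft.azure:synapseml_2.12:0.11.2")
  .config("spark.jars.repositories", "https://mmlspark.azureedge.net/maven")
  .getOrCreate()
```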
@@ -49,17 +50,17 @@ SynapseML requires Scala 2.12, Spark 3.2+, and Python 3.8+. | | | | | | :----------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------: | -| [**Vowpal Wabbit on Spark**](https://microsoft.github.io/SynapseML/docs/features/vw/about/) | [**The Cognitive Services for Big Data**](https://microsoft.github.io/SynapseML/docs/features/cognitive_services/CognitiveServices%20-%20Overview/) | [**LightGBM on Spark**](https://microsoft.github.io/SynapseML/docs/features/lightgbm/about/) | [**Spark Serving**](https://microsoft.github.io/SynapseML/docs/features/spark_serving/about/) | +| [**Vowpal Wabbit on Spark**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/Vowpal%20Wabbit/Overview/) | [**The Cognitive Services for Big Data**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/AI%20Services/Overview/) | [**LightGBM on Spark**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/LightGBM/Overview/) | [**Spark Serving**](https://microsoft.github.io/SynapseML/docs/Deploy%20Models/Overview/) | | Fast, Sparse, and Effective Text Analytics | Leverage the Microsoft Cognitive Services at Unprecedented Scales in your existing SparkML pipelines | Train Gradient Boosted Machines with LightGBM | Serve any Spark Computation as a Web Service with Sub-Millisecond Latency | -| | | | | -| :----------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------: | -| [**HTTP on Spark**](https://microsoft.github.io/SynapseML/docs/features/cognitive_services/CognitiveServices%20-%20Overview/#arbitrary-web-apis) | [**ONNX on Spark**](https://microsoft.github.io/SynapseML/docs/features/onnx/about/) | [**Responsible AI**](https://microsoft.github.io/SynapseML/docs/features/responsible_ai/Model%20Interpretation%20on%20Spark/) | [**Spark Binding Autogeneration**](https://microsoft.github.io/SynapseML/docs/reference/developer-readme/#packagepython) | -| An Integration Between Spark and the HTTP Protocol, enabling Distributed Microservice Orchestration | Distributed and Hardware Accelerated Model Inference on Spark | Understand Opaque-box Models and Measure Dataset Biases | Automatically Generate Spark bindings for PySpark and SparklyR | +| | | | | +| :----------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------: |:-----------------------------------------------------------------------------------------------------------------------:| +| [**HTTP 
on Spark**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/AI%20Services/Overview/#arbitrary-web-apis) | [**ONNX on Spark**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/Deep%20Learning/ONNX/) | [**Responsible AI**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/Responsible%20AI/Interpreting%20Model%20Predictions/) | [**Spark Binding Autogeneration**](https://microsoft.github.io/SynapseML/docs/Reference/Developer%20Setup/#packagepython) | +| An Integration Between Spark and the HTTP Protocol, enabling Distributed Microservice Orchestration | Distributed and Hardware Accelerated Model Inference on Spark | Understand Opaque-box Models and Measure Dataset Biases | Automatically Generate Spark bindings for PySpark and SparklyR | | | | | | :-----------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------: | -| [**Isolation Forest on Spark**](https://microsoft.github.io/SynapseML/docs/documentation/estimators/estimators_core/#isolationforest) | [**CyberML**](https://github.com/microsoft/SynapseML/blob/master/notebooks/features/other/CyberML%20-%20Anomalous%20Access%20Detection.ipynb) | [**Conditional KNN**](https://microsoft.github.io/SynapseML/docs/features/other/ConditionalKNN%20-%20Exploring%20Art%20Across%20Cultures/) | +| [**Isolation Forest on Spark**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/Anomaly%20Detection/Quickstart%20-%20Isolation%20Forests/) | [**CyberML**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/Other%20Algorithms/Cyber%20ML/) | [**Conditional KNN**](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/Other%20Algorithms/Quickstart%20-%20Exploring%20Art%20Across%20Cultures/) | | Distributed Nonlinear Outlier Detection | Machine Learning Tools for Cyber Security | Scalable KNN Models with Conditional Queries | @@ -94,7 +95,7 @@ In Azure Synapse notebooks please place the following in the first cell of your { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1-spark3.3", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2-spark3.3", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -110,7 +111,7 @@ In Azure Synapse notebooks please place the following in the first cell of your { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1,org.apache.spark:spark-avro_2.12:3.3.1", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2,org.apache.spark:spark-avro_2.12:3.3.1", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -130,7 +131,7 @@ cloud](http://community.cloud.databricks.com), create a new [library from Maven 
coordinates](https://docs.databricks.com/user-guide/libraries.html#libraries-from-maven-pypi-or-spark-packages) in your workspace. -For the coordinates use: `com.microsoft.azure:synapseml_2.12:0.11.1` +For the coordinates use: `com.microsoft.azure:synapseml_2.12:0.11.2` with the resolver: `https://mmlspark.azureedge.net/maven`. Ensure this library is attached to your target cluster(s). @@ -138,7 +139,7 @@ Finally, ensure that your Spark cluster has at least Spark 3.2 and Scala 2.12. I You can use SynapseML in both your Scala and PySpark notebooks. To get started with our example notebooks import the following databricks archive: -`https://mmlspark.blob.core.windows.net/dbcs/SynapseMLExamplesv0.11.1.dbc` +`https://mmlspark.blob.core.windows.net/dbcs/SynapseMLExamplesv0.11.2.dbc` ### Microsoft Fabric @@ -151,7 +152,7 @@ In Microsoft Fabric notebooks please place the following in the first cell of yo { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1-spark3.3", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2-spark3.3", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -167,7 +168,7 @@ In Microsoft Fabric notebooks please place the following in the first cell of yo { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1,org.apache.spark:spark-avro_2.12:3.3.1", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2,org.apache.spark:spark-avro_2.12:3.3.1", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -186,7 +187,7 @@ the above example, or from python: ```python import pyspark spark = pyspark.sql.SparkSession.builder.appName("MyApp") \ - .config("spark.jars.packages", "com.microsoft.azure:synapseml_2.12:0.11.1") \ + .config("spark.jars.packages", "com.microsoft.azure:synapseml_2.12:0.11.2") \ .getOrCreate() import synapse.ml ``` @@ -197,9 +198,9 @@ SynapseML can be conveniently installed on existing Spark clusters via the `--packages` option, examples: ```bash -spark-shell --packages com.microsoft.azure:synapseml_2.12:0.11.1 -pyspark --packages com.microsoft.azure:synapseml_2.12:0.11.1 -spark-submit --packages com.microsoft.azure:synapseml_2.12:0.11.1 MyApp.jar +spark-shell --packages com.microsoft.azure:synapseml_2.12:0.11.2 +pyspark --packages com.microsoft.azure:synapseml_2.12:0.11.2 +spark-submit --packages com.microsoft.azure:synapseml_2.12:0.11.2 MyApp.jar ``` ### SBT @@ -208,7 +209,7 @@ If you are building a Spark application in Scala, add the following lines to your `build.sbt`: ```scala -libraryDependencies += "com.microsoft.azure" % "synapseml_2.12" % "0.11.1" +libraryDependencies += "com.microsoft.azure" % "synapseml_2.12" % "0.11.2" ``` ### Apache Livy and HDInsight @@ -222,7 +223,7 @@ Excluding certain packages from the library may be necessary due to current issu { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2", "spark.jars.excludes": 
"org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind" } } @@ -238,25 +239,25 @@ docker run -it -p 8888:8888 -e ACCEPT_EULA=yes mcr.microsoft.com/mmlspark/releas ``` Navigate to in your web browser to run the sample -notebooks. See the [documentation](docs/docker.md) for more on Docker use. +notebooks. See the [documentation](https://microsoft.github.io/SynapseML/docs/Reference/Docker%20Setup/) for more on Docker use. > To read the EULA for using the docker image, run `docker run -it -p 8888:8888 mcr.microsoft.com/mmlspark/release eula` ### R To try out SynapseML using the R autogenerated wrappers [see our -instructions](website/docs/reference/R-setup.md). Note: This feature is still under development +instructions](https://microsoft.github.io/SynapseML/docs/Reference/R%20Setup/). Note: This feature is still under development and some necessary custom wrappers may be missing. ### C# (.NET) -To try out SynapseML with .NET, please follow the [.NET Installation Guide](website/docs/reference/dotnet-setup.md). +To try out SynapseML with .NET, please follow the [.NET Installation Guide](https://microsoft.github.io/SynapseML/docs/Reference/Dotnet%20Setup/). Please note that some classes including the `AzureSearchWriter`, `DiagnosticInfo`, `UDPyFParam`, `ParamSpaceParam`, `BallTreeParam`, `ConditionalBallTreeParam`, `LightGBMBoosterParam` are still under development and not exposed in .NET yet. ### Building from source SynapseML has recently transitioned to a new build infrastructure. -For detailed developer docs please see the [Developer Readme](website/docs/reference/developer-readme.md) +For detailed developer docs please see the [Developer Readme](https://microsoft.github.io/SynapseML/docs/Reference/Developer%20Setup/) If you are an existing synapsemldeveloper, you will need to reconfigure your development setup. 
We now support platform independent development and diff --git a/build.sbt b/build.sbt index 9d15620b08..8ff8b4c4e1 100644 --- a/build.sbt +++ b/build.sbt @@ -25,6 +25,7 @@ val coreDependencies = Seq( "org.apache.spark" %% "spark-mllib" % sparkVersion % "compile", "org.apache.spark" %% "spark-avro" % sparkVersion % "provided", "org.apache.spark" %% "spark-tags" % sparkVersion % "test", + "com.globalmentor" % "hadoop-bare-naked-local-fs" % "0.1.0" % "test", "org.scalatest" %% "scalatest" % "3.2.14" % "test") val extraDependencies = Seq( "org.scalactic" %% "scalactic" % "3.2.14", @@ -220,7 +221,7 @@ publishDotnetBase := { packDotnetAssemblyCmd(join(dotnetBaseDir, "target").getAbsolutePath, dotnetBaseDir) val packagePath = join(dotnetBaseDir, // Update the version whenever there's a new release - "target", s"SynapseML.DotnetBase.${dotnetedVersion("0.11.1")}.nupkg").getAbsolutePath + "target", s"SynapseML.DotnetBase.${dotnetedVersion("0.11.2")}.nupkg").getAbsolutePath publishDotnetAssemblyCmd(packagePath, genSleetConfig.value) } @@ -381,11 +382,11 @@ publishBadges := { uploadBadge("master version", version.value, "blue", "master_version3.svg") } -val uploadNotebooks = TaskKey[Unit]("uploadNotebooks", "upload notebooks to blob storage") +val uploadNotebooks = TaskKey[Unit]("uploadNotebooks", "upload docs to blob storage") uploadNotebooks := { - val localNotebooksFolder = join(baseDirectory.value.toString, "notebooks").toString + val localNotebooksFolder = join(baseDirectory.value.toString, "docs").toString val blobNotebooksFolder = version.value - uploadToBlob(localNotebooksFolder, blobNotebooksFolder, "notebooks") + uploadToBlob(localNotebooksFolder, blobNotebooksFolder, "docs") } val settings = Seq( @@ -493,8 +494,8 @@ setupTask := { val convertNotebooks = TaskKey[Unit]("convertNotebooks", "convert notebooks to markdown for website display") convertNotebooks := { - runCmdStr("python -m docs.python.documentprojection " + - "--customchannels docs/python/synapseml_channels -c website . 
docs/manifest.yaml -p") + runCmd(Seq("pip", "install", "-e", "."), wd=join(baseDirectory.value, "tools/docgen")) + runCmd(Seq("python", "__main__.py"), wd=join(baseDirectory.value, "tools/docgen/docgen")) } val testWebsiteDocs = TaskKey[Unit]("testWebsiteDocs", diff --git a/cognitive/src/main/python/synapse/ml/cognitive/langchain/LangchainTransform.py b/cognitive/src/main/python/synapse/ml/cognitive/langchain/LangchainTransform.py index cbf6b528b8..fffbf13cdc 100644 --- a/cognitive/src/main/python/synapse/ml/cognitive/langchain/LangchainTransform.py +++ b/cognitive/src/main/python/synapse/ml/cognitive/langchain/LangchainTransform.py @@ -44,6 +44,7 @@ ) from pyspark.sql.functions import udf from typing import cast, Optional, TypeVar, Type +from synapse.ml.core.platform import running_on_synapse_internal OPENAI_API_VERSION = "2022-12-01" RL = TypeVar("RL", bound="MLReadable") @@ -125,6 +126,14 @@ def __init__( self.subscriptionKey = Param(self, "subscriptionKey", "openai api key") self.url = Param(self, "url", "openai api base") self.apiVersion = Param(self, "apiVersion", "openai api version") + self.running_on_synapse_internal = running_on_synapse_internal() + if running_on_synapse_internal(): + from synapse.ml.fabric.service_discovery import get_fabric_env_config + + self._setDefault( + url=get_fabric_env_config().fabric_env_config.ml_workload_endpoint + + "cognitive/openai" + ) kwargs = self._input_kwargs if subscriptionKey: kwargs["subscriptionKey"] = subscriptionKey @@ -196,10 +205,15 @@ def _transform(self, dataset): def udfFunction(x): import openai - openai.api_type = "azure" - openai.api_key = self.getSubscriptionKey() - openai.api_base = self.getUrl() - openai.api_version = self.getApiVersion() + if self.running_on_synapse_internal and not self.isSet(self.url): + from synapse.ml.fabric.prerun.openai_prerun import OpenAIPrerun + + OpenAIPrerun(api_base=self.getUrl()).init_personalized_session(None) + else: + openai.api_type = "azure" + openai.api_key = self.getSubscriptionKey() + openai.api_base = self.getUrl() + openai.api_version = self.getApiVersion() return self.getChain().run(x) outCol = self.getOutputCol() diff --git a/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearch.scala b/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearch.scala index d4db72e3f3..54764d6404 100644 --- a/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearch.scala +++ b/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearch.scala @@ -18,6 +18,8 @@ import org.apache.spark.internal.{Logging => SLogging} import org.apache.spark.ml.param._ import org.apache.spark.ml.util._ import org.apache.spark.ml.{ComplexParamsReadable, NamespaceInjections, PipelineModel} +import org.apache.spark.ml.linalg.SQLDataTypes.VectorType +import org.apache.spark.ml.functions.vector_to_array import org.apache.spark.sql.functions.{col, expr, struct, to_json} import org.apache.spark.sql.streaming.DataStreamWriter import org.apache.spark.sql.types._ @@ -142,7 +144,7 @@ class AddDocuments(override val uid: String) extends CognitiveServicesBase(uid) override def responseDataType: DataType = ASResponses.schema } -object AzureSearchWriter extends IndexParser with SLogging { +object AzureSearchWriter extends IndexParser with IndexJsonGetter with SLogging { val Logger: Logger = LogManager.getRootLogger @@ -166,9 +168,11 @@ object AzureSearchWriter extends IndexParser with SLogging { private def convertFields(fields: 
Seq[StructField], keyCol: String, searchActionCol: String, + vectorCols: Option[Seq[VectorColParams]], prefix: Option[String]): Seq[IndexField] = { fields.filterNot(_.name == searchActionCol).map { sf => val fullName = prefix.map(_ + sf.name).getOrElse(sf.name) + val isVector = vectorCols.exists(_.exists(_.name == fullName)) val (innerType, _) = sparkTypeToEdmType(sf.dataType) IndexField( sf.name, @@ -177,7 +181,9 @@ object AzureSearchWriter extends IndexParser with SLogging { if (keyCol == fullName) Some(true) else None, None, None, None, None, structFieldToSearchFields(sf.dataType, - keyCol, searchActionCol, prefix = Some(prefix.getOrElse("") + sf.name + ".")) + keyCol, searchActionCol, None, prefix = Some(prefix.getOrElse("") + sf.name + ".")), + if (isVector) vectorCols.get.find(_.name == fullName).map(_.dimension) else None, + if (isVector) Some(AzureSearchAPIConstants.VectorConfigName) else None ) } } @@ -185,23 +191,34 @@ object AzureSearchWriter extends IndexParser with SLogging { private def structFieldToSearchFields(schema: DataType, keyCol: String, searchActionCol: String, + vectorCols: Option[Seq[VectorColParams]], prefix: Option[String] = None ): Option[Seq[IndexField]] = { schema match { - case StructType(fields) => Some(convertFields(fields, keyCol, searchActionCol, prefix)) - case ArrayType(StructType(fields), _) => Some(convertFields(fields, keyCol, searchActionCol, prefix)) + case StructType(fields) => Some(convertFields(fields, keyCol, searchActionCol, vectorCols, prefix)) + // TODO: Support vector search in nested fields + case ArrayType(StructType(fields), _) => Some(convertFields(fields, keyCol, searchActionCol, None, prefix)) case _ => None } } + private def parseVectorColsJson(str: String): Seq[VectorColParams] = { + str.parseJson.convertTo[Seq[VectorColParams]] + } + private def dfToIndexJson(schema: StructType, indexName: String, keyCol: String, - searchActionCol: String): String = { + searchActionCol: String, + vectorCols: Option[Seq[VectorColParams]]): String = { + + val vectorConfig = Some(VectorSearch(Seq(AlgorithmConfigs(AzureSearchAPIConstants.VectorConfigName, + AzureSearchAPIConstants.VectorSearchAlgorithm)))) val is = IndexInfo( Some(indexName), - structFieldToSearchFields(schema, keyCol, searchActionCol).get, - None, None, None, None, None, None, None, None + structFieldToSearchFields(schema, keyCol, searchActionCol, vectorCols).get, + None, None, None, None, None, None, None, None, + if (vectorCols.isEmpty) None else vectorConfig ) is.toJson.compactPrint } @@ -210,7 +227,7 @@ object AzureSearchWriter extends IndexParser with SLogging { options: Map[String, String] = Map()): DataFrame = { val applicableOptions = Set( "subscriptionKey", "actionCol", "serviceName", "indexName", "indexJson", - "apiVersion", "batchSize", "fatalErrors", "filterNulls", "keyCol" + "apiVersion", "batchSize", "fatalErrors", "filterNulls", "keyCol", "vectorCols" ) options.keys.foreach(k => @@ -224,11 +241,12 @@ object AzureSearchWriter extends IndexParser with SLogging { val batchSize = options.getOrElse("batchSize", "100").toInt val fatalErrors = options.getOrElse("fatalErrors", "true").toBoolean val filterNulls = options.getOrElse("filterNulls", "false").toBoolean + val vectorColsInfo = options.get("vectorCols") val keyCol = options.get("keyCol") val indexName = options.getOrElse("indexName", parseIndexJson(indexJsonOpt.get).name.get) if (indexJsonOpt.isDefined) { - List("keyCol", "indexName").foreach(opt => + List("keyCol", "indexName", "vectorCols").foreach(opt => 
assert(!options.contains(opt), s"Cannot set both indexJson options and $opt") ) } @@ -242,22 +260,41 @@ object AzureSearchWriter extends IndexParser with SLogging { } } - val indexJson = indexJsonOpt.getOrElse { - dfToIndexJson(df.schema, indexName, keyCol.get, actionCol) + val (indexJson, preppedDF) = if (getExisting(subscriptionKey, serviceName, apiVersion).contains(indexName)) { + if (indexJsonOpt.isDefined) { + println(f"indexJsonOpt is specified, however an index for $indexName already exists," + + f"we will use the index definition obtained from the existing index instead") + } + val existingIndexJson = getIndexJsonFromExistingIndex(subscriptionKey, serviceName, indexName) + val vectorColNameTypeTuple = getVectorColConf(existingIndexJson) + (existingIndexJson, makeColsCompatible(vectorColNameTypeTuple, df)) + } else if (indexJsonOpt.isDefined) { + val vectorColNameTypeTuple = getVectorColConf(indexJsonOpt.get) + (indexJsonOpt.get, makeColsCompatible(vectorColNameTypeTuple, df)) + } else { + val vectorCols = vectorColsInfo.map(parseVectorColsJson) + val vectorColNameTypeTuple = vectorCols.map(_.map(vc => (vc.name, "Collection(Edm.Single)"))).getOrElse(Seq.empty) + val newDF = makeColsCompatible(vectorColNameTypeTuple, df) + val inferredIndexJson = dfToIndexJson(newDF.schema, indexName, keyCol.getOrElse(""), actionCol, vectorCols) + (inferredIndexJson, newDF) } + // TODO: Support vector search in nested fields + // Throws an exception if any nested field is a vector in the schema + parseIndexJson(indexJson).fields.foreach(_.fields.foreach(assertNoNestedVectors)) + SearchIndex.createIfNoneExists(subscriptionKey, serviceName, indexJson, apiVersion) logInfo("checking schema parity") - checkSchemaParity(df.schema, indexJson, actionCol) + checkSchemaParity(preppedDF.schema, indexJson, actionCol) val df1 = if (filterNulls) { val collectionColumns = parseIndexJson(indexJson).fields .filter(_.`type`.startsWith("Collection")) .map(_.name) - collectionColumns.foldLeft(df) { (ndf, c) => filterOutNulls(ndf, c) } + collectionColumns.foldLeft(preppedDF) { (ndf, c) => filterOutNulls(ndf, c) } } else { - df + preppedDF } new AddDocuments() @@ -273,6 +310,48 @@ object AzureSearchWriter extends IndexParser with SLogging { UDFUtils.oldUdf(checkForErrors(fatalErrors) _, ErrorUtils.ErrorSchema)(col("error"), col("input"))) } + private def assertNoNestedVectors(fields: Seq[IndexField]): Unit = { + def checkVectorField(field: IndexField): Unit = { + if (field.dimensions.nonEmpty && field.vectorSearchConfiguration.nonEmpty) { + throw new IllegalArgumentException(s"Nested field ${field.name} is a vector field, vector fields in nested" + + s" fields are not supported.") + } + field.fields.foreach(_.foreach(checkVectorField)) + } + fields.foreach(checkVectorField) + } + + private def getVectorColConf(indexJson: String): Seq[(String, String)] = { + parseIndexJson(indexJson).fields + .filter(f => f.vectorSearchConfiguration.nonEmpty && f.dimensions.nonEmpty) + .map(f => (f.name, f.`type`)) + } + private def makeColsCompatible(vectorColNameTypeTuple: Seq[(String, String)], + df: DataFrame): DataFrame = { + vectorColNameTypeTuple.foldLeft(df) { case (accDF, (colName, colType)) => + if (!accDF.columns.contains(colName)) { + println(s"Column $colName is specified in either indexJson or vectorCols but not found in dataframe " + + s"columns ${accDF.columns.toList}") + accDF + } + else { + val colDataType = accDF.schema(colName).dataType + assert(colDataType match { + case ArrayType(elementType, _) => elementType == 
FloatType || elementType == DoubleType + case VectorType => true + case _ => false + }, s"Vector column $colName needs to be one of (ArrayType(FloatType), ArrayType(DoubleType), VectorType)") + if (colDataType.isInstanceOf[ArrayType]) { + accDF.withColumn(colName, accDF(colName).cast(edmTypeToSparkType(colType, None))) + } else { + // first cast vectorUDT to array, then cast it to correct array type + val modifiedDF = accDF.withColumn(colName, vector_to_array(accDF(colName))) + modifiedDF.withColumn(colName, modifiedDF(colName).cast(edmTypeToSparkType(colType, None))) + } + } + } + } + private def isEdmCollection(t: String): Boolean = { t.startsWith("Collection(") && t.endsWith(")") } @@ -290,6 +369,7 @@ object AzureSearchWriter extends IndexParser with SLogging { case "Edm.Int64" => LongType case "Edm.Int32" => IntegerType case "Edm.Double" => DoubleType + case "Edm.Single" => FloatType case "Edm.DateTimeOffset" => StringType //See if there's a way to use spark datetimes case "Edm.GeographyPoint" => StringType case "Edm.ComplexType" => StructType(fields.get.map(f => @@ -310,10 +390,12 @@ object AzureSearchWriter extends IndexParser with SLogging { case IntegerType => ("Edm.Int32", None) case LongType => ("Edm.Int64", None) case DoubleType => ("Edm.Double", None) + case FloatType => ("Edm.Single", None) case DateType => ("Edm.DateTimeOffset", None) case StructType(fields) => ("Edm.ComplexType", Some(fields.map { f => val (innerType, innerFields) = sparkTypeToEdmType(f.dataType) - IndexField(f.name, innerType, None, None, None, None, None, None, None, None, None, None, innerFields) + IndexField(f.name, innerType, None, None, None, None, None, None, None, None, None, None, innerFields, + None, None) // TODO: Support vector search in nested fields })) } } diff --git a/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchAPI.scala b/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchAPI.scala index 9a9860857e..f30ab9cd92 100644 --- a/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchAPI.scala +++ b/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchAPI.scala @@ -14,7 +14,9 @@ import spray.json._ import scala.util.{Failure, Success, Try} object AzureSearchAPIConstants { - val DefaultAPIVersion = "2019-05-06" + val DefaultAPIVersion = "2023-07-01-Preview" + val VectorConfigName = "vectorConfig" + val VectorSearchAlgorithm = "hnsw" } import com.microsoft.azure.synapse.ml.cognitive.search.AzureSearchAPIConstants._ @@ -39,6 +41,26 @@ trait IndexLister { } } +trait IndexJsonGetter extends IndexLister { + def getIndexJsonFromExistingIndex(key: String, + serviceName: String, + indexName: String, + apiVersion: String = DefaultAPIVersion): String = { + val existingIndexNames = getExisting(key, serviceName, apiVersion) + assert(existingIndexNames.contains(indexName), s"Cannot find an existing index name with $indexName") + + val indexJsonRequest = new HttpGet( + s"https://$serviceName.search.windows.net/indexes/$indexName?api-version=$apiVersion" + ) + indexJsonRequest.setHeader("api-key", key) + indexJsonRequest.setHeader("Content-Type", "application/json") + val indexJsonResponse = safeSend(indexJsonRequest, close = false) + val indexJson = IOUtils.toString(indexJsonResponse.getEntity.getContent, "utf-8") + indexJsonResponse.close() + indexJson + } +} + object SearchIndex extends IndexParser with IndexLister { import AzureSearchProtocol._ @@ -94,7 +116,9 @@ object 
SearchIndex extends IndexParser with IndexLister { _ <- validAnalyzer(field.analyzer, field.searchAnalyzer, field.indexAnalyzer) _ <- validSearchAnalyzer(field.analyzer, field.searchAnalyzer, field.indexAnalyzer) _ <- validIndexAnalyzer(field.analyzer, field.searchAnalyzer, field.indexAnalyzer) - _ <- validSynonymMaps(field.synonymMap) + _ <- validVectorField(field.dimensions, field.vectorSearchConfiguration) + // TODO: Fix and add back validSynonymMaps check. SynonymMaps needs to be Option[Seq[String]] type + //_ <- validSynonymMaps(field.synonymMap) } yield field } @@ -182,6 +206,15 @@ object SearchIndex extends IndexParser with IndexLister { } } + private def validVectorField(d: Option[Int], v: Option[String]): Try[Option[String]] = { + if ((d.isDefined && v.isEmpty) || (v.isDefined && d.isEmpty)) { + Failure(new IllegalArgumentException("Both dimensions and vectorSearchConfig fields need to be defined for " + + "vector search")) + } else { + Success(v) + } + } + def getStatistics(indexName: String, key: String, serviceName: String, diff --git a/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchSchemas.scala b/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchSchemas.scala index a8d9142e09..7b0612330c 100644 --- a/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchSchemas.scala +++ b/cognitive/src/main/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchSchemas.scala @@ -5,7 +5,7 @@ package com.microsoft.azure.synapse.ml.cognitive.search import com.microsoft.azure.synapse.ml.core.schema.SparkBindings import spray.json.DefaultJsonProtocol._ -import spray.json.{JsonFormat, RootJsonFormat} +import spray.json.{DefaultJsonProtocol, JsonFormat, RootJsonFormat} object ASResponses extends SparkBindings[ASResponses] @@ -23,9 +23,19 @@ case class IndexInfo( tokenizers: Option[Seq[String]], tokenFilters: Option[Seq[String]], defaultScoringProfile: Option[Seq[String]], - corsOptions: Option[Seq[String]] + corsOptions: Option[Seq[String]], + vectorSearch: Option[VectorSearch] ) +case class AlgorithmConfigs( + name: String, + kind: String + ) + +case class VectorSearch( + algorithmConfigurations: Seq[AlgorithmConfigs] + ) + case class IndexField( name: String, `type`: String, @@ -38,21 +48,32 @@ case class IndexField( analyzer: Option[String], searchAnalyzer: Option[String], indexAnalyzer: Option[String], - synonymMap: Option[String], - fields: Option[Seq[IndexField]] + synonymMap: Option[Seq[String]], + fields: Option[Seq[IndexField]], + dimensions: Option[Int], + vectorSearchConfiguration: Option[String] ) +case class VectorColParams( + name: String, + dimension: Int + ) + case class IndexStats(documentCount: Int, storageSize: Int) case class IndexList(`@odata.context`: String, value: Seq[IndexName]) case class IndexName(name: String) -object AzureSearchProtocol { +object AzureSearchProtocol extends DefaultJsonProtocol { implicit val IfEnc: JsonFormat[IndexField] = lazyFormat(jsonFormat( IndexField,"name","type","searchable","filterable","sortable", - "facetable","retrievable", "key","analyzer","searchAnalyzer", "indexAnalyzer", "synonymMaps", "fields")) - implicit val IiEnc: RootJsonFormat[IndexInfo] = jsonFormat10(IndexInfo.apply) + "facetable","retrievable", "key","analyzer","searchAnalyzer", "indexAnalyzer", "synonymMaps", "fields", + "dimensions", "vectorSearchConfiguration")) + implicit val AcEnc: RootJsonFormat[AlgorithmConfigs] = jsonFormat2(AlgorithmConfigs.apply) + implicit 
val VsEnc: RootJsonFormat[VectorSearch] = jsonFormat1(VectorSearch.apply) + implicit val IiEnc: RootJsonFormat[IndexInfo] = jsonFormat11(IndexInfo.apply) implicit val IsEnc: RootJsonFormat[IndexStats] = jsonFormat2(IndexStats.apply) implicit val InEnc: RootJsonFormat[IndexName] = jsonFormat1(IndexName.apply) implicit val IlEnc: RootJsonFormat[IndexList] = jsonFormat2(IndexList.apply) + implicit val VcpEnc: RootJsonFormat[VectorColParams] = jsonFormat2(VectorColParams.apply) } diff --git a/cognitive/src/test/scala/com/microsoft/azure/synapse/ml/cognitive/anomaly/MultivariateAnamolyDetectionSuite.scala b/cognitive/src/test/scala/com/microsoft/azure/synapse/ml/cognitive/anomaly/MultivariateAnamolyDetectionSuite.scala index 7a4150a0d0..d0204c2365 100644 --- a/cognitive/src/test/scala/com/microsoft/azure/synapse/ml/cognitive/anomaly/MultivariateAnamolyDetectionSuite.scala +++ b/cognitive/src/test/scala/com/microsoft/azure/synapse/ml/cognitive/anomaly/MultivariateAnamolyDetectionSuite.scala @@ -4,7 +4,7 @@ package com.microsoft.azure.synapse.ml.cognitive.anomaly import com.microsoft.azure.synapse.ml.Secrets -import com.microsoft.azure.synapse.ml.core.test.base.TestBase +import com.microsoft.azure.synapse.ml.core.test.base.{Flaky, TestBase} import com.microsoft.azure.synapse.ml.core.test.benchmarks.DatasetUtils import com.microsoft.azure.synapse.ml.core.test.fuzzing.{EstimatorFuzzing, TestObject, TransformerFuzzing} import org.apache.hadoop.conf.Configuration @@ -62,7 +62,8 @@ trait MADTestUtils extends TestBase with AnomalyKey with StorageCredentials { } -class SimpleFitMultivariateAnomalySuite extends EstimatorFuzzing[SimpleFitMultivariateAnomaly] with MADTestUtils { +class SimpleFitMultivariateAnomalySuite extends EstimatorFuzzing[SimpleFitMultivariateAnomaly] + with MADTestUtils with Flaky { def simpleMultiAnomalyEstimator: SimpleFitMultivariateAnomaly = new SimpleFitMultivariateAnomaly() .setSubscriptionKey(anomalyKey) diff --git a/cognitive/src/test/scala/com/microsoft/azure/synapse/ml/cognitive/search/SearchWriterSuite.scala b/cognitive/src/test/scala/com/microsoft/azure/synapse/ml/cognitive/search/SearchWriterSuite.scala index 433a0f17ed..2a92b78d12 100644 --- a/cognitive/src/test/scala/com/microsoft/azure/synapse/ml/cognitive/search/SearchWriterSuite.scala +++ b/cognitive/src/test/scala/com/microsoft/azure/synapse/ml/cognitive/search/SearchWriterSuite.scala @@ -5,6 +5,7 @@ package com.microsoft.azure.synapse.ml.cognitive.search import com.microsoft.azure.synapse.ml.Secrets import com.microsoft.azure.synapse.ml.cognitive._ +import com.microsoft.azure.synapse.ml.cognitive.openai.{OpenAIAPIKey, OpenAIEmbedding} import com.microsoft.azure.synapse.ml.cognitive.vision.AnalyzeImage import com.microsoft.azure.synapse.ml.core.test.base.TestBase import com.microsoft.azure.synapse.ml.core.test.fuzzing.{TestObject, TransformerFuzzing} @@ -12,6 +13,7 @@ import com.microsoft.azure.synapse.ml.io.http.RESTHelpers._ import org.apache.http.client.methods.HttpDelete import org.apache.spark.ml.util.MLReadable import org.apache.spark.sql.DataFrame +import org.apache.spark.ml.linalg.Vectors import java.time.LocalDateTime import java.time.format.{DateTimeFormatterBuilder, DateTimeParseException, SignStyle} @@ -25,8 +27,8 @@ trait AzureSearchKey { } //scalastyle:off null -class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister - with TransformerFuzzing[AddDocuments] with CognitiveKey { +class SearchWriterSuite extends TestBase with AzureSearchKey with IndexJsonGetter with IndexParser + 
with TransformerFuzzing[AddDocuments] with CognitiveKey with OpenAIAPIKey { import spark.implicits._ @@ -44,6 +46,12 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister .toDF("searchAction", "id", "fileName", "text") } + private def createTestDataWithVector(numDocs: Int): DataFrame = { + (0 until numDocs) + .map(i => ("upload", s"$i", s"file$i", Array(0.001, 0.002, 0.003).map(_ * i))) + .toDF("searchAction", "id", "fileName", "vectorCol") + } + private def createSimpleIndexJson(indexName: String): String = { s""" |{ @@ -74,6 +82,43 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister """.stripMargin } + private def createSimpleIndexJsonWithVector(indexName: String): String = { + s""" + |{ + | "name": "$indexName", + | "fields": [ + | { + | "name": "id", + | "type": "Edm.String", + | "key": true, + | "facetable": false + | }, + | { + | "name": "fileName", + | "type": "Edm.String", + | "searchable": false, + | "sortable": false, + | "facetable": false + | }, + | { + | "name": "vectorCol", + | "type": "Collection(Edm.Single)", + | "dimensions": 3, + | "vectorSearchConfiguration": "vectorConfig" + | } + | ], + | "vectorSearch": { + | "algorithmConfigurations": [ + | { + | "name": "vectorConfig", + | "kind": "hnsw" + | } + | ] + | } + | } + """.stripMargin + } + private val createdIndexes: mutable.ListBuffer[String] = mutable.ListBuffer() private def generateIndexName(): String = { @@ -105,7 +150,7 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister println("Cleaning up services") val successfulCleanup = getExisting(azureSearchKey, testServiceName) .intersect(createdIndexes).map { n => - deleteIndex(n) + deleteIndex(n) }.forall(_ == 204) cleanOldIndexes() super.afterAll() @@ -173,12 +218,15 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister def writeHelper(df: DataFrame, indexName: String, + isVectorField: Boolean, extraParams: Map[String, String] = Map()): Unit = { + val indexJson = if (isVectorField) createSimpleIndexJsonWithVector(indexName) else createSimpleIndexJson(indexName) AzureSearchWriter.write(df, Map("subscriptionKey" -> azureSearchKey, "actionCol" -> "searchAction", "serviceName" -> testServiceName, - "indexJson" -> createSimpleIndexJson(indexName)) ++ extraParams) + "indexJson" -> indexJson) + ++ extraParams) } def assertSize(indexName: String, size: Int): Unit = { @@ -186,15 +234,15 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister () } - ignore("clean up all search indexes"){ + ignore("clean up all search indexes") { getExisting(azureSearchKey, testServiceName) .foreach { n => - val deleteRequest = new HttpDelete( - s"https://$testServiceName.search.windows.net/indexes/$n?api-version=2017-11-11") - deleteRequest.setHeader("api-key", azureSearchKey) - val response = safeSend(deleteRequest) - println(s"Deleted index $n, status code ${response.getStatusLine.getStatusCode}") - } + val deleteRequest = new HttpDelete( + s"https://$testServiceName.search.windows.net/indexes/$n?api-version=2017-11-11") + deleteRequest.setHeader("api-key", azureSearchKey) + val response = safeSend(deleteRequest) + println(s"Deleted index $n, status code ${response.getStatusLine.getStatusCode}") + } } test("Run azure-search tests with waits") { @@ -209,17 +257,17 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister //create new index and add docs lazy val in1 = generateIndexName() - dependsOn(1, writeHelper(df4, in1)) + dependsOn(1, 
writeHelper(df4, in1, isVectorField=false)) //push docs to existing index lazy val in2 = generateIndexName() lazy val dfA = df10.limit(4) lazy val dfB = df10.except(dfA) - dependsOn(2, writeHelper(dfA, in2)) + dependsOn(2, writeHelper(dfA, in2, isVectorField=false)) dependsOn(2, retryWithBackoff({ if (getExisting(azureSearchKey, testServiceName).contains(in2)) { - writeHelper(dfB, in2) + writeHelper(dfB, in2, isVectorField=false) } else { throw new RuntimeException("No existing service found") } @@ -227,7 +275,7 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister //push docs with custom batch size lazy val in3 = generateIndexName() - dependsOn(3, writeHelper(bigDF, in3, Map("batchSize" -> "2000"))) + dependsOn(3, writeHelper(bigDF, in3, isVectorField=false, Map("batchSize" -> "2000"))) dependsOn(1, retryWithBackoff(assertSize(in1, 4))) dependsOn(2, retryWithBackoff(assertSize(in2, 10))) @@ -276,17 +324,17 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister .map { i => ("upload", s"$i", s"file$i", s"text$i") } .toDF("searchAction", "badkeyname", "fileName", "text") assertThrows[IllegalArgumentException] { - writeHelper(mismatchDF, generateIndexName()) + writeHelper(mismatchDF, generateIndexName(), isVectorField=false) } } /** - * All the Edm Types are nullable in Azure Search except for Collection(Edm.String). - * Because it is not possible to store a null value in a Collection(Edm.String) field, - * there is an option to set a boolean flag, filterNulls, that will remove null values - * from the dataset in the Collection(Edm.String) fields before writing the data to the search index. - * The default value for this boolean flag is False. - */ + * All the Edm Types are nullable in Azure Search except for Collection(Edm.String). + * Because it is not possible to store a null value in a Collection(Edm.String) field, + * there is an option to set a boolean flag, filterNulls, that will remove null values + * from the dataset in the Collection(Edm.String) fields before writing the data to the search index. + * The default value for this boolean flag is False. 
+ */ test("Handle null values for Collection(Edm.String) fields") { val in = generateIndexName() val phraseIndex = @@ -387,4 +435,233 @@ class SearchWriterSuite extends TestBase with AzureSearchKey with IndexLister retryWithBackoff(assertSize(in, 2)) } + test("Run azure-search tests with vector fields") { + val in1 = generateIndexName() + val vectorDF4 = createTestDataWithVector(4) + + writeHelper(vectorDF4, in1, isVectorField=true) + + val in2 = generateIndexName() + val vectorDF10 = createTestDataWithVector(10) + val dfA = vectorDF10.limit(4) + val dfB = vectorDF10.except(dfA) + + writeHelper(dfA, in2, isVectorField=true) + + retryWithBackoff({ + if (getExisting(azureSearchKey, testServiceName).contains(in2)) { + writeHelper(dfB, in2, isVectorField=true) + } else { + throw new RuntimeException("No existing service found") + } + }) + + retryWithBackoff(assertSize(in1, 4)) + retryWithBackoff(assertSize(in2, 10)) + + val indexJson = retryWithBackoff(getIndexJsonFromExistingIndex(azureSearchKey, testServiceName, in1)) + // assert if vectorCol is a vector field + assert(parseIndexJson(indexJson).fields.find(_.name == "vectorCol").get.vectorSearchConfiguration.nonEmpty) + } + + test("Infer the structure of the index from the dataframe with vector columns") { + val in = generateIndexName() + val phraseDF = Seq( + ("upload", "0", "file0", Array(1.1, 2.1, 3.1), Vectors.dense(0.11, 0.21, 0.31), + Vectors.sparse(3, Array(0, 1, 2), Array(0.11, 0.21, 0.31))), + ("upload", "1", "file1", Array(1.2, 2.2, 3.2), Vectors.dense(0.12, 0.22, 0.32), + Vectors.sparse(3, Array(0, 1, 2), Array(0.11, 0.21, 0.31)))) + .toDF("searchAction", "id", "fileName", "vectorCol1", "vectorCol2", "vectorCol3") + + val vectorCols = + """ + |[ + | {"name": "vectorCol1", "dimension": 3}, + | {"name": "vectorCol2", "dimension": 3}, + | {"name": "vectorCol3", "dimension": 3} + |] + |""".stripMargin + + AzureSearchWriter.write(phraseDF, + Map( + "subscriptionKey" -> azureSearchKey, + "actionCol" -> "searchAction", + "serviceName" -> testServiceName, + "filterNulls" -> "true", + "indexName" -> in, + "keyCol" -> "id", + "vectorCols" -> vectorCols + )) + + retryWithBackoff(assertSize(in, 2)) + + // assert if vectorCols are a vector field + val indexJson = retryWithBackoff(getIndexJsonFromExistingIndex(azureSearchKey, testServiceName, in)) + assert(parseIndexJson(indexJson).fields.find(_.name == "vectorCol1").get.vectorSearchConfiguration.nonEmpty) + assert(parseIndexJson(indexJson).fields.find(_.name == "vectorCol2").get.vectorSearchConfiguration.nonEmpty) + assert(parseIndexJson(indexJson).fields.find(_.name == "vectorCol3").get.vectorSearchConfiguration.nonEmpty) + } + + test("Throw useful error when given vector columns in nested fields") { + val in = generateIndexName() + val badJson = + s""" + |{ + | "name": "$in", + | "fields": [ + | { + | "name": "id", + | "type": "Edm.String", + | "key": true, + | "facetable": false + | }, + | { + | "name": "someCollection", + | "type": "Edm.String" + | }, + | { + | "name": "complexField", + | "type": "Edm.ComplexType", + | "fields": [ + | { + | "name": "StreetAddress", + | "type": "Edm.String" + | }, + | { + | "name": "contentVector", + | "type": "Collection(Edm.Single)", + | "dimensions": 3, + | "vectorSearchConfiguration": "vectorConfig" + | } + | ] + | } + | ] + |} + """.stripMargin + + assertThrows[IllegalArgumentException] { + AzureSearchWriter.write(df4, + Map( + "subscriptionKey" -> azureSearchKey, + "actionCol" -> "searchAction", + "serviceName" -> testServiceName, + "filterNulls" -> 
"true", + "indexJson" -> badJson + )) + } + } + + test("Throw useful error when one of dimensions or vectorSearchConfig is not defined") { + val in = generateIndexName() + val badJson = + s""" + |{ + | "name": "$in", + | "fields": [ + | { + | "name": "id", + | "type": "Edm.String", + | "key": true, + | "facetable": false + | }, + | { + | "name": "someCollection", + | "type": "Edm.String" + | }, + | { + | "name": "contentVector", + | "type": "Collection(Edm.Single)", + | "dimensions": 3 + | } + | ] + |} + """.stripMargin + + assertThrows[IllegalArgumentException] { + SearchIndex.createIfNoneExists(azureSearchKey, testServiceName, badJson) + } + } + + test("Handle non-existent vector column specified in vectorCols option") { + val in = generateIndexName() + val phraseDF = Seq( + ("upload", "0", "file0"), + ("upload", "1", "file1")) + .toDF("searchAction", "id", "fileName") + + AzureSearchWriter.write(phraseDF, + Map( + "subscriptionKey" -> azureSearchKey, + "actionCol" -> "searchAction", + "serviceName" -> testServiceName, + "indexName" -> in, + "keyCol" -> "id", + "vectorCols" -> """[{"name": "vectorCol", "dimension": 3}]""" + )) + + retryWithBackoff(assertSize(in, 2)) + } + + test("Handle non-existing vector column specified in index JSON option") { + val in = generateIndexName() + val phraseDF = Seq( + ("upload", "0", "file0"), + ("upload", "1", "file1")) + .toDF("searchAction", "id", "fileName") + + AzureSearchWriter.write(phraseDF, + Map( + "subscriptionKey" -> azureSearchKey, + "actionCol" -> "searchAction", + "serviceName" -> testServiceName, + "indexJson" -> createSimpleIndexJsonWithVector(in) + )) + + retryWithBackoff(assertSize(in, 2)) + } + + test("Throw useful error when the vector column is an unsupported type") { + val in = generateIndexName() + val badDF = Seq( + ("upload", "0", "file0", Array("p1", "p2", "p3")), + ("upload", "1", "file1", Array("p4", "p5", "p6"))) + .toDF("searchAction", "id", "fileName", "vectorCol") + + assertThrows[AssertionError] { + writeHelper(badDF, in, isVectorField=true) + } + } + + test("pipeline with openai embedding") { + val in = generateIndexName() + + val df = Seq( + ("upload", "0", "this is the first sentence"), + ("upload", "1", "this is the second sentence") + ).toDF("searchAction", "id", "content") + + val tdf = new OpenAIEmbedding() + .setSubscriptionKey(openAIAPIKey) + .setDeploymentName("text-embedding-ada-002") + .setCustomServiceName(openAIServiceName) + .setTextCol("content") + .setErrorCol("error") + .setOutputCol("vectorContent") + .transform(df) + .drop("error") + + AzureSearchWriter.write(tdf, + Map( + "subscriptionKey" -> azureSearchKey, + "actionCol" -> "searchAction", + "serviceName" -> testServiceName, + "indexName" -> in, + "keyCol" -> "id", + "vectorCols" -> """[{"name": "vectorContent", "dimension": 1536}]""" + )) + + retryWithBackoff(assertSize(in, 2)) + val indexJson = retryWithBackoff(getIndexJsonFromExistingIndex(azureSearchKey, testServiceName, in)) + assert(parseIndexJson(indexJson).fields.find(_.name == "vectorContent").get.vectorSearchConfiguration.nonEmpty) + } } diff --git a/core/src/main/dotnet/src/dotnetBase.csproj b/core/src/main/dotnet/src/dotnetBase.csproj index 700a502249..18f10e5f3f 100644 --- a/core/src/main/dotnet/src/dotnetBase.csproj +++ b/core/src/main/dotnet/src/dotnetBase.csproj @@ -7,7 +7,7 @@ true SynapseML .NET Base - 0.11.1 + 0.11.2 diff --git a/core/src/main/scala/com/microsoft/azure/synapse/ml/codegen/DotnetCodegen.scala 
b/core/src/main/scala/com/microsoft/azure/synapse/ml/codegen/DotnetCodegen.scala index 608bbda7b5..dacb756e67 100644 --- a/core/src/main/scala/com/microsoft/azure/synapse/ml/codegen/DotnetCodegen.scala +++ b/core/src/main/scala/com/microsoft/azure/synapse/ml/codegen/DotnetCodegen.scala @@ -53,7 +53,7 @@ object DotnetCodegen { | | | - | + | | | $newtonsoftDep | diff --git a/core/src/main/scala/com/microsoft/azure/synapse/ml/core/env/FileUtilities.scala b/core/src/main/scala/com/microsoft/azure/synapse/ml/core/env/FileUtilities.scala index 7d82f1fc15..baecf3d8bd 100644 --- a/core/src/main/scala/com/microsoft/azure/synapse/ml/core/env/FileUtilities.scala +++ b/core/src/main/scala/com/microsoft/azure/synapse/ml/core/env/FileUtilities.scala @@ -68,6 +68,13 @@ object FileUtilities { () } + def copyAndRenameFile(from: File, toDir: File, newName: String, overwrite: Boolean = false): Unit = { + Files.copy(from.toPath, new File(toDir, newName).toPath, + (if (overwrite) Seq(StandardCopyOption.REPLACE_EXISTING) + else Seq()): _*) + () + } + // Perhaps this should move into a more specific place, not a generic file utils thing def zipFolder(dir: File, out: File): Unit = { import java.io.{BufferedInputStream, FileInputStream, FileOutputStream} diff --git a/core/src/test/scala/com/microsoft/azure/synapse/ml/Secrets.scala b/core/src/test/scala/com/microsoft/azure/synapse/ml/Secrets.scala index cac166f64f..9ede008c58 100644 --- a/core/src/test/scala/com/microsoft/azure/synapse/ml/Secrets.scala +++ b/core/src/test/scala/com/microsoft/azure/synapse/ml/Secrets.scala @@ -71,5 +71,4 @@ object Secrets { lazy val ServiceConnectionSecret: String = getSecret("service-connection-secret") lazy val ServicePrincipalClientId: String = getSecret("service-principal-clientId") - lazy val SecretRegexpFile: String = getSecret("secret-regexp-file") } diff --git a/core/src/test/scala/com/microsoft/azure/synapse/ml/codegen/DotnetTestGen.scala b/core/src/test/scala/com/microsoft/azure/synapse/ml/codegen/DotnetTestGen.scala index aa4d8b463f..2cfd4ad6f7 100644 --- a/core/src/test/scala/com/microsoft/azure/synapse/ml/codegen/DotnetTestGen.scala +++ b/core/src/test/scala/com/microsoft/azure/synapse/ml/codegen/DotnetTestGen.scala @@ -89,7 +89,7 @@ object DotnetTestGen { | runtime; build; native; contentfiles; analyzers | | - | + | | | | $referenceCore diff --git a/core/src/test/scala/com/microsoft/azure/synapse/ml/core/test/base/TestBase.scala b/core/src/test/scala/com/microsoft/azure/synapse/ml/core/test/base/TestBase.scala index 56d1f7581f..41c4583593 100644 --- a/core/src/test/scala/com/microsoft/azure/synapse/ml/core/test/base/TestBase.scala +++ b/core/src/test/scala/com/microsoft/azure/synapse/ml/core/test/base/TestBase.scala @@ -6,7 +6,9 @@ package com.microsoft.azure.synapse.ml.core.test.base import breeze.linalg.norm.Impl import breeze.linalg.{*, norm, DenseMatrix => BDM, DenseVector => BDV} import breeze.math.Field +import com.globalmentor.apache.hadoop.fs.BareLocalFileSystem import org.apache.commons.io.FileUtils +import org.apache.hadoop.fs.FileSystem import org.apache.spark._ import org.apache.spark.ml._ import org.apache.spark.sql.{DataFrame, _} @@ -67,6 +69,8 @@ trait SparkSessionManagement { .config(sparkConfiguration) .getOrCreate() sess.sparkContext.setLogLevel(logLevel) + sess.sparkContext.hadoopConfiguration + .setClass("fs.file.impl", classOf[BareLocalFileSystem], classOf[FileSystem]) sess } diff --git a/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/DatabricksUtilities.scala 
b/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/DatabricksUtilities.scala index 1ac4872632..5aee217d4b 100644 --- a/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/DatabricksUtilities.scala +++ b/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/DatabricksUtilities.scala @@ -84,19 +84,19 @@ object DatabricksUtilities { ).toJson.compactPrint // Execution Params - val TimeoutInMillis: Int = 40 * 60 * 1000 + val TimeoutInMillis: Int = 50 * 60 * 1000 - val NotebookFiles: Array[File] = FileUtilities.recursiveListFiles( - FileUtilities.join( - BuildInfo.baseDirectory.getParent, "notebooks", "features").getCanonicalFile) + val DocsDir = FileUtilities.join(BuildInfo.baseDirectory.getParent, "docs").getCanonicalFile() + val NotebookFiles: Array[File] = FileUtilities.recursiveListFiles(DocsDir) + .filter(_.toString.endsWith(".ipynb")) val ParallelizableNotebooks: Seq[File] = NotebookFiles.filterNot(_.isDirectory) val CPUNotebooks: Seq[File] = ParallelizableNotebooks - .filterNot(_.getAbsolutePath.contains("simple_deep_learning")) + .filterNot(_.getAbsolutePath.contains("Fine-tune")) .filterNot(_.getAbsolutePath.contains("Explanation Dashboard")) // TODO Remove this exclusion - val GPUNotebooks: Seq[File] = ParallelizableNotebooks.filter(_.getAbsolutePath.contains("simple_deep_learning")) + val GPUNotebooks: Seq[File] = ParallelizableNotebooks.filter(_.getAbsolutePath.contains("Fine-tune")) def databricksGet(path: String): JsValue = { val request = new HttpGet(BaseURL + path) @@ -336,13 +336,15 @@ object DatabricksUtilities { //scalastyle:on cyclomatic.complexity def uploadAndSubmitNotebook(clusterId: String, notebookFile: File): DatabricksNotebookRun = { - val destination: String = Folder + "/" + notebookFile.getName + val dirPaths = DocsDir.toURI.relativize(notebookFile.getParentFile.toURI).getPath + val folderToCreate = Folder + "/" + dirPaths + println(s"Creating folder $folderToCreate") + workspaceMkDir(folderToCreate) + val destination: String = folderToCreate + notebookFile.getName uploadNotebook(notebookFile, destination) val runId: Int = submitRun(clusterId, destination) val run: DatabricksNotebookRun = DatabricksNotebookRun(runId, notebookFile.getName) - println(s"Successfully submitted job run id ${run.runId} for notebook ${run.notebookName}") - run } @@ -413,9 +415,6 @@ abstract class DatabricksTestHelper extends TestBase { assert(areLibrariesInstalled(clusterId)) } - println(s"Creating folder $Folder") - workspaceMkDir(Folder) - println(s"Submitting jobs") val parNotebookRuns: Seq[DatabricksNotebookRun] = notebooks.map(uploadAndSubmitNotebook(clusterId, _)) parNotebookRuns.foreach(notebookRun => jobIdsToCancel.append(notebookRun.runId)) diff --git a/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/SharedNotebookE2ETestUtilities.scala b/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/SharedNotebookE2ETestUtilities.scala index 651b74916b..df72acab95 100644 --- a/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/SharedNotebookE2ETestUtilities.scala +++ b/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/SharedNotebookE2ETestUtilities.scala @@ -4,34 +4,76 @@ package com.microsoft.azure.synapse.ml.nbtest import com.microsoft.azure.synapse.ml.build.BuildInfo -import com.microsoft.azure.synapse.ml.core.env.FileUtilities +import com.microsoft.azure.synapse.ml.core.env.{FileUtilities, StreamUtilities} import org.apache.commons.io.FileUtils import java.io.File import java.lang.ProcessBuilder.Redirect import scala.sys.process._ - 
+import scala.io.Source +import java.io.{BufferedWriter, File, FileWriter} object SharedNotebookE2ETestUtilities { val ResourcesDirectory = new File(getClass.getResource("/").toURI) val NotebooksDir = new File(ResourcesDirectory, "generated-notebooks") + val NotebookPreamble: String = + """ + |# In[ ]: + | + | + |# This cell ensures make magic command like '%pip install' works on synapse scheduled spark jobs + |from synapse.ml.core.platform import running_on_synapse + | + |if running_on_synapse(): + | from IPython import get_ipython + | from IPython.terminal.interactiveshell import TerminalInteractiveShell + | from synapse.ml.core.platform import materializing_display as display + | from pyspark.sql import SparkSession + | + | spark = SparkSession.builder.getOrCreate() + | try: + | shell = TerminalInteractiveShell.instance() + | except: + | pass + | + |""".stripMargin + + def insertTextInFile(file: File, textToPrepend: String, locToInsert: Int): Unit = { + val existingLines = StreamUtilities.using(Source.fromFile(file)) { s => + s.getLines().toList + }.get + val linesBefore = existingLines.take(locToInsert) + val linesAfter = existingLines.takeRight(existingLines.length - locToInsert) + val linesInMiddle = textToPrepend.split("\n") + val newText = (linesBefore ++ linesInMiddle ++ linesAfter).mkString("\n") + StreamUtilities.using(new BufferedWriter(new FileWriter(file))) { writer => + writer.write(newText) + } + } def generateNotebooks(): Unit = { cleanUpGeneratedNotebooksDir() - FileUtilities.recursiveListFiles(FileUtilities - .join(BuildInfo.baseDirectory.getParent, "notebooks/features") - .getCanonicalFile) + val docsDir = FileUtilities.join(BuildInfo.baseDirectory.getParent, "docs").getCanonicalFile + val newFiles = FileUtilities.recursiveListFiles(docsDir) .filter(_.getName.endsWith(".ipynb")) .map { f => - FileUtilities.copyFile(f, NotebooksDir, true) - val newFile = new File(NotebooksDir, f.getName) - val targetName = new File(NotebooksDir, f.getName.replace(" ", "").replace("-", "")) - newFile.renameTo(targetName) - targetName + val relative = docsDir.toURI.relativize(f.toURI).getPath + val newName = relative + .replace("/", "") + .replace(" ", "") + .replace("-", "") + .replace(",", "") + FileUtilities.copyAndRenameFile(f, NotebooksDir, newName, true) + new File(NotebooksDir, newName) } runCmd(activateCondaEnv ++ Seq("jupyter", "nbconvert", "--to", "python", "*.ipynb"), NotebooksDir) + + newFiles.foreach { f => + insertTextInFile(new File(f.getPath.replace(".ipynb", ".py")), NotebookPreamble, 2) + } + } def cleanUpGeneratedNotebooksDir(): Unit = { diff --git a/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/SynapseTests.scala b/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/SynapseTests.scala index 5dfd76c4ae..195cd97800 100644 --- a/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/SynapseTests.scala +++ b/core/src/test/scala/com/microsoft/azure/synapse/ml/nbtest/SynapseTests.scala @@ -44,11 +44,13 @@ class SynapseTests extends TestBase { val selectedPythonFiles: Array[File] = FileUtilities.recursiveListFiles(SharedNotebookE2ETestUtilities.NotebooksDir) .filter(_.getAbsolutePath.endsWith(".py")) - .filterNot(_.getAbsolutePath.contains("DeepLearningDeepTextClassification")) // Excluded by design task 1829306 - .filterNot(_.getAbsolutePath.contains("DeepLearningDeepVisionClassification")) // Excluded by design task 1829306 - .filterNot(_.getAbsolutePath.contains("VowpalWabbitClassificationusingVWnativeFormat")) - 
.filterNot(_.getAbsolutePath.contains("VowpalWabbitMulticlassclassification")) // Wait for Synpase fix - .filterNot(_.getAbsolutePath.contains("Langchain")) // Wait for Synpase fix + .filterNot(_.getAbsolutePath.contains("Finetune")) // Excluded by design task 1829306 + .filterNot(_.getAbsolutePath.contains("VWnativeFormat")) + .filterNot(_.getAbsolutePath.contains("VowpalWabbitMulticlassclassification")) // Wait for Synapse fix + .filterNot(_.getAbsolutePath.contains("Langchain")) // Wait for Synapse fix + .filterNot(_.getAbsolutePath.contains("DocumentQuestionandAnsweringwithPDFs")) // Wait for Synapse fix + .filterNot(_.getAbsolutePath.contains("SetupCognitive")) // No code to run + .filterNot(_.getAbsolutePath.contains("CreateaSparkCluster")) // No code to run .sortBy(_.getAbsolutePath) val expectedPoolCount: Int = selectedPythonFiles.length diff --git a/website/versioned_docs/version-0.10.1/features/spark_serving/about.md b/docs/Deploy Models/Overview.md similarity index 99% rename from website/versioned_docs/version-0.10.1/features/spark_serving/about.md rename to docs/Deploy Models/Overview.md index 1aaeadde49..4d0f54ea18 100644 --- a/website/versioned_docs/version-0.10.1/features/spark_serving/about.md +++ b/docs/Deploy Models/Overview.md @@ -33,7 +33,7 @@ sidebar_label: About ### Jupyter Notebook Examples -- [Deploy a classifier trained on the Adult Census Dataset](../SparkServing%20-%20Deploying%20a%20Classifier) +- [Deploy a classifier trained on the Adult Census Dataset](../Quickstart%20-%20Deploying%20a%20Classifier) - More coming soon! ### Spark Serving Hello World diff --git a/notebooks/features/spark_serving/SparkServing - Deploying a Classifier.ipynb b/docs/Deploy Models/Quickstart - Deploying a Classifier.ipynb similarity index 92% rename from notebooks/features/spark_serving/SparkServing - Deploying a Classifier.ipynb rename to docs/Deploy Models/Quickstart - Deploying a Classifier.ipynb index 92094e7743..3d98b12a8d 100644 --- a/notebooks/features/spark_serving/SparkServing - Deploying a Classifier.ipynb +++ b/docs/Deploy Models/Quickstart - Deploying a Classifier.ipynb @@ -9,31 +9,6 @@ "First, we import needed packages:" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "import sys\n", - "import numpy as np\n", - "import pandas as pd" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -229,4 +204,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/notebooks/features/cognitive_services/CognitiveServices - Advanced Usage Async, Batching, and Multi-Key.ipynb b/docs/Explore Algorithms/AI Services/Advanced Usage - Async, Batching, and Multi-Key.ipynb similarity index 89% rename from notebooks/features/cognitive_services/CognitiveServices - Advanced Usage Async, Batching, and Multi-Key.ipynb rename to docs/Explore Algorithms/AI Services/Advanced Usage - Async, Batching, and Multi-Key.ipynb index 6f7d8b12d4..f0a7b158fa 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - Advanced Usage Async, Batching, and Multi-Key.ipynb +++ b/docs/Explore Algorithms/AI Services/Advanced Usage - Async, Batching, and Multi-Key.ipynb @@ -2,95 +2,99 @@ "cells": [ { "cell_type": "markdown", - "source": [ - "# 
Cognitive Services Advanced Guide: Asynchrony, Batching, Multi-Key" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "1a39046d-a692-44c3-b673-78dfc1f97e08", "inputWidgets": {}, + "nuid": "1a39046d-a692-44c3-b673-78dfc1f97e08", + "showTitle": false, "title": "" } - } + }, + "source": [ + "# Cognitive Services Advanced Guide: Asynchrony, Batching, Multi-Key" + ] }, { "cell_type": "markdown", - "source": [ - "## Step 1: Imports and Keys" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "0f8d5274-46d5-4604-be41-1f3f5d481d9a", "inputWidgets": {}, + "nuid": "0f8d5274-46d5-4604-be41-1f3f5d481d9a", + "showTitle": false, "title": "" } - } + }, + "source": [ + "## Step 1: Imports and Keys" + ] }, { "cell_type": "code", - "source": [ - "import os\n", - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import running_on_synapse, find_secret\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "if running_on_synapse():\n", - " from notebookutils.visualization import display\n", - "\n", - "service_key = find_secret(\"cognitive-api-key\")\n", - "service_loc = \"eastus\"" - ], + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "84829dd7-0e7d-4ee3-aa9e-c3aa6ef96c8d", "inputWidgets": {}, + "nuid": "84829dd7-0e7d-4ee3-aa9e-c3aa6ef96c8d", + "showTitle": false, "title": "" } }, "outputs": [], - "execution_count": 0 + "source": [ + "from synapse.ml.core.platform import find_secret\n", + "\n", + "service_key = find_secret(\"cognitive-api-key\")\n", + "service_loc = \"eastus\"" + ] }, { "cell_type": "markdown", - "source": [ - "## Step 2: Basic Usage" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "8a49dfe2-f00d-4db5-95d5-f119fc09e2ee", "inputWidgets": {}, + "nuid": "8a49dfe2-f00d-4db5-95d5-f119fc09e2ee", + "showTitle": false, "title": "" } - } + }, + "source": [ + "## Step 2: Basic Usage" + ] }, { "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "93d1a1d0-96b5-48a2-9248-0d9facdae679", + "showTitle": false, + "title": "" + } + }, "source": [ "Image 1 | Image 2 | Image 3 \n", ":-------------------------:|:-------------------------:|:----------------------:|\n", "! 
| | " - ], + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "93d1a1d0-96b5-48a2-9248-0d9facdae679", "inputWidgets": {}, + "nuid": "9e1933f3-06b3-4dfd-a6a2-30d33d7da845", + "showTitle": false, "title": "" } - } - }, - { - "cell_type": "code", + }, + "outputs": [], "source": [ "from synapse.ml.cognitive.vision import AnalyzeImage\n", "\n", @@ -115,85 +119,83 @@ ")\n", "\n", "image_results = analyzer.transform(image_df).cache()" - ], - "metadata": { - "application/vnd.databricks.v1+cell": { - "showTitle": false, - "cellMetadata": {}, - "nuid": "9e1933f3-06b3-4dfd-a6a2-30d33d7da845", - "inputWidgets": {}, - "title": "" - } - }, - "outputs": [], - "execution_count": 0 + ] }, { "cell_type": "markdown", - "source": [ - "#### First we'll look at the full response objects:" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "8f759bfa-4b88-4659-a535-d768ddee9e4f", "inputWidgets": {}, + "nuid": "8f759bfa-4b88-4659-a535-d768ddee9e4f", + "showTitle": false, "title": "" } - } + }, + "source": [ + "#### First we'll look at the full response objects:" + ] }, { "cell_type": "code", - "source": [ - "display(image_results)" - ], + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "9fae2ca5-f16a-460b-94ec-e433f24f7fb4", "inputWidgets": {}, + "nuid": "9fae2ca5-f16a-460b-94ec-e433f24f7fb4", + "showTitle": false, "title": "" } }, "outputs": [], - "execution_count": 0 + "source": [ + "display(image_results)" + ] }, { "cell_type": "markdown", - "source": [ - "#### We can select out just what we need:" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "7b08b439-a505-4de3-a71e-63af30453163", "inputWidgets": {}, + "nuid": "7b08b439-a505-4de3-a71e-63af30453163", + "showTitle": false, "title": "" } - } + }, + "source": [ + "#### We can select out just what we need:" + ] }, { "cell_type": "code", - "source": [ - "display(image_results.select(\"analysis_results.description.captions.text\"))" - ], + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "88e738a6-f1bf-4077-8436-984aac858b1b", "inputWidgets": {}, + "nuid": "88e738a6-f1bf-4077-8436-984aac858b1b", + "showTitle": false, "title": "" } }, "outputs": [], - "execution_count": 0 + "source": [ + "display(image_results.select(\"analysis_results.description.captions.text\"))" + ] }, { "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "c6c2fd12-8c26-4f96-b0a5-0c55c04c182a", + "showTitle": false, + "title": "" + } + }, "source": [ "#### What's going on under the hood\n", "\n", @@ -201,98 +203,101 @@ "\n", "When we call the cognitive service transformer, we start cognitive service clients on each of your spark workers.\n", "These clients send requests to the cloud, and turn the JSON responses into Spark Struct Types so that you can access any field that the service returns." 
- ], + ] + }, + { + "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "c6c2fd12-8c26-4f96-b0a5-0c55c04c182a", "inputWidgets": {}, + "nuid": "31618622-57db-4973-8ab8-1bab6d7efd2e", + "showTitle": false, "title": "" } - } + }, + "source": [ + "## Step 3: Asynchronous Usage" + ] }, { "cell_type": "markdown", - "source": [ - "## Step 3: Asynchronous Usage" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "31618622-57db-4973-8ab8-1bab6d7efd2e", "inputWidgets": {}, + "nuid": "8e7e5ace-71c2-4170-8b5d-350297b907db", + "showTitle": false, "title": "" } - } - }, - { - "cell_type": "markdown", + }, "source": [ "\n", "\n", "Apache Spark ordinarily parallelizes a computation to all of it's worker threads. When working with services however this parallelism doesent fully maximize throughput because workers sit idle as requests are processed on the server. The `concurrency` parameter makes sure that each worker can stay busy as they wait for requests to complete." - ], - "metadata": { - "application/vnd.databricks.v1+cell": { - "showTitle": false, - "cellMetadata": {}, - "nuid": "8e7e5ace-71c2-4170-8b5d-350297b907db", - "inputWidgets": {}, - "title": "" - } - } + ] }, { "cell_type": "code", - "source": [ - "display(analyzer.setConcurrency(3).transform(image_df))" - ], + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "f874a63e-f22e-4c6f-9d54-83f93d140721", "inputWidgets": {}, + "nuid": "f874a63e-f22e-4c6f-9d54-83f93d140721", + "showTitle": false, "title": "" } }, "outputs": [], - "execution_count": 0 + "source": [ + "display(analyzer.setConcurrency(3).transform(image_df))" + ] }, { "cell_type": "markdown", - "source": [ - "#### Faster without extra hardware:\n" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "f82c9d17-77db-44fa-8d1c-b0b7905c0e31", "inputWidgets": {}, + "nuid": "f82c9d17-77db-44fa-8d1c-b0b7905c0e31", + "showTitle": false, "title": "" } - } + }, + "source": [ + "#### Faster without extra hardware:\n", + "" + ] }, { "cell_type": "markdown", - "source": [ - "## Step 4: Batching" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "d54b3f5e-8d44-486f-97a3-0b8528934e73", "inputWidgets": {}, + "nuid": "d54b3f5e-8d44-486f-97a3-0b8528934e73", + "showTitle": false, "title": "" } - } + }, + "source": [ + "## Step 4: Batching" + ] }, { "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "c3092f7b-105b-4171-9649-f04b189d76a0", + "showTitle": false, + "title": "" + } + }, + "outputs": [], "source": [ "from synapse.ml.cognitive.text import TextSentiment\n", "\n", @@ -318,36 +323,36 @@ "\n", "# Show the results of your text query\n", "display(sentiment.transform(text_df).select(\"text\", \"sentiment.document.sentiment\"))" - ], + ] + }, + { + "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "c3092f7b-105b-4171-9649-f04b189d76a0", "inputWidgets": {}, + "nuid": "ee4a9f18-d845-4059-9edd-9bd625a75a1a", + "showTitle": false, "title": "" } }, - "outputs": [], - "execution_count": 0 - }, - { - "cell_type": "markdown", "source": [ "## Step 5: Multi-Key" - ], + ] + }, + { + 
"cell_type": "code", + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "ee4a9f18-d845-4059-9edd-9bd625a75a1a", "inputWidgets": {}, + "nuid": "a6f89d8b-7cd1-42be-8310-62989c80deb2", + "showTitle": false, "title": "" } - } - }, - { - "cell_type": "code", + }, + "outputs": [], "source": [ "from synapse.ml.cognitive.text import TextSentiment\n", "from pyspark.sql.functions import udf\n", @@ -365,64 +370,60 @@ "image_df2 = image_df.withColumn(\"key\", random_key())\n", "\n", "results = analyzer.setSubscriptionKeyCol(\"key\").transform(image_df2)" - ], - "metadata": { - "application/vnd.databricks.v1+cell": { - "showTitle": false, - "cellMetadata": {}, - "nuid": "a6f89d8b-7cd1-42be-8310-62989c80deb2", - "inputWidgets": {}, - "title": "" - } - }, - "outputs": [], - "execution_count": 0 + ] }, { "cell_type": "code", - "source": [ - "display(results.select(\"key\", \"analysis_results.description.captions.text\"))" - ], + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "c2f0ff6f-688e-4ca0-88eb-9eb8bda66786", "inputWidgets": {}, + "nuid": "c2f0ff6f-688e-4ca0-88eb-9eb8bda66786", + "showTitle": false, "title": "" } }, "outputs": [], - "execution_count": 0 + "source": [ + "display(results.select(\"key\", \"analysis_results.description.captions.text\"))" + ] }, { "cell_type": "markdown", - "source": [ - "## Learn More\n- [Explore other cogntive services](https://microsoft.github.io/SynapseML/docs/features/cognitive_services/CognitiveServices%20-%20Overview/)\n- [Read our paper \"Large-Scale Intelligent Microservices\"](https://arxiv.org/abs/2009.08044)" - ], "metadata": { "application/vnd.databricks.v1+cell": { - "showTitle": false, "cellMetadata": {}, - "nuid": "1ed7401d-28f7-4133-93e3-08e145772502", "inputWidgets": {}, + "nuid": "1ed7401d-28f7-4133-93e3-08e145772502", + "showTitle": false, "title": "" } - } + }, + "source": [ + "## Learn More\n", + "- [Explore other cogntive services](../Overview)\n", + "- [Read our paper \"Large-Scale Intelligent Microservices\"](https://arxiv.org/abs/2009.08044)" + ] } ], "metadata": { "application/vnd.databricks.v1+notebook": { - "notebookName": "CognitiveServices - Advanced Usage: Async, Batching, and Multi-Key", "dashboards": [], + "language": "python", "notebookMetadata": { "pythonIndentUnit": 2 }, + "notebookName": "CognitiveServices - Advanced Usage: Async, Batching, and Multi-Key", + "notebookOrigID": 3743502060540796, + "widgets": {} + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", "language": "python", - "widgets": {}, - "notebookOrigID": 3743502060540796 + "name": "python3" } }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/notebooks/features/geospatial_services/GeospatialServices - Overview.ipynb b/docs/Explore Algorithms/AI Services/Geospatial Services.ipynb similarity index 96% rename from notebooks/features/geospatial_services/GeospatialServices - Overview.ipynb rename to docs/Explore Algorithms/AI Services/Geospatial Services.ipynb index 108c54c53a..b3159957b1 100644 --- a/notebooks/features/geospatial_services/GeospatialServices - Overview.ipynb +++ b/docs/Explore Algorithms/AI Services/Geospatial Services.ipynb @@ -53,12 +53,8 @@ "metadata": {}, "outputs": [], "source": [ - "from pyspark.sql.functions import udf, col\n", "from pyspark.sql.types import StructType, StructField, DoubleType\n", - "from pyspark.sql.functions import lit\n", - 
"from pyspark.ml import PipelineModel\n", "from pyspark.sql.functions import col\n", - "import os\n", "import requests\n", "from requests.adapters import HTTPAdapter\n", "from requests.packages.urllib3.util.retry import Retry\n", @@ -75,21 +71,6 @@ "http.mount(\"http://\", adapter)" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import *\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" - ] - }, { "cell_type": "code", "execution_count": null, @@ -98,6 +79,7 @@ "source": [ "from synapse.ml.cognitive import *\n", "from synapse.ml.geospatial import *\n", + "from synapse.ml.core.platform import *\n", "\n", "# An Azure Maps account key\n", "maps_key = find_secret(\"azuremaps-api-key\")" diff --git a/docs/Explore Algorithms/AI Services/Multivariate Anomaly Detection.ipynb b/docs/Explore Algorithms/AI Services/Multivariate Anomaly Detection.ipynb new file mode 100644 index 0000000000..f6c97f49e1 --- /dev/null +++ b/docs/Explore Algorithms/AI Services/Multivariate Anomaly Detection.ipynb @@ -0,0 +1,523 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Recipe: Cognitive Services - Multivariate Anomaly Detection \n", + "This recipe shows how you can use SynapseML and Azure Cognitive Services on Apache Spark for multivariate anomaly detection. Multivariate anomaly detection allows for the detection of anomalies among many variables or timeseries, taking into account all the inter-correlations and dependencies between the different variables. In this scenario, we use SynapseML to train a model for multivariate anomaly detection using the Azure Cognitive Services, and we then use to the model to infer multivariate anomalies within a dataset containing synthetic measurements from three IoT sensors. \n", + "\n", + "To learn more about the Anomaly Detector Cognitive Service, refer to [this documentation page](https://docs.microsoft.com/azure/cognitive-services/anomaly-detector/). " + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n", + "## Setup\n", + "### Create an Anomaly Detector resource\n", + "Follow the instructions to create an `Anomaly Detector` resource using the Azure portal or alternatively, you can also use the Azure CLI to create this resource.\n", + "\n", + "- In the Azure portal, click `Create` in your resource group, and then type `Anomaly Detector`. Click on the Anomaly Detector resource.\n", + "- Give the resource a name, and ideally use the same region as the rest of your resource group. Use the default options for the rest, and then click `Review + Create` and then `Create`.\n", + "- Once the Anomaly Detector resource is created, open it and click on the `Keys and Endpoints` panel on the left. Copy the key for the Anomaly Detector resource into the `ANOMALY_API_KEY` environment variable, or store it in the `anomalyKey` variable.\n", + "\n", + "### Create a Storage Account resource\n", + "In order to save intermediate data, you need to create an Azure Blob Storage Account. Within that storage account, create a container for storing the intermediate data. Make note of the container name, and copy the connection string to that container. 
You need it later to populate the `containerName` variable and the `BLOB_CONNECTION_STRING` environment variable." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Enter your service keys\n", + "Let's start by setting up the environment variables for our service keys. The next cell sets the `ANOMALY_API_KEY` and the `BLOB_CONNECTION_STRING` environment variables based on the values stored in our Azure Key Vault. If you're running this tutorial in your own environment, make sure you set these environment variables before you proceed." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's read the `ANOMALY_API_KEY` and `BLOB_CONNECTION_STRING` environment variables and set the `containerName` and `location` variables." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from synapse.ml.core.platform import find_secret\n", + "\n", + "# An Anomaly Detector subscription key\n", + "anomalyKey = find_secret(\"anomaly-api-key\") # use your own anomaly api key\n", + "# Your storage account name\n", + "storageName = \"anomalydetectiontest\" # use your own storage account name\n", + "# A connection string to your blob storage account\n", + "storageKey = find_secret(\"madtest-storage-key\") # use your own storage key\n", + "# A place to save intermediate MVAD results\n", + "intermediateSaveDir = (\n", + " \"wasbs://madtest@anomalydetectiontest.blob.core.windows.net/intermediateData\"\n", + ")\n", + "# The location of the anomaly detector resource that you created\n", + "location = \"westus2\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First we connect to our storage account so that anomaly detector can save intermediate results there:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false, + "pycharm": { + "name": "#%%\n" + } + }, + "outputs": [], + "source": [ + "spark.sparkContext._jsc.hadoopConfiguration().set(\n", + " f\"fs.azure.account.key.{storageName}.blob.core.windows.net\", storageKey\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "collapsed": false + }, + "source": [ + "Let's import all the necessary modules." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import pandas as pd\n", + "\n", + "import pyspark\n", + "from pyspark.sql.functions import col\n", + "from pyspark.sql.functions import lit\n", + "from pyspark.sql.types import DoubleType\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import synapse.ml\n", + "from synapse.ml.cognitive import *" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's read our sample data into a Spark DataFrame."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "inputWidgets": {}, + "nuid": "58080b22-fff1-463b-ad80-0639d475ec89", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "df = (\n", + " spark.read.format(\"csv\")\n", + " .option(\"header\", \"true\")\n", + " .load(\"wasbs://publicwasb@mmlspark.blob.core.windows.net/MVAD/sample.csv\")\n", + ")\n", + "\n", + "df = (\n", + " df.withColumn(\"sensor_1\", col(\"sensor_1\").cast(DoubleType()))\n", + " .withColumn(\"sensor_2\", col(\"sensor_2\").cast(DoubleType()))\n", + " .withColumn(\"sensor_3\", col(\"sensor_3\").cast(DoubleType()))\n", + ")\n", + "\n", + "# Let's inspect the dataframe:\n", + "df.show(5)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "inputWidgets": {}, + "nuid": "e9bd6780-dcd1-4ee6-8116-eb2b4c6950c9", + "showTitle": false, + "title": "" + } + }, + "source": [ + "We can now create an `estimator` object, which is used to train our model. We specify the start and end times for the training data. We also specify the input columns to use, and the name of the column that contains the timestamps. Finally, we specify the number of data points to use in the anomaly detection sliding window, and we set the connection string to the Azure Blob Storage Account. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "trainingStartTime = \"2020-06-01T12:00:00Z\"\n", + "trainingEndTime = \"2020-07-02T17:55:00Z\"\n", + "timestampColumn = \"timestamp\"\n", + "inputColumns = [\"sensor_1\", \"sensor_2\", \"sensor_3\"]\n", + "\n", + "estimator = (\n", + " SimpleFitMultivariateAnomaly()\n", + " .setSubscriptionKey(anomalyKey)\n", + " .setLocation(location)\n", + " .setStartTime(trainingStartTime)\n", + " .setEndTime(trainingEndTime)\n", + " .setIntermediateSaveDir(intermediateSaveDir)\n", + " .setTimestampCol(timestampColumn)\n", + " .setInputCols(inputColumns)\n", + " .setSlidingWindow(200)\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now that we have created the `estimator`, let's fit it to the data:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "model = estimator.fit(df)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Once the training is done, we can now use the model for inference. The code in the next cell specifies the start and end times for the data we would like to detect the anomalies in. 
" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "inputWidgets": {}, + "nuid": "89b54ad2-3474-4e1e-a9c7-829e703831d0", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "inferenceStartTime = \"2020-07-02T18:00:00Z\"\n", + "inferenceEndTime = \"2020-07-06T05:15:00Z\"\n", + "\n", + "result = (\n", + " model.setStartTime(inferenceStartTime)\n", + " .setEndTime(inferenceEndTime)\n", + " .setOutputCol(\"results\")\n", + " .setErrorCol(\"errors\")\n", + " .setInputCols(inputColumns)\n", + " .setTimestampCol(timestampColumn)\n", + " .transform(df)\n", + ")\n", + "\n", + "result.show(5)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "When we called `.show(5)` in the previous cell, it showed us the first five rows in the dataframe. The results were all `null` because they weren't inside the inference window.\n", + "\n", + "To show the results only for the inferred data, lets select the columns we need. We can then order the rows in the dataframe by ascending order, and filter the result to only show the rows that are in the range of the inference window. In our case `inferenceEndTime` is the same as the last row in the dataframe, so can ignore that. \n", + "\n", + "Finally, to be able to better plot the results, lets convert the Spark dataframe to a Pandas dataframe.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "inputWidgets": {}, + "nuid": "18c9be87-c4e7-4221-9135-b80b3788c43e", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "rdf = (\n", + " result.select(\n", + " \"timestamp\",\n", + " *inputColumns,\n", + " \"results.interpretation\",\n", + " \"isAnomaly\",\n", + " \"results.severity\"\n", + " )\n", + " .orderBy(\"timestamp\", ascending=True)\n", + " .filter(col(\"timestamp\") >= lit(inferenceStartTime))\n", + " .toPandas()\n", + ")\n", + "\n", + "rdf" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's now format the `contributors` column that stores the contribution score from each sensor to the detected anomalies. The next cell formats this data, and splits the contribution score of each sensor into its own column." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "inputWidgets": {}, + "nuid": "5b4e072f-e0e6-4362-a321-bbfae41dea0c", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "def parse(x):\n", + " if len(x) > 0:\n", + " return dict([item[:2] for item in x])\n", + " else:\n", + " return {\"sensor_1\": 0, \"sensor_2\": 0, \"sensor_3\": 0}\n", + "\n", + "\n", + "rdf[\"contributors\"] = rdf[\"interpretation\"].apply(parse)\n", + "rdf = pd.concat(\n", + " [\n", + " rdf.drop([\"contributors\"], axis=1),\n", + " pd.json_normalize(rdf[\"contributors\"]).rename(\n", + " columns={\n", + " \"sensor_1\": \"series_1\",\n", + " \"sensor_2\": \"series_2\",\n", + " \"sensor_3\": \"series_3\",\n", + " }\n", + " ),\n", + " ],\n", + " axis=1,\n", + ")\n", + "rdf" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "inputWidgets": {}, + "nuid": "67943277-ef55-4a84-a478-0e89dbf33d6a", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Great! 
We now have the contribution scores of sensors 1, 2, and 3 in the `series_1`, `series_2`, and `series_3` columns respectively. \n", + "\n", + "Let's run the next cell to plot the results. The `minSeverity` parameter in the first line specifies the minimum severity of the anomalies to be plotted. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "application/vnd.databricks.v1+cell": { + "inputWidgets": {}, + "nuid": "5b259f82-9e91-4034-b5f9-4a2bc49a59ef", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "minSeverity = 0.1\n", + "\n", + "\n", + "####### Main Figure #######\n", + "plt.figure(figsize=(23, 8))\n", + "plt.plot(\n", + " rdf[\"timestamp\"],\n", + " rdf[\"sensor_1\"],\n", + " color=\"tab:orange\",\n", + " linestyle=\"solid\",\n", + " linewidth=2,\n", + " label=\"sensor_1\",\n", + ")\n", + "plt.plot(\n", + " rdf[\"timestamp\"],\n", + " rdf[\"sensor_2\"],\n", + " color=\"tab:green\",\n", + " linestyle=\"solid\",\n", + " linewidth=2,\n", + " label=\"sensor_2\",\n", + ")\n", + "plt.plot(\n", + " rdf[\"timestamp\"],\n", + " rdf[\"sensor_3\"],\n", + " color=\"tab:blue\",\n", + " linestyle=\"solid\",\n", + " linewidth=2,\n", + " label=\"sensor_3\",\n", + ")\n", + "plt.grid(axis=\"y\")\n", + "plt.tick_params(axis=\"x\", which=\"both\", bottom=False, labelbottom=False)\n", + "plt.legend()\n", + "\n", + "anoms = list(rdf[\"severity\"] >= minSeverity)\n", + "_, _, ymin, ymax = plt.axis()\n", + "plt.vlines(np.where(anoms), ymin=ymin, ymax=ymax, color=\"r\", alpha=0.8)\n", + "\n", + "plt.legend()\n", + "plt.title(\n", + " \"A plot of the values from the three sensors with the detected anomalies highlighted in red.\"\n", + ")\n", + "plt.show()\n", + "\n", + "####### Severity Figure #######\n", + "plt.figure(figsize=(23, 1))\n", + "plt.tick_params(axis=\"x\", which=\"both\", bottom=False, labelbottom=False)\n", + "plt.plot(\n", + " rdf[\"timestamp\"],\n", + " rdf[\"severity\"],\n", + " color=\"black\",\n", + " linestyle=\"solid\",\n", + " linewidth=2,\n", + " label=\"Severity score\",\n", + ")\n", + "plt.plot(\n", + " rdf[\"timestamp\"],\n", + " [minSeverity] * len(rdf[\"severity\"]),\n", + " color=\"red\",\n", + " linestyle=\"dotted\",\n", + " linewidth=1,\n", + " label=\"minSeverity\",\n", + ")\n", + "plt.grid(axis=\"y\")\n", + "plt.legend()\n", + "plt.ylim([0, 1])\n", + "plt.title(\"Severity of the detected anomalies\")\n", + "plt.show()\n", + "\n", + "####### Contributors Figure #######\n", + "plt.figure(figsize=(23, 1))\n", + "plt.tick_params(axis=\"x\", which=\"both\", bottom=False, labelbottom=False)\n", + "plt.bar(\n", + " rdf[\"timestamp\"], rdf[\"series_1\"], width=2, color=\"tab:orange\", label=\"sensor_1\"\n", + ")\n", + "plt.bar(\n", + " rdf[\"timestamp\"],\n", + " rdf[\"series_2\"],\n", + " width=2,\n", + " color=\"tab:green\",\n", + " label=\"sensor_2\",\n", + " bottom=rdf[\"series_1\"],\n", + ")\n", + "plt.bar(\n", + " rdf[\"timestamp\"],\n", + " rdf[\"series_3\"],\n", + " width=2,\n", + " color=\"tab:blue\",\n", + " label=\"sensor_3\",\n", + " bottom=rdf[\"series_1\"] + rdf[\"series_2\"],\n", + ")\n", + "plt.grid(axis=\"y\")\n", + "plt.legend()\n", + "plt.ylim([0, 1])\n", + "plt.title(\"The contribution of each sensor to the detected anomaly\")\n", + "plt.show()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "inputWidgets": {}, + "nuid":
"d999ebc4-320b-45ab-9196-f3067e06ccd5", + "showTitle": false, + "title": "" + } + }, + "source": [ + "The plots show the raw data from the sensors (inside the inference window) in orange, green, and blue. The red vertical lines in the first figure show the detected anomalies that have a severity greater than or equal to `minSeverity`. \n", + "\n", + "The second plot shows the severity score of all the detected anomalies, with the `minSeverity` threshold shown in the dotted red line.\n", + "\n", + "Finally, the last plot shows the contribution of the data from each sensor to the detected anomalies. It helps us diagnose and understand the most likely cause of each anomaly." + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 4 + }, + "notebookName": "sample_mvad_notebook", + "notebookOrigID": 595270988434496, + "widgets": {} + }, + "kernelspec": { + "display_name": "dev", + "language": "python", + "name": "dev" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/notebooks/features/cognitive_services/CognitiveServices - Overview.ipynb b/docs/Explore Algorithms/AI Services/Overview.ipynb similarity index 74% rename from notebooks/features/cognitive_services/CognitiveServices - Overview.ipynb rename to docs/Explore Algorithms/AI Services/Overview.ipynb index 56ecf05b6c..0a95ce0dcf 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - Overview.ipynb +++ b/docs/Explore Algorithms/AI Services/Overview.ipynb @@ -1,89 +1,109 @@ { "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Cognitive Services" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, + "source": [ + "" + ] + }, { "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "# Cognitive Services\n", - "\n", - "\n", - "\n", "[Azure Cognitive Services](https://azure.microsoft.com/services/cognitive-services/) are a suite of APIs, SDKs, and services available to help developers build intelligent applications without having direct AI or data science skills or knowledge by enabling developers to easily add cognitive features into their applications. The goal of Azure Cognitive Services is to help developers create applications that can see, hear, speak, understand, and even begin to reason. 
The catalog of services within Azure Cognitive Services can be categorized into five main pillars - Vision, Speech, Language, Web Search, and Decision.\n", "\n", "## Usage\n", "\n", "### Vision\n", "[**Computer Vision**](https://azure.microsoft.com/services/cognitive-services/computer-vision/)\n", - "- Describe: provides description of an image in human readable language ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/vision/DescribeImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.DescribeImage))\n", - "- Analyze (color, image type, face, adult/racy content): analyzes visual features of an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/vision/AnalyzeImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.AnalyzeImage))\n", - "- OCR: reads text from an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/vision/OCR.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.OCR))\n", - "- Recognize Text: reads text from an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/vision/RecognizeText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.RecognizeText))\n", - "- Thumbnail: generates a thumbnail of user-specified size from the image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/vision/GenerateThumbnails.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.GenerateThumbnails))\n", - "- Recognize domain-specific content: recognizes domain-specific content (celebrity, landmark) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/vision/RecognizeDomainSpecificContent.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.RecognizeDomainSpecificContent))\n", - "- Tag: identifies list of words that are relevant to the input image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/vision/TagImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.TagImage))\n", + "- Describe: provides description of an image in human readable language ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/vision/DescribeImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.DescribeImage))\n", + "- Analyze (color, image type, face, adult/racy content): analyzes visual features of an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/vision/AnalyzeImage.html), 
[Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.AnalyzeImage))\n", + "- OCR: reads text from an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/vision/OCR.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.OCR))\n", + "- Recognize Text: reads text from an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/vision/RecognizeText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.RecognizeText))\n", + "- Thumbnail: generates a thumbnail of user-specified size from the image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/vision/GenerateThumbnails.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.GenerateThumbnails))\n", + "- Recognize domain-specific content: recognizes domain-specific content (celebrity, landmark) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/vision/RecognizeDomainSpecificContent.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.RecognizeDomainSpecificContent))\n", + "- Tag: identifies list of words that are relevant to the input image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/vision/TagImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.vision.html#module-synapse.ml.cognitive.vision.TagImage))\n", "\n", "[**Face**](https://azure.microsoft.com/services/cognitive-services/face/)\n", - "- Detect: detects human faces in an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/face/DetectFace.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.DetectFace))\n", - "- Verify: verifies whether two faces belong to a same person, or a face belongs to a person ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/face/VerifyFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.VerifyFaces))\n", - "- Identify: finds the closest matches of the specific query person face from a person group ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/face/IdentifyFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.IdentifyFaces))\n", - "- Find similar: finds similar faces to the query face in a face list ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/face/FindSimilarFace.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.FindSimilarFace))\n", - "- Group: divides a group of faces into disjoint groups based on similarity 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/face/GroupFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.GroupFaces))\n", + "- Detect: detects human faces in an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/face/DetectFace.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.DetectFace))\n", + "- Verify: verifies whether two faces belong to a same person, or a face belongs to a person ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/face/VerifyFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.VerifyFaces))\n", + "- Identify: finds the closest matches of the specific query person face from a person group ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/face/IdentifyFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.IdentifyFaces))\n", + "- Find similar: finds similar faces to the query face in a face list ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/face/FindSimilarFace.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.FindSimilarFace))\n", + "- Group: divides a group of faces into disjoint groups based on similarity ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/face/GroupFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.face.html#module-synapse.ml.cognitive.face.GroupFaces))\n", "\n", "### Speech\n", "[**Speech Services**](https://azure.microsoft.com/services/cognitive-services/speech-services/)\n", - "- Speech-to-text: transcribes audio streams ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/speech/SpeechToText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.speech.html#module-synapse.ml.cognitive.speech.SpeechToText))\n", - "- Conversation Transcription: transcribes audio streams into live transcripts with identified speakers. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/speech/ConversationTranscription.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.speech.html#module-synapse.ml.cognitive.speech.ConversationTranscription))\n", - "- Text to Speech: Converts text to realistic audio ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/speech/TextToSpeech.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.speech.html#module-synapse.ml.cognitive.speech.TextToSpeech))\n", + "- Speech-to-text: transcribes audio streams ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/speech/SpeechToText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.speech.html#module-synapse.ml.cognitive.speech.SpeechToText))\n", + "- Conversation Transcription: transcribes audio streams into live transcripts with identified speakers. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/speech/ConversationTranscription.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.speech.html#module-synapse.ml.cognitive.speech.ConversationTranscription))\n", + "- Text to Speech: Converts text to realistic audio ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/speech/TextToSpeech.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.speech.html#module-synapse.ml.cognitive.speech.TextToSpeech))\n", "\n", "\n", "### Language\n", "[**Text Analytics**](https://azure.microsoft.com/services/cognitive-services/text-analytics/)\n", - "- Language detection: detects language of the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/text/LanguageDetector.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.LanguageDetector))\n", - "- Key phrase extraction: identifies the key talking points in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/text/KeyPhraseExtractor.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.KeyPhraseExtractor))\n", - "- Named entity recognition: identifies known entities and general named entities in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/text/NER.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.NER))\n", - "- Sentiment analysis: returns a score between 0 and 1 indicating the sentiment in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/text/TextSentiment.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.TextSentiment))\n", - "- Healthcare Entity Extraction: Extracts medical entities and relationships from text. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/text/AnalyzeHealthText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.AnalyzeHealthText))\n", + "- Language detection: detects language of the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/text/LanguageDetector.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.LanguageDetector))\n", + "- Key phrase extraction: identifies the key talking points in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/text/KeyPhraseExtractor.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.KeyPhraseExtractor))\n", + "- Named entity recognition: identifies known entities and general named entities in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/text/NER.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.NER))\n", + "- Sentiment analysis: returns a score between 0 and 1 indicating the sentiment in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/text/TextSentiment.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.TextSentiment))\n", + "- Healthcare Entity Extraction: Extracts medical entities and relationships from text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/text/AnalyzeHealthText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.text.html#module-synapse.ml.cognitive.text.AnalyzeHealthText))\n", "\n", "\n", "### Translation\n", "[**Translator**](https://azure.microsoft.com/services/cognitive-services/translator/)\n", - "- Translate: Translates text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/translate/Translate.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.Translate))\n", - "- Transliterate: Converts text in one language from one script to another script. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/translate/Transliterate.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.Transliterate))\n", - "- Detect: Identifies the language of a piece of text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/translate/Detect.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.Detect))\n", - "- BreakSentence: Identifies the positioning of sentence boundaries in a piece of text. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/translate/BreakSentence.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.BreakSentence))\n", - "- Dictionary Lookup: Provides alternative translations for a word and a small number of idiomatic phrases. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/translate/DictionaryLookup.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.DictionaryLookup))\n", - "- Dictionary Examples: Provides examples that show how terms in the dictionary are used in context. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/translate/DictionaryExamples.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.DictionaryExamples))\n", - "- Document Translation: Translates documents across all supported languages and dialects while preserving document structure and data format. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/translate/DocumentTranslator.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.DocumentTranslator))\n", + "- Translate: Translates text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/translate/Translate.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.Translate))\n", + "- Transliterate: Converts text in one language from one script to another script. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/translate/Transliterate.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.Transliterate))\n", + "- Detect: Identifies the language of a piece of text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/translate/Detect.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.Detect))\n", + "- BreakSentence: Identifies the positioning of sentence boundaries in a piece of text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/translate/BreakSentence.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.BreakSentence))\n", + "- Dictionary Lookup: Provides alternative translations for a word and a small number of idiomatic phrases. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/translate/DictionaryLookup.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.DictionaryLookup))\n", + "- Dictionary Examples: Provides examples that show how terms in the dictionary are used in context. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/translate/DictionaryExamples.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.DictionaryExamples))\n", + "- Document Translation: Translates documents across all supported languages and dialects while preserving document structure and data format. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/translate/DocumentTranslator.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.translate.html#module-synapse.ml.cognitive.translate.DocumentTranslator))\n", "\n", "### Form Recognizer\n", "[**Form Recognizer**](https://azure.microsoft.com/services/form-recognizer/)\n", - "- Analyze Layout: Extract text and layout information from a given document. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeLayout.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeLayout))\n", - "- Analyze Receipts: Detects and extracts data from receipts using optical character recognition (OCR) and our receipt model, enabling you to easily extract structured data from receipts such as merchant name, merchant phone number, transaction date, transaction total, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeReceipts.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeReceipts))\n", - "- Analyze Business Cards: Detects and extracts data from business cards using optical character recognition (OCR) and our business card model, enabling you to easily extract structured data from business cards such as contact names, company names, phone numbers, emails, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeBusinessCards.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeBusinessCards))\n", - "- Analyze Invoices: Detects and extracts data from invoices using optical character recognition (OCR) and our invoice understanding deep learning models, enabling you to easily extract structured data from invoices such as customer, vendor, invoice ID, invoice due date, total, invoice amount due, tax amount, ship to, bill to, line items and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeInvoices.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeInvoices))\n", - "- Analyze ID Documents: Detects and extracts data from identification documents using optical character recognition (OCR) and our ID document model, enabling you to easily extract structured data from ID documents such as first name, last name, date of birth, document number, and more. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeIDDocuments.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeIDDocuments))\n", - "- Analyze Custom Form: Extracts information from forms (PDFs and images) into structured data based on a model created from a set of representative training forms. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeCustomModel.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeCustomModel))\n", - "- Get Custom Model: Get detailed information about a custom model. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/GetCustomModel.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/ListCustomModels.html))\n", - "- List Custom Models: Get information about all custom models. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/form/ListCustomModels.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.ListCustomModels))\n", + "- Analyze Layout: Extract text and layout information from a given document. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeLayout.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeLayout))\n", + "- Analyze Receipts: Detects and extracts data from receipts using optical character recognition (OCR) and our receipt model, enabling you to easily extract structured data from receipts such as merchant name, merchant phone number, transaction date, transaction total, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeReceipts.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeReceipts))\n", + "- Analyze Business Cards: Detects and extracts data from business cards using optical character recognition (OCR) and our business card model, enabling you to easily extract structured data from business cards such as contact names, company names, phone numbers, emails, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeBusinessCards.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeBusinessCards))\n", + "- Analyze Invoices: Detects and extracts data from invoices using optical character recognition (OCR) and our invoice understanding deep learning models, enabling you to easily extract structured data from invoices such as customer, vendor, invoice ID, invoice due date, total, invoice amount due, tax amount, ship to, bill to, line items and more. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeInvoices.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeInvoices))\n", + "- Analyze ID Documents: Detects and extracts data from identification documents using optical character recognition (OCR) and our ID document model, enabling you to easily extract structured data from ID documents such as first name, last name, date of birth, document number, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeIDDocuments.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeIDDocuments))\n", + "- Analyze Custom Form: Extracts information from forms (PDFs and images) into structured data based on a model created from a set of representative training forms. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/AnalyzeCustomModel.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.AnalyzeCustomModel))\n", + "- Get Custom Model: Get detailed information about a custom model. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/GetCustomModel.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/ListCustomModels.html))\n", + "- List Custom Models: Get information about all custom models. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/form/ListCustomModels.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.form.html#module-synapse.ml.cognitive.form.ListCustomModels))\n", "\n", "### Decision\n", "[**Anomaly Detector**](https://azure.microsoft.com/services/cognitive-services/anomaly-detector/)\n", - "- Anomaly status of latest point: generates a model using preceding points and determines whether the latest point is anomalous ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/anomaly/DetectLastAnomaly.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.anomaly.html#module-synapse.ml.cognitive.anomaly.DetectLastAnomaly))\n", - "- Find anomalies: generates a model using an entire series and finds anomalies in the series ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/anomaly/DetectAnomalies.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.anomaly.html#module-synapse.ml.cognitive.anomaly.DetectAnomalies))\n", + "- Anomaly status of latest point: generates a model using preceding points and determines whether the latest point is anomalous ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/anomaly/DetectLastAnomaly.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.anomaly.html#module-synapse.ml.cognitive.anomaly.DetectLastAnomaly))\n", + "- Find anomalies: generates a model using an entire series and finds anomalies in the series 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/anomaly/DetectAnomalies.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.anomaly.html#module-synapse.ml.cognitive.anomaly.DetectAnomalies))\n", "\n", "### Search\n", - "- [Bing Image search](https://azure.microsoft.com/services/cognitive-services/bing-image-search-api/) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/bing/BingImageSearch.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.bing.html#module-synapse.ml.cognitive.bing.BingImageSearch))\n", - "- [Azure Cognitive search](https://docs.microsoft.com/azure/search/search-what-is-azure-search) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchWriter$.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cognitive.search.html#module-synapse.ml.cognitive.search.AzureSearchWriter))" + "- [Bing Image search](https://azure.microsoft.com/services/cognitive-services/bing-image-search-api/) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/bing/BingImageSearch.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.bing.html#module-synapse.ml.cognitive.bing.BingImageSearch))\n", + "- [Azure Cognitive search](https://docs.microsoft.com/azure/search/search-what-is-azure-search) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/com/microsoft/azure/synapse/ml/cognitive/search/AzureSearchWriter$.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cognitive.search.html#module-synapse.ml.cognitive.search.AzureSearchWriter))" ] }, { + "attachments": {}, "cell_type": "markdown", - "metadata": {}, + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, "source": [ "## Prerequisites\n", "\n", - "1. Follow the steps in [Getting started](https://docs.microsoft.com/en-us/azure/cognitive-services/big-data/getting-started) to set up your Azure Databricks and Cognitive Services environment. This tutorial shows you how to install SynapseML and how to create your Spark cluster in Databricks.\n", + "1. Follow the steps in [Getting started](https://docs.microsoft.com/azure/cognitive-services/big-data/getting-started) to set up your Azure Databricks and Cognitive Services environment. This tutorial shows you how to install SynapseML and how to create your Spark cluster in Databricks.\n", "1. After you create a new notebook in Azure Databricks, copy the **Shared code** below and paste into a new cell in your notebook.\n", "1. Choose a service sample, below, and copy paste it into a second new cell in your notebook.\n", "1. 
Replace any of the service subscription key placeholders with your own key.\n", @@ -111,23 +131,7 @@ "from requests import Request\n", "from pyspark.sql.functions import lit\n", "from pyspark.ml import PipelineModel\n", - "from pyspark.sql.functions import col\n", - "import os" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import *\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" + "from pyspark.sql.functions import col" ] }, { @@ -137,33 +141,45 @@ "outputs": [], "source": [ "from synapse.ml.cognitive import *\n", + "from synapse.ml.core.platform import *\n", "\n", "# A general Cognitive Services key for Text Analytics, Computer Vision and Form Recognizer (or use separate keys that belong to each service)\n", - "service_key = find_secret(\"cognitive-api-key\")\n", + "service_key = find_secret(\n", + " \"cognitive-api-key\"\n", + ") # Replace the call to find_secret with your key as a python string. e.g. service_key=\"27snaiw...\"\n", "service_loc = \"eastus\"\n", "\n", "# A Bing Search v7 subscription key\n", - "bing_search_key = find_secret(\"bing-search-key\")\n", - "\n", - "# An Anomaly Dectector subscription key\n", - "anomaly_key = find_secret(\"anomaly-api-key\")\n", + "bing_search_key = find_secret(\n", + " \"bing-search-key\"\n", + ") # Replace the call to find_secret with your key as a python string.\n", + "\n", + "# An Anomaly Detector subscription key\n", + "anomaly_key = find_secret(\n", + " \"anomaly-api-key\"\n", + ") # Replace the call to find_secret with your key as a python string.\n", "anomaly_loc = \"westus2\"\n", "\n", "# A Translator subscription key\n", - "translator_key = find_secret(\"translator-key\")\n", + "translator_key = find_secret(\n", + " \"translator-key\"\n", + ") # Replace the call to find_secret with your key as a python string.\n", "translator_loc = \"eastus\"\n", "\n", "# An Azure search key\n", - "search_key = find_secret(\"azure-search-key\")" + "search_key = find_secret(\n", + " \"azure-search-key\"\n", + ") # Replace the call to find_secret with your key as a python string." ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Text Analytics sample\n", "\n", - "The [Text Analytics](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/) service provides several algorithms for extracting intelligent insights from text. For example, we can find the sentiment of given input text. The service will return a score between 0.0 and 1.0 where low scores indicate negative sentiment and high score indicates positive sentiment. This sample uses three simple sentences and returns the sentiment for each." + "The [Text Analytics](https://azure.microsoft.com/services/cognitive-services/text-analytics/) service provides several algorithms for extracting intelligent insights from text. For example, we can find the sentiment of given input text. The service will return a score between 0.0 and 1.0 where low scores indicate negative sentiment and high score indicates positive sentiment. This sample uses three simple sentences and returns the sentiment for each." 
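For readers following along outside the notebook, the sentiment sample's own code cells are unchanged by this change set and therefore don't appear in the hunks above. A minimal sketch of that usage, assuming the `service_key` and `service_loc` values defined in the shared-code cell and the `TextSentiment` transformer from `synapse.ml.cognitive` (the example sentences are illustrative only, not taken from the notebook):

```python
from synapse.ml.cognitive import TextSentiment

# Illustrative input sentences (any text/language columns work the same way)
sentiment_df = spark.createDataFrame(
    [
        ("I am so happy today, it's sunny!", "en-US"),
        ("I am frustrated by this rush hour traffic", "en-US"),
        ("The cognitive services on spark aint bad", "en-US"),
    ],
    ["text", "language"],
)

# Configure the transformer with the shared Cognitive Services key and region
sentiment = (
    TextSentiment()
    .setSubscriptionKey(service_key)
    .setLocation(service_loc)
    .setTextCol("text")
    .setLanguageCol("language")
    .setOutputCol("sentiment")
    .setErrorCol("error")
)

# Each input row gains a "sentiment" column holding the service's response
display(sentiment.transform(sentiment_df))
```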
] }, { @@ -208,7 +224,7 @@ "source": [ "## Text Analytics for Health Sample\n", "\n", - "The [Text Analytics for Health Service](https://docs.microsoft.com/en-us/azure/cognitive-services/language-service/text-analytics-for-health/overview?tabs=ner) extracts and labels relevant medical information from unstructured texts such as doctor's notes, discharge summaries, clinical documents, and electronic health records." + "The [Text Analytics for Health Service](https://docs.microsoft.com/azure/cognitive-services/language-service/text-analytics-for-health/overview?tabs=ner) extracts and labels relevant medical information from unstructured texts such as doctor's notes, discharge summaries, clinical documents, and electronic health records." ] }, { @@ -238,11 +254,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Translator sample\n", - "[Translator](https://azure.microsoft.com/en-us/services/cognitive-services/translator/) is a cloud-based machine translation service and is part of the Azure Cognitive Services family of cognitive APIs used to build intelligent apps. Translator is easy to integrate in your applications, websites, tools, and solutions. It allows you to add multi-language user experiences in 90 languages and dialects and can be used for text translation with any operating system. In this sample, we do a simple text translation by providing the sentences you want to translate and target languages you want to translate to." + "[Translator](https://azure.microsoft.com/services/cognitive-services/translator/) is a cloud-based machine translation service and is part of the Azure Cognitive Services family of cognitive APIs used to build intelligent apps. Translator is easy to integrate in your applications, websites, tools, and solutions. It allows you to add multi-language user experiences in 90 languages and dialects and can be used for text translation with any operating system. In this sample, we do a simple text translation by providing the sentences you want to translate and target languages you want to translate to." ] }, { @@ -281,11 +298,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Form Recognizer sample\n", - "[Form Recognizer](https://azure.microsoft.com/en-us/services/form-recognizer/) is a part of Azure Applied AI Services that lets you build automated data processing software using machine learning technology. Identify and extract text, key/value pairs, selection marks, tables, and structure from your documents—the service outputs structured data that includes the relationships in the original file, bounding boxes, confidence and more. In this sample, we analyze a business card image and extract its information into structured data." + "[Form Recognizer](https://azure.microsoft.com/services/form-recognizer/) is a part of Azure Applied AI Services that lets you build automated data processing software using machine learning technology. Identify and extract text, key/value pairs, selection marks, tables, and structure from your documents. The service outputs structured data that includes the relationships in the original file, bounding boxes, confidence and more. In this sample, we analyze a business card image and extract its information into structured data." 
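As with the sentiment sample, the business-card cells themselves are untouched by this diff. A minimal sketch of the Form Recognizer call, assuming the shared `service_key`/`service_loc` variables and the `AnalyzeBusinessCards` transformer from `synapse.ml.cognitive`; the image URL below is a placeholder, not taken from the notebook:

```python
from synapse.ml.cognitive import AnalyzeBusinessCards

# One column of image URLs pointing at business-card photos (placeholder URL)
image_df = spark.createDataFrame(
    [("https://example.com/sample-business-card.jpg",)],
    ["source"],
)

# Point the prebuilt business-card model at the URL column
analyze_business_cards = (
    AnalyzeBusinessCards()
    .setSubscriptionKey(service_key)
    .setLocation(service_loc)
    .setImageUrlCol("source")
    .setOutputCol("businessCards")
)

# The output column carries the structured fields (contact names, phone numbers, emails, ...)
display(analyze_business_cards.transform(image_df))
```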
] }, { @@ -328,12 +346,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Computer Vision sample\n", "\n", - "[Computer Vision](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/) analyzes images to identify structure such as faces, objects, and natural-language descriptions. In this sample, we tag a list of images. Tags are one-word descriptions of things in the image like recognizable objects, people, scenery, and actions." + "[Computer Vision](https://azure.microsoft.com/services/cognitive-services/computer-vision/) analyzes images to identify structure such as faces, objects, and natural-language descriptions. In this sample, we tag a list of images. Tags are one-word descriptions of things in the image like recognizable objects, people, scenery, and actions." ] }, { @@ -355,7 +374,7 @@ " ],\n", ")\n", "\n", - "# Run the Computer Vision service. Analyze Image extracts infortmation from/about the images.\n", + "# Run the Computer Vision service. Analyze Image extracts information from/about the images.\n", "analysis = (\n", " AnalyzeImage()\n", " .setLocation(service_loc)\n", @@ -373,12 +392,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Bing Image Search sample\n", "\n", - "[Bing Image Search](https://azure.microsoft.com/en-us/services/cognitive-services/bing-image-search-api/) searches the web to retrieve images related to a user's natural language query. In this sample, we use a text query that looks for images with quotes. It returns a list of image URLs that contain photos related to our query." + "[Bing Image Search](https://azure.microsoft.com/services/cognitive-services/bing-image-search-api/) searches the web to retrieve images related to a user's natural language query. In this sample, we use a text query that looks for images with quotes. It returns a list of image URLs that contain photos related to our query." ] }, { @@ -418,11 +438,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Speech-to-Text sample\n", - "The [Speech-to-text](https://azure.microsoft.com/en-us/services/cognitive-services/speech-services/) service converts streams or files of spoken audio to text. In this sample, we transcribe one audio file." + "The [Speech-to-text](https://azure.microsoft.com/services/cognitive-services/speech-services/) service converts streams or files of spoken audio to text. In this sample, we transcribe one audio file." ] }, { @@ -452,11 +473,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Text-to-Speech sample\n", - "[Text to speech](https://azure.microsoft.com/en-us/services/cognitive-services/text-to-speech/#overview) is a service that allows one to build apps and services that speak naturally, choosing from more than 270 neural voices across 119 languages and variants." + "[Text to speech](https://azure.microsoft.com/services/cognitive-services/text-to-speech/#overview) is a service that allows one to build apps and services that speak naturally, choosing from more than 270 neural voices across 119 languages and variants." ] }, { @@ -498,12 +520,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "## Anomaly Detector sample\n", "\n", - "[Anomaly Detector](https://azure.microsoft.com/en-us/services/cognitive-services/anomaly-detector/) is great for detecting irregularities in your time series data. 
In this sample, we use the service to find anomalies in the entire time series." + "[Anomaly Detector](https://azure.microsoft.com/services/cognitive-services/anomaly-detector/) is great for detecting irregularities in your time series data. In this sample, we use the service to find anomalies in the entire time series." ] }, { @@ -576,7 +599,7 @@ " )\n", "\n", "\n", - "# Create a dataframe with spcificies which countries we want data on\n", + "# Create a dataframe with specifies which countries we want data on\n", "df = spark.createDataFrame([(\"br\",), (\"usa\",)], [\"country\"]).withColumn(\n", " \"request\", http_udf(world_bank_request)(col(\"country\"))\n", ")\n", @@ -603,7 +626,11 @@ }, { "cell_type": "markdown", - "metadata": {}, + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, "source": [ "## Azure Cognitive search sample\n", "\n", @@ -613,7 +640,11 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, "outputs": [], "source": [ "search_service = \"mmlspark-azure-search\"\n", diff --git a/notebooks/features/cognitive_services/CognitiveServices - Celebrity Quote Analysis.ipynb b/docs/Explore Algorithms/AI Services/Quickstart - Analyze Celebrity Quotes.ipynb similarity index 98% rename from notebooks/features/cognitive_services/CognitiveServices - Celebrity Quote Analysis.ipynb rename to docs/Explore Algorithms/AI Services/Quickstart - Analyze Celebrity Quotes.ipynb index 9a7dbf6a2f..5ecb192f2b 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - Celebrity Quote Analysis.ipynb +++ b/docs/Explore Algorithms/AI Services/Quickstart - Analyze Celebrity Quotes.ipynb @@ -26,11 +26,8 @@ "from pyspark.ml import PipelineModel\n", "from pyspark.sql.functions import col, udf\n", "from pyspark.ml.feature import SQLTransformer\n", - "from pyspark.sql import SparkSession\n", "from synapse.ml.core.platform import find_secret\n", "\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", "# put your service keys here\n", "cognitive_key = find_secret(\"cognitive-api-key\")\n", "cognitive_location = \"eastus\"\n", diff --git a/notebooks/features/cognitive_services/CognitiveServices - Analyze Text.ipynb b/docs/Explore Algorithms/AI Services/Quickstart - Analyze Text.ipynb similarity index 86% rename from notebooks/features/cognitive_services/CognitiveServices - Analyze Text.ipynb rename to docs/Explore Algorithms/AI Services/Quickstart - Analyze Text.ipynb index 8132ca9291..4a4ad4c7c3 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - Analyze Text.ipynb +++ b/docs/Explore Algorithms/AI Services/Quickstart - Analyze Text.ipynb @@ -14,14 +14,7 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import running_on_synapse, find_secret\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "if running_on_synapse():\n", - " from notebookutils.visualization import display\n", + "from synapse.ml.core.platform import find_secret\n", "\n", "cognitive_key = find_secret(\"cognitive-api-key\")\n", "cognitive_location = \"eastus\"" @@ -105,4 +98,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/notebooks/features/cognitive_services/CognitiveServices - Custom Search for Art.ipynb b/docs/Explore Algorithms/AI Services/Quickstart - Creare a Visual Search Engine.ipynb similarity index 88% rename from 
notebooks/features/cognitive_services/CognitiveServices - Custom Search for Art.ipynb rename to docs/Explore Algorithms/AI Services/Quickstart - Creare a Visual Search Engine.ipynb index 9255985c58..4b3a72a2dc 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - Custom Search for Art.ipynb +++ b/docs/Explore Algorithms/AI Services/Quickstart - Creare a Visual Search Engine.ipynb @@ -16,38 +16,12 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "source": [ "import os, sys, time, json, requests\n", - "from pyspark.ml import Transformer, Estimator, Pipeline\n", - "from pyspark.ml.feature import SQLTransformer\n", - "from pyspark.sql.functions import lit, udf, col, split" - ], - "outputs": [], - "metadata": { - "collapsed": true - } - }, - { - "cell_type": "code", - "execution_count": null, - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", + "from pyspark.sql.functions import lit, udf, col, split\n", "from synapse.ml.core.platform import *\n", "\n", - "from synapse.ml.core.platform import materializing_display as display" - ], - "outputs": [], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": 4, - "source": [ "cognitive_key = find_secret(\"cognitive-api-key\")\n", "cognitive_loc = \"eastus\"\n", "azure_search_key = find_secret(\"azure-search-key\")\n", diff --git a/notebooks/features/cognitive_services/CognitiveServices - Create Audiobooks.ipynb b/docs/Explore Algorithms/AI Services/Quickstart - Create Audiobooks.ipynb similarity index 97% rename from notebooks/features/cognitive_services/CognitiveServices - Create Audiobooks.ipynb rename to docs/Explore Algorithms/AI Services/Quickstart - Create Audiobooks.ipynb index a763d8b84a..0c79a4eae4 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - Create Audiobooks.ipynb +++ b/docs/Explore Algorithms/AI Services/Quickstart - Create Audiobooks.ipynb @@ -33,14 +33,10 @@ { "cell_type": "code", "source": [ - "from pyspark.sql import SparkSession\n", "from synapse.ml.core.platform import *\n", "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", "if running_on_synapse():\n", " from notebookutils import mssparkutils\n", - " from notebookutils.visualization import display\n", "\n", "# Fill this in with your cognitive service information\n", "service_key = find_secret(\n", diff --git a/docs/Explore Algorithms/AI Services/Quickstart - Document Question and Answering with PDFs.ipynb b/docs/Explore Algorithms/AI Services/Quickstart - Document Question and Answering with PDFs.ipynb new file mode 100644 index 0000000000..6d248ed270 --- /dev/null +++ b/docs/Explore Algorithms/AI Services/Quickstart - Document Question and Answering with PDFs.ipynb @@ -0,0 +1,890 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "6b31dee8-67e3-4bb7-a501-269c69c80d3f", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "# A Guide to Q&A on PDF Documents" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "b4000620-9ea1-45aa-be4f-ddb971cc708e", + "showTitle": false, + "title": "" + }, + 
"nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "## Introduction\n", + "In this notebook, we'll demonstrate how to develop a context-aware question answering framework for any form of a document using [OpenAI models](https://azure.microsoft.com/products/ai-services/openai-service), [SynapseML](https://microsoft.github.io/SynapseML/) and [Azure AI Services](https://azure.microsoft.com/products/cognitive-services/). In this notebook, we assume that PDF documents are the source of data, however, the same framework can be easiy extended to other document formats too. \n", + "\n", + "We’ll cover the following key steps:\n", + "\n", + "1. Preprocessing PDF Documents: Learn how to load the PDF documents into a Spark DataFrame, read the documents using the [Azure AI Document Intelligence](https://azure.microsoft.com/products/ai-services/ai-document-intelligence) in Azure AI Services, and use SynapseML to split the documents into chunks.\n", + "2. Embedding Generation and Storage: Learn how to generate embeddings for the chunks using SynapseML and [Azure OpenAI Services](https://azure.microsoft.com/products/cognitive-services/openai-service), store the embeddings in a vector store using [Azure Cognitive Search](https://azure.microsoft.com/products/search), and search the vector store to answer the user’s question.\n", + "3. Question Answering Pipeline: Learn how to retrieve relevant document based on the user’s question and provide the answer using [Langchain](https://python.langchain.com/en/latest/index.html#)." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "b7c8afaa-8298-4d48-9db0-867b6307963a", + "showTitle": false, + "title": "" + } + }, + "source": [ + "We start by installing the necessary python libraries." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install langchain openai" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "be4f7d31-48e0-4d71-af5c-645883891567", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "### Step 1: Provide the keys for Azure AI Services and Azure OpenAI to authenticate the applications." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "87b58b64-49a4-4a78-a915-7c2478c22c7d", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "To authenticate Azure AI Services and Azure OpenAI applications, you need to provide the respective API keys. Here is an example of how you can provide the keys in Python code. `find_secret()` function uses Azure Keyvault to get the API keys, however you can directly paste your own keys there." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pyspark.sql import SparkSession\n", + "from synapse.ml.core.platform import find_secret\n", + "\n", + "ai_services_key = find_secret(\"cognitive-api-key\")\n", + "ai_services_location = \"eastus\"\n", + "\n", + "# Fill in the following lines with your Azure service information\n", + "aoai_service_name = \"synapseml-openai\"\n", + "aoai_endpoint = f\"https://{aoai_service_name}.openai.azure.com/\"\n", + "aoai_key = find_secret(\"openai-api-key\")\n", + "aoai_deployment_name_embeddings = \"text-embedding-ada-002\"\n", + "aoai_deployment_name_query = \"text-davinci-003\"\n", + "aoai_model_name_query = \"text-davinci-003\"\n", + "\n", + "# Azure Cognitive Search\n", + "cogsearch_name = \"mmlspark-azure-search\"\n", + "cogsearch_index_name = \"examplevectorindex\"\n", + "cogsearch_api_key = find_secret(\"azure-search-key\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "97f056e7-9f88-45b9-b6b2-95be8c7fccac", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "### Step 2: Load the PDF documents into a Spark DataFrame." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "eb6519d4-f03a-4359-8a6f-4922bfeedbf5", + "showTitle": false, + "title": "" + } + }, + "source": [ + "For this tutorial, we will be using NASA's [Earth](https://www.nasa.gov/sites/default/files/atoms/files/earth_book_2019_tagged.pdf) and [Earth at Night](https://www.nasa.gov/sites/default/files/atoms/files/earth_at_night_508.pdf) e-books. To load PDF documents into a Spark DataFrame, you can use the ```spark.read.format(\"binaryFile\")``` method provided by Apache Spark." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pyspark.sql.functions import udf\n", + "from pyspark.sql.types import StringType\n", + "\n", + "document_path = \"wasbs://public@synapseaisolutionsa.blob.core.windows.net/NASAEarth\" # path to your document\n", + "df = spark.read.format(\"binaryFile\").load(document_path).limit(10).cache()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "189a84ca-ac81-4130-9143-75883b2633ba", + "showTitle": false, + "title": "" + } + }, + "source": [ + "This code will read the PDF documents and create a Spark DataFrame named df with the contents of the PDFs. The DataFrame will have a schema that represents the structure of the PDF documents, including their textual content." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "90f708b9-9ef2-4de5-b555-a2aa32fd0cfc", + "showTitle": false, + "title": "" + } + }, + "source": [ + "Let's take a glimpse at the contents of the e-books we are working with. 
Below are some screenshots that showcase the essence of the books; as you can see they contain information about the Earth.\n", + "\n", + "\n", + "" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "8119ea95-aa60-4f81-8189-04009fb4aac0", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "##### Display the raw data from the PDF documents" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Show the dataframe without the content\n", + "display(df.drop(\"content\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "34e06daf-e9e7-4144-b956-e57bde8fab77", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "### Step 3: Read the documents using Azure AI Document Intelligence." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "304ed77d-a032-4620-a74d-65a277caeaf7", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "We utilize [SynapseML](https://microsoft.github.io/SynapseML/), an ecosystem of tools designed to enhance the distributed computing framework [Apache Spark](https://github.com/apache/spark). SynapseML introduces advanced networking capabilities to the Spark ecosystem and offers user-friendly SparkML transformers for various [Azure AI Services](https://azure.microsoft.com/products/ai-services).\n", + "\n", + "Additionally, we employ AnalyzeDocument from Azure AI Services to extract the complete document content and present it in the designated columns called \"output_content\" and \"paragraphs\"." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from synapse.ml.cognitive import AnalyzeDocument\n", + "from pyspark.sql.functions import col\n", + "\n", + "analyze_document = (\n", + " AnalyzeDocument()\n", + " .setPrebuiltModelId(\"prebuilt-layout\")\n", + " .setSubscriptionKey(ai_services_key)\n", + " .setLocation(ai_services_location)\n", + " .setImageBytesCol(\"content\")\n", + " .setOutputCol(\"result\")\n", + " .setPages(\n", + " \"1-15\"\n", + " ) # Here we are reading the first 15 pages of the documents for demo purposes\n", + ")\n", + "\n", + "analyzed_df = (\n", + " analyze_document.transform(df)\n", + " .withColumn(\"output_content\", col(\"result.analyzeResult.content\"))\n", + " .withColumn(\"paragraphs\", col(\"result.analyzeResult.paragraphs\"))\n", + ").cache()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "d26e4217-ac87-4583-9500-af65d969c199", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "We can observe the analyzed Spark DataFrame named ```analyzed_df``` using the following code. Note that we drop the \"content\" column as it is not needed anymore."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "analyzed_df = analyzed_df.drop(\"content\")\n", + "display(analyzed_df)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "59188b7a-32fa-406d-8562-09ad69400b28", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "### Step 4: Split the documents into chunks." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "d682af37-faa8-4830-acd0-96aa348815d3", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "After analyzing the document, we leverage SynapseML’s PageSplitter to divide the documents into smaller sections, which are subsequently stored in the “chunks” column. This allows for more granular representation and processing of the document content." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from synapse.ml.featurize.text import PageSplitter\n", + "\n", + "ps = (\n", + " PageSplitter()\n", + " .setInputCol(\"output_content\")\n", + " .setMaximumPageLength(4000)\n", + " .setMinimumPageLength(3000)\n", + " .setOutputCol(\"chunks\")\n", + ")\n", + "\n", + "splitted_df = ps.transform(analyzed_df)\n", + "display(splitted_df)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "ce75e0fc-c036-488f-acba-57a44924d55e", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "Note that the chunks for each document are presented in a single row inside an array. In order to embed all the chunks in the following cells, we need to have each chunk in a separate row. To accomplish that, we first explode these arrays so there is only one chunk in each row, then filter the Spark DataFrame in order to only keep the path to the document and the chunk in a single row." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Each row contains many chunks for the same document as a vector.\n", + "# Explode will distribute and replicate the content of a vector across multiple rows\n", + "from pyspark.sql.functions import explode, col\n", + "\n", + "exploded_df = splitted_df.select(\"path\", explode(col(\"chunks\")).alias(\"chunk\")).select(\n", + " \"path\", \"chunk\"\n", + ")\n", + "display(exploded_df)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "1e5b0f56-0a64-4e4a-86f2-b647e82b41ce", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "### Step 5: Generate Embeddings."
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "ebba439c-9503-46d7-bafb-f7fa790974a8", + "showTitle": false, + "title": "" + } + }, + "source": [ + "To produce embeddings for each chunk, we utilize both SynapseML and Azure OpenAI Service. By integrating the Azure OpenAI service with SynapseML, we can leverage the power of the Apache Spark distributed computing framework to process numerous prompts using the OpenAI service. This integration enables the SynapseML embedding client to generate embeddings in a distributed manner, enabling efficient processing of large volumes of data. If you're interested in applying large language models at a distributed scale using Azure OpenAI and Azure Synapse Analytics, you can refer to [this approach](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/OpenAI/). For more detailed information on generating embeddings with Azure OpenAI, you can look [here]( https://learn.microsoft.com/azure/cognitive-services/openai/how-to/embeddings?tabs=console)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from synapse.ml.cognitive import OpenAIEmbedding\n", + "\n", + "embedding = (\n", + " OpenAIEmbedding()\n", + " .setSubscriptionKey(aoai_key)\n", + " .setDeploymentName(aoai_deployment_name_embeddings)\n", + " .setCustomServiceName(aoai_service_name)\n", + " .setTextCol(\"chunk\")\n", + " .setErrorCol(\"error\")\n", + " .setOutputCol(\"embeddings\")\n", + ")\n", + "\n", + "df_embeddings = embedding.transform(exploded_df)\n", + "\n", + "display(df_embeddings)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "e7d8e559-92bb-44bc-aee0-93b2490f38e2", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "### Step 6: Store the embeddings in Azure Cognitive Search Vector Store." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "6d3aaa47-818c-4eb2-b131-8d316380a0ab", + "showTitle": false, + "title": "" + } + }, + "source": [ + "[Azure Cognitive Search](https://learn.microsoft.com/azure/search/search-what-is-azure-search) offers a user-friendly interface for creating a vector database, as well as storing and retrieving data using vector search. If you're interested in learning more about vector search, you can look [here](https://github.com/Azure/cognitive-search-vector-pr/tree/main).\n", + "\n", + "\n", + "Storing data in the AzureCogSearch vector database involves two main steps:\n", + "\n", + "Creating the Index: The first step is to establish the index or schema of the vector database. This entails defining the structure and properties of the data that will be stored and indexed in the vector database.\n", + "\n", + "Adding Chunked Documents and Embeddings: The second step involves adding the chunked documents, along with their corresponding embeddings, to the vector datastore. 
This allows for efficient storage and retrieval of the data using vector search capabilities.\n", + "\n", + "By following these steps, you can effectively store your chunked documents and their associated embeddings in the AzureCogSearch vector database, enabling seamless retrieval of relevant information through vector search functionality." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from pyspark.sql.functions import monotonically_increasing_id\n", + "from pyspark.sql.functions import lit\n", + "\n", + "df_embeddings = (\n", + " df_embeddings.drop(\"error\")\n", + " .withColumn(\n", + " \"idx\", monotonically_increasing_id().cast(\"string\")\n", + " ) # create index ID for ACS\n", + " .withColumn(\"searchAction\", lit(\"upload\"))\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from synapse.ml.cognitive import writeToAzureSearch\n", + "import json\n", + "\n", + "df_embeddings.writeToAzureSearch(\n", + " subscriptionKey=cogsearch_api_key,\n", + " actionCol=\"searchAction\",\n", + " serviceName=cogsearch_name,\n", + " indexName=cogsearch_index_name,\n", + " keyCol=\"idx\",\n", + " vectorCols=json.dumps([{\"name\": \"embeddings\", \"dimension\": 1536}]),\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "17b3890f-4163-443c-929b-252d62a6c736", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "### Step 7: Ask a Question." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "749a6ec7-d6c9-4945-bc72-2deed94e712b", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "After processing the document, we can proceed to pose a question. We will use [SynapseML](https://microsoft.github.io/SynapseML/docs/Explore%20Algorithms/OpenAI/Quickstart%20-%20OpenAI%20Embedding/) to convert the user's question into an embedding and then utilize cosine similarity to retrieve the top K document chunks that closely match the user's question. It's worth mentioning that alternative similarity metrics can also be employed." 
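The ranking itself happens inside Azure Cognitive Search's vector search, but the idea it relies on is a plain similarity comparison between the question embedding and each chunk embedding. A self-contained sketch of cosine-similarity top-k ranking (NumPy only, independent of the notebook's variables and of whatever metric the service uses internally):

```python
import numpy as np

def cosine_similarity(a, b):
    # dot(a, b) / (|a| * |b|)
    a, b = np.asarray(a, dtype=float), np.asarray(b, dtype=float)
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))

def top_k_chunks(question_embedding, chunk_embeddings, k=2):
    # Score every chunk against the question and return the indices of the k best matches
    scores = [cosine_similarity(question_embedding, emb) for emb in chunk_embeddings]
    return sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)[:k]

# Toy 3-dimensional "embeddings": chunks 0 and 2 are closest to the question
print(top_k_chunks([1.0, 0.0, 0.0], [[0.9, 0.1, 0.0], [0.0, 1.0, 0.0], [0.7, 0.7, 0.0]], k=2))
```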
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "user_question = \"What did the astronaut Edgar Mitchell call Earth?\"\n", + "retrieve_k = 2 # Retrieve the top 2 documents from vector database" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import requests\n", + "\n", + "# Ask a question and convert to embeddings\n", + "\n", + "\n", + "def gen_question_embedding(user_question):\n", + " # Convert question to embedding using synapseML\n", + " from synapse.ml.cognitive import OpenAIEmbedding\n", + "\n", + " df_ques = spark.createDataFrame([(user_question, 1)], [\"questions\", \"dummy\"])\n", + " embedding = (\n", + " OpenAIEmbedding()\n", + " .setSubscriptionKey(aoai_key)\n", + " .setDeploymentName(aoai_deployment_name_embeddings)\n", + " .setCustomServiceName(aoai_service_name)\n", + " .setTextCol(\"questions\")\n", + " .setErrorCol(\"errorQ\")\n", + " .setOutputCol(\"embeddings\")\n", + " )\n", + " df_ques_embeddings = embedding.transform(df_ques)\n", + " row = df_ques_embeddings.collect()[0]\n", + " question_embedding = row.embeddings.tolist()\n", + " return question_embedding\n", + "\n", + "\n", + "def retrieve_k_chunk(k, question_embedding):\n", + " # Retrieve the top K entries\n", + " url = f\"https://{cogsearch_name}.search.windows.net/indexes/{cogsearch_index_name}/docs/search?api-version=2023-07-01-Preview\"\n", + "\n", + " payload = json.dumps(\n", + " {\"vector\": {\"value\": question_embedding, \"fields\": \"embeddings\", \"k\": k}}\n", + " )\n", + " headers = {\n", + " \"Content-Type\": \"application/json\",\n", + " \"api-key\": cogsearch_api_key,\n", + " }\n", + "\n", + " response = requests.request(\"POST\", url, headers=headers, data=payload)\n", + " output = json.loads(response.text)\n", + " print(response.status_code)\n", + " return output\n", + "\n", + "\n", + "# Generate embeddings for the question and retrieve the top k document chunks\n", + "question_embedding = gen_question_embedding(user_question)\n", + "output = retrieve_k_chunk(retrieve_k, question_embedding)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "79356cff-a236-4ef3-91f7-a601ee38d5f9", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "### Step 8: Respond to a User’s Question." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "06778fa1-303f-4a3b-814b-c0375df855c2", + "showTitle": false, + "title": "" + }, + "nteract": { + "transient": { + "deleting": false + } + } + }, + "source": [ + "To provide a response to the user's question, we will utilize the [LangChain](https://python.langchain.com/en/latest/index.html) framework. With the LangChain framework we will augment the retrieved documents with respect to the user's question. Following this, we can request a response to the user's question from our framework." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Import necessary libraries and set up OpenAI\n", + "from langchain.llms import AzureOpenAI\n", + "from langchain import PromptTemplate\n", + "from langchain.chains import LLMChain\n", + "import openai\n", + "\n", + "openai.api_type = \"azure\"\n", + "openai.api_base = aoai_endpoint\n", + "openai.api_version = \"2022-12-01\"\n", + "openai.api_key = aoai_key" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": { + "byteLimit": 2048000, + "rowLimit": 10000 + }, + "inputWidgets": {}, + "nuid": "412d83cc-4fe9-455e-ad3d-7780ed262dac", + "showTitle": false, + "title": "" + } + }, + "source": [ + "We can now wrap up the Q&A journey by asking a question and checking the answer. You will see that Edgar Mitchell called Earth \"a sparkling blue and white jewel\"!" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Define a Question Answering chain function using LangChain\n", + "def qa_chain_func():\n", + "\n", + " # Define llm model\n", + " llm = AzureOpenAI(\n", + " deployment_name=aoai_deployment_name_query,\n", + " model_name=aoai_model_name_query,\n", + " openai_api_key=aoai_key,\n", + " openai_api_version=\"2022-12-01\",\n", + " )\n", + "\n", + " # Write a preprompt with context and query as variables\n", + " template = \"\"\"\n", + " context :{context}\n", + " Answer the question based on the context above. If the\n", + " information to answer the question is not present in the given context then reply \"I don't know\".\n", + " Question: {query}\n", + " Answer: \"\"\"\n", + "\n", + " # Define a prompt template\n", + " prompt_template = PromptTemplate(\n", + " input_variables=[\"context\", \"query\"], template=template\n", + " )\n", + " # Define a chain\n", + " qa_chain = LLMChain(llm=llm, prompt=prompt_template)\n", + " return qa_chain\n", + "\n", + "\n", + "# Concatenate the content of retrieved documents\n", + "context = [i[\"chunk\"] for i in output[\"value\"]]\n", + "\n", + "# Make a Question Answering chain function and pass in the context and question\n", + "qa_chain = qa_chain_func()\n", + "answer = qa_chain.run({\"context\": context, \"query\": user_question})\n", + "\n", + "print(answer)" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/notebooks/features/geospatial_services/GeospatialServices - Flooding Risk.ipynb b/docs/Explore Algorithms/AI Services/Quickstart - Flooding Risk.ipynb similarity index 96% rename from notebooks/features/geospatial_services/GeospatialServices - Flooding Risk.ipynb rename to docs/Explore Algorithms/AI Services/Quickstart - Flooding Risk.ipynb index 6ee75fc2dd..a2dc21ff28 100644 --- a/notebooks/features/geospatial_services/GeospatialServices - Flooding Risk.ipynb +++ b/docs/Explore Algorithms/AI Services/Quickstart - Flooding Risk.ipynb @@ -29,7 +29,6 @@ "metadata": {}, "outputs": [], "source": [ - "import os\n", "import json\n", "import time\n", "import requests\n", @@ -45,15 +44,7 @@ "adapter = HTTPAdapter(max_retries=retry_strategy)\n", "http = requests.Session()\n", "http.mount(\"https://\", adapter)\n", - "http.mount(\"http://\", adapter)\n", - "\n", - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import *\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from 
synapse.ml.core.platform import materializing_display as display" + "http.mount(\"http://\", adapter)" ] }, { @@ -62,6 +53,8 @@ "metadata": {}, "outputs": [], "source": [ + "from synapse.ml.core.platform import *\n", + "\n", "# Azure Maps account key\n", "maps_key = find_secret(\"azuremaps-api-key\") # Replace this with your azure maps key\n", "\n", diff --git a/notebooks/features/cognitive_services/CognitiveServices - Predictive Maintenance.ipynb b/docs/Explore Algorithms/AI Services/Quickstart - Predictive Maintenance.ipynb similarity index 82% rename from notebooks/features/cognitive_services/CognitiveServices - Predictive Maintenance.ipynb rename to docs/Explore Algorithms/AI Services/Quickstart - Predictive Maintenance.ipynb index daaf26720a..0611541843 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - Predictive Maintenance.ipynb +++ b/docs/Explore Algorithms/AI Services/Quickstart - Predictive Maintenance.ipynb @@ -44,10 +44,6 @@ "source": [ "import os\n", "from synapse.ml.core.platform import find_secret\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", "\n", "service_key = find_secret(\"anomaly-api-key\") # Paste your anomaly detector key here\n", "location = \"westus2\" # Paste your anomaly detector location here" @@ -59,7 +55,9 @@ { "cell_type": "markdown", "source": [ - "## Read data into a DataFrame\n\nNext, let's read the IoTSignals file into a DataFrame. Open a new notebook in your Synapse workspace and create a DataFrame from the file." + "## Read data into a DataFrame\n", + "\n", + "Next, let's read the IoTSignals file into a DataFrame. Open a new notebook in your Synapse workspace and create a DataFrame from the file." ], "metadata": {} }, @@ -79,7 +77,9 @@ { "cell_type": "markdown", "source": [ - "### Run anomaly detection using Cognitive Services on Spark\n\nThe goal is to find instances where the signals from the IoT devices were outputting anomalous values so that we can see when something is going wrong and do predictive maintenance. To do that, let's use Anomaly Detector on Spark:" + "### Run anomaly detection using Cognitive Services on Spark\n", + "\n", + "The goal is to find instances where the signals from the IoT devices were outputting anomalous values so that we can see when something is going wrong and do predictive maintenance. To do that, let's use Anomaly Detector on Spark:" ], "metadata": {} }, @@ -133,14 +133,22 @@ { "cell_type": "markdown", "source": [ - "This cell should yield a result that looks like:\n\n| timestamp | value | deviceId | isAnomaly |\n|:--------------------|--------:|:-----------|:------------|\n| 2020-05-01 18:33:51 | 3174 | dev-7 | False |\n| 2020-05-01 18:33:52 | 2976 | dev-7 | False |\n| 2020-05-01 18:33:53 | 2714 | dev-7 | False |" + "This cell should yield a result that looks like:\n", + "\n", + "| timestamp | value | deviceId | isAnomaly |\n", + "|:--------------------|--------:|:-----------|:------------|\n", + "| 2020-05-01 18:33:51 | 3174 | dev-7 | False |\n", + "| 2020-05-01 18:33:52 | 2976 | dev-7 | False |\n", + "| 2020-05-01 18:33:53 | 2714 | dev-7 | False |" ], "metadata": {} }, { "cell_type": "markdown", "source": [ - "## Visualize anomalies for one of the devices\n\nIoTSignals.csv has signals from multiple IoT devices. We'll focus on a specific device and visualize anomalous outputs from the device." 
+ "## Visualize anomalies for one of the devices\n", + "\n", + "IoTSignals.csv has signals from multiple IoT devices. We'll focus on a specific device and visualize anomalous outputs from the device." ], "metadata": {} }, @@ -229,7 +237,13 @@ { "cell_type": "markdown", "source": [ - "If successful, your output will look like this:\n\n![Anomaly Detector Plot](https://github.com/MicrosoftDocs/azure-docs/raw/master/articles/cognitive-services/big-data/media/anomaly-output.png)\n\n## Next steps\n\nLearn how to do predictive maintenance at scale with Azure Cognitive Services, Azure Synapse Analytics, and Azure CosmosDB. For more information, see the full sample on [GitHub](https://github.com/Azure-Samples/cosmosdb-synapse-link-samples)." + "If successful, your output will look like this:\n", + "\n", + "![Anomaly Detector Plot](https://github.com/MicrosoftDocs/azure-docs/raw/master/articles/cognitive-services/big-data/media/anomaly-output.png)\n", + "\n", + "## Next steps\n", + "\n", + "Learn how to do predictive maintenance at scale with Azure Cognitive Services, Azure Synapse Analytics, and Azure CosmosDB. For more information, see the full sample on [GitHub](https://github.com/Azure-Samples/cosmosdb-synapse-link-samples)." ], "metadata": {} } diff --git a/notebooks/features/isolation_forest/IsolationForest - Multivariate Anomaly Detection.ipynb b/docs/Explore Algorithms/Anomaly Detection/Quickstart - Isolation Forests.ipynb similarity index 96% rename from notebooks/features/isolation_forest/IsolationForest - Multivariate Anomaly Detection.ipynb rename to docs/Explore Algorithms/Anomaly Detection/Quickstart - Isolation Forests.ipynb index 41c7dd1f3e..f2a359a224 100644 --- a/notebooks/features/isolation_forest/IsolationForest - Multivariate Anomaly Detection.ipynb +++ b/docs/Explore Algorithms/Anomaly Detection/Quickstart - Isolation Forests.ipynb @@ -24,7 +24,7 @@ "metadata": {}, "source": [ "## Prerequisites\n", - " - If you are running it on Synapse, you'll need to [create an AML workspace and set up linked Service](https://microsoft.github.io/SynapseML/docs/next/mlflow/installation/). 
\n" + " - If you are running it on Synapse, you'll need to [create an AML workspace and set up linked Service](../../../Use%20with%20MLFlow/Overview/).\n" ] }, { @@ -32,11 +32,7 @@ "execution_count": null, "outputs": [], "source": [ - "import subprocess\n", - "import sys\n", - "\n", - "for package in [\"sqlparse\", \"raiwidgets\", \"interpret-community\"]:\n", - " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", package])" + "%pip install sqlparse raiwidgets interpret-community" ], "metadata": { "collapsed": false, @@ -76,7 +72,6 @@ "import mlflow\n", "\n", "from pyspark.sql import functions as F\n", - "from pyspark.sql import SparkSession\n", "from pyspark.ml.feature import VectorAssembler\n", "from pyspark.sql.types import *\n", "from pyspark.ml import Pipeline\n", @@ -161,29 +156,9 @@ "# MLFlow experiment\n", "artifact_path = \"isolationforest\"\n", "experiment_name = f\"/Shared/isolation_forest_experiment-{str(uuid.uuid1())}/\"\n", - "model_name = f\"isolation-forest-model\"\n", - "if running_on_synapse():\n", - " from synapse.ml.core.platform import materializing_display as display\n", - "\n", - " # use regular display when running on interactive notebook\n", - " # from notebookutils.visualization import display" + "model_name = f\"isolation-forest-model\"" ] }, - { - "cell_type": "code", - "execution_count": null, - "outputs": [], - "source": [ - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ], - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - } - }, { "cell_type": "markdown", "metadata": { @@ -1030,4 +1005,4 @@ }, "nbformat": 4, "nbformat_minor": 1 -} \ No newline at end of file +} diff --git a/website/docs/features/causal_inference/about.md b/docs/Explore Algorithms/Causal Inference/Overview.md similarity index 97% rename from website/docs/features/causal_inference/about.md rename to docs/Explore Algorithms/Causal Inference/Overview.md index a4664dba24..2d5384d4de 100644 --- a/website/docs/features/causal_inference/about.md +++ b/docs/Explore Algorithms/Causal Inference/Overview.md @@ -1,7 +1,7 @@ --- -title: Causal Inference +title: Overview hide_title: true -sidebar_label: About +sidebar_label: Overview --- ## Causal Inference on Apache Spark @@ -58,4 +58,4 @@ dmlModel.getConfidenceInterval() ``` For an end to end application, check out the DoubleMLEstimator [notebook -example](../Effects%20of%20Outreach%20Efforts). +example](../Quickstart%20-%20Measure%20Causal%20Effects). 
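For readers of this overview, a condensed sketch of how the `dmlModel` referenced above might be constructed before `getConfidenceInterval()` is called; the nuisance models, column names, and the `dataset` DataFrame are placeholders, and the setters follow the estimator's documented builder pattern:

```python
from pyspark.ml.classification import LogisticRegression
from synapse.ml.causal import DoubleMLEstimator

dml = (
    DoubleMLEstimator()
    .setTreatmentModel(LogisticRegression())  # placeholder model for the treatment stage
    .setTreatmentCol("Treatment")             # placeholder binary treatment column
    .setOutcomeModel(LogisticRegression())    # placeholder model for the outcome stage
    .setOutcomeCol("Outcome")                 # placeholder outcome column
    .setMaxIter(20)
)

dmlModel = dml.fit(dataset)        # `dataset` stands in for your Spark DataFrame
dmlModel.getAvgTreatmentEffect()   # average treatment effect
dmlModel.getConfidenceInterval()   # the confidence interval shown above
```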
diff --git a/notebooks/features/causal_inference/Effects of Outreach Efforts.ipynb b/docs/Explore Algorithms/Causal Inference/Quickstart - Measure Causal Effects.ipynb similarity index 96% rename from notebooks/features/causal_inference/Effects of Outreach Efforts.ipynb rename to docs/Explore Algorithms/Causal Inference/Quickstart - Measure Causal Effects.ipynb index 8191154316..e584a20e37 100644 --- a/notebooks/features/causal_inference/Effects of Outreach Efforts.ipynb +++ b/docs/Explore Algorithms/Causal Inference/Quickstart - Measure Causal Effects.ipynb @@ -104,18 +104,6 @@ "| Revenue | Y | $ Revenue from customer given by the amount of software purchased |\n" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, { "cell_type": "code", "execution_count": 2, @@ -250,4 +238,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/notebooks/features/causal_inference/Heterogeneous Effects of Outreach Efforts.ipynb b/docs/Explore Algorithms/Causal Inference/Quickstart - Measure Heterogeneous Effects.ipynb similarity index 97% rename from notebooks/features/causal_inference/Heterogeneous Effects of Outreach Efforts.ipynb rename to docs/Explore Algorithms/Causal Inference/Quickstart - Measure Heterogeneous Effects.ipynb index 959ca8365a..d814880289 100644 --- a/notebooks/features/causal_inference/Heterogeneous Effects of Outreach Efforts.ipynb +++ b/docs/Explore Algorithms/Causal Inference/Quickstart - Measure Heterogeneous Effects.ipynb @@ -104,18 +104,6 @@ "| Revenue | Y | $ Revenue from customer given by the amount of software purchased |\n" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, { "cell_type": "code", "execution_count": null, @@ -331,4 +319,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/notebooks/features/classification/Classification - Before and After SynapseML.ipynb b/docs/Explore Algorithms/Classification/Quickstart - SparkML vs SynapseML.ipynb similarity index 82% rename from notebooks/features/classification/Classification - Before and After SynapseML.ipynb rename to docs/Explore Algorithms/Classification/Quickstart - SparkML vs SynapseML.ipynb index b6442f46f6..e55932c9f8 100644 --- a/notebooks/features/classification/Classification - Before and After SynapseML.ipynb +++ b/docs/Explore Algorithms/Classification/Quickstart - SparkML vs SynapseML.ipynb @@ -1,46 +1,58 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## Classification - Before and After SynapseML\n", - "\n", - "### 1. Introduction\n", - "\n", - "


\n", - "\n", - "In this tutorial, we perform the same classification task in two\n", + "# Classification - before and after SynapseML" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, + "source": [ + "


" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this article, you perform the same classification task in two\n", "different ways: once using plain **`pyspark`** and once using the\n", "**`synapseml`** library. The two methods yield the same performance,\n", - "but one of the two libraries is drastically simpler to use and iterate\n", - "on (can you guess which one?).\n", + "but the comparison highlights the simplicity of using `synapseml` compared to `pyspark`.\n", "\n", - "The task is simple: Predict whether a user's review of a book sold on\n", - "Amazon is good (rating > 3) or bad based on the text of the review. We\n", - "accomplish this by training LogisticRegression learners with different\n", + "The task is to predict whether a customer's review of a book sold on\n", + "Amazon is good (rating > 3) or bad based on the text of the review. You\n", + "accomplish it by training LogisticRegression learners with different\n", "hyperparameters and choosing the best model." ] }, { - "cell_type": "code", - "execution_count": null, + "attachments": {}, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" + "## Setup\n", + "Import the necessary Python libraries and get a Spark session." ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "### 2. Read the data\n", + "## Read the data\n", "\n", - "We download and read in the data. We show a sample below:" + "Download and read in the data." ] }, { @@ -56,16 +68,16 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "### 3. Extract more features and process data\n", + "## Extract features and process data\n", "\n", - "Real data however is more complex than the above dataset. It is common\n", - "for a dataset to have features of multiple types: text, numeric,\n", - "categorical. To illustrate how difficult it is to work with these\n", - "datasets, we add two numerical features to the dataset: the **word\n", - "count** of the review and the **mean word length**." + "Real data is more complex than the above dataset. It's common\n", + "for a dataset to have features of multiple types, such as text, numeric, and\n", + "categorical. To illustrate how difficult it is to work with these\n", + "datasets, add two numerical features to the dataset: the **word count** of the review and the **mean word length**." ] }, { @@ -142,25 +154,22 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 4a. Classify using pyspark\n", + "## Classify using pyspark\n", "\n", "To choose the best LogisticRegression classifier using the `pyspark`\n", - "library, need to *explicitly* perform the following steps:\n", + "library, you need to *explicitly* perform the following steps:\n", "\n", "1. Process the features:\n", " * Tokenize the text column\n", " * Hash the tokenized column into a vector using hashing\n", - " * Merge the numeric features with the vector in the step above\n", + " * Merge the numeric features with the vector\n", "2. Process the label column: cast it into the proper type.\n", "3. Train multiple LogisticRegression algorithms on the `train` dataset\n", " with different hyperparameters\n", "4. Compute the area under the ROC curve for each of the trained models\n", " and select the model with the highest metric as computed on the\n", " `test` dataset\n", "5. 
Evaluate the best model on the `validation` set\n", - "\n", - "As you can see below, there is a lot of work involved and a lot of\n", - "steps where something can go wrong!" + "5. Evaluate the best model on the `validation` set" ] }, { @@ -235,16 +244,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### 4b. Classify using synapseml\n", + "## Classify using SynapseML\n", "\n", - "Life is a lot simpler when using `synapseml`!\n", + "The pipeline can be simplified by using SynapseML:\n", "\n", "1. The **`TrainClassifier`** Estimator featurizes the data internally,\n", " as long as the columns selected in the `train`, `test`, `validation`\n", " dataset represent the features\n", "\n", "2. The **`FindBestModel`** Estimator finds the best model from a pool of\n", - " trained models by finding the model which performs best on the `test`\n", + " trained models by finding the model that performs best on the `test`\n", " dataset given the specified metric\n", "\n", "3. The **`ComputeModelStatistics`** Transformer computes the different\n", diff --git a/notebooks/features/classification/Classification - Adult Census.ipynb b/docs/Explore Algorithms/Classification/Quickstart - Train Classifier.ipynb similarity index 88% rename from notebooks/features/classification/Classification - Adult Census.ipynb rename to docs/Explore Algorithms/Classification/Quickstart - Train Classifier.ipynb index 04db727924..6408b13a52 100644 --- a/notebooks/features/classification/Classification - Adult Census.ipynb +++ b/docs/Explore Algorithms/Classification/Quickstart - Train Classifier.ipynb @@ -11,28 +11,6 @@ ], "metadata": {} }, - { - "cell_type": "code", - "execution_count": null, - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ], - "outputs": [], - "metadata": {} - }, - { - "cell_type": "code", - "execution_count": null, - "source": [ - "import numpy as np\n", - "import pandas as pd" - ], - "outputs": [], - "metadata": {} - }, { "cell_type": "markdown", "source": [ diff --git a/notebooks/features/classification/Classification - Adult Census with Vowpal Wabbit.ipynb b/docs/Explore Algorithms/Classification/Quickstart - Vowpal Wabbit on Tabular Data.ipynb similarity index 94% rename from notebooks/features/classification/Classification - Adult Census with Vowpal Wabbit.ipynb rename to docs/Explore Algorithms/Classification/Quickstart - Vowpal Wabbit on Tabular Data.ipynb index 4dab04ecaf..52b70dec61 100644 --- a/notebooks/features/classification/Classification - Adult Census with Vowpal Wabbit.ipynb +++ b/docs/Explore Algorithms/Classification/Quickstart - Vowpal Wabbit on Tabular Data.ipynb @@ -11,18 +11,6 @@ ")." 
] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/notebooks/features/classification/Classification - Twitter Sentiment with Vowpal Wabbit.ipynb b/docs/Explore Algorithms/Classification/Quickstart - Vowpal Wabbit on Text Data.ipynb similarity index 96% rename from notebooks/features/classification/Classification - Twitter Sentiment with Vowpal Wabbit.ipynb rename to docs/Explore Algorithms/Classification/Quickstart - Vowpal Wabbit on Text Data.ipynb index 83bac1450c..e85b6e1dad 100644 --- a/notebooks/features/classification/Classification - Twitter Sentiment with Vowpal Wabbit.ipynb +++ b/docs/Explore Algorithms/Classification/Quickstart - Vowpal Wabbit on Text Data.ipynb @@ -16,14 +16,10 @@ "outputs": [], "source": [ "import os\n", - "import re\n", "import urllib.request\n", - "import numpy as np\n", "import pandas as pd\n", "from zipfile import ZipFile\n", - "from bs4 import BeautifulSoup\n", "from pyspark.sql.functions import udf, rand, when, col\n", - "from pyspark.sql.types import StructType, StructField, DoubleType, StringType\n", "from pyspark.ml import Pipeline\n", "from pyspark.ml.feature import CountVectorizer, RegexTokenizer\n", "from synapse.ml.vw import VowpalWabbitClassifier\n", @@ -32,18 +28,6 @@ "import matplotlib.pyplot as plt" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, { "cell_type": "code", "execution_count": 3, diff --git a/website/docs/features/simple_deep_learning/about.md b/docs/Explore Algorithms/Deep Learning/Distributed Training.md similarity index 95% rename from website/docs/features/simple_deep_learning/about.md rename to docs/Explore Algorithms/Deep Learning/Distributed Training.md index 4ede10d040..8b9fd9d2ea 100644 --- a/website/docs/features/simple_deep_learning/about.md +++ b/docs/Explore Algorithms/Deep Learning/Distributed Training.md @@ -1,6 +1,6 @@ --- -title: Simple Deep Learning with SynapseML -sidebar_label: About +title: Distributed Training +sidebar_label: Distributed Training --- ### Why Simple Deep Learning @@ -72,5 +72,5 @@ pred_df = deep_vision_model.transform(test_df) ``` ## Examples -- [DeepLearning - Deep Vision Classification](../DeepLearning%20-%20Deep%20Vision%20Classification) -- [DeepLearning - Deep Text Classification](../DeepLearning%20-%20Deep%20Text%20Classification) +- [Quickstart - Fine-tune a Text Classifier](../Quickstart%20-%20Fine-tune%20a%20Text%20Classifier) +- [Quickstart - Fine-tune a Vision Classifier](../Quickstart%20-%20Fine-tune%20a%20Vision%20Classifier) diff --git a/website/docs/features/simple_deep_learning/installation.md b/docs/Explore Algorithms/Deep Learning/Getting Started.md similarity index 76% rename from website/docs/features/simple_deep_learning/installation.md rename to docs/Explore Algorithms/Deep Learning/Getting Started.md index 677e5589b7..98d5341c31 100644 --- a/website/docs/features/simple_deep_learning/installation.md +++ b/docs/Explore Algorithms/Deep Learning/Getting Started.md @@ -1,6 +1,6 @@ --- -title: Installation Guidance -sidebar_label: Installation Guidance for Deep Vision Classification +title: Getting Started +sidebar_label: 
Getting Started --- :::note @@ -21,22 +21,22 @@ Restarting the cluster automatically installs horovod v0.25.0 with pytorch_light You could install the single synapseml-deep-learning wheel package to get the full functionality of deep vision classification. Run the following command: ```powershell -pip install synapseml==0.11.1 +pip install synapseml==0.11.2 ``` An alternative is installing the SynapseML jar package in library management section, by adding: ``` -Coordinate: com.microsoft.azure:synapseml_2.12:0.11.1 +Coordinate: com.microsoft.azure:synapseml_2.12:0.11.2 Repository: https://mmlspark.azureedge.net/maven ``` :::note -If you install the jar package, follow the first two cells of this [sample](./DeepLearning%20-%20Deep%20Vision%20Classification.md/#environment-setup----reinstall-horovod-based-on-new-version-of-pytorch) +If you install the jar package, follow the first two cells of this [sample](../Quickstart%20-%20Fine-tune%20a%20Vision%20Classifier#environment-setup----reinstall-horovod-based-on-new-version-of-pytorch) to ensure horovod recognizes SynapseML. ::: ## 3. Try our sample notebook -You could follow the rest of this [sample](./DeepLearning%20-%20Deep%20Vision%20Classification.md) and have a try on your own dataset. +You could follow the rest of this [sample](../Quickstart%20-%20Fine-tune%20a%20Vision%20Classifier) and try it on your own dataset. Supported models (`backbone` parameter for `DeepVisionClassifer`) should be string format of [Torchvision-supported models](https://github.com/pytorch/vision/blob/v0.12.0/torchvision/models/__init__.py); You could also check by running `backbone in torchvision.models.__dict__`. diff --git a/website/versioned_docs/version-0.11.0/features/onnx/about.md b/docs/Explore Algorithms/Deep Learning/ONNX.md similarity index 94% rename from website/versioned_docs/version-0.11.0/features/onnx/about.md rename to docs/Explore Algorithms/Deep Learning/ONNX.md index baec0d8e6c..5d45e38679 100644 --- a/website/versioned_docs/version-0.11.0/features/onnx/about.md +++ b/docs/Explore Algorithms/Deep Learning/ONNX.md @@ -1,7 +1,7 @@ --- -title: ONNX model inferencing on Spark +title: ONNX hide_title: true -sidebar_label: About +sidebar_label: ONNX description: Learn how to use the ONNX model transformer to run inference for an ONNX model on Spark. --- @@ -71,7 +71,7 @@ available models, optionally filtering by name or tags. | softMaxDict | A map between output DataFrame columns, where the value column will be computed from taking the softmax of the key column. If the 'rawPrediction' column contains logits outputs, then one can set softMaxDict to `Map("rawPrediction" -> "probability")` to obtain the probability outputs. | None | | argMaxDict | A map between output DataFrame columns, where the value column will be computed from taking the argmax of the key column. This parameter can be used to convert probability or logits output to the predicted label. | None | | deviceType | Specify a device type the model inference runs on. Supported types are: CPU or CUDA. If not specified, auto detection will be used. | None | - | optimizationLevel | Specify the [optimization level](https://onnxruntime.ai/docs/resources/graph-optimizations.html#graph-optimization-levels) for the ONNX graph optimizations. Supported values are: `NO_OPT`, `BASIC_OPT`, `EXTENDED_OPT`, `ALL_OPT`. 
| `ALL_OPT` | + | optimizationLevel | Specify the [optimization level](https://onnxruntime.ai/docs/performance/model-optimizations/graph-optimizations.html#graph-optimization-levels) for the ONNX graph optimizations. Supported values are: `NO_OPT`, `BASIC_OPT`, `EXTENDED_OPT`, `ALL_OPT`. | `ALL_OPT` | 4. Call `transform` method to run inference on the input DataFrame. @@ -104,5 +104,5 @@ The below example shows how to perform the slicing manually with a direct ONNXMo ## Example -- [Interpretability - Image Explainers](../../responsible_ai/Interpretability%20-%20Image%20Explainers) -- [ONNX - Inference on Spark](../ONNX%20-%20Inference%20on%20Spark) +- [Image Explainers](../../Responsible%20AI/Image%20Explainers) +- [Quickstart - ONNX Model Inference](../Quickstart%20-%20ONNX%20Model%20Inference) diff --git a/notebooks/features/simple_deep_learning/DeepLearning - Deep Text Classification.ipynb b/docs/Explore Algorithms/Deep Learning/Quickstart - Fine-tune a Text Classifier.ipynb similarity index 94% rename from notebooks/features/simple_deep_learning/DeepLearning - Deep Text Classification.ipynb rename to docs/Explore Algorithms/Deep Learning/Quickstart - Fine-tune a Text Classifier.ipynb index 5430efad48..f730d14b58 100644 --- a/notebooks/features/simple_deep_learning/DeepLearning - Deep Text Classification.ipynb +++ b/docs/Explore Algorithms/Deep Learning/Quickstart - Fine-tune a Text Classifier.ipynb @@ -16,34 +16,24 @@ }, { "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "910eee89-ded8-4c36-90ae-e9b8539c5773", - "showTitle": false, - "title": "" - } - }, "source": [ "### Environment Setup on databricks" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "60a84fca-38ae-48dc-826a-1cc2011c3977", - "showTitle": false, - "title": "" - } - }, "outputs": [], "source": [ "# install cloudpickle 2.0.0 to add synapse module for usage of horovod\n", "%pip install cloudpickle==2.0.0 --force-reinstall --no-deps" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", diff --git a/notebooks/features/simple_deep_learning/DeepLearning - Deep Vision Classification.ipynb b/docs/Explore Algorithms/Deep Learning/Quickstart - Fine-tune a Vision Classifier.ipynb similarity index 100% rename from notebooks/features/simple_deep_learning/DeepLearning - Deep Vision Classification.ipynb rename to docs/Explore Algorithms/Deep Learning/Quickstart - Fine-tune a Vision Classifier.ipynb diff --git a/notebooks/features/onnx/ONNX - Inference on Spark.ipynb b/docs/Explore Algorithms/Deep Learning/Quickstart - ONNX Model Inference.ipynb similarity index 63% rename from notebooks/features/onnx/ONNX - Inference on Spark.ipynb rename to docs/Explore Algorithms/Deep Learning/Quickstart - ONNX Model Inference.ipynb index afd6ae2efc..866822e1fa 100644 --- a/notebooks/features/onnx/ONNX - Inference on Spark.ipynb +++ b/docs/Explore Algorithms/Deep Learning/Quickstart - ONNX Model Inference.ipynb @@ -2,50 +2,45 @@ "cells": [ { "cell_type": "markdown", - "metadata": {}, "source": [ - "## ONNX Inference on Spark\n", + "# ONNX Inference on Spark\n", "\n", - "In this example, we will train a LightGBM model, convert the model to ONNX format and use the converted model to infer some testing data on Spark.\n", + "In this example, you train a LightGBM model and convert the model to [ONNX](https://onnx.ai/) format. 
Once converted, you use the model to infer some testing data on Spark.\n", "\n", - "Python dependencies:\n", + "This example uses the following Python packages and versions:\n", "\n", - "- onnxmltools==1.7.0\n", - "- lightgbm==3.2.1\n" - ] + "- `onnxmltools==1.7.0`\n", + "- `lightgbm==3.2.1`\n" + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ - "Load training data" - ] + "## Load the example data\n", + "\n", + "To load the example data, add the following code examples to cells in your notebook and then run the cells:" + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, "outputs": [], "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" + "%pip install lightgbm" ], "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } + "collapsed": false } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "df = (\n", @@ -58,19 +53,38 @@ ")\n", "\n", "display(df)" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ - "Use LightGBM to train a model" - ] + "The output should look similar to the following table, though the values and number of rows may differ:\n", + "\n", + "| Interest Coverage Ratio | Net Income Flag | Equity to Liability |\n", + "| ----- | ----- | ----- |\n", + "| 0.5641 | 1.0 | 0.0165 |\n", + "| 0.5702 | 1.0 | 0.0208 |\n", + "| 0.5673 | 1.0 | 0.0165 |" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "markdown", + "source": [ + "## Use LightGBM to train a model" + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "from pyspark.ml.feature import VectorAssembler\n", @@ -100,26 +114,42 @@ ")\n", "\n", "model = model.fit(train_data)" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ - "Export the trained model to a LightGBM booster, convert it to ONNX format." - ] + "## Convert the model to ONNX format\n", + "\n", + "The following code exports the trained model to a LightGBM booster and then converts it to ONNX format:" + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "from synapse.ml.core.platform import running_on_binder\n", "\n", "if running_on_binder():\n", - " !pip install lightgbm==3.2.1\n", - " from IPython import get_ipython\n", + " from IPython import get_ipython" + ], + "metadata": { + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ "import lightgbm as lgb\n", "from lightgbm import Booster, LGBMClassifier\n", "\n", @@ -141,10 +171,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Load the ONNX payload into an `ONNXModel`, and inspect the model inputs and outputs." 
+ "After conversion, load the ONNX payload into an `ONNXModel` and inspect the model inputs and outputs:" ] }, { @@ -162,6 +193,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -183,10 +215,13 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Create some testing data and transform the data through the ONNX model." + "## Use the model for inference\n", + "\n", + "To perform inference with the model, the following code creates testing data and transforms the data through the ONNX model." ] }, { @@ -217,11 +252,39 @@ "\n", "display(onnx_ml.transform(testDf))" ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The output should look similar to the following table, though the values and number of rows may differ:\n", + "\n", + "| Index | Features | Prediction | Probability |\n", + "| ----- | ----- | ----- | ----- |\n", + "| 1 | `\"{\"type\":1,\"values\":[0.105...` | 0 | `\"{\"0\":0.835...` |\n", + "| 2 | `\"{\"type\":1,\"values\":[0.814...` | 0 | `\"{\"0\":0.658...` |" + ] } ], "metadata": { + "celltoolbar": "Tags", + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.8" } }, "nbformat": 4, diff --git a/notebooks/features/other/DeepLearning - Flower Image Classification.ipynb b/docs/Explore Algorithms/Deep Learning/Quickstart - Transfer Learn for Image Classification.ipynb similarity index 92% rename from notebooks/features/other/DeepLearning - Flower Image Classification.ipynb rename to docs/Explore Algorithms/Deep Learning/Quickstart - Transfer Learn for Image Classification.ipynb index 8d12174e04..d13f9b67df 100644 --- a/notebooks/features/other/DeepLearning - Flower Image Classification.ipynb +++ b/docs/Explore Algorithms/Deep Learning/Quickstart - Transfer Learn for Image Classification.ipynb @@ -15,15 +15,7 @@ "source": [ "from pyspark.ml import Transformer, Estimator, Pipeline\n", "from pyspark.ml.classification import LogisticRegression\n", - "import sys, time\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import running_on_synapse, running_on_databricks\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" + "import sys, time" ] }, { @@ -111,7 +103,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### How does it work?\n\n![Convolutional network weights](http://i.stack.imgur.com/Hl2H6.png)" + "### How does it work?\n", + "\n", + "![Convolutional network weights](http://i.stack.imgur.com/Hl2H6.png)" ] }, { diff --git a/notebooks/features/hyperparameter_tuning/HyperOpt-SynapseML.ipynb b/docs/Explore Algorithms/Hyperparameter Tuning/HyperOpt.ipynb similarity index 95% rename from notebooks/features/hyperparameter_tuning/HyperOpt-SynapseML.ipynb rename to docs/Explore Algorithms/Hyperparameter Tuning/HyperOpt.ipynb index 50910ae895..808f3c1488 100644 --- a/notebooks/features/hyperparameter_tuning/HyperOpt-SynapseML.ipynb +++ b/docs/Explore Algorithms/Hyperparameter Tuning/HyperOpt.ipynb @@ -19,7 +19,7 @@ "* Running distributed training with SynapseML without hyperparameter tuning.\n", 
"* Using Hyperopt to tune hyperparameters in the distributed training workflow.\n", "## Prerequisites\n", - " - If you are running it on Synapse, you'll need to [create an AML workspace and set up linked Service](https://microsoft.github.io/SynapseML/docs/next/mlflow/installation/). \n", + " - If you are running it on Synapse, you'll need to [create an AML workspace and set up linked Service](../../../Use%20with%20MLFlow/Overview/).\n", "\n", "## Requirements\n", " - Install HyperOpt" @@ -31,10 +31,7 @@ "metadata": {}, "outputs": [], "source": [ - "# %pip install hyperopt\n", - "import os\n", - "\n", - "os.system(\"pip install hyperopt\")" + "%pip install hyperopt mlflow" ] }, { @@ -47,17 +44,6 @@ "To track model training and tuning with MLflow, you could enable MLflow autologging by running `mlflow.pyspark.ml.autolog()`." ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# version >= 1.28.0 supports reading logModelAllowlistFile from url\n", - "# %pip install mlflow==1.29.0\n", - "os.system(\"pip install mlflow==1.29.0\")" - ] - }, { "cell_type": "code", "execution_count": null, @@ -65,18 +51,11 @@ "outputs": [], "source": [ "from synapse.ml.core.platform import *\n", - "from pyspark.sql import SparkSession\n", - "\n", - "spark = SparkSession.builder.getOrCreate()\n", "\n", "if running_on_synapse_internal():\n", " experiment_name = \"hyperopt-synapseml\"\n", "elif running_on_synapse():\n", " experiment_name = \"hyperopt-synapseml\"\n", - " # from notebookutils.visualization import display # use this display on interactive notebook\n", - " from synapse.ml.core.platform import (\n", - " materializing_display as display,\n", - " ) # display for pipeline testing\n", "else:\n", " experiment_name = \"/Shared/hyperopt-synapseml\"" ] @@ -517,4 +496,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/notebooks/features/other/HyperParameterTuning - Fighting Breast Cancer.ipynb b/docs/Explore Algorithms/Hyperparameter Tuning/Quickstart - Random Search.ipynb similarity index 78% rename from notebooks/features/other/HyperParameterTuning - Fighting Breast Cancer.ipynb rename to docs/Explore Algorithms/Hyperparameter Tuning/Quickstart - Random Search.ipynb index 02f2070471..22df0a1b42 100644 --- a/notebooks/features/other/HyperParameterTuning - Fighting Breast Cancer.ipynb +++ b/docs/Explore Algorithms/Hyperparameter Tuning/Quickstart - Random Search.ipynb @@ -1,34 +1,30 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "## HyperParameterTuning - Fighting Breast Cancer\n", - "\n", - "We can do distributed randomized grid search hyperparameter tuning with SynapseML.\n", + "# HyperParameterTuning - Fighting Breast Cancer\n", "\n", - "First, we import the packages" + "This tutorial shows how SynapseML can be used to identify the best combination of hyperparameters for your chosen classifiers, ultimately resulting in more accurate and reliable models. In order to demonstrate this, we'll show how to perform distributed randomized grid search hyperparameter tuning to build a model to identify breast cancer. 
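The tuning steps that follow rely on SynapseML's AutoML primitives; a condensed sketch of that pattern (the estimators, label column, parameter ranges, and the `tune` DataFrame below are placeholders for the values defined in the notebook's next cells) looks roughly like this:

```python
from pyspark.ml.classification import LogisticRegression, RandomForestClassifier
from synapse.ml.automl import *  # HyperparamBuilder, RandomSpace, TuneHyperparameters, ...
from synapse.ml.train import TrainClassifier

logReg, randForest = LogisticRegression(), RandomForestClassifier()
models = [TrainClassifier(model=m, labelCol="Label") for m in (logReg, randForest)]

# Each hyperparameter is sampled uniformly from its range or discrete set
space = (
    HyperparamBuilder()
    .addHyperparam(logReg, logReg.regParam, RangeHyperParam(0.1, 0.3))
    .addHyperparam(randForest, randForest.numTrees, DiscreteHyperParam([5, 10]))
    .build()
)

bestModel = TuneHyperparameters(
    evaluationMetric="accuracy",
    models=models,
    numFolds=2,
    numRuns=len(models) * 2,
    parallelism=1,
    paramSpace=RandomSpace(space).space(),
    seed=0,
).fit(tune)  # `tune` stands in for the tuning split created below
```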
" ] }, { - "cell_type": "code", - "execution_count": null, + "attachments": {}, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "import pandas as pd\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" + "## 1 - Set up dependencies\n", + "Start by importing pandas and setting up our Spark session." ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "Now let's read the data and split it to tuning and test sets:" + "Next, read the data and split it into tuning and test sets." ] }, { @@ -49,7 +45,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next, define the models that will be tuned:" + "Define the models to be used." ] }, { @@ -74,12 +70,14 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "We can specify the hyperparameters using the HyperparamBuilder.\n", - "We can add either DiscreteHyperParam or RangeHyperParam hyperparameters.\n", - "TuneHyperparameters will randomly choose values from a uniform distribution." + "## 2 - Find the best model using AutoML\n", + "\n", + "Import SynapseML's AutoML classes from `synapse.ml.automl`.\n", + "Specify the hyperparameters using the `HyperparamBuilder`. Add either `DiscreteHyperParam` or `RangeHyperParam` hyperparameters. `TuneHyperparameters` will randomly choose values from a uniform distribution:" ] }, { @@ -129,9 +127,11 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ + "## 3 - Evaluate the model\n", "We can view the best model's parameters and retrieve the underlying best model pipeline" ] }, @@ -188,4 +188,4 @@ }, "nbformat": 4, "nbformat_minor": 2 -} +} \ No newline at end of file diff --git a/website/docs/features/lightgbm/about.md b/docs/Explore Algorithms/LightGBM/Overview.md similarity index 99% rename from website/docs/features/lightgbm/about.md rename to docs/Explore Algorithms/LightGBM/Overview.md index bed6b7b8ee..f5979e1072 100644 --- a/website/docs/features/lightgbm/about.md +++ b/docs/Explore Algorithms/LightGBM/Overview.md @@ -1,7 +1,7 @@ --- -title: LightGBM +title: Overview hide_title: true -sidebar_label: About +sidebar_label: Overview --- # LightGBM on Apache Spark @@ -55,7 +55,7 @@ model = LightGBMRegressor(application='quantile', ``` For an end to end application, check out the LightGBM [notebook -example](../LightGBM%20-%20Overview). +example](../Quickstart%20-%20Classification,%20Ranking,%20and%20Regression). 
### Arguments/Parameters diff --git a/notebooks/features/lightgbm/LightGBM - Overview.ipynb b/docs/Explore Algorithms/LightGBM/Quickstart - Classification, Ranking, and Regression.ipynb similarity index 82% rename from notebooks/features/lightgbm/LightGBM - Overview.ipynb rename to docs/Explore Algorithms/LightGBM/Quickstart - Classification, Ranking, and Regression.ipynb index 6c66ff025f..580bf9373a 100644 --- a/notebooks/features/lightgbm/LightGBM - Overview.ipynb +++ b/docs/Explore Algorithms/LightGBM/Quickstart - Classification, Ranking, and Regression.ipynb @@ -1,42 +1,23 @@ { - "metadata": { - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - }, - "orig_nbformat": 2, - "kernelspec": { - "name": "python385jvsc74a57bd072be13fef265c65d19cf428fd1b09dd31615eed186d1dccdebb6e555960506ee", - "display_name": "Python 3.8.5 64-bit (conda)" - } - }, - "nbformat": 4, - "nbformat_minor": 2, "cells": [ { "cell_type": "markdown", + "metadata": {}, "source": [ "# LightGBM" - ], - "metadata": {} + ] }, { + "attachments": {}, "cell_type": "markdown", + "metadata": {}, "source": [ "[LightGBM](https://github.com/Microsoft/LightGBM) is an open-source,\n", "distributed, high-performance gradient boosting (GBDT, GBRT, GBM, or\n", "MART) framework. This framework specializes in creating high-quality and\n", "GPU enabled decision tree algorithms for ranking, classification, and\n", "many other machine learning tasks. LightGBM is part of Microsoft's\n", - "[DMTK](http://github.com/microsoft/dmtk) project.\n", + "[DMTK](https://github.com/microsoft/dmtk) project.\n", "\n", "### Advantages of LightGBM\n", "\n", @@ -56,49 +37,42 @@ "\n", "### LightGBM Usage:\n", "\n", - "- LightGBMClassifier: used for building classification models. For example, to predict whether a company will bankrupt or not, we could build a binary classification model with LightGBMClassifier.\n", + "- LightGBMClassifier: used for building classification models. For example, to predict whether a company enters bankruptcy or not, we could build a binary classification model with LightGBMClassifier.\n", "- LightGBMRegressor: used for building regression models. For example, to predict the house price, we could build a regression model with LightGBMRegressor.\n", "- LightGBMRanker: used for building ranking models. For example, to predict website searching result relevance, we could build a ranking model with LightGBMRanker." - ], - "metadata": {} + ] }, { + "attachments": {}, "cell_type": "markdown", + "metadata": {}, "source": [ "## Bankruptcy Prediction with LightGBM Classifier\n", "\n", - "\n", - "\n", "In this example, we use LightGBM to build a classification model in order to predict bankruptcy." 
- ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Read dataset" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" - ], + "metadata": {}, "outputs": [], - "metadata": {} + "source": [ + "from synapse.ml.core.platform import *" + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "df = (\n", " spark.read.format(\"csv\")\n", @@ -112,45 +86,45 @@ "print(\"records read: \" + str(df.count()))\n", "print(\"Schema: \")\n", "df.printSchema()" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "display(df)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Split the dataset into train and test" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "train, test = df.randomSplit([0.85, 0.15], seed=1)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Add featurizer to convert features to vector" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from pyspark.ml.feature import VectorAssembler\n", "\n", @@ -158,65 +132,74 @@ "featurizer = VectorAssembler(inputCols=feature_cols, outputCol=\"features\")\n", "train_data = featurizer.transform(train)[\"Bankrupt?\", \"features\"]\n", "test_data = featurizer.transform(test)[\"Bankrupt?\", \"features\"]" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Check if the data is unbalanced" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "display(train_data.groupBy(\"Bankrupt?\").count())" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Model Training" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from synapse.ml.lightgbm import LightGBMClassifier\n", "\n", "model = LightGBMClassifier(\n", " objective=\"binary\", featuresCol=\"features\", labelCol=\"Bankrupt?\", isUnbalance=True\n", ")" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "model = model.fit(train_data)" - ], - "outputs": [], - "metadata": {} + ] }, { + "attachments": {}, "cell_type": "markdown", + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, "source": [ - "By calling \"saveNativeModel\", it allows you to extract the underlying lightGBM model for fast deployment after you train on Spark." - ], - "metadata": {} + "\"saveNativeModel\" allows you to extract the underlying lightGBM model for fast deployment after you train on Spark." 
+ ] }, { "cell_type": "code", "execution_count": null, + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, + "outputs": [], "source": [ "from synapse.ml.lightgbm import LightGBMClassificationModel\n", "\n", @@ -226,29 +209,29 @@ " \"/models/lgbmclassifier.model\"\n", " )\n", "if running_on_synapse_internal():\n", - " model.saveNativeModel(\"Files/models/lgbmclassifier.model\")\n", - " model = LightGBMClassificationModel.loadNativeModelFromFile(\n", - " \"Files/models/lgbmclassifier.model\"\n", - " )\n", + " model.saveNativeModel(\"Files/models/lgbmclassifier.model\")\n", + " model = LightGBMClassificationModel.loadNativeModelFromFile(\n", + " \"Files/models/lgbmclassifier.model\"\n", + " )\n", "else:\n", " model.saveNativeModel(\"/tmp/lgbmclassifier.model\")\n", " model = LightGBMClassificationModel.loadNativeModelFromFile(\n", " \"/tmp/lgbmclassifier.model\"\n", " )" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Feature Importances Visualization" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "import pandas as pd\n", "import matplotlib.pyplot as plt\n", @@ -273,30 +256,30 @@ "plt.xlabel(\"importances\")\n", "plt.ylabel(\"features\")\n", "plt.show()" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Model Prediction" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "predictions = model.transform(test_data)\n", "predictions.limit(10).toPandas()" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from synapse.ml.train import ComputeModelStatistics\n", "\n", @@ -306,117 +289,116 @@ " scoredLabelsCol=\"prediction\",\n", ").transform(predictions)\n", "display(metrics)" - ], - "outputs": [], - "metadata": {} + ] }, { + "attachments": {}, "cell_type": "markdown", + "metadata": {}, "source": [ "## Quantile Regression for Drug Discovery with LightGBMRegressor\n", "\n", - "\n", - "\n", - "In this example, we show how to use LightGBM to build a simple regression model." - ], - "metadata": {} + "In this example, we show how to use LightGBM to build a regression model." 
+ ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Read dataset" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "triazines = spark.read.format(\"libsvm\").load(\n", " \"wasbs://publicwasb@mmlspark.blob.core.windows.net/triazines.scale.svmlight\"\n", ")" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "# print some basic info\n", "print(\"records read: \" + str(triazines.count()))\n", "print(\"Schema: \")\n", "triazines.printSchema()\n", "display(triazines.limit(10))" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Split dataset into train and test" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "train, test = triazines.randomSplit([0.85, 0.15], seed=1)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Model Training" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from synapse.ml.lightgbm import LightGBMRegressor\n", "\n", "model = LightGBMRegressor(\n", " objective=\"quantile\", alpha=0.2, learningRate=0.3, numLeaves=31\n", ").fit(train)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "print(model.getFeatureImportances())" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Model Prediction" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "scoredData = model.transform(test)\n", "display(scoredData)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from synapse.ml.train import ComputeModelStatistics\n", "\n", @@ -424,27 +406,27 @@ " evaluationMetric=\"regression\", labelCol=\"label\", scoresCol=\"prediction\"\n", ").transform(scoredData)\n", "display(metrics)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "## LightGBM Ranker" - ], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Read dataset" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "df = spark.read.format(\"parquet\").load(\n", " \"wasbs://publicwasb@mmlspark.blob.core.windows.net/lightGBMRanker_train.parquet\"\n", @@ -454,20 +436,20 @@ "print(\"Schema: \")\n", "df.printSchema()\n", "display(df.limit(10))" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Model Training" - ], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "from synapse.ml.lightgbm import LightGBMRanker\n", "\n", @@ -487,38 +469,57 @@ " evalAt=[1, 3, 5],\n", " metric=\"ndcg\",\n", ")" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "lgbm_ranker_model = lgbm_ranker.fit(df)" - ], - "outputs": [], - "metadata": {} + ] }, { "cell_type": "markdown", + "metadata": {}, "source": [ "#### Model Prediction" - ], - 
"metadata": {} + ] }, { "cell_type": "code", "execution_count": null, + "metadata": {}, + "outputs": [], "source": [ "dt = spark.read.format(\"parquet\").load(\n", " \"wasbs://publicwasb@mmlspark.blob.core.windows.net/lightGBMRanker_test.parquet\"\n", ")\n", "predictions = lgbm_ranker_model.transform(dt)\n", "predictions.limit(10).toPandas()" - ], - "outputs": [], - "metadata": {} + ] } - ] + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.8.5 64-bit (conda)", + "name": "python385jvsc74a57bd072be13fef265c65d19cf428fd1b09dd31615eed186d1dccdebb6e555960506ee" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5" + }, + "orig_nbformat": 2 + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/notebooks/features/cognitive_services/CognitiveServices - LangchainTransformer.ipynb b/docs/Explore Algorithms/OpenAI/Langchain.ipynb similarity index 86% rename from notebooks/features/cognitive_services/CognitiveServices - LangchainTransformer.ipynb rename to docs/Explore Algorithms/OpenAI/Langchain.ipynb index 577aedc2b8..1f22e8fa58 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - LangchainTransformer.ipynb +++ b/docs/Explore Algorithms/OpenAI/Langchain.ipynb @@ -40,12 +40,11 @@ "source": [ "## Step 1: Prerequisites\n", "\n", - "The key prerequisites for this quickstart include a working Azure OpenAI resource, and an Apache Spark cluster with SynapseML installed. We suggest creating a Synapse workspace, but an Azure Databricks, HDInsight, or Spark on Kubernetes, or even a python environment with the `pyspark` package will work. If you need to use the last component of the chain - An agent with web searching capabilities, you also need a SerpAPIKey.\n", + "The key prerequisites for this quickstart include a working Azure OpenAI resource, and an Apache Spark cluster with SynapseML installed. We suggest creating a Synapse workspace, but an Azure Databricks, HDInsight, or Spark on Kubernetes, or even a python environment with the `pyspark` package will work. \n", "\n", "1. An Azure OpenAI resource – request access [here](https://customervoice.microsoft.com/Pages/ResponsePage.aspx?id=v4j5cvGGr0GRqy180BHbR7en2Ais5pxKtso_Pz4b1_xUOFA5Qk1UWDRBMjg0WFhPMkIzTzhKQ1dWNyQlQCN0PWcu) before [creating a resource](https://docs.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource)\n", "1. [Create a Synapse workspace](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-create-workspace)\n", - "1. [Create a serverless Apache Spark pool](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-analyze-spark#create-a-serverless-apache-spark-pool)\n", - "1. Get a SerpAPIKey from [SerpApi](https://serpapi.com/)." + "1. [Create a serverless Apache Spark pool](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-analyze-spark#create-a-serverless-apache-spark-pool)" ] }, { @@ -72,34 +71,6 @@ "1. Connect your notebook to a cluster and follow along, editing and running the cells below." 
] }, - { - "cell_type": "code", - "execution_count": 0, - "metadata": { - "application/vnd.databricks.v1+cell": { - "cellMetadata": {}, - "inputWidgets": {}, - "nuid": "d0642e69-1669-4b18-94a2-258af0fbcf9f", - "showTitle": false, - "title": "" - } - }, - "outputs": [], - "source": [ - "# This cell ensures make magic command like '%pip install' works on synapse scheduled spark jobs\n", - "from synapse.ml.core.platform import running_on_synapse\n", - "\n", - "if running_on_synapse():\n", - " from IPython import get_ipython\n", - " from IPython.terminal.interactiveshell import TerminalInteractiveShell\n", - "\n", - " try:\n", - " shell = TerminalInteractiveShell.instance()\n", - " except:\n", - " pass\n", - " from notebookutils.visualization import display" - ] - }, { "cell_type": "code", "execution_count": 0, @@ -117,7 +88,7 @@ }, "outputs": [], "source": [ - "%pip install langchain openai pdf2image pdfminer.six pytesseract unstructured" + "%pip install openai langchain pdf2image pdfminer.six pytesseract unstructured" ] }, { @@ -140,11 +111,10 @@ "import os, openai, langchain, uuid\n", "from langchain.llms import AzureOpenAI, OpenAI\n", "from langchain.agents import load_tools, initialize_agent, AgentType\n", - "from langchain.chat_models import AzureChatOpenAI\n", "from langchain.chains import TransformChain, LLMChain, SimpleSequentialChain\n", "from langchain.document_loaders import OnlinePDFLoader\n", + "from langchain.tools.bing_search.tool import BingSearchRun, BingSearchAPIWrapper\n", "from langchain.prompts import PromptTemplate\n", - "import pyspark.sql.functions as f\n", "from synapse.ml.cognitive.langchain import LangchainTransformer\n", "from synapse.ml.core.platform import running_on_synapse, find_secret" ] @@ -169,7 +139,7 @@ "\n", "`openai_api_key = \"99sj2w82o....\"`\n", "\n", - "Note: If using SerpAPI you'll need to first [create a key](https://serpapi.com/dashboard)" + "`bing_subscription_key = \"...\"`" ] }, { @@ -189,19 +159,24 @@ }, "outputs": [], "source": [ - "os.environ[\"SERPAPI_API_KEY\"] = \"YOURSERPAPIKEY\"\n", "openai_api_key = find_secret(\"openai-api-key\")\n", "openai_api_base = \"https://synapseml-openai.openai.azure.com/\"\n", "openai_api_version = \"2022-12-01\"\n", "openai_api_type = \"azure\"\n", + "deployment_name = \"text-davinci-003\"\n", + "bing_search_url = \"https://api.bing.microsoft.com/v7.0/search\"\n", + "bing_subscription_key = find_secret(\"bing-search-key\")\n", "\n", + "os.environ[\"BING_SUBSCRIPTION_KEY\"] = bing_subscription_key\n", + "os.environ[\"BING_SEARCH_URL\"] = bing_search_url\n", "os.environ[\"OPENAI_API_TYPE\"] = openai_api_type\n", "os.environ[\"OPENAI_API_VERSION\"] = openai_api_version\n", "os.environ[\"OPENAI_API_BASE\"] = openai_api_base\n", "os.environ[\"OPENAI_API_KEY\"] = openai_api_key\n", + "\n", "llm = AzureOpenAI(\n", - " deployment_name=\"text-davinci-003\",\n", - " model_name=\"text-davinci-003\",\n", + " deployment_name=deployment_name,\n", + " model_name=deployment_name,\n", " temperature=0.1,\n", " verbose=True,\n", ")" @@ -390,7 +365,7 @@ "1. **Transform Chain**: Extract Paper Content from arxiv Link **=>**\n", "1. **LLMChain**: Summarize the Paper, extract paper title and authors **=>**\n", "1. **Transform Chain**: to generate the prompt **=>**\n", - "1. **Agent with Web Search Tool**: Use Web Search to find the recent papers by the first author (this part is commented out as it needs the SerpAPIKey to run successfully)" + "1. 
**Agent with Web Search Tool**: Use Web Search to find the recent papers by the first author" ] }, { @@ -434,7 +409,7 @@ " verbose=False,\n", ")\n", "\n", - "paper_summarizer_template = \"\"\"You are a paper summarizer, given the paper content, it is your job to summarize the paper into a short summary, and extract authors and paper title from the paper content.\n", + "paper_summarizer_template = \"\"\"You are a paper summarizer, given the paper content, it is your job to summarize the paper into a short summary, and extract authors and paper title from the paper content.\n", "Here is the paper content:\n", "{paper_content}\n", "Output:\n", @@ -445,19 +420,27 @@ ")\n", "summarize_chain = LLMChain(llm=llm, prompt=prompt, verbose=False)\n", "\n", - "sequential_chain = SimpleSequentialChain(\n", - " chains=[paper_content_extraction_chain, summarize_chain]\n", + "prompt_generation_chain = TransformChain(\n", + " input_variables=[\"Output\"],\n", + " output_variables=[\"prompt\"],\n", + " transform=prompt_generation,\n", + " verbose=False,\n", ")\n", "\n", - "\"\"\"\n", - "Uncomment the following when you have a SerpAPIKey to enable the final websearch component of the chain.\n", - "\"\"\"\n", - "# prompt_generation_chain = TransformChain(input_variables=[\"Output\"], output_variables=[\"prompt\"], transform=prompt_generation, verbose=False)\n", - "# tools = load_tools([\"serpapi\"], llm=llm)\n", - "# web_search_agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False)\n", - "# sequential_chain = SimpleSequentialChain(chains=[\n", - "# paper_content_extraction_chain, summarize_chain, prompt_generation_chain, web_search_agent\n", - "# ])" + "bing = BingSearchAPIWrapper(k=3)\n", + "tools = [BingSearchRun(api_wrapper=bing)]\n", + "web_search_agent = initialize_agent(\n", + " tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=False\n", + ")\n", + "\n", + "sequential_chain = SimpleSequentialChain(\n", + " chains=[\n", + " paper_content_extraction_chain,\n", + " summarize_chain,\n", + " prompt_generation_chain,\n", + " web_search_agent,\n", + " ]\n", + ")" ] }, { @@ -531,8 +514,25 @@ "notebookMetadata": { "pythonIndentUnit": 2 }, - "notebookName": "LangchainTransformer", + "notebookName": "CognitiveServices - LangchainTransformer", "widgets": {} + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.3" } }, "nbformat": 4, diff --git a/notebooks/features/cognitive_services/CognitiveServices - OpenAI.ipynb b/docs/Explore Algorithms/OpenAI/OpenAI.ipynb similarity index 97% rename from notebooks/features/cognitive_services/CognitiveServices - OpenAI.ipynb rename to docs/Explore Algorithms/OpenAI/OpenAI.ipynb index f54d476645..fd70e261af 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - OpenAI.ipynb +++ b/docs/Explore Algorithms/OpenAI/OpenAI.ipynb @@ -16,7 +16,7 @@ } }, "source": [ - "# Azure OpenAI for Big Data\n", + "# Azure OpenAI for big data\n", "\n", "The Azure OpenAI service can be used to solve a large number of natural language tasks through prompting the completion API. 
To make it easier to scale your prompting workflows from a few examples to large datasets of examples, we have integrated the Azure OpenAI service with the distributed machine learning library [SynapseML](https://www.microsoft.com/en-us/research/blog/synapseml-a-simple-multilingual-and-massively-parallel-machine-learning-library/). This integration makes it easy to use the [Apache Spark](https://spark.apache.org/) distributed computing framework to process millions of prompts with the OpenAI service. This tutorial shows how to apply large language models at a distributed scale using Azure OpenAI and Azure Synapse Analytics. " ] }, @@ -75,15 +75,7 @@ }, "outputs": [], "source": [ - "import os\n", - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import running_on_synapse, find_secret\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "if running_on_synapse():\n", - " from notebookutils.visualization import display\n", + "from synapse.ml.core.platform import find_secret\n", "\n", "# Fill in the following lines with your service information\n", "# Learn more about selecting which embedding model to choose: https://openai.com/blog/new-and-improved-embedding-model\n", @@ -329,7 +321,7 @@ "source": [ "### Generating Text Embeddings\n", "\n", - "In addition to completing text, we can also embed text for use in downstream algorithms or vector retrieval architectures. Creating embeddings allows you to search and retrieve documents from large collections and can be used when prompt engineering isn't sufficient for the task. For more information on using `OpenAIEmbedding`, see our [embedding guide](https://microsoft.github.io/SynapseML/docs/features/cognitive_services/CognitiveServices%20-%20OpenAI%20Embedding/)." + "In addition to completing text, we can also embed text for use in downstream algorithms or vector retrieval architectures. Creating embeddings allows you to search and retrieve documents from large collections and can be used when prompt engineering isn't sufficient for the task. For more information on using `OpenAIEmbedding`, see our [embedding guide](./Quickstart%20-%20OpenAI%20Embedding)." ] }, { @@ -469,7 +461,7 @@ "The example makes several requests to the service, one for each prompt. To complete multiple prompts in a single request, use batch mode. First, in the OpenAICompletion object, instead of setting the Prompt column to \"Prompt\", specify \"batchPrompt\" for the BatchPrompt column.\n", "To do so, create a dataframe with a list of prompts per row.\n", "\n", - "**Note** that as of this writing there is currently a limit of 20 prompts in a single request, and a hard limit of 2048 \"tokens\", or approximately 1500 words." + "As of this writing, there is a limit of 20 prompts in a single request, and a hard limit of 2048 \"tokens\", or approximately 1500 words."
] }, { diff --git a/notebooks/features/cognitive_services/CognitiveServices - OpenAI Embedding.ipynb b/docs/Explore Algorithms/OpenAI/Quickstart - OpenAI Embedding.ipynb similarity index 98% rename from notebooks/features/cognitive_services/CognitiveServices - OpenAI Embedding.ipynb rename to docs/Explore Algorithms/OpenAI/Quickstart - OpenAI Embedding.ipynb index 5f40955952..db6ac26237 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - OpenAI Embedding.ipynb +++ b/docs/Explore Algorithms/OpenAI/Quickstart - OpenAI Embedding.ipynb @@ -59,15 +59,7 @@ }, "outputs": [], "source": [ - "import os\n", - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import running_on_synapse, find_secret\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "if running_on_synapse():\n", - " from notebookutils.visualization import display\n", + "from synapse.ml.core.platform import find_secret\n", "\n", "# Fill in the following lines with your service information\n", "# Learn more about selecting which embedding model to choose: https://openai.com/blog/new-and-improved-embedding-model\n", diff --git a/notebooks/features/cognitive_services/CognitiveServices - Create a Multilingual Search Engine from Forms.ipynb b/docs/Explore Algorithms/OpenAI/Quickstart - Understand and Search Forms.ipynb similarity index 89% rename from notebooks/features/cognitive_services/CognitiveServices - Create a Multilingual Search Engine from Forms.ipynb rename to docs/Explore Algorithms/OpenAI/Quickstart - Understand and Search Forms.ipynb index 28ce1121ed..d5e91d2825 100644 --- a/notebooks/features/cognitive_services/CognitiveServices - Create a Multilingual Search Engine from Forms.ipynb +++ b/docs/Explore Algorithms/OpenAI/Quickstart - Understand and Search Forms.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -17,7 +18,7 @@ "source": [ "# Tutorial: Create a custom search engine and question-answering system\n", "\n", - "In this tutorial, learn how to index and query large data loaded from a Spark cluster. You'll set up a Jupyter Notebook that performs the following actions:\n", + "In this tutorial, learn how to index and query large data loaded from a Spark cluster. 
You will set up a Jupyter Notebook that performs the following actions:\n", "\n", "> + Load various forms (invoices) into a data frame in an Apache Spark session\n", "> + Analyze them to determine their features\n", @@ -27,6 +28,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -48,7 +50,20 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, + "outputs": [], + "source": [ + "%pip install openai" + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -63,36 +78,34 @@ }, "outputs": [], "source": [ - "import os\n", - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import running_on_synapse, find_secret\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "if running_on_synapse():\n", - " from notebookutils.visualization import display\n", - " import subprocess\n", - " import sys\n", + "from synapse.ml.core.platform import find_secret\n", "\n", - " subprocess.check_call([sys.executable, \"-m\", \"pip\", \"install\", \"openai\"])\n", - "\n", - "cognitive_key = find_secret(\"cognitive-api-key\")\n", + "cognitive_key = find_secret(\n", + " \"cognitive-api-key\"\n", + ") # Replace the call to find_secret with your key as a python string. e.g. cognitive_key=\"27snaiw...\"\n", "cognitive_location = \"eastus\"\n", "\n", - "translator_key = find_secret(\"translator-key\")\n", + "translator_key = find_secret(\n", + " \"translator-key\"\n", + ") # Replace the call to find_secret with your key as a python string.\n", "translator_location = \"eastus\"\n", "\n", - "search_key = find_secret(\"azure-search-key\")\n", + "search_key = find_secret(\n", + " \"azure-search-key\"\n", + ") # Replace the call to find_secret with your key as a python string.\n", "search_service = \"mmlspark-azure-search\"\n", "search_index = \"form-demo-index-5\"\n", "\n", - "openai_key = find_secret(\"openai-api-key\")\n", + "openai_key = find_secret(\n", + " \"openai-api-key\"\n", + ") # Replace the call to find_secret with your key as a python string.\n", "openai_service_name = \"synapseml-openai\"\n", "openai_deployment_name = \"gpt-35-turbo\"\n", "openai_url = f\"https://{openai_service_name}.openai.azure.com/\"" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -114,7 +127,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -155,6 +168,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -169,13 +183,17 @@ }, "pycharm": { "name": "#%% md\n" - } + }, + "tags": [ + "hide-synapse-internal" + ] }, "source": [ "" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -192,12 +210,12 @@ "source": [ "## 3 - Apply form recognition\n", "\n", - "This code loads the [AnalyzeInvoices transformer](https://microsoft.github.io/SynapseML/docs/documentation/transformers/transformers_cognitive/#analyzeinvoices) and passes a reference to the data frame containing the invoices. It calls the pre-built invoice model of Azure Forms Analyzer." + "This code loads the AnalyzeInvoices transformer and passes a reference to the data frame containing the invoices. 
It calls the pre-built invoice model of Azure Forms Analyzer." ] }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -208,7 +226,8 @@ "nuid": "c38db874-a1a5-49ae-913e-d55e3593c794", "showTitle": false, "title": "" - } + }, + "is_executing": true }, "outputs": [], "source": [ @@ -230,6 +249,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -253,7 +273,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -284,6 +304,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -303,7 +324,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -331,6 +352,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -347,12 +369,12 @@ "source": [ "## 5 - Add translations\n", "\n", - "This code loads [Translate](https://microsoft.github.io/SynapseML/docs/documentation/transformers/transformers_cognitive/#translate), a transformer that calls the Azure Translator service in Cognitive Services. The original text, which is in English in the \"Description\" column, is machine-translated into various languages. All of the output is consolidated into \"output.translations\" array." + "This code loads Translate, a transformer that calls the Azure Translator service in Cognitive Services. The original text, which is in English in the \"Description\" column, is machine-translated into various languages. All of the output is consolidated into \"output.translations\" array." 
] }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -388,6 +410,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -407,7 +430,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -457,7 +480,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -476,6 +499,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -490,12 +514,12 @@ } }, "source": [ - "## 7 - Infer vendor adress continent with OpenAI" + "## 7 - Infer vendor address continent with OpenAI" ] }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -536,7 +560,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -555,6 +579,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -574,7 +599,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -606,6 +631,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -625,7 +651,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -651,6 +677,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -665,14 +692,24 @@ } }, "source": [ - "## 10 - Build a simple chatbot that can use Azure Search as a tool 🧠🔧\n", - "#\n", + "## 10 - Build a chatbot that can use Azure Search as a tool 🧠🔧" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, + "source": [ "" ] }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -759,6 +796,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -778,7 +816,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -797,6 +835,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -816,7 +855,7 @@ }, { "cell_type": "code", - "execution_count": 0, + "execution_count": null, "metadata": { "application/vnd.databricks.v1+cell": { "cellMetadata": { @@ -862,4 +901,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} \ No newline at end of file +} diff --git a/notebooks/features/opencv/OpenCV - Pipeline Image Transformations.ipynb b/docs/Explore Algorithms/OpenCV/Image Transformations.ipynb similarity index 95% rename from notebooks/features/opencv/OpenCV - Pipeline Image Transformations.ipynb rename to docs/Explore Algorithms/OpenCV/Image Transformations.ipynb index 01e4a7178c..e7b2e799c6 100644 --- a/notebooks/features/opencv/OpenCV - Pipeline Image Transformations.ipynb +++ b/docs/Explore 
Algorithms/OpenCV/Image Transformations.ipynb @@ -26,18 +26,6 @@ "metadata": {}, "outputs": [], "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import running_on_synapse\n", - "\n", - "if running_on_synapse():\n", - " from notebookutils.visualization import display\n", - "\n", - "import synapse.ml\n", - "import numpy as np\n", "from synapse.ml.opencv import toNDArray\n", "from synapse.ml.io import *\n", "\n", diff --git a/website/docs/reference/cyber.md b/docs/Explore Algorithms/Other Algorithms/Cyber ML.md similarity index 90% rename from website/docs/reference/cyber.md rename to docs/Explore Algorithms/Other Algorithms/Cyber ML.md index dd742f6a46..1681dbfa01 100644 --- a/website/docs/reference/cyber.md +++ b/docs/Explore Algorithms/Other Algorithms/Cyber ML.md @@ -18,50 +18,50 @@ sidebar_label: CyberML In other words, it returns a sample from the complement set. ## feature engineering: [indexers.py](https://github.com/microsoft/SynapseML/blob/master/core/src/main/python/synapse/ml/cyber/feature/indexers.py) -1. [IdIndexer](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.indexers.IdIndexer) +1. [IdIndexer](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.indexers.IdIndexer) is a SparkML [Estimator](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Estimator.html). Given a dataframe, it creates an IdIndexerModel (described next) for categorical features. The model maps each partition and column seen in the given dataframe to an ID, for each partition or one consecutive range for all partition and column values. -2. [IdIndexerModel](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.indexers.IdIndexerModel) +2. [IdIndexerModel](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.indexers.IdIndexerModel) is a SparkML [Transformer](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Transformer.html). Given a dataframe maps each partition and column field to a consecutive integer ID. Partitions or column values not encountered in the estimator are mapped to 0. The model can operate in two modes, either create consecutive integer ID independently -3. [MultiIndexer](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.indexers.MultiIndexer) +3. [MultiIndexer](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.indexers.MultiIndexer) is a SparkML [Estimator](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Estimator.html). Uses multiple IdIndexers to generate a MultiIndexerModel (described next) for categorical features. The model contains multiple IdIndexers for multiple partitions and columns. -4. [MultiIndexerModel](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.indexers.MultiIndexerModel) +4. 
[MultiIndexerModel](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.indexers.MultiIndexerModel) is a SparkML [Transformer](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Transformer.html). Given a dataframe maps each partition and column field to a consecutive integer ID. Partitions or column values not encountered in the estimator are mapped to 0. The model can operate in two modes, either create consecutive integer ID independently ## feature engineering: [scalers.py](https://github.com/microsoft/SynapseML/blob/master/core/src/main/python/synapse/ml/cyber/feature/scalers.py) -1. [StandardScalarScaler](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.scalers.StandardScalarScaler) +1. [StandardScalarScaler](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.scalers.StandardScalarScaler) is a SparkML [Estimator](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Estimator.html). Given a dataframe it creates a StandardScalarScalerModel (described next) which normalizes any given dataframe according to the mean and standard deviation calculated on the dataframe given to the estimator. -2. [StandardScalarScalerModel](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.scalers.StandardScalarScalerModel) +2. [StandardScalarScalerModel](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.scalers.StandardScalarScalerModel) is a SparkML [Transformer](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Transformer.html). Given a dataframe with a value column x, the transformer changes its value as follows: x'=(x-mean)/stddev. That is, if the transformer is given the same dataframe the estimator was given then the value column will have a mean of 0.0 and a standard deviation of 1.0. -3. [LinearScalarScaler](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.scalers.LinearScalarScaler) +3. [LinearScalarScaler](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.scalers.LinearScalarScaler) is a SparkML [Estimator](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Estimator.html). Given a dataframe it creates a LinearScalarScalerModel (described next) which normalizes any given dataframe according to the minimum and maximum values calculated on the dataframe given to the estimator. -4. [LinearScalarScalerModel](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.scalers.LinearScalarScalerModel) +4. [LinearScalarScalerModel](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.feature.html#synapse.ml.cyber.feature.scalers.LinearScalarScalerModel) is a SparkML [Transformer](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Transformer.html). Given a dataframe with a value column x, the transformer changes its value such that if the transformer is given the same dataframe the estimator was given then the value column will be scaled linearly to the given ranges. 
## access anomalies: [collaborative_filtering.py](https://github.com/microsoft/SynapseML/blob/master/core/src/main/python/synapse/ml/cyber/anomaly/collaborative_filtering.py) -1. [AccessAnomaly](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.anomaly.html#synapse.ml.cyber.anomaly.collaborative_filtering.AccessAnomaly) +1. [AccessAnomaly](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.anomaly.html#synapse.ml.cyber.anomaly.collaborative_filtering.AccessAnomaly) is a SparkML [Estimator](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Estimator.html). Given a dataframe, the estimator generates an AccessAnomalyModel (described next). The model can detect anomalous access of users to resources where the access @@ -69,14 +69,14 @@ sidebar_label: CyberML a resource from Finance. This result is based solely on access patterns rather than explicit features. Internally, the code is based on Collaborative Filtering as implemented in Spark, using Matrix Factorization with Alternating Least Squares. -2. [AccessAnomalyModel](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.anomaly.html#synapse.ml.cyber.anomaly.collaborative_filtering.AccessAnomalyModel) +2. [AccessAnomalyModel](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.anomaly.html#synapse.ml.cyber.anomaly.collaborative_filtering.AccessAnomalyModel) is a SparkML [Transformer](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Transformer.html). Given a dataframe the transformer computes a value between (-inf, inf) where positive values indicate an anomaly score. Anomaly scores are computed to have a mean of 1.0 and a standard deviation of 1.0 over the original dataframe given to the estimator. -3. [ModelNormalizeTransformer](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.anomaly.html#synapse.ml.cyber.anomaly.collaborative_filtering.ModelNormalizeTransformer) +3. [ModelNormalizeTransformer](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.anomaly.html#synapse.ml.cyber.anomaly.collaborative_filtering.ModelNormalizeTransformer) is a SparkML [Transformer](https://spark.apache.org/docs/2.2.0/api/java/index.html?org/apache/spark/ml/Transformer.html). This transformer is used internally by AccessAnomaly to normalize a model to generate anomaly scores with mean 0.0 and standard deviation of 1.0. -4. [AccessAnomalyConfig](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.cyber.anomaly.html#synapse.ml.cyber.anomaly.collaborative_filtering.AccessAnomalyConfig) +4. [AccessAnomalyConfig](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.cyber.anomaly.html#synapse.ml.cyber.anomaly.collaborative_filtering.AccessAnomalyConfig) contains the default values for AccessAnomaly. 
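To make the estimator/transformer pairing described above concrete, here is a minimal, hedged sketch of fitting `AccessAnomaly` and scoring accesses in PySpark. It is illustrative only: the handful of toy rows, the assumption of an active `spark` session, and the reliance on the default `tenant_id`/`user`/`res` input columns and `anomaly_score` output column (the defaults held by `AccessAnomalyConfig`) are stand-ins, not requirements; the `Quickstart - Anomalous Access Detection` notebook below shows the full end-to-end workflow on a synthetic access log.

```python
# Hedged sketch: score (user, resource) accesses with the AccessAnomaly estimator.
# Assumes an active Spark session named `spark` and the default column names from
# AccessAnomalyConfig (tenant_id, user, res in; anomaly_score out).
from pyspark.sql import functions as f
from synapse.ml.cyber.anomaly.collaborative_filtering import AccessAnomaly

# Toy access log standing in for real telemetry: one row per observed access.
access_df = spark.createDataFrame(
    [
        (0, "hr_user_1", "hr_db"),
        (0, "hr_user_1", "hr_portal"),
        (0, "hr_user_2", "hr_db"),
        (0, "hr_user_3", "hr_portal"),
        (0, "fin_user_1", "fin_ledger"),
        (0, "fin_user_1", "fin_reports"),
        (0, "fin_user_2", "fin_ledger"),
        (0, "fin_user_3", "fin_reports"),
    ],
    ["tenant_id", "user", "res"],
)

# likelihoodCol and maxIter mirror the settings used in the quickstart notebook,
# which also sets spark.sparkContext.setCheckpointDir(...) for long ALS runs.
access_anomaly = AccessAnomaly(likelihoodCol="likelihood", maxIter=1000)
model = access_anomaly.fit(access_df)

# The fitted model appends an anomaly_score column; higher values flag accesses
# that don't fit the user's learned pattern (e.g., an HR user touching Finance).
scored = model.transform(access_df)
scored.orderBy(f.desc("anomaly_score")).show()
```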
diff --git a/notebooks/features/other/CyberML - Anomalous Access Detection.ipynb b/docs/Explore Algorithms/Other Algorithms/Quickstart - Anomalous Access Detection.ipynb similarity index 93% rename from notebooks/features/other/CyberML - Anomalous Access Detection.ipynb rename to docs/Explore Algorithms/Other Algorithms/Quickstart - Anomalous Access Detection.ipynb index 9d70b3810f..2642d7a9c5 100644 --- a/notebooks/features/other/CyberML - Anomalous Access Detection.ipynb +++ b/docs/Explore Algorithms/Other Algorithms/Quickstart - Anomalous Access Detection.ipynb @@ -30,63 +30,65 @@ }, { "cell_type": "markdown", - "metadata": {}, "source": [ "# Create an Azure Databricks cluster and install the following libs\n", "\n", "1. In Cluster Libraries install from library source Maven:\n", - "Coordinates: com.microsoft.azure:synapseml_2.12:0.11.1\n", + "Coordinates: com.microsoft.azure:synapseml_2.12:0.11.2\n", "Repository: https://mmlspark.azureedge.net/maven\n", "\n", "2. In Cluster Libraries install from PyPI the library called plotly" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "# Setup & Initialization" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ - "# this is used to produce the synthetic dataset for this test\n", - "from synapse.ml.cyber.dataset import DataFactory\n", - "\n", - "# the access anomalies model generator\n", - "from synapse.ml.cyber.anomaly.collaborative_filtering import AccessAnomaly\n", - "\n", - "from pyspark.sql import functions as f, types as t" - ] + "%pip install plotly" + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] + "# this is used to produce the synthetic dataset for this test\n", + "from synapse.ml.cyber.dataset import DataFactory\n", + "from synapse.ml.cyber.anomaly.collaborative_filtering import AccessAnomaly\n", + "from pyspark.sql import functions as f" + ], + "metadata": { + "collapsed": false + } }, { - "attachments": {}, "cell_type": "markdown", - "metadata": {}, "source": [ "# Load up datasets" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "from synapse.ml.core.platform import running_on_databricks, running_on_synapse\n", @@ -96,9 +98,6 @@ "else:\n", " spark.sparkContext.setCheckpointDir(\"./tmp/checkpoint_path/\")\n", "\n", - "if running_on_synapse():\n", - " from notebookutils.visualization import display\n", - "\n", "factory = DataFactory(\n", " num_hr_users=25,\n", " num_hr_resources=50,\n", @@ -119,39 +118,47 @@ "outgroup_df = spark.createDataFrame(\n", " factory.create_clustered_inter_test_data()\n", ").withColumn(\"tenant_id\", f.lit(0))" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "training_df.show()" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "print(training_df.count())\n", "print(ingroup_df.count())\n", "print(outgroup_df.count())" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "# Model setup & 
training" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "access_anomaly = AccessAnomaly(\n", @@ -161,37 +168,45 @@ " likelihoodCol=\"likelihood\",\n", " maxIter=1000,\n", ")" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "model = access_anomaly.fit(training_df)" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "# Apply model & show result stats" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "ingroup_scored_df = model.transform(ingroup_df)" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "ingroup_scored_df.agg(\n", @@ -200,21 +215,25 @@ " f.mean(\"anomaly_score\").alias(\"mean_anomaly_score\"),\n", " f.stddev(\"anomaly_score\").alias(\"stddev_anomaly_score\"),\n", ").show()" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "outgroup_scored_df = model.transform(outgroup_df)" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "outgroup_scored_df.agg(\n", @@ -223,19 +242,23 @@ " f.mean(\"anomaly_score\").alias(\"mean_anomaly_score\"),\n", " f.stddev(\"anomaly_score\").alias(\"stddev_anomaly_score\"),\n", ").show()" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "# Examine results" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": {}, "outputs": [], "source": [ "#\n", @@ -265,23 +288,19 @@ "results_to_la = results_to_la.withColumn(\"timestamp\", f.current_timestamp())\n", "\n", "display(results_to_la)" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "# Display all resource accesses by users with highest anomalous score" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "# !pip install plotly" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", diff --git a/notebooks/features/other/ConditionalKNN - Exploring Art Across Cultures.ipynb b/docs/Explore Algorithms/Other Algorithms/Quickstart - Exploring Art Across Cultures.ipynb similarity index 80% rename from notebooks/features/other/ConditionalKNN - Exploring Art Across Cultures.ipynb rename to docs/Explore Algorithms/Other Algorithms/Quickstart - Exploring Art Across Cultures.ipynb index c060adc495..226573fbd6 100644 --- a/notebooks/features/other/ConditionalKNN - Exploring Art Across Cultures.ipynb +++ b/docs/Explore Algorithms/Other Algorithms/Quickstart - Exploring Art Across Cultures.ipynb @@ -1,42 +1,44 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ "# Exploring Art across Culture and Medium with Fast, Conditional, k-Nearest Neighbors\n", "\n", - "\n", - "\n", - "This notebook serves as a guideline for match-finding via k-nearest-neighbors. 
In the code below, we will set up code that allows queries involving cultures and mediums of art amassed from the Metropolitan Museum of Art in NYC and the Rijksmuseum in Amsterdam." + "This article serves as a guideline for match-finding via k-nearest-neighbors. You set up code that allows queries involving cultures and mediums of art amassed from the Metropolitan Museum of Art in NYC and the Rijksmuseum in Amsterdam." ] }, { + "attachments": {}, "cell_type": "markdown", - "metadata": {}, + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, "source": [ - "### Overview of the BallTree\n", - "The structure functioning behind the kNN model is a BallTree, which is a recursive binary tree where each node (or \"ball\") contains a partition of the points of data to be queried. Building a BallTree involves assigning data points to the \"ball\" whose center they are closest to (with respect to a certain specified feature), resulting in a structure that allows binary-tree-like traversal and lends itself to finding k-nearest neighbors at a BallTree leaf." + "" ] }, { "cell_type": "markdown", - "metadata": {}, "source": [ - "#### Setup\n", - "Import necessary Python libraries and prepare dataset." - ] + "## Overview of the BallTree\n", + "The structure functioning behind the KNN model is a BallTree, which is a recursive binary tree where each node (or \"ball\") contains a partition of the points of data to be queried. Building a BallTree involves assigning data points to the \"ball\" whose center they're closest to (with respect to a certain specified feature), resulting in a structure that allows binary-tree-like traversal and lends itself to finding k-nearest neighbors at a BallTree leaf." + ], + "metadata": { + "collapsed": false + } }, { - "cell_type": "code", - "execution_count": 4, + "attachments": {}, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "from synapse.ml.core.platform import *\n", - "\n", - "if running_on_binder():\n", - " from IPython import get_ipython" + "## Setup\n", + "Import necessary Python libraries and prepare dataset." ] }, { @@ -47,20 +49,13 @@ "source": [ "from pyspark.sql.types import BooleanType\n", "from pyspark.sql.types import *\n", - "from pyspark.ml.feature import Normalizer\n", "from pyspark.sql.functions import lit, array, array_contains, udf, col, struct\n", "from synapse.ml.nn import ConditionalKNN, ConditionalKNNModel\n", "from PIL import Image\n", "from io import BytesIO\n", - "\n", "import requests\n", "import numpy as np\n", - "import matplotlib.pyplot as plt\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "from synapse.ml.core.platform import materializing_display as display" + "import matplotlib.pyplot as plt" ] }, { @@ -100,11 +95,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "#### Define categories to be queried on\n", - "We will be using two kNN models: one for culture, and one for medium. The categories for each grouping are defined below." + "## Define categories to be queried on\n", + "Two KNN models are used: one for culture, and one for medium." 
] }, { @@ -146,11 +142,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "### Define and fit ConditionalKNN models\n", - "Below, we create ConditionalKNN models for both the medium and culture columns; each model takes in an output column, features column (feature vector), values column (cell values under the output column), and label column (the quality that the respective KNN is conditioned on)." + "## Define and fit ConditionalKNN models\n", + "Create ConditionalKNN models for both the medium and culture columns; each model takes in an output column, features column (feature vector), values column (cell values under the output column), and label column (the quality that the respective KNN is conditioned on)." ] }, { @@ -190,11 +187,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "#### Define matching and visualizing methods\n", + "## Define matching and visualizing methods\n", "\n", - "After the initial dataset and category setup, we prepare methods that will query and visualize the conditional kNN's results. \n", + "After the initial dataset and category setup, prepare methods that will query and visualize the conditional KNN's results.\n", "\n", - "`addMatches()` will create a Dataframe with a handful of matches per category." + "`addMatches()` creates a Dataframe with a handful of matches per category." ] }, { @@ -213,6 +210,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ @@ -260,11 +258,12 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "### Putting it all together\n", - "Below, we define `test_all()` to take in the data, CKNN models, the art id values to query on, and the file path to save the output visualization to. The medium and culture models were previously trained and loaded." + "## Putting it all together\n", + "Define `test_all()` to take in the data, CKNN models, the art id values to query on, and the file path to save the output visualization to. The medium and culture models were previously trained and loaded." ] }, { @@ -305,13 +304,23 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": {}, "source": [ - "### Demo\n", - "The following cell performs batched queries given desired image IDs and a filename to save the visualization.\n", - "\n", - "\n", + "## Demo\n", + "The following cell performs batched queries given desired image IDs and a filename to save the visualization." 
+ ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": { + "tags": [ + "hide-synapse-internal" + ] + }, + "source": [ "" ] }, diff --git a/website/docs/reference/SAR.md b/docs/Explore Algorithms/Other Algorithms/Smart Adaptive Recommendations.md similarity index 100% rename from website/docs/reference/SAR.md rename to docs/Explore Algorithms/Other Algorithms/Smart Adaptive Recommendations.md diff --git a/notebooks/features/regression/Regression - Flight Delays with DataCleaning.ipynb b/docs/Explore Algorithms/Regression/Quickstart - Data Cleaning.ipynb similarity index 91% rename from notebooks/features/regression/Regression - Flight Delays with DataCleaning.ipynb rename to docs/Explore Algorithms/Regression/Quickstart - Data Cleaning.ipynb index 69b46b6eb7..38b1dc7768 100644 --- a/notebooks/features/regression/Regression - Flight Delays with DataCleaning.ipynb +++ b/docs/Explore Algorithms/Regression/Quickstart - Data Cleaning.ipynb @@ -16,34 +16,11 @@ "\n", "This sample demonstrates how to use the following APIs:\n", "- [`TrainRegressor`\n", - " ](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.train.html?#module-synapse.ml.train.TrainRegressor)\n", + " ](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.train.html?#module-synapse.ml.train.TrainRegressor)\n", "- [`ComputePerInstanceStatistics`\n", - " ](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.train.html?#module-synapse.ml.train.ComputePerInstanceStatistics)\n", + " ](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.train.html?#module-synapse.ml.train.ComputePerInstanceStatistics)\n", "- [`DataConversion`\n", - " ](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.featurize.html?#module-synapse.ml.featurize.DataConversion)\n", - "\n", - "First, import the pandas package" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import pandas as pd" + " ](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.featurize.html?#module-synapse.ml.featurize.DataConversion)" ] }, { diff --git a/notebooks/features/regression/Regression - Auto Imports.ipynb b/docs/Explore Algorithms/Regression/Quickstart - Train Regressor.ipynb similarity index 94% rename from notebooks/features/regression/Regression - Auto Imports.ipynb rename to docs/Explore Algorithms/Regression/Quickstart - Train Regressor.ipynb index 62c6b43dc4..b47c5cdcd1 100644 --- a/notebooks/features/regression/Regression - Auto Imports.ipynb +++ b/docs/Explore Algorithms/Regression/Quickstart - Train Regressor.ipynb @@ -15,32 +15,20 @@ "\n", "This sample demonstrates the use of several members of the synapseml library:\n", "- [`TrainRegressor`\n", - " ](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.train.html?#module-synapse.ml.train.TrainRegressor)\n", + " ](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.train.html?#module-synapse.ml.train.TrainRegressor)\n", "- [`SummarizeData`\n", - " ](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.stages.html?#module-synapse.ml.stages.SummarizeData)\n", + " 
](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.stages.html?#module-synapse.ml.stages.SummarizeData)\n", "- [`CleanMissingData`\n", - " ](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.featurize.html?#module-synapse.ml.featurize.CleanMissingData)\n", + " ](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.featurize.html?#module-synapse.ml.featurize.CleanMissingData)\n", "- [`ComputeModelStatistics`\n", - " ](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.train.html?#module-synapse.ml.train.ComputeModelStatistics)\n", + " ](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.train.html?#module-synapse.ml.train.ComputeModelStatistics)\n", "- [`FindBestModel`\n", - " ](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/synapse.ml.automl.html?#module-synapse.ml.automl.FindBestModel)\n", + " ](https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/synapse.ml.automl.html?#module-synapse.ml.automl.FindBestModel)\n", "\n", "First, import the pandas package so that we can read and parse the datafile\n", "using `pandas.read_csv()`" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/notebooks/features/regression/Regression - Vowpal Wabbit vs. LightGBM vs. Linear Regressor.ipynb b/docs/Explore Algorithms/Regression/Quickstart - Vowpal Wabbit and LightGBM.ipynb similarity index 96% rename from notebooks/features/regression/Regression - Vowpal Wabbit vs. LightGBM vs. Linear Regressor.ipynb rename to docs/Explore Algorithms/Regression/Quickstart - Vowpal Wabbit and LightGBM.ipynb index 97dd1c59ea..c3c2eebd42 100644 --- a/notebooks/features/regression/Regression - Vowpal Wabbit vs. LightGBM vs. Linear Regressor.ipynb +++ b/docs/Explore Algorithms/Regression/Quickstart - Vowpal Wabbit and LightGBM.ipynb @@ -13,23 +13,6 @@ " [Spark MLlib Linear Regression](https://spark.apache.org/docs/latest/ml-classification-regression.html#linear-regression)." 
] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "if running_on_synapse():\n", - " from synapse.ml.core.platform import materializing_display as display" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/website/docs/features/responsible_ai/Data Balance Analysis.md b/docs/Explore Algorithms/Responsible AI/Data Balance Analysis.md similarity index 99% rename from website/docs/features/responsible_ai/Data Balance Analysis.md rename to docs/Explore Algorithms/Responsible AI/Data Balance Analysis.md index 07f00ad07b..c8437cc65b 100644 --- a/website/docs/features/responsible_ai/Data Balance Analysis.md +++ b/docs/Explore Algorithms/Responsible AI/Data Balance Analysis.md @@ -32,7 +32,7 @@ In summary, Data Balance Analysis, when used as a step for building ML models, h ## Examples -* [Data Balance Analysis - Adult Census Income](../../../features/responsible_ai/DataBalanceAnalysis%20-%20Adult%20Census%20Income) +* [Quickstart - Data Balance Analysis](../Quickstart%20-%20Data%20Balance%20Analysis) ## Usage diff --git a/notebooks/features/responsible_ai/Interpretability - Explanation Dashboard.ipynb b/docs/Explore Algorithms/Responsible AI/Explanation Dashboard.ipynb similarity index 78% rename from notebooks/features/responsible_ai/Interpretability - Explanation Dashboard.ipynb rename to docs/Explore Algorithms/Responsible AI/Explanation Dashboard.ipynb index c734bb0a4a..f543dc78f5 100644 --- a/notebooks/features/responsible_ai/Interpretability - Explanation Dashboard.ipynb +++ b/docs/Explore Algorithms/Responsible AI/Explanation Dashboard.ipynb @@ -2,38 +2,33 @@ "cells": [ { "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "4a463c67-7543-42d2-a116-e70e8451b09b", - "showTitle": false, - "title": "" - } - }, "source": [ "## Interpretability - Explanation Dashboard\n", "\n", "In this example, similar to the \"Interpretability - Tabular SHAP explainer\" notebook, we use Kernel SHAP to explain a tabular classification model built from the Adult Census dataset and then visualize the explanation in the ExplanationDashboard from https://github.com/microsoft/responsible-ai-widgets.\n", "\n", "First, we import the packages and define some UDFs we will need later."
- ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, + "outputs": [], + "source": [ + "%pip install raiwidgets itsdangerous==2.0.1 interpret-community" + ], "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "bf0fdfc2-97b2-48e4-b3d9-794b0cb3da67", - "showTitle": false, - "title": "" - }, - "collapsed": true - }, + "collapsed": false + } + }, + { + "cell_type": "code", + "execution_count": null, "outputs": [], "source": [ - "import pyspark\n", - "from IPython import get_ipython\n", "from IPython.terminal.interactiveshell import TerminalInteractiveShell\n", "from synapse.ml.explainers import *\n", "from pyspark.ml import Pipeline\n", @@ -42,48 +37,26 @@ "from pyspark.sql.types import *\n", "from pyspark.sql.functions import *\n", "import pandas as pd\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import running_on_synapse\n", - "\n", - "if running_on_synapse():\n", - " shell = TerminalInteractiveShell.instance()\n", - " from notebookutils.visualization import display\n", - "\n", "\n", "vec_access = udf(lambda v, i: float(v[i]), FloatType())\n", "vec2array = udf(lambda vec: vec.toArray().tolist(), ArrayType(FloatType()))" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "ae47e1f9-0672-47ed-94de-10970e1b14b5", - "showTitle": false, - "title": "" - } - }, "source": [ "Now let's read the data and train a simple binary classification model." - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "58807448-d8e0-4818-adc8-27536d561fb3", - "showTitle": false, - "title": "" - }, - "collapsed": true - }, "outputs": [], "source": [ "df = spark.read.parquet(\n", @@ -129,68 +102,46 @@ "lr = LogisticRegression(featuresCol=\"features\", labelCol=\"label\", weightCol=\"fnlwgt\")\n", "pipeline = Pipeline(stages=[strIndexer, onehotEnc, vectAssem, lr])\n", "model = pipeline.fit(training)" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "f617f9a4-7e67-43f8-8fa9-92680b635b3d", - "showTitle": false, - "title": "" - } - }, "source": [ "After the model is trained, we randomly select some observations to be explained." - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "f55757a6-6204-4f64-a91e-65bfbacf62bc", - "showTitle": false, - "title": "" - }, - "collapsed": true - }, "outputs": [], "source": [ "explain_instances = (\n", " model.transform(training).orderBy(rand()).limit(5).repartition(200).cache()\n", ")\n", "display(explain_instances)" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "48a0c8ee-8e36-4bd3-9a04-eded6d2c8894", - "showTitle": false, - "title": "" - } - }, "source": [ "We create a TabularSHAP explainer, set the input columns to all the features the model takes, specify the model and the target output column we are trying to explain. 
In this case, we are trying to explain the \"probability\" output which is a vector of length 2, and we are only looking at class 1 probability. Specify targetClasses to `[0, 1]` if you want to explain class 0 and 1 probability at the same time. Finally we sample 100 rows from the training data for background data, which is used for integrating out features in Kernel SHAP." - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "7e097552-e617-4e1c-a085-b66eca5bcb69", - "showTitle": false, - "title": "" - }, - "collapsed": true - }, "outputs": [], "source": [ "shap = TabularSHAP(\n", @@ -204,36 +155,24 @@ ")\n", "\n", "shap_df = shap.transform(explain_instances)" - ] + ], + "metadata": { + "collapsed": false + } }, { - "attachments": {}, "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "6933b52b-7d46-4210-810a-f984b76dd4a2", - "showTitle": false, - "title": "" - } - }, "source": [ "Once we have the resulting dataframe, we extract the class 1 probability of the model output, the SHAP values for the target class, the original features and the true label. Then we convert it to a pandas dataframe for visualization.\n", "For each observation, the first element in the SHAP values vector is the base value (the mean output of the background dataset), and each of the following element is the SHAP values for each feature." - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "05e01f98-e44c-46c9-a8ae-26ba892f85b3", - "showTitle": false, - "title": "" - }, - "collapsed": true - }, "outputs": [], "source": [ "shaps = (\n", @@ -248,37 +187,25 @@ "shaps_local.sort_values(\"probability\", ascending=False, inplace=True, ignore_index=True)\n", "pd.set_option(\"display.max_colwidth\", None)\n", "shaps_local" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "f9317a27-900a-4d1d-9e9f-9fe906eae75c", - "showTitle": false, - "title": "" - } - }, "source": [ "We can visualize the explanation in the [interpret-community format](https://github.com/interpretml/interpret-community) in the ExplanationDashboard from https://github.com/microsoft/responsible-ai-widgets/" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "c9b4c03e-eac8-4314-a6c2-0a451525e6a4", - "showTitle": false, - "title": "" - }, - "collapsed": true - }, "outputs": [], "source": [ - "import pandas as pd\n", "import numpy as np\n", "\n", "features = categorical_features + numeric_features\n", @@ -289,14 +216,14 @@ "local_importance_values = shaps_local[[\"shapValues\"]]\n", "eval_data = shaps_local[features]\n", "true_y = np.array(shaps_local[[\"label\"]])" - ] + ], + "metadata": { + "collapsed": false + } }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": true - }, "outputs": [], "source": [ "list_local_importance_values = local_importance_values.values.tolist()\n", @@ -309,37 +236,19 @@ " # remove the bias from local importance values\n", " del converted_list[0]\n", " converted_importance_values.append(converted_list)" - ] + ], + 
"metadata": { + "collapsed": false + } }, { "cell_type": "markdown", - "metadata": {}, "source": [ "When running Synapse Analytics, please follow instructions here [Package management - Azure Synapse Analytics | Microsoft Docs](https://docs.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-azure-portal-add-libraries) to install [\"raiwidgets\"](https://pypi.org/project/raiwidgets/) and [\"interpret-community\"](https://pypi.org/project/interpret-community/) packages." - ] - }, - { - "cell_type": "code", - "execution_count": null, + ], "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "!pip install --upgrade raiwidgets\n", - "!pip install itsdangerous==2.0.1" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": true - }, - "outputs": [], - "source": [ - "!pip install --upgrade interpret-community" - ] + "collapsed": false + } }, { "cell_type": "code", @@ -480,4 +389,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} \ No newline at end of file +} diff --git a/notebooks/features/responsible_ai/Interpretability - Image Explainers.ipynb b/docs/Explore Algorithms/Responsible AI/Image Explainers.ipynb similarity index 94% rename from notebooks/features/responsible_ai/Interpretability - Image Explainers.ipynb rename to docs/Explore Algorithms/Responsible AI/Image Explainers.ipynb index 26681c9f47..57cfed4f33 100644 --- a/notebooks/features/responsible_ai/Interpretability - Image Explainers.ipynb +++ b/docs/Explore Algorithms/Responsible AI/Image Explainers.ipynb @@ -22,24 +22,14 @@ "from synapse.ml.opencv import ImageTransformer\n", "from synapse.ml.io import *\n", "from pyspark.ml import Pipeline\n", - "from pyspark.ml.classification import LogisticRegression\n", - "from pyspark.ml.feature import StringIndexer\n", "from pyspark.sql.functions import *\n", "from pyspark.sql.types import *\n", "import numpy as np\n", - "import pyspark\n", "import urllib.request\n", "import matplotlib.pyplot as plt\n", - "import PIL, io\n", "from PIL import Image\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", "from synapse.ml.core.platform import *\n", "\n", - "from synapse.ml.core.platform import materializing_display as display\n", "\n", "vec_slice = udf(\n", " lambda vec, indices: (vec.toArray())[indices].tolist(), ArrayType(FloatType())\n", @@ -73,10 +63,7 @@ " image_array[y, x, 3] = 200\n", " plt.clf()\n", " plt.imshow(image_array)\n", - " if running_on_synapse() or running_on_synapse_internal():\n", - " plt.show()\n", - " else:\n", - " display(plt)" + " plt.show()" ] }, { @@ -328,4 +315,4 @@ }, "nbformat": 4, "nbformat_minor": 4 -} +} \ No newline at end of file diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/Model Interpretation on Spark.md b/docs/Explore Algorithms/Responsible AI/Interpreting Model Predictions.md similarity index 96% rename from website/versioned_docs/version-0.10.0/features/responsible_ai/Model Interpretation on Spark.md rename to docs/Explore Algorithms/Responsible AI/Interpreting Model Predictions.md index 93dbc54cef..92b3ca745f 100644 --- a/website/versioned_docs/version-0.10.0/features/responsible_ai/Model Interpretation on Spark.md +++ b/docs/Explore Algorithms/Responsible AI/Interpreting Model Predictions.md @@ -1,7 +1,7 @@ --- -title: Model Interpretation on Spark +title: Interpreting Model Predictions hide_title: true -sidebar_label: Model Interpretation on Spark +sidebar_label: 
Interpreting Model Predictions --- # Model Interpretation on Spark @@ -26,9 +26,9 @@ Both explainers extends from `org.apache.spark.ml.Transformer`. After setting up To see examples of model interpretability on Spark in action, take a look at these sample notebooks: -- [Tabular SHAP explainer](../../../features/responsible_ai/Interpretability%20-%20Tabular%20SHAP%20explainer) -- [Image explainers](../../../features/responsible_ai/Interpretability%20-%20Image%20Explainers) -- [Text explainers](../../../features/responsible_ai/Interpretability%20-%20Text%20Explainers) +- [Tabular Explainers](../Tabular%20Explainers) +- [Image Explainers](../Image%20Explainers) +- [Text Explainers](../Text%20Explainers) | | Tabular models | Vector models | Image models | Text models | |------------------------|-----------------------------|---------------------------|-------------------------|-----------------------| diff --git a/notebooks/features/responsible_ai/Interpretability - PDP and ICE explainer.ipynb b/docs/Explore Algorithms/Responsible AI/PDP and ICE Explainers.ipynb similarity index 99% rename from notebooks/features/responsible_ai/Interpretability - PDP and ICE explainer.ipynb rename to docs/Explore Algorithms/Responsible AI/PDP and ICE Explainers.ipynb index 12b8a2a05f..af0d15ad4f 100644 --- a/notebooks/features/responsible_ai/Interpretability - PDP and ICE explainer.ipynb +++ b/docs/Explore Algorithms/Responsible AI/PDP and ICE Explainers.ipynb @@ -78,14 +78,7 @@ "from pyspark.ml.evaluation import BinaryClassificationEvaluator\n", "from synapse.ml.explainers import ICETransformer\n", "import matplotlib.pyplot as plt\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" + "from synapse.ml.core.platform import *" ] }, { @@ -1201,4 +1194,4 @@ }, "nbformat": 4, "nbformat_minor": 0 -} +} \ No newline at end of file diff --git a/notebooks/features/responsible_ai/DataBalanceAnalysis - Adult Census Income.ipynb b/docs/Explore Algorithms/Responsible AI/Quickstart - Data Balance Analysis.ipynb similarity index 97% rename from notebooks/features/responsible_ai/DataBalanceAnalysis - Adult Census Income.ipynb rename to docs/Explore Algorithms/Responsible AI/Quickstart - Data Balance Analysis.ipynb index e021561b2e..517cde68ca 100644 --- a/notebooks/features/responsible_ai/DataBalanceAnalysis - Adult Census Income.ipynb +++ b/docs/Explore Algorithms/Responsible AI/Quickstart - Data Balance Analysis.ipynb @@ -51,18 +51,10 @@ }, "outputs": [], "source": [ - "import matplotlib\n", "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pyspark.sql.functions as F\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" + "from synapse.ml.core.platform import *" ] }, { @@ -179,7 +171,7 @@ } }, "source": [ - "### [Calculate Feature Balance Measures](/docs/features/responsible_ai/Data%20Balance%20Analysis/)\n", + "### [Calculate Feature Balance Measures](../Data%20Balance%20Analysis)\n", "\n", "Feature Balance Measures allow us to see whether each combination of sensitive feature is receiving the positive outcome (true prediction) at equal rates.\n", "\n", @@ -380,7 
+372,7 @@ } }, "source": [ - "### Calculate [Distribution Balance Measures](/docs/features/responsible_ai/Data%20Balance%20Analysis/)\n", + "### Calculate [Distribution Balance Measures](../Data%20Balance%20Analysis)\n", "\n", "Distribution Balance Measures allow us to compare our data with a reference distribution (i.e. uniform distribution). They are calculated per sensitive column and don't use the label column. |" ] @@ -534,7 +526,7 @@ } }, "source": [ - "### Calculate [Aggregate Balance Measures](/docs/features/responsible_ai/Data%20Balance%20Analysis/)\n", + "### Calculate [Aggregate Balance Measures](../Data%20Balance%20Analysis)\n", "\n", "Aggregate Balance Measures allow us to obtain a higher notion of inequality. They are calculated on the global set of sensitive columns and don't use the label column.\n", "\n", diff --git a/notebooks/features/responsible_ai/Interpretability - Snow Leopard Detection.ipynb b/docs/Explore Algorithms/Responsible AI/Quickstart - Snow Leopard Detection.ipynb similarity index 97% rename from notebooks/features/responsible_ai/Interpretability - Snow Leopard Detection.ipynb rename to docs/Explore Algorithms/Responsible AI/Quickstart - Snow Leopard Detection.ipynb index 95721aae21..c478c81bea 100644 --- a/notebooks/features/responsible_ai/Interpretability - Snow Leopard Detection.ipynb +++ b/docs/Explore Algorithms/Responsible AI/Quickstart - Snow Leopard Detection.ipynb @@ -13,16 +13,8 @@ "cell_type": "code", "execution_count": null, "source": [ - "import os\n", - "from pyspark.sql import SparkSession\n", "from synapse.ml.core.platform import *\n", "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "if running_on_synapse():\n", - " from notebookutils.visualization import display\n", - "\n", "bing_search_key = find_secret(\"bing-search-key\")\n", "\n", "# WARNING this notebook requires a lot of memory.\n", diff --git a/notebooks/features/responsible_ai/Interpretability - Tabular SHAP explainer.ipynb b/docs/Explore Algorithms/Responsible AI/Tabular Explainers.ipynb similarity index 91% rename from notebooks/features/responsible_ai/Interpretability - Tabular SHAP explainer.ipynb rename to docs/Explore Algorithms/Responsible AI/Tabular Explainers.ipynb index 44b811a46e..55efc6d590 100644 --- a/notebooks/features/responsible_ai/Interpretability - Tabular SHAP explainer.ipynb +++ b/docs/Explore Algorithms/Responsible AI/Tabular Explainers.ipynb @@ -1,6 +1,7 @@ { "cells": [ { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -11,11 +12,11 @@ } }, "source": [ - "## Interpretability - Tabular SHAP explainer\n", + "# Interpretability - Tabular SHAP explainer\n", "\n", "In this example, we use Kernel SHAP to explain a tabular classification model built from the Adults Census dataset.\n", "\n", - "First we import the packages and define some UDFs we will need later." + "First we import the packages and define some UDFs we need later." 
] }, { @@ -31,7 +32,6 @@ }, "outputs": [], "source": [ - "import pyspark\n", "from synapse.ml.explainers import *\n", "from pyspark.ml import Pipeline\n", "from pyspark.ml.classification import LogisticRegression\n", @@ -39,21 +39,15 @@ "from pyspark.sql.types import *\n", "from pyspark.sql.functions import *\n", "import pandas as pd\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", "from synapse.ml.core.platform import *\n", "\n", - "from synapse.ml.core.platform import materializing_display as display\n", - "\n", "\n", "vec_access = udf(lambda v, i: float(v[i]), FloatType())\n", "vec2array = udf(lambda vec: vec.toArray().tolist(), ArrayType(FloatType()))" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -64,7 +58,7 @@ } }, "source": [ - "Now let's read the data and train a simple binary classification model." + "Now let's read the data and train a binary classification model." ] }, { @@ -126,6 +120,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -159,6 +154,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -169,7 +165,7 @@ } }, "source": [ - "We create a TabularSHAP explainer, set the input columns to all the features the model takes, specify the model and the target output column we are trying to explain. In this case, we are trying to explain the \"probability\" output which is a vector of length 2, and we are only looking at class 1 probability. Specify targetClasses to `[0, 1]` if you want to explain class 0 and 1 probability at the same time. Finally we sample 100 rows from the training data for background data, which is used for integrating out features in Kernel SHAP." + "We create a TabularSHAP explainer, set the input columns to all the features the model takes, specify the model and the target output column we're trying to explain. In this case, we're trying to explain the \"probability\" output, which is a vector of length 2, and we're only looking at class 1 probability. Specify targetClasses to `[0, 1]` if you want to explain class 0 and 1 probability at the same time. Finally we sample 100 rows from the training data for background data, which is used for integrating out features in Kernel SHAP." 
] }, { @@ -242,6 +238,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -304,10 +301,13 @@ "fig.update_yaxes(range=[-1, 1], fixedrange=True, zerolinecolor=\"black\")\n", "fig.update_xaxes(type=\"category\", tickangle=45, fixedrange=True)\n", "fig.update_layout(height=400 * rows, title_text=\"SHAP explanations\")\n", - "fig.show()" + "\n", + "if not running_on_synapse():\n", + " fig.show()" ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -315,10 +315,13 @@ "nuid": "8f22fceb-0fc0-4a86-a0ca-2a7b47b4795a", "showTitle": false, "title": "" - } + }, + "tags": [ + "hide-synapse-internal" + ] }, "source": [ - "Your results will look like:\n", + "Your results should look like:\n", "\n", "" ] diff --git a/notebooks/features/responsible_ai/Interpretability - Text Explainers.ipynb b/docs/Explore Algorithms/Responsible AI/Text Explainers.ipynb similarity index 96% rename from notebooks/features/responsible_ai/Interpretability - Text Explainers.ipynb rename to docs/Explore Algorithms/Responsible AI/Text Explainers.ipynb index ed78e24552..a46ca98f4f 100644 --- a/notebooks/features/responsible_ai/Interpretability - Text Explainers.ipynb +++ b/docs/Explore Algorithms/Responsible AI/Text Explainers.ipynb @@ -33,20 +33,12 @@ "source": [ "from pyspark.sql.functions import *\n", "from pyspark.sql.types import *\n", - "from pyspark.ml.feature import StopWordsRemover, HashingTF, IDF, Tokenizer\n", "from pyspark.ml import Pipeline\n", "from pyspark.ml.classification import LogisticRegression\n", "from synapse.ml.explainers import *\n", "from synapse.ml.featurize.text import TextFeaturizer\n", - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", "from synapse.ml.core.platform import *\n", "\n", - "from synapse.ml.core.platform import materializing_display as display", - "\n", "vec2array = udf(lambda vec: vec.toArray().tolist(), ArrayType(FloatType()))\n", "vec_access = udf(lambda v, i: float(v[i]), FloatType())" ] diff --git a/notebooks/features/vw/Vowpal Wabbit - Contextual Bandits.ipynb b/docs/Explore Algorithms/Vowpal Wabbit/Contextual Bandits.ipynb similarity index 92% rename from notebooks/features/vw/Vowpal Wabbit - Contextual Bandits.ipynb rename to docs/Explore Algorithms/Vowpal Wabbit/Contextual Bandits.ipynb index ea51fa390e..b3616faabe 100644 --- a/notebooks/features/vw/Vowpal Wabbit - Contextual Bandits.ipynb +++ b/docs/Explore Algorithms/Vowpal Wabbit/Contextual Bandits.ipynb @@ -22,22 +22,6 @@ "## Step1: Read the dataset" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/notebooks/features/vw/Vowpal Wabbit - Multi-class classification.ipynb b/docs/Explore Algorithms/Vowpal Wabbit/Multi-class classification.ipynb similarity index 91% rename from notebooks/features/vw/Vowpal Wabbit - Multi-class classification.ipynb rename to docs/Explore Algorithms/Vowpal Wabbit/Multi-class classification.ipynb index f5ce7ebbeb..08599b34a2 100644 --- a/notebooks/features/vw/Vowpal Wabbit - Multi-class 
classification.ipynb +++ b/docs/Explore Algorithms/Vowpal Wabbit/Multi-class classification.ipynb @@ -16,22 +16,6 @@ "#### Read dataset" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/website/versioned_docs/version-0.10.1/features/vw/about.md b/docs/Explore Algorithms/Vowpal Wabbit/Overview.md similarity index 97% rename from website/versioned_docs/version-0.10.1/features/vw/about.md rename to docs/Explore Algorithms/Vowpal Wabbit/Overview.md index ac0f56ff2f..eda047ca0b 100644 --- a/website/versioned_docs/version-0.10.1/features/vw/about.md +++ b/docs/Explore Algorithms/Vowpal Wabbit/Overview.md @@ -64,7 +64,7 @@ model = (VowpalWabbitRegressor(args="--holdout_off --loss_function quantile -q : You can pass command line parameters to VW via the args parameter, as documented in the [VW Wiki](https://github.com/vowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments). For an end to end application, check out the VowpalWabbit [notebook -example](../Vowpal%20Wabbit%20-%20Overview). +example](../Quickstart%20-%20Classification,%20Quantile%20Regression,%20and%20Regression). ### Hyper-parameter tuning @@ -87,7 +87,7 @@ VowpalWabbit on Spark uses an optimized JNI layer to efficiently support Spark. Java bindings can be found in the [VW GitHub repo](https://github.com/VowpalWabbit/vowpal_wabbit/blob/master/java/src/main/c%2B%2B/jni_spark_vw_generated.h). VW's command line tool uses a two-thread architecture (1x parsing/hashing, 1x learning) for learning and inference. -To fluently embed VW into the Spark ML eco system, the following adaptions were made: +To fluently embed VW into the Spark ML ecosystem, the following adaptations were made: - VW classifier/regressor operates on Spark's dense/sparse vectors - Pro: best composability with existing Spark ML components.
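As a quick illustration of the `args` pass-through described in the Vowpal Wabbit overview hunk above, here is a minimal PySpark sketch. It assumes SynapseML is installed; the column names (`f1`, `f2`, `f3`, `label`) and the toy values are placeholders, and the VW flags are the same quantile-loss flags already shown in the overview.

```python
from pyspark.sql import SparkSession
from pyspark.ml import Pipeline
from pyspark.ml.feature import VectorAssembler
from synapse.ml.vw import VowpalWabbitRegressor

spark = SparkSession.builder.getOrCreate()

# A tiny stand-in dataset; the column names and values are placeholders.
train_df = spark.createDataFrame(
    [(1.0, 2.0, 3.0, 10.0), (2.0, 1.0, 0.0, 5.0), (0.5, 4.0, 1.0, 7.5)],
    ["f1", "f2", "f3", "label"],
)

# Assemble the feature columns into the single vector column VW consumes.
assembler = VectorAssembler(inputCols=["f1", "f2", "f3"], outputCol="features")

# Any VW command-line flag documented in the VW Wiki can be forwarded through `args`;
# these are the quantile-loss flags shown in the overview above.
vw = VowpalWabbitRegressor(
    args="--holdout_off --loss_function quantile -q ::",
    labelCol="label",
    featuresCol="features",
)

model = Pipeline(stages=[assembler, vw]).fit(train_df)
model.transform(train_df).show()
```

Because the regressor follows the standard Spark ML estimator/transformer contract, a pipeline like this should also compose with SparkML tuning utilities such as `CrossValidator`, which is what the hyper-parameter tuning section of the overview refers to.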
diff --git a/notebooks/features/vw/Vowpal Wabbit - Classification using SparkML Vector.ipynb b/docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification using SparkML Vectors.ipynb similarity index 90% rename from notebooks/features/vw/Vowpal Wabbit - Classification using SparkML Vector.ipynb rename to docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification using SparkML Vectors.ipynb index ce1475083c..b08913b78c 100644 --- a/notebooks/features/vw/Vowpal Wabbit - Classification using SparkML Vector.ipynb +++ b/docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification using SparkML Vectors.ipynb @@ -23,22 +23,6 @@ "#### Read dataset" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" - ] - }, { "cell_type": "code", "execution_count": null, @@ -46,7 +30,6 @@ "outputs": [], "source": [ "import pyspark.sql.types as T\n", - "from pyspark.sql import functions as F\n", "\n", "schema = T.StructType(\n", " [\n", diff --git a/notebooks/features/vw/Vowpal Wabbit - Classification using VW-native Format.ipynb b/docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification using VW-native Format.ipynb similarity index 91% rename from notebooks/features/vw/Vowpal Wabbit - Classification using VW-native Format.ipynb rename to docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification using VW-native Format.ipynb index 38aae4b605..a88965eeac 100644 --- a/notebooks/features/vw/Vowpal Wabbit - Classification using VW-native Format.ipynb +++ b/docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification using VW-native Format.ipynb @@ -23,22 +23,6 @@ "#### Read dataset" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/notebooks/features/vw/Vowpal Wabbit - Overview.ipynb b/docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification, Quantile Regression, and Regression.ipynb similarity index 98% rename from notebooks/features/vw/Vowpal Wabbit - Overview.ipynb rename to docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification, Quantile Regression, and Regression.ipynb index 66c5258fe6..d60d70fb97 100644 --- a/notebooks/features/vw/Vowpal Wabbit - Overview.ipynb +++ b/docs/Explore Algorithms/Vowpal Wabbit/Quickstart - Classification, Quantile Regression, and Regression.ipynb @@ -85,23 +85,6 @@ "#### Read dataset" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import *\n", - "\n", - "if running_on_synapse():\n", - " from synapse.ml.core.platform import materializing_display as display" - ] - }, { "cell_type": "code", "execution_count": null, diff --git a/docs/Get Started/Create a Spark Cluster.ipynb b/docs/Get 
Started/Create a Spark Cluster.ipynb new file mode 100644 index 0000000000..704e034db7 --- /dev/null +++ b/docs/Get Started/Create a Spark Cluster.ipynb @@ -0,0 +1,87 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setting up your computing platform for SynapseML \n", + "\n", + "SynapseML is preinstalled on Microsoft Fabric and Synapse Analytics. Follow the instructions to get started with these platforms." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Microsoft Fabric\n", + "[Microsoft Fabric](https://www.microsoft.com/microsoft-fabric/) is an all-in-one analytics solution for enterprises that covers everything from data movement to data science, Real-Time Analytics, and business intelligence. It offers a comprehensive suite of services, including data lake, data engineering, and data integration, all in one place.\n", + "\n", + "SynapseML is preinstalled on Fabric, and this guide will walk you through getting access to Fabric." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "fabric-prerequisites" + ] + }, + "source": [ + "* [Get a Microsoft Fabric license](https://learn.microsoft.com/fabric/enterprise/licenses) or sign up for a free [Microsoft Fabric (Preview) trial](https://learn.microsoft.com/fabric/get-started/fabric-trial).\n", + "* Sign in to [Microsoft Fabric](https://fabric.microsoft.com/)\n", + "* Go to the Data Science experience.\n", + "* [Create a new notebook](https://learn.microsoft.com/en-us/fabric/data-engineering/how-to-use-notebook#create-notebooks) or attach your notebook to a lakehouse. On the left side, select **Add** to add an existing lakehouse or [create a lakehouse](https://learn.microsoft.com/en-us/fabric/data-engineering/how-to-use-notebook#connect-lakehouses-and-notebooks)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "SynapseML is preinstalled on Fabric, but if you want to use another version of SynapseML, follow [this guide on updating SynapseML](https://learn.microsoft.com/en-us/fabric/data-science/install-synapseml)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Synapse Analytics\n", + "[Azure Synapse Analytics](https://azure.microsoft.com/products/synapse-analytics) is an enterprise analytics service that accelerates time to insight across data warehouses and big data systems.\n", + "\n", + "SynapseML is preinstalled on Synapse Analytics. To start with Synapse Analytics, you need:" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "synapse-prerequisites" + ] + }, + "source": [ + "* A valid Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/).\n", + "* [Create a Synapse workspace and launch Synapse studio](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-create-workspace)\n", + "* [Create a serverless Apache Spark pool](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-analyze-spark#create-a-serverless-apache-spark-pool)\n", + "* Once Synapse Studio has launched, select **Develop**. Then, select the **\"+\"** icon to add a new resource. From there, select **Notebook**. A new notebook is created and opened. Alternatively, you can select **Import** to upload your notebook."
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "pycharm": { + "name": "#%% md\n" + } + }, + "source": [ + "SynapseML is preinstalled on Azure Synapse Analytics, but if you want to use another version of SynapseML, follow [this guide on updating SynapseML](../Install%20SynapseML)." + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/website/docs/getting_started/installation.md b/docs/Get Started/Install SynapseML.md similarity index 84% rename from website/docs/getting_started/installation.md rename to docs/Get Started/Install SynapseML.md index 4ab298fd0c..6870207c39 100644 --- a/website/docs/getting_started/installation.md +++ b/docs/Get Started/Install SynapseML.md @@ -1,6 +1,6 @@ --- -title: Installation -description: Getting started with SynapseML +title: Install SynapseML +description: Install SynapseML --- ## Synapse @@ -14,7 +14,7 @@ For Spark3.2 pool: { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1,org.apache.spark:spark-avro_2.12:3.3.1", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2,org.apache.spark:spark-avro_2.12:3.3.1", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -30,7 +30,7 @@ For Spark3.3 pool: { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1-spark3.3", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2-spark3.3", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -47,8 +47,8 @@ installed via pip with `pip install pyspark`. 
```python import pyspark spark = pyspark.sql.SparkSession.builder.appName("MyApp") \ - # Use 0.11.1-spark3.3 version for Spark3.3 and 0.11.1 version for Spark3.2 - .config("spark.jars.packages", "com.microsoft.azure:synapseml_2.12:0.11.1") \ + # Use 0.11.2-spark3.3 version for Spark3.3 and 0.11.2 version for Spark3.2 + .config("spark.jars.packages", "com.microsoft.azure:synapseml_2.12:0.11.2") \ .config("spark.jars.repositories", "https://mmlspark.azureedge.net/maven") \ .getOrCreate() import synapse.ml @@ -61,8 +61,8 @@ your `build.sbt`: ```scala resolvers += "SynapseML" at "https://mmlspark.azureedge.net/maven" -// Use 0.11.1 version for Spark3.2 and 0.11.1-spark3.3 for Spark3.3 -libraryDependencies += "com.microsoft.azure" % "synapseml_2.12" % "0.11.1" +// Use 0.11.2 version for Spark3.2 and 0.11.2-spark3.3 for Spark3.3 +libraryDependencies += "com.microsoft.azure" % "synapseml_2.12" % "0.11.2" ``` ## Spark package @@ -71,10 +71,10 @@ SynapseML can be conveniently installed on existing Spark clusters via the `--packages` option, examples: ```bash -# Please use 0.11.1-spark3.3 version for Spark3.3 and 0.11.1 version for Spark3.2 -spark-shell --packages com.microsoft.azure:synapseml_2.12:0.11.1 -pyspark --packages com.microsoft.azure:synapseml_2.12:0.11.1 -spark-submit --packages com.microsoft.azure:synapseml_2.12:0.11.1 MyApp.jar +# Please use 0.11.2-spark3.3 version for Spark3.3 and 0.11.2 version for Spark3.2 +spark-shell --packages com.microsoft.azure:synapseml_2.12:0.11.2 +pyspark --packages com.microsoft.azure:synapseml_2.12:0.11.2 +spark-submit --packages com.microsoft.azure:synapseml_2.12:0.11.2 MyApp.jar ``` A similar technique can be used in other Spark contexts too. For example, you can use SynapseML @@ -89,8 +89,8 @@ cloud](http://community.cloud.databricks.com), create a new [library from Maven coordinates](https://docs.databricks.com/user-guide/libraries.html#libraries-from-maven-pypi-or-spark-packages) in your workspace. -For the coordinates use: `com.microsoft.azure:synapseml_2.12:0.11.1` for Spark3.2 Cluster and - `com.microsoft.azure:synapseml_2.12:0.11.1-spark3.3` for Spark3.3 Cluster; +For the coordinates use: `com.microsoft.azure:synapseml_2.12:0.11.2` for Spark3.2 Cluster and + `com.microsoft.azure:synapseml_2.12:0.11.2-spark3.3` for Spark3.3 Cluster; Add the resolver: `https://mmlspark.azureedge.net/maven`. Ensure this library is attached to your target cluster(s). @@ -98,7 +98,7 @@ Finally, ensure that your Spark cluster has at least Spark 3.2 and Scala 2.12. You can use SynapseML in both your Scala and PySpark notebooks. 
To get started with our example notebooks, import the following databricks archive: -`https://mmlspark.blob.core.windows.net/dbcs/SynapseMLExamplesv0.11.1.dbc` +`https://mmlspark.blob.core.windows.net/dbcs/SynapseMLExamplesv0.11.2.dbc` ## Microsoft Fabric @@ -111,7 +111,7 @@ In Microsoft Fabric notebooks please place the following in the first cell of yo { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1,org.apache.spark:spark-avro_2.12:3.3.1", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2,org.apache.spark:spark-avro_2.12:3.3.1", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -128,7 +128,7 @@ In Microsoft Fabric notebooks please place the following in the first cell of yo { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1-spark3.3", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2-spark3.3", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -148,8 +148,8 @@ Excluding certain packages from the library may be necessary due to current issu { "name": "synapseml", "conf": { - # Please use 0.11.1 version for Spark3.2 and 0.11.1-spark3.3 version for Spark3.3 - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1", + # Please use 0.11.2 version for Spark3.2 and 0.11.2-spark3.3 version for Spark3.3 + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind" } } @@ -162,8 +162,8 @@ In Azure Synapse, "spark.yarn.user.classpath.first" should be set to "true" to o { "name": "synapseml", "conf": { - # Please use 0.11.1 version for Spark3.2 and 0.11.1-spark3.3 version for Spark3.3 - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1", + # Please use 0.11.2 version for Spark3.2 and 0.11.2-spark3.3 version for Spark3.3 + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true" } @@ -180,7 +180,7 @@ docker run -it -p 8888:8888 -e ACCEPT_EULA=yes mcr.microsoft.com/mmlspark/releas ``` Navigate to in your web browser to run the sample -notebooks. See the [documentation](reference/docker.md) for more on Docker use. +notebooks. See the [documentation](../../Reference/Docker Setup.md) for more on Docker use. > To read the EULA for using the docker image, run ``` bash @@ -191,21 +191,21 @@ docker run -it -p 8888:8888 mcr.microsoft.com/mmlspark/release eula ## Building from source SynapseML has recently transitioned to a new build infrastructure. 
-For detailed developer docs, see the [Developer Readme](reference/developer-readme.md) +For detailed developer docs, see the [Developer Readme](../../Reference/Docker%20Setup) If you're an existing SynapseML developer, you'll need to reconfigure your development setup. We now support platform independent development and better integrate with intellij and SBT. - If you encounter issues, reach out to our support email! +If you encounter issues, reach out to our support email! ## R (Beta) To try out SynapseML using the R autogenerated wrappers, [see our -instructions](reference/R-setup.md). Note: This feature is still under development +instructions](../../Reference/R%20Setup). Note: This feature is still under development and some necessary custom wrappers may be missing. ## C# (.NET) -To try out SynapseML with .NET, follow the [.NET Installation Guide](reference/dotnet-setup.md). +To try out SynapseML with .NET, follow the [.NET Installation Guide](../../Reference/Dotnet%20Setup). Note: Some stages including AzureSearchWriter, DiagnosticInfo, UDPyF Param, ParamSpaceParam, BallTreeParam, ConditionalBallTreeParam, LightGBMBooster Param are still under development and not exposed in .NET. diff --git a/notebooks/features/classification/Classification - Sentiment Analysis Quickstart.ipynb b/docs/Get Started/Quickstart - Your First Models.ipynb similarity index 71% rename from notebooks/features/classification/Classification - Sentiment Analysis Quickstart.ipynb rename to docs/Get Started/Quickstart - Your First Models.ipynb index 1bf408c7c5..4c339dc5f6 100644 --- a/notebooks/features/classification/Classification - Sentiment Analysis Quickstart.ipynb +++ b/docs/Get Started/Quickstart - Your First Models.ipynb @@ -11,34 +11,12 @@ } }, "source": [ - "# A 5-minute tour of SynapseML" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "jupyter": { - "outputs_hidden": false, - "source_hidden": false - }, - "nteract": { - "transient": { - "deleting": false - } - } - }, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import *\n", - "\n", - "spark = SparkSession.builder.getOrCreate()\n", - "\n", - "from synapse.ml.core.platform import materializing_display as display" + "# Build your first SynapseML models\n", + "This tutorial provides a brief introduction to SynapseML. In particular, we use SynapseML to create two different pipelines for sentiment analysis. The first pipeline combines a text featurization stage with LightGBM regression to predict ratings based on review text from a dataset containing book reviews from Amazon. The second pipeline shows how to use prebuilt models through the Azure Cognitive Services to solve this problem without training data." ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "nteract": { @@ -48,7 +26,8 @@ } }, "source": [ - "# Step 1: Load our Dataset" + "## Load a dataset\n", + "Load your dataset and split it into train and test sets." ] }, { @@ -77,6 +56,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -87,7 +67,8 @@ } }, "source": [ - "# Step 2: Make our Model" + "## Create the training pipeline\n", + "Create a pipeline that featurizes data using `TextFeaturizer` from the `synapse.ml.featurize.text` library and derives a rating using the `LightGBMRegressor` function." 
] }, { @@ -116,6 +97,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -126,7 +108,8 @@ } }, "source": [ - "# Step 3: Predict!" + "## Predict the output of the test data\n", + "Call the `transform` function on the model to predict and display the output of the test data as a dataframe." ] }, { @@ -146,6 +129,7 @@ ] }, { + "attachments": {}, "cell_type": "markdown", "metadata": { "application/vnd.databricks.v1+cell": { @@ -156,7 +140,8 @@ } }, "source": [ - "# Alternate route: Let the Cognitive Services handle it" + "## Use Cognitive Services to transform data in one step\n", + "Alternatively, for these kinds of tasks that have a prebuilt solution, you can use SynapseML's integration with Cognitive Services to transform your data in one step." ] }, { @@ -181,7 +166,9 @@ "model = TextSentiment(\n", " textCol=\"text\",\n", " outputCol=\"sentiment\",\n", - " subscriptionKey=find_secret(\"cognitive-api-key\"),\n", + " subscriptionKey=find_secret(\n", + " \"cognitive-api-key\"\n", + " ), # Replace the call to find_secret with your key as a python string.\n", ").setLocation(\"eastus\")\n", "\n", "display(model.transform(test))" diff --git a/docs/Get Started/Set up Cognitive Services.ipynb b/docs/Get Started/Set up Cognitive Services.ipynb new file mode 100644 index 0000000000..dbabcbe42d --- /dev/null +++ b/docs/Get Started/Set up Cognitive Services.ipynb @@ -0,0 +1,77 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Setting up Cognitive Services and Azure OpenAI resources for SynapseML \n", + "\n", + "In order to use SynapseML's OpenAI or Cognitive Services features, specific Azure resources are required. This documentation walks you through the process of setting up these resources and acquiring the necessary credentials." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "azure-prerequisites" + ] + }, + "source": [ + "First, create an Azure subscription to create resources.\n", + "* A valid Azure subscription - [Create one for free](https://azure.microsoft.com/free/cognitive-services/)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Azure OpenAI\n", + "\n", + "The [Azure OpenAI service](https://azure.microsoft.com/products/cognitive-services/openai-service/) can be used to solve a large number of natural language tasks through prompting the completion API. To make it easier to scale your prompting workflows from a few examples to large datasets of examples, we have integrated the Azure OpenAI service with the distributed machine learning library SynapseML. This integration makes it easy to use the Apache Spark distributed computing framework to process millions of prompts with the OpenAI service." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "openai-prerequisites" + ] + }, + "source": [ + "To set up your Azure OpenAI Resource for SynapseML usage you need to: \n", + "* [Apply for access to Azure OpenAI](https://aka.ms/oai/access) if you do not already have access. \n", + "* [Create an Azure OpenAI resource](https://docs.microsoft.com/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal#create-a-resource) \n", + "* Get your Azure OpenAI resource's key. After your resource is successfully deployed, select **Next Steps** > **Go to resource**. Once at the resource, you can get the key from **Resource Management** > **Keys and Endpoint**. Copy the key and paste it into the notebook. 
Store keys securely and do not share them. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Cognitive Services" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "tags": [ + "cognitive-services-prerequisites" + ] + }, + "source": [ + "To set up [Cognitive Services](https://azure.microsoft.com/products/cognitive-services/) for use with SynapseML you first need to:\n", + "* [Assign yourself the Cognitive Services Contributor role](https://learn.microsoft.com/azure/role-based-access-control/role-assignments-steps) to agree to the responsible AI terms and create a resource. \n", + "* [Create an Azure Cognitive multi-service (Decision, Language, Speech, Vision) resource](https://portal.azure.com/#create/Microsoft.CognitiveServicesAllInOne). Alternatively, you can follow the steps to [create Single-service resource](https://learn.microsoft.com/en-us/azure/cognitive-services/cognitive-services-apis-create-account?tabs=decision%2Canomaly-detector%2Clanguage-service%2Ccomputer-vision%2Cwindows#create-a-new-azure-cognitive-services-resource). \n", + "* Get your Cognitive Service resource's key. After your resource is successfully deployed, select **Next Steps** > **Go to resource**. Once at the resource, you can get the key from **Resource Management** > **Keys and Endpoint**. Copy the key and paste it into the notebook. Store keys securely and do not share them. " + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} \ No newline at end of file diff --git a/docs/Overview.md b/docs/Overview.md new file mode 100644 index 0000000000..c5a4a615d9 --- /dev/null +++ b/docs/Overview.md @@ -0,0 +1,29 @@ +--- +title: What is SynapseML? +sidebar_label: What is SynapseML? +hide_title: true +--- + +import useBaseUrl from "@docusaurus/useBaseUrl"; + +# What is SynapseML? + +SynapseML (previously known as MMLSpark), is an open-source library that simplifies the creation of massively scalable machine learning (ML) pipelines. SynapseML provides simple, composable, and distributed APIs for a wide variety of different machine learning tasks such as text analytics, vision, anomaly detection, and many others. SynapseML is built on the [Apache Spark distributed computing framework](https://spark.apache.org/) and shares the same API as the [SparkML/MLLib library](https://spark.apache.org/mllib/), allowing you to seamlessly embed SynapseML models into existing Apache Spark workflows. + +With SynapseML, you can build scalable and intelligent systems to solve challenges in domains such as anomaly detection, computer vision, deep learning, text analytics, and others. SynapseML can train and evaluate models on single-node, multi-node, and elastically resizable clusters of computers. This lets you scale your work without wasting resources. SynapseML is usable across Python, R, Scala, Java, and .NET. Furthermore, its API abstracts over a wide variety of databases, file systems, and cloud data stores to simplify experiments no matter where data is located. + +SynapseML requires Scala 2.12, Spark 3.2+, and Python 3.8+. 
+ +import Link from '@docusaurus/Link'; + +Get Started + +## Papers + +- [Large Scale Intelligent Microservices](https://arxiv.org/abs/2009.08044) + +- [Conditional Image Retrieval](https://arxiv.org/abs/2007.07177) + +- [SynapseML: Unifying Machine Learning Ecosystems at Massive Scales](https://arxiv.org/abs/1810.08744) + +- [Flexible and Scalable Deep Learning with MMLSpark](https://arxiv.org/abs/1804.04031) diff --git a/website/docs/documentation/estimators/_LightGBM.md b/docs/Quick Examples/estimators/_LightGBM.md similarity index 100% rename from website/docs/documentation/estimators/_LightGBM.md rename to docs/Quick Examples/estimators/_LightGBM.md diff --git a/website/docs/documentation/estimators/_VW.md b/docs/Quick Examples/estimators/_VW.md similarity index 100% rename from website/docs/documentation/estimators/_VW.md rename to docs/Quick Examples/estimators/_VW.md diff --git a/website/versioned_docs/version-0.11.0/documentation/estimators/causal/_causalInferenceDML.md b/docs/Quick Examples/estimators/causal/_causalInferenceDML.md similarity index 100% rename from website/versioned_docs/version-0.11.0/documentation/estimators/causal/_causalInferenceDML.md rename to docs/Quick Examples/estimators/causal/_causalInferenceDML.md diff --git a/website/docs/documentation/estimators/cognitive/_MAD.md b/docs/Quick Examples/estimators/cognitive/_MAD.md similarity index 100% rename from website/docs/documentation/estimators/cognitive/_MAD.md rename to docs/Quick Examples/estimators/cognitive/_MAD.md diff --git a/website/docs/documentation/estimators/core/_AutoML.md b/docs/Quick Examples/estimators/core/_AutoML.md similarity index 100% rename from website/docs/documentation/estimators/core/_AutoML.md rename to docs/Quick Examples/estimators/core/_AutoML.md diff --git a/website/docs/documentation/estimators/core/_Featurize.md b/docs/Quick Examples/estimators/core/_Featurize.md similarity index 100% rename from website/docs/documentation/estimators/core/_Featurize.md rename to docs/Quick Examples/estimators/core/_Featurize.md diff --git a/website/docs/documentation/estimators/core/_IsolationForest.md b/docs/Quick Examples/estimators/core/_IsolationForest.md similarity index 100% rename from website/docs/documentation/estimators/core/_IsolationForest.md rename to docs/Quick Examples/estimators/core/_IsolationForest.md diff --git a/website/docs/documentation/estimators/core/_NN.md b/docs/Quick Examples/estimators/core/_NN.md similarity index 100% rename from website/docs/documentation/estimators/core/_NN.md rename to docs/Quick Examples/estimators/core/_NN.md diff --git a/website/docs/documentation/estimators/core/_Recommendation.md b/docs/Quick Examples/estimators/core/_Recommendation.md similarity index 100% rename from website/docs/documentation/estimators/core/_Recommendation.md rename to docs/Quick Examples/estimators/core/_Recommendation.md diff --git a/website/docs/documentation/estimators/core/_Stages.md b/docs/Quick Examples/estimators/core/_Stages.md similarity index 100% rename from website/docs/documentation/estimators/core/_Stages.md rename to docs/Quick Examples/estimators/core/_Stages.md diff --git a/website/docs/documentation/estimators/core/_Train.md b/docs/Quick Examples/estimators/core/_Train.md similarity index 100% rename from website/docs/documentation/estimators/core/_Train.md rename to docs/Quick Examples/estimators/core/_Train.md diff --git a/website/versioned_docs/version-0.11.0/documentation/estimators/estimators_causal.md b/docs/Quick 
Examples/estimators/estimators_causal.md similarity index 100% rename from website/versioned_docs/version-0.11.0/documentation/estimators/estimators_causal.md rename to docs/Quick Examples/estimators/estimators_causal.md diff --git a/website/docs/documentation/estimators/estimators_cognitive.md b/docs/Quick Examples/estimators/estimators_cognitive.md similarity index 100% rename from website/docs/documentation/estimators/estimators_cognitive.md rename to docs/Quick Examples/estimators/estimators_cognitive.md diff --git a/website/docs/documentation/estimators/estimators_core.md b/docs/Quick Examples/estimators/estimators_core.md similarity index 100% rename from website/docs/documentation/estimators/estimators_core.md rename to docs/Quick Examples/estimators/estimators_core.md diff --git a/website/docs/documentation/estimators/estimators_lightgbm.md b/docs/Quick Examples/estimators/estimators_lightgbm.md similarity index 100% rename from website/docs/documentation/estimators/estimators_lightgbm.md rename to docs/Quick Examples/estimators/estimators_lightgbm.md diff --git a/website/docs/documentation/estimators/estimators_vw.md b/docs/Quick Examples/estimators/estimators_vw.md similarity index 100% rename from website/docs/documentation/estimators/estimators_vw.md rename to docs/Quick Examples/estimators/estimators_vw.md diff --git a/website/docs/documentation/transformers/_OpenCV.md b/docs/Quick Examples/transformers/_OpenCV.md similarity index 100% rename from website/docs/documentation/transformers/_OpenCV.md rename to docs/Quick Examples/transformers/_OpenCV.md diff --git a/website/docs/documentation/transformers/_VW.md b/docs/Quick Examples/transformers/_VW.md similarity index 100% rename from website/docs/documentation/transformers/_VW.md rename to docs/Quick Examples/transformers/_VW.md diff --git a/website/docs/documentation/transformers/cognitive/_AnomalyDetection.md b/docs/Quick Examples/transformers/cognitive/_AnomalyDetection.md similarity index 100% rename from website/docs/documentation/transformers/cognitive/_AnomalyDetection.md rename to docs/Quick Examples/transformers/cognitive/_AnomalyDetection.md diff --git a/website/docs/documentation/transformers/cognitive/_AzureSearch.md b/docs/Quick Examples/transformers/cognitive/_AzureSearch.md similarity index 100% rename from website/docs/documentation/transformers/cognitive/_AzureSearch.md rename to docs/Quick Examples/transformers/cognitive/_AzureSearch.md diff --git a/website/docs/documentation/transformers/cognitive/_BingImageSearch.md b/docs/Quick Examples/transformers/cognitive/_BingImageSearch.md similarity index 100% rename from website/docs/documentation/transformers/cognitive/_BingImageSearch.md rename to docs/Quick Examples/transformers/cognitive/_BingImageSearch.md diff --git a/website/docs/documentation/transformers/cognitive/_ComputerVision.md b/docs/Quick Examples/transformers/cognitive/_ComputerVision.md similarity index 100% rename from website/docs/documentation/transformers/cognitive/_ComputerVision.md rename to docs/Quick Examples/transformers/cognitive/_ComputerVision.md diff --git a/website/docs/documentation/transformers/cognitive/_Face.md b/docs/Quick Examples/transformers/cognitive/_Face.md similarity index 100% rename from website/docs/documentation/transformers/cognitive/_Face.md rename to docs/Quick Examples/transformers/cognitive/_Face.md diff --git a/website/docs/documentation/transformers/cognitive/_FormRecognizer.md b/docs/Quick Examples/transformers/cognitive/_FormRecognizer.md similarity index 
100% rename from website/docs/documentation/transformers/cognitive/_FormRecognizer.md rename to docs/Quick Examples/transformers/cognitive/_FormRecognizer.md diff --git a/website/docs/documentation/transformers/cognitive/_SpeechToText.md b/docs/Quick Examples/transformers/cognitive/_SpeechToText.md similarity index 100% rename from website/docs/documentation/transformers/cognitive/_SpeechToText.md rename to docs/Quick Examples/transformers/cognitive/_SpeechToText.md diff --git a/website/docs/documentation/transformers/cognitive/_TextAnalytics.md b/docs/Quick Examples/transformers/cognitive/_TextAnalytics.md similarity index 100% rename from website/docs/documentation/transformers/cognitive/_TextAnalytics.md rename to docs/Quick Examples/transformers/cognitive/_TextAnalytics.md diff --git a/website/docs/documentation/transformers/cognitive/_Translator.md b/docs/Quick Examples/transformers/cognitive/_Translator.md similarity index 100% rename from website/docs/documentation/transformers/cognitive/_Translator.md rename to docs/Quick Examples/transformers/cognitive/_Translator.md diff --git a/website/docs/documentation/transformers/core/_Explainers.md b/docs/Quick Examples/transformers/core/_Explainers.md similarity index 100% rename from website/docs/documentation/transformers/core/_Explainers.md rename to docs/Quick Examples/transformers/core/_Explainers.md diff --git a/website/docs/documentation/transformers/core/_Featurize.md b/docs/Quick Examples/transformers/core/_Featurize.md similarity index 100% rename from website/docs/documentation/transformers/core/_Featurize.md rename to docs/Quick Examples/transformers/core/_Featurize.md diff --git a/website/docs/documentation/transformers/core/_IO.md b/docs/Quick Examples/transformers/core/_IO.md similarity index 100% rename from website/docs/documentation/transformers/core/_IO.md rename to docs/Quick Examples/transformers/core/_IO.md diff --git a/website/docs/documentation/transformers/core/_Image.md b/docs/Quick Examples/transformers/core/_Image.md similarity index 100% rename from website/docs/documentation/transformers/core/_Image.md rename to docs/Quick Examples/transformers/core/_Image.md diff --git a/website/docs/documentation/transformers/core/_Stages.md b/docs/Quick Examples/transformers/core/_Stages.md similarity index 100% rename from website/docs/documentation/transformers/core/_Stages.md rename to docs/Quick Examples/transformers/core/_Stages.md diff --git a/website/docs/documentation/transformers/core/_Train.md b/docs/Quick Examples/transformers/core/_Train.md similarity index 100% rename from website/docs/documentation/transformers/core/_Train.md rename to docs/Quick Examples/transformers/core/_Train.md diff --git a/website/docs/documentation/transformers/deep_learning/_ONNXModel.md b/docs/Quick Examples/transformers/deep_learning/_ONNXModel.md similarity index 100% rename from website/docs/documentation/transformers/deep_learning/_ONNXModel.md rename to docs/Quick Examples/transformers/deep_learning/_ONNXModel.md diff --git a/website/docs/documentation/transformers/transformers_cognitive.md b/docs/Quick Examples/transformers/transformers_cognitive.md similarity index 100% rename from website/docs/documentation/transformers/transformers_cognitive.md rename to docs/Quick Examples/transformers/transformers_cognitive.md diff --git a/website/docs/documentation/transformers/transformers_core.md b/docs/Quick Examples/transformers/transformers_core.md similarity index 100% rename from 
website/docs/documentation/transformers/transformers_core.md rename to docs/Quick Examples/transformers/transformers_core.md diff --git a/website/docs/documentation/transformers/transformers_deep_learning.md b/docs/Quick Examples/transformers/transformers_deep_learning.md similarity index 100% rename from website/docs/documentation/transformers/transformers_deep_learning.md rename to docs/Quick Examples/transformers/transformers_deep_learning.md diff --git a/website/docs/documentation/transformers/transformers_opencv.md b/docs/Quick Examples/transformers/transformers_opencv.md similarity index 100% rename from website/docs/documentation/transformers/transformers_opencv.md rename to docs/Quick Examples/transformers/transformers_opencv.md diff --git a/website/docs/documentation/transformers/transformers_vw.md b/docs/Quick Examples/transformers/transformers_vw.md similarity index 100% rename from website/docs/documentation/transformers/transformers_vw.md rename to docs/Quick Examples/transformers/transformers_vw.md diff --git a/website/versioned_docs/version-0.10.1/reference/contributing_guide.md b/docs/Reference/Contributor Guide.md similarity index 97% rename from website/versioned_docs/version-0.10.1/reference/contributing_guide.md rename to docs/Reference/Contributor Guide.md index 341edbd548..e841340082 100644 --- a/website/versioned_docs/version-0.10.1/reference/contributing_guide.md +++ b/docs/Reference/Contributor Guide.md @@ -1,8 +1,8 @@ --- -title: Contributing Guide +title: Contributor Guide hide_title: true -sidebar_label: Contributing Guide -description: Contributing Guide +sidebar_label: Contributor Guide +description: Contributor Guide --- ## Interested in contributing to SynapseML? We're excited to work with you. diff --git a/website/docs/reference/developer-readme.md b/docs/Reference/Developer Setup.md similarity index 87% rename from website/docs/reference/developer-readme.md rename to docs/Reference/Developer Setup.md index 95ab9878e3..b448a2910c 100644 --- a/website/docs/reference/developer-readme.md +++ b/docs/Reference/Developer Setup.md @@ -1,8 +1,8 @@ --- -title: Development Setup and Building From Source +title: Developer Setup hide_title: true -sidebar_label: Development Setup -description: SynapseML Development Setup +sidebar_label: Developer Setup +description: Developer Setup --- # SynapseML Development Setup @@ -40,15 +40,7 @@ description: SynapseML Development Setup - Place it in C:\Program Files\Hadoop\bin - Add an environment variable HADOOP_HOME with value C:\Program Files\Hadoop - Append C:\Program Files\Hadoop\bin to PATH environment variable -1. Install pre-commit - - This repository uses the [pre-commit](https://pre-commit.com/index.html) tool to manage git hooks and enforce linting/coding styles. - - The hooks are configured in [.pre-commit-config.yaml](https://github.com/microsoft/SynapseML/blob/master/environment.yml). - - To use the hooks, run the following commands: - ```bash - pip install pre-commit - pre-commit install - ``` - - Now `pre-commit` should automatically run on every `git commit` operation to find AND fix linting issues. 
+ > NOTE > diff --git a/website/versioned_docs/version-0.9.4/reference/docker.md b/docs/Reference/Docker Setup.md similarity index 97% rename from website/versioned_docs/version-0.9.4/reference/docker.md rename to docs/Reference/Docker Setup.md index 42d38b0a66..b65dcb4394 100644 --- a/website/versioned_docs/version-0.9.4/reference/docker.md +++ b/docs/Reference/Docker Setup.md @@ -1,7 +1,7 @@ --- -title: Using the SynapseML Docker Image -sidebar_label: Docker Image -description: Using the SynapseML Docker Image +title: Docker Setup +sidebar_label: Docker Setup +description: Docker Setup --- ## Quickstart: install and run the Docker image @@ -32,7 +32,7 @@ You can now select one of the sample notebooks and run it, or create your own. In the preceding docker command, `mcr.microsoft.com/mmlspark/release` specifies the project and image name that you want to run. There's another component implicit here: the _tsag_ (= version) that you want to use. Specifying it explicitly looks like -`mcr.microsoft.com/mmlspark/release:0.9.4` for the `0.9.4` tag. +`mcr.microsoft.com/mmlspark/release:0.11.2` for the `0.11.2` tag. Leaving `mcr.microsoft.com/mmlspark/release` by itself has an implicit `latest` tag, so it's equivalent to `mcr.microsoft.com/mmlspark/release:latest`. The `latest` tag is identical to the @@ -48,7 +48,7 @@ that you'll probably want to use can look as follows: docker run -it --rm \ -p 127.0.0.1:80:8888 \ -v ~/myfiles:/notebooks/myfiles \ - mcr.microsoft.com/mmlspark/release:0.9.4 + mcr.microsoft.com/mmlspark/release:0.11.2 ``` In this example, backslashes are for readability; you @@ -58,7 +58,7 @@ path and line breaks looks a little different: docker run -it --rm ` -p 127.0.0.1:80:8888 ` -v C:\myfiles:/notebooks/myfiles ` - mcr.microsoft.com/mmlspark/release:0.9.4 + mcr.microsoft.com/mmlspark/release:0.11.2 Let's break this command and go over the meaning of each part: @@ -141,7 +141,7 @@ Let's break this command and go over the meaning of each part: model.write().overwrite().save('myfiles/myTrainedModel.mml') ``` -- **`mcr.microsoft.com/mmlspark/release:0.9.4`** +- **`mcr.microsoft.com/mmlspark/release:0.11.2`** Finally, this argument specifies an explicit version tag for the image that we want to run. diff --git a/website/versioned_docs/version-0.10.1/reference/dotnet-setup.md b/docs/Reference/Dotnet Setup.md similarity index 95% rename from website/versioned_docs/version-0.10.1/reference/dotnet-setup.md rename to docs/Reference/Dotnet Setup.md index c2506932a4..f0ccd7d7b6 100644 --- a/website/versioned_docs/version-0.10.1/reference/dotnet-setup.md +++ b/docs/Reference/Dotnet Setup.md @@ -2,7 +2,7 @@ title: .NET setup hide_title: true sidebar_label: .NET setup -description: .NET setup and example for SynapseML +description: .NET setup --- import Tabs from '@theme/Tabs'; @@ -37,7 +37,7 @@ for a Windows x64 machine or jdk-8u231-macosx-x64.dmg for macOS. Then, use the c ### 3. Install Apache Spark [Download and install Apache Spark](https://spark.apache.org/downloads.html) with version >= 3.2.0. -(SynapseML v0.10.1 only supports spark version >= 3.2.0) +(SynapseML v0.11.2 only supports spark version >= 3.2.0) Extract downloaded zipped files (with 7-Zip app on Windows or `tar` on linux) and remember the location of extracted files, we take `~/bin/spark-3.2.0-bin-hadoop3.2/` as an example here. 
@@ -127,7 +127,7 @@ In your command prompt or terminal, run the following command: dotnet add package Microsoft.Spark --version 2.1.1 ``` :::note -This tutorial uses Microsoft.Spark version 2.1.1 as SynapseML 0.10.1 depends on it. +This tutorial uses Microsoft.Spark version 2.1.1 as SynapseML 0.11.2 depends on it. Change to corresponding version if necessary. ::: @@ -137,7 +137,7 @@ In your command prompt or terminal, run the following command: ```powershell # Update Nuget Config to include SynapseML Feed dotnet nuget add source https://mmlspark.blob.core.windows.net/synapsemlnuget/index.json -n SynapseMLFeed -dotnet add package SynapseML.Cognitive --version 0.10.1 +dotnet add package SynapseML.Cognitive --version 0.11.2 ``` The `dotnet nuget add` command adds SynapseML's resolver to the source, so that our package can be found. @@ -202,7 +202,7 @@ namespace SynapseMLApp of Apache Spark applications, which manages the context and information of your application. A DataFrame is a way of organizing data into a set of named columns. -Create a [TextSentiment](https://mmlspark.blob.core.windows.net/docs/0.10.1/dotnet/classSynapse_1_1ML_1_1Cognitive_1_1TextSentiment.html) +Create a [TextSentiment](https://mmlspark.blob.core.windows.net/docs/0.11.2/dotnet/classSynapse_1_1ML_1_1Cognitive_1_1TextSentiment.html) instance, set corresponding subscription key and other configurations. Then, apply transformation to the dataframe, which analyzes the sentiment based on each row, and stores result into output column. @@ -218,9 +218,9 @@ dotnet build Navigate to your build output directory. For example, in Windows you could run `cd bin\Debug\net5.0`. Use the spark-submit command to submit your application to run on Apache Spark. ```powershell -spark-submit --class org.apache.spark.deploy.dotnet.DotnetRunner --packages com.microsoft.azure:synapseml_2.12:0.10.1 --master local microsoft-spark-3-2_2.12-2.1.1.jar dotnet SynapseMLApp.dll +spark-submit --class org.apache.spark.deploy.dotnet.DotnetRunner --packages com.microsoft.azure:synapseml_2.12:0.11.2 --master local microsoft-spark-3-2_2.12-2.1.1.jar dotnet SynapseMLApp.dll ``` -`--packages com.microsoft.azure:synapseml_2.12:0.10.1` specifies the dependency on synapseml_2.12 version 0.10.1; +`--packages com.microsoft.azure:synapseml_2.12:0.11.2` specifies the dependency on synapseml_2.12 version 0.11.2; `microsoft-spark-3-2_2.12-2.1.1.jar` specifies Microsoft.Spark version 2.1.1 and Spark version 3.2 :::note This command assumes you have downloaded Apache Spark and added it to your PATH environment variable so that you can use spark-submit. @@ -238,7 +238,7 @@ When your app runs, the sentiment analysis result is written to the console. +-----------------------------------------+--------+-----+--------------------------------------------------+ ``` Congratulations! You successfully authored and ran a .NET for SynapseML app. -Refer to the [developer docs](https://mmlspark.blob.core.windows.net/docs/0.10.1/dotnet/index.html) for API guidance. +Refer to the [developer docs](https://mmlspark.blob.core.windows.net/docs/0.11.2/dotnet/index.html) for API guidance. 
## Next diff --git a/website/docs/getting_started/dotnet_example.md b/docs/Reference/Quickstart - LightGBM in Dotnet.md similarity index 97% rename from website/docs/getting_started/dotnet_example.md rename to docs/Reference/Quickstart - LightGBM in Dotnet.md index 7b1da80fa6..579c1b897c 100644 --- a/website/docs/getting_started/dotnet_example.md +++ b/docs/Reference/Quickstart - LightGBM in Dotnet.md @@ -1,11 +1,11 @@ --- -title: .NET Example with LightGBMClassifier -sidebar_label: .NET example +title: Quickstart - LightGBM in Dotnet +sidebar_label: Quickstart - LightGBM in Dotnet description: A simple example about classification with LightGBMClassifier using .NET --- :::note -Make sure you have followed the guidance in [.NET installation](../reference/dotnet-setup.md) before jumping into this example. +Make sure you have followed the guidance in [.NET installation](../Dotnet%20Setup) before jumping into this example. ::: ## Classification with LightGBMClassifier @@ -13,8 +13,8 @@ Make sure you have followed the guidance in [.NET installation](../reference/dot Install NuGet packages by running following command: ```powershell dotnet add package Microsoft.Spark --version 2.1.1 -dotnet add package SynapseML.Lightgbm --version 0.11.1 -dotnet add package SynapseML.Core --version 0.11.1 +dotnet add package SynapseML.Lightgbm --version 0.11.2 +dotnet add package SynapseML.Core --version 0.11.2 ``` Use the following code in your main program file: @@ -91,7 +91,7 @@ namespace SynapseMLApp Run `dotnet build` to build the project. Then navigate to build output directory, and run following command: ```powershell -spark-submit --class org.apache.spark.deploy.dotnet.DotnetRunner --packages com.microsoft.azure:synapseml_2.12:0.11.1,org.apache.hadoop:hadoop-azure:3.3.1 --master local microsoft-spark-3-2_2.12-2.1.1.jar dotnet SynapseMLApp.dll +spark-submit --class org.apache.spark.deploy.dotnet.DotnetRunner --packages com.microsoft.azure:synapseml_2.12:0.11.2,org.apache.hadoop:hadoop-azure:3.3.1 --master local microsoft-spark-3-2_2.12-2.1.1.jar dotnet SynapseMLApp.dll ``` :::note Here we added two packages: synapseml_2.12 for SynapseML's scala source, and hadoop-azure to support reading files from ADLS. diff --git a/website/docs/reference/R-setup.md b/docs/Reference/R Setup.md similarity index 97% rename from website/docs/reference/R-setup.md rename to docs/Reference/R Setup.md index 36de37a67d..45f98a91f5 100644 --- a/website/docs/reference/R-setup.md +++ b/docs/Reference/R Setup.md @@ -55,7 +55,7 @@ Installing all dependencies may be time-consuming. When complete, run: library(sparklyr) library(dplyr) config <- spark_config() -config$sparklyr.defaultPackages <- "com.microsoft.azure:synapseml_2.12:0.11.1" +config$sparklyr.defaultPackages <- "com.microsoft.azure:synapseml_2.12:0.11.2" sc <- spark_connect(master = "local", config = config) ... ``` @@ -120,7 +120,7 @@ and then use spark_connect with method = "databricks": ```R install.packages("devtools") -devtools::install_url("https://mmlspark.azureedge.net/rrr/synapseml-0.11.1.zip") +devtools::install_url("https://mmlspark.azureedge.net/rrr/synapseml-0.11.2.zip") library(sparklyr) library(dplyr) sc <- spark_connect(method = "databricks") @@ -132,7 +132,7 @@ ml_train_regressor(faithful_df, labelCol="eruptions", unfit_model) ## Building from Source Our R bindings are built as part of the [normal build -process](developer-readme.md). To get a quick build, start at the root +process](../Developer%20Setup). 
To get a quick build, start at the root of the synapseml directory, and find the generated files. For instance, to find the R files for deep-learning, run diff --git a/website/versioned_docs/version-0.11.0/mlflow/autologging.md b/docs/Use with MLFlow/Autologging.md similarity index 97% rename from website/versioned_docs/version-0.11.0/mlflow/autologging.md rename to docs/Use with MLFlow/Autologging.md index 76149e72fb..b440434e3e 100644 --- a/website/versioned_docs/version-0.11.0/mlflow/autologging.md +++ b/docs/Use with MLFlow/Autologging.md @@ -1,5 +1,6 @@ --- -title: SynapseML Autologging +title: Autologging +sidebar_label: Autologging description: SynapseML autologging --- @@ -23,7 +24,7 @@ Note: ## Configuration process in Databricks as an example -1. Install latest MLflow via `%pip install mlflow -u` +1. Install latest MLflow via `%pip install mlflow` 2. Upload your customized `log_model_allowlist.txt` file to dbfs by clicking File/Upload Data button on Databricks UI. 3. Set Cluster Spark configuration following [this documentation](https://docs.microsoft.com/en-us/azure/databricks/clusters/configure#spark-configuration) ``` diff --git a/docs/Use with MLFlow/Install.md b/docs/Use with MLFlow/Install.md new file mode 100644 index 0000000000..f85f524812 --- /dev/null +++ b/docs/Use with MLFlow/Install.md @@ -0,0 +1,4 @@ +--- +title: Install +description: install Mlflow on different environments +--- diff --git a/website/versioned_docs/version-0.11.0/mlflow/examples.md b/docs/Use with MLFlow/Overview.md similarity index 53% rename from website/versioned_docs/version-0.11.0/mlflow/examples.md rename to docs/Use with MLFlow/Overview.md index f1745b3aeb..c6956e72ad 100644 --- a/website/versioned_docs/version-0.11.0/mlflow/examples.md +++ b/docs/Use with MLFlow/Overview.md @@ -1,26 +1,91 @@ --- -title: Examples -description: Examples using SynapseML with MLflow +title: Overview +description: MLflow support of SynapseML --- -## Prerequisites +## What is MLflow -If you're using Databricks, install mlflow with this command: +[MLflow](https://github.com/mlflow/mlflow) is a platform to streamline machine learning development, including tracking experiments, packaging code into reproducible runs, and sharing and deploying models. MLflow offers a set of lightweight APIs that can be used with any existing machine learning application or library, for instance TensorFlow, PyTorch, XGBoost, etc. It runs wherever you currently run ML code, for example, in notebooks, standalone applications or the cloud. MLflow's current components are: + +* [MLflow Tracking](https://mlflow.org/docs/latest/tracking.html): An API to log parameters, code, and results in machine learning experiments and compare them using an interactive UI. +* [MLflow Projects](https://mlflow.org/docs/latest/projects.html): A code packaging format for reproducible runs using Conda and Docker, so you can share your ML code with others. +* [MLflow Models](https://mlflow.org/docs/latest/models.html): A model packaging format and tools that let you easily deploy the same model from any ML library for both batch and real-time scoring. It supports platforms such as Docker, Apache Spark, Azure ML and AWS SageMaker. +* [MLflow Model Registry](https://mlflow.org/docs/latest/model-registry.html): A centralized model store, set of APIs, and UI, to collaboratively manage the full lifecycle of MLflow Models. + + +## Installation + +Install MLflow from PyPI via `pip install mlflow` + +MLflow requires `conda` to be on the `PATH` for the projects feature. 
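As a quick sanity check of a local MLflow installation, a minimal tracking run can be used (a sketch only; the experiment name and logged values below are arbitrary placeholders, not part of SynapseML):

```python
import mlflow

# Minimal local tracking run; results are written to ./mlruns by default.
mlflow.set_experiment("synapseml-mlflow-smoke-test")  # arbitrary example name
with mlflow.start_run():
    mlflow.log_param("learning_rate", 0.1)  # example parameter
    mlflow.log_metric("accuracy", 0.95)     # example metric
```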
+ +Learn more about MLflow on their [GitHub page](https://github.com/mlflow/mlflow). + + +### Install MLflow on Databricks + +If you're using Databricks, install MLflow with this command: ``` -# run this so that mlflow is installed on workers besides driver +# run this so that MLflow is installed on the workers as well as the driver %pip install mlflow ``` -Install SynapseML based on the [installation guidance](../getting_started/installation.md). +### Install MLflow on Synapse +To log models with MLflow, you need to create an Azure Machine Learning workspace and link it with your Synapse workspace. + +#### Create an Azure Machine Learning Workspace + +Follow this document to create an [AML workspace](https://learn.microsoft.com/en-us/azure/machine-learning/quickstart-create-resources#create-the-workspace). You don't need to create a compute instance or compute clusters. + +#### Create an Azure ML Linked Service + + + +- In the Synapse workspace, go to **Manage** -> **External connections** -> **Linked services**, select **+ New** +- Select the workspace you want to log the model in and create the linked service. You need the **name of the linked service** to set up the connection. -## API Reference + +#### Authorize the Synapse Workspace + + +- Go to the **Azure Machine Learning workspace** resource -> **Access control (IAM)** -> **Role assignment**, select **+ Add**, and choose **Add role assignment** +- Choose **Contributor**, then select **Next** +- On the **Members** page, choose **Managed identity**, then select **+ Select members**. Under **Managed identity**, choose the Synapse workspace. Under **Select**, choose the workspace you run your experiment on. Click **Select**, then **Review + assign**. + + +#### Use MLflow in Synapse with a Linked Service +Set up the connection: +```python + +import mlflow + +# AML workspace authentication using the linked service +from notebookutils.mssparkutils import azureML +linked_service_name = "YourLinkedServiceName" +ws = azureML.getWorkspace(linked_service_name) +mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri()) + +# Set the MLflow experiment +experiment_name = "synapse-mlflow-experiment" +mlflow.set_experiment(experiment_name) +``` + +#### Use MLflow in Synapse without a Linked Service +Once you create an AML workspace, you can obtain the MLflow tracking URL directly from the AML workspace start page.
+ +You can set it tracking url with +```python +mlflow.set_tracking_uri("your mlflow tracking url") +``` + + +## MLFlow API Reference * [mlflow.spark.save_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.save_model) * [mlflow.spark.log_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.log_model) * [mlflow.spark.load_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.load_model) * [mlflow.log_metric](https://mlflow.org/docs/latest/python_api/mlflow.html#mlflow.log_metric) -## LightGBMClassificationModel +## Examples + +### LightGBMClassifier ```python import mlflow @@ -95,7 +160,7 @@ with mlflow.start_run(): mlflow.log_metric("accuracy", metrics[0]['accuracy']) ``` -## Cognitive Services +### Cognitive Services ```python import mlflow diff --git a/docs/manifest.yaml b/docs/manifest.yaml deleted file mode 100644 index d16b02a576..0000000000 --- a/docs/manifest.yaml +++ /dev/null @@ -1,6 +0,0 @@ -version: 0.1 -channels: - - name: website - is_active: true - notebooks: - - path: "./notebooks/features/" diff --git a/docs/python/documentprojection/__init__.py b/docs/python/documentprojection/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/python/documentprojection/__main__.py b/docs/python/documentprojection/__main__.py deleted file mode 100644 index fb25b42fc3..0000000000 --- a/docs/python/documentprojection/__main__.py +++ /dev/null @@ -1,112 +0,0 @@ -from __future__ import absolute_import - -from .utils.manifest import parse_manifest -from .utils.reflection import * -from .utils.logging import * -from .utils.notebook import * -from .framework.pipeline import * -from .channels import default_channels - -import re - -log = get_log(__name__) - - -def get_channel_map(custom_channels_folder, cwd): - - sys.path.insert(0, "documentprojection") - - def camel_to_snake(name): - s1 = re.sub("(.)([A-Z][a-z]+)", r"\1_\2", name) - s2 = re.sub("([a-z0-9])([A-Z])", r"\1_\2", s1).lower() - return s2.replace("_channel", "") - - channels = default_channels.copy() - if custom_channels_folder is not None and len(custom_channels_folder) > 0: - channels.extend(get_channels_from_dir(custom_channels_folder, cwd)) - log.info(f"All channels: {channels}") - - channel_map = { - k: v - for k, v in [ - (camel_to_snake(channel.__name__), channel) for channel in channels - ] - } - return channel_map - - -def parse_args(): - log_level_choices = ["debug", "info", "warn", "error", "critical"] - - import argparse - - parser = argparse.ArgumentParser(description="Document Projection Pipeline") - parser.add_argument( - "project_root", - metavar="ROOT", - type=str, - help="the root directory of the project", - default=".", - ) - parser.add_argument( - "manifest", - metavar="MANIFEST", - type=str, - help="a notebook or folder of notebooks to project", - default="docs/manifest.yaml", - ) - parser.add_argument( - "-f", "--format", action="store_true", default=False, help="run only formatters" - ) - parser.add_argument( - "-p", - "--publish", - action="store_true", - default=False, - help="run publishers. forces -t and -f.", - ) - parser.add_argument( - "-c", - "--channels", - default="console", - type=str, - help="A channel or comma-separated list of channels through which the notebook(s) should be processed. 
defaults to console if not specified.", - ) - parser.add_argument( - "--customchannels", - type=str, - default=None, - help="A folder containing custom channel implementations.", - ) - parser.add_argument( - "-v", - "--loglevel", - choices=log_level_choices, - default="info", - help="set log level", - ) - return parser.parse_args() - - -def run(): - args = parse_args() - config_log(args.loglevel) - log.debug("script executed with args: {}".format(args)) - - args.project_root = os.path.abspath(args.project_root) - - if args.manifest is not None: - import json - - log.info(f"Reading manifest file: {args.manifest}.") - args.manifest = parse_manifest(args.manifest) - log.debug(f"Manifest:\n{json.dumps(args.manifest, indent=4, sort_keys=True)}") - - channel_map = get_channel_map(args.customchannels, args.project_root) - pipeline = DocumentProjectionPipeline( - channel_map, config=PipelineConfig(vars(args)) - ) - pipeline.run() - - -run() diff --git a/docs/python/documentprojection/channels/__init__.py b/docs/python/documentprojection/channels/__init__.py deleted file mode 100644 index 9bcca8458f..0000000000 --- a/docs/python/documentprojection/channels/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from ..utils.reflection import get_subclasses -from ..framework import Channel - -default_channels = get_subclasses(__name__, Channel) diff --git a/docs/python/documentprojection/channels/console.py b/docs/python/documentprojection/channels/console.py deleted file mode 100644 index 1d034cd465..0000000000 --- a/docs/python/documentprojection/channels/console.py +++ /dev/null @@ -1,37 +0,0 @@ -from ..framework import * -from ..utils.logging import get_log -from ..framework.markdown import MarkdownFormatter - -log = get_log(__name__) - -# A sample Console (no-operation) channel that 'publishes' to the console. Useful for testing. -class ConsoleDoc(Document): - def __init__(self, content, metadata): - self.content = content - self.metadata = metadata - - -class ConsoleFormatter(MarkdownFormatter): - def clean_markdown(self, markdown: str) -> str: - return markdown - - def get_header(self, notebook: Notebook) -> str: - return "This is a test header injected by the 'console' formatter." 
- - def get_metadata(self, notebook: Notebook) -> dict: - notebook.metadata.update( - {"source_path": notebook.path, "target_path": "stdout"} - ) - return notebook.metadata - - -class ConsolePublisher(Publisher): - def publish(self, document: Document) -> bool: - print(document.content) - return True - - -class ConsoleChannel(Channel): - def __init__(self, _): - self.formatter = ConsoleFormatter() - self.publisher = ConsolePublisher() diff --git a/docs/python/documentprojection/framework/__init__.py b/docs/python/documentprojection/framework/__init__.py deleted file mode 100644 index 0e86c7e5c5..0000000000 --- a/docs/python/documentprojection/framework/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .objects import * diff --git a/docs/python/documentprojection/framework/markdown.py b/docs/python/documentprojection/framework/markdown.py deleted file mode 100644 index bbe518e8c4..0000000000 --- a/docs/python/documentprojection/framework/markdown.py +++ /dev/null @@ -1,29 +0,0 @@ -from abc import ABC, abstractmethod -from nbconvert import MarkdownExporter - -from ..framework.objects import * - -# Sample base formatter for documents that are projected to markdown, include some cleaning, and inject a header -class MarkdownFormatter(ABC): - def _add_header(markdown: str, header: str): - content = f"{header}\n{markdown}" - return content - - def _to_markdown(notebook: Notebook) -> str: - exporter = MarkdownExporter() - markdown, _ = exporter.from_notebook_node(notebook.data) - return markdown - - @abstractmethod - def clean_markdown(self, markdown: str) -> str: - pass - - @abstractmethod - def get_header(self, notebook: Notebook) -> str: - pass - - def format(self, notebook: Notebook) -> Document: - markdown = MarkdownFormatter._to_markdown(notebook) - markdown = self.clean_markdown(markdown) - content = MarkdownFormatter._add_header(markdown, self.get_header(notebook)) - return Document(content, self.get_metadata(notebook)) diff --git a/docs/python/documentprojection/framework/objects.py b/docs/python/documentprojection/framework/objects.py deleted file mode 100644 index c6d64e7c05..0000000000 --- a/docs/python/documentprojection/framework/objects.py +++ /dev/null @@ -1,98 +0,0 @@ -from abc import ABC, abstractmethod -from ..utils.notebook import * -from ..utils.logging import get_log - - -def _defaultrepr(cls): - def __repr__(self): - return type(self).__name__ - - cls.__repr__ = __repr__ - return cls - - -class Notebook: - def __init__(self, path, metadata: dict = {}): - self.path: str = path - self.metadata: dict = metadata - self.data: NotebookNode = Notebook._parse(self.path) - self._repr = "Notebook(...{})".format("\\".join(self.path.split("\\")[-3:])) - - def __repr__(self): - return self._repr - - def _parse(path: str) -> NotebookNode: - return read(path, as_version=4) - - -class DocumentMetadata: - def __init__(self, source_path: str, target_path: str, dimensions: dict = {}): - self.source_path = source_path - self.target_path = target_path - self.dimensions = dimensions - self.dimensions["target_path"] = target_path - - def __repr__(self): - return f"{repr(self.source_path)}:{repr(self.dimensions)}" - - -class Document: - def __init__(self, content, metadata: DocumentMetadata): - self.content = content - self.metadata = metadata - - def __repr__(self): - return f"{repr(self.metadata)}" - - -@_defaultrepr -class Formatter(ABC): - @abstractmethod - def format(self, notebook: Notebook) -> Document: - pass - - @abstractmethod - def get_metadata(self, notebook: Notebook) -> DocumentMetadata: 
- pass - - -@_defaultrepr -class Publisher(ABC): - @abstractmethod - def publish(self, document: Document) -> bool: - pass - - -class ChannelMetadata(dict): - def __init__(self, dict: dict): - self.__dict__.update(dict) - - def __repr__(self): - return repr(self.__dict__) - - project_root = None - - -@_defaultrepr -class Channel(ABC): - def __init__( - self, formatter: Formatter, publisher: Publisher, config: ChannelMetadata - ): - self.formatter: Formatter = formatter - self.publisher: Publisher = publisher - self.config: ChannelMetadata = config - - def format(self, notebook: Notebook) -> Document: - instance_log = get_log(self.__class__.__name__) - instance_log.debug(f"Formatting {notebook}") - content = self.formatter.format(notebook) - instance_log.debug(f"Done formatting {notebook}.") - return content - - def publish(self, document: Document) -> bool: - instance_log = get_log(self.__class__.__name__) - instance_log.debug(f"Publishing {document}") - succeeded = self.publisher.publish(document) - instance_log.debug( - f"Publishing {document} {'SUCCEEDED' if succeeded else 'FAILED'}" - ) diff --git a/docs/python/documentprojection/framework/pipeline.py b/docs/python/documentprojection/framework/pipeline.py deleted file mode 100644 index 55053f7d7a..0000000000 --- a/docs/python/documentprojection/framework/pipeline.py +++ /dev/null @@ -1,92 +0,0 @@ -from typing import List -from ..utils.logging import get_log -from .objects import * -from ..utils.notebook import * -from ..utils.parallelism import process_in_parallel - -log = get_log(__name__) - - -class PipelineConfig(dict): - def __init__(self, dict: dict): - self.__dict__.update(dict) - - format = None - publish = None - channel = None - project_root = None - manifest = None - - -class DocumentProjectionPipeline: - def __init__(self, channel_map: dict, config: PipelineConfig = PipelineConfig({})): - self.channel_map = channel_map - self.config = config - - def run(self) -> None: - log.debug( - f"""DocumentProjectionPipeline running with: - Mode: {self.config}, - Config: {self.config}""" - ) - - channels = self.config.manifest["channels"] - - if len(channels) == 0: - raise Exception("No channels registered.") - - if not self.config.publish: - log.warn(f"PUBLISH mode not enabled. Skipping publish step.") - - for channel_config in channels: - if channel_config["name"] not in self.channel_map: - raise Exception( - f"Channel declared in manifest but no implementation was found: {channel_config['name']}. If this is a custom channel, make sure you have specified the custom channels folder." 
- ) - - for channel_config in channels: - if channel_config["is_active"] == False: - log.info( - f"Skipping channel marked as inactive: {channel_config['name']}" - ) - continue - channel_metadata = ChannelMetadata( - { - key: self.config.__dict__[key] - for key in ["project_root"] - if key in self.config.__dict__ - } - ) - if "metadata" in channel_config: - channel_metadata.update(channel_config["metadata"]) - channel = self.channel_map[channel_config["name"]](channel_metadata) - - notebook_metadata = channel_config["notebooks"] - notebooks = [] - for entry in notebook_metadata: - parsed_notebooks = parse_notebooks([entry["path"]], recursive=True) - notebooks.extend( - [ - Notebook(parsed_notebook, metadata=entry) - for parsed_notebook in parsed_notebooks - ] - ) - log.info( - f"Processing {len(notebooks)} notebooks in parallel for: {repr(channel)}" - ) - - formatted_documents = process_in_parallel(channel.format, notebooks) - if self.config.publish: - process_in_parallel(channel.publish, formatted_documents) - if self.config.format: - for i in range(len(notebooks)): - log.info( - "Formatted content for {}:\n{}".format( - notebooks[i], formatted_documents[i].content - ) - ) - log.info(f"End formatted content for {notebooks[i]}") - - -def collect_notebooks(paths: List[str], recursive: bool) -> List[Notebook]: - return [Notebook(nb) for nb in parse_notebooks(paths, recursive)] diff --git a/docs/python/documentprojection/utils/__init__.py b/docs/python/documentprojection/utils/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/docs/python/documentprojection/utils/logging.py b/docs/python/documentprojection/utils/logging.py deleted file mode 100644 index 86cd8fc690..0000000000 --- a/docs/python/documentprojection/utils/logging.py +++ /dev/null @@ -1,17 +0,0 @@ -import logging - -logging.basicConfig( - level="INFO", format="%(asctime)s | %(name)s | %(levelname)s | %(message)s" -) - - -def config_log(level: str): - logging.basicConfig( - level=level.upper(), - format="%(asctime)s | %(name)s | %(levelname)s | %(message)s", - force=True, - ) - - -def get_log(name: str): - return logging.getLogger(name) diff --git a/docs/python/documentprojection/utils/manifest.py b/docs/python/documentprojection/utils/manifest.py deleted file mode 100644 index bb79165634..0000000000 --- a/docs/python/documentprojection/utils/manifest.py +++ /dev/null @@ -1,20 +0,0 @@ -from .logging import * - -import yaml - -log = get_log(__name__) - - -def parse_manifest(manifest_path): - PARSER_VERSION = 0.1 - with open(manifest_path, "r") as file: - try: - data = yaml.safe_load(file) - if float(data["version"]) > PARSER_VERSION: - raise Exception( - f"Manifest version {data['version']} is greater than parser version {PARSER_VERSION}. Failing." - ) - return data - except yaml.YAMLError as error: - log.error("Failed to parse manifest file. 
Failing.") - raise error diff --git a/docs/python/documentprojection/utils/mock_notebook.ipynb b/docs/python/documentprojection/utils/mock_notebook.ipynb deleted file mode 100644 index c5c538e63c..0000000000 --- a/docs/python/documentprojection/utils/mock_notebook.ipynb +++ /dev/null @@ -1,35 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": { - "tags": [] - }, - "source": [ - "# Mock Title\n", - "Mock Text" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "tags": [] - }, - "outputs": [], - "source": [ - "# Mock Comment\n", - "print(\"Mock Print Statement\")" - ] - } - ], - "metadata": { - "language_info": { - "name": "python" - }, - "tags": [] - }, - "nbformat": 4, - "nbformat_minor": 0 -} diff --git a/docs/python/documentprojection/utils/notebook.py b/docs/python/documentprojection/utils/notebook.py deleted file mode 100644 index a5b6115ffb..0000000000 --- a/docs/python/documentprojection/utils/notebook.py +++ /dev/null @@ -1,64 +0,0 @@ -import os -from typing import List -from nbformat import NotebookNode, read -from pathlib import Path -from .logging import get_log - -log = get_log(__name__) - - -def get_mock_path(): - return str( - os.path.join(os.path.dirname(os.path.realpath(__file__)), "mock_notebook.ipynb") - ) - - -def parse_notebooks(notebooks: List[str], recursive=False) -> List[str]: - if type(notebooks) is not list: - raise ValueError( - f"Notebooks must be a list of paths. Received {type(notebooks)}." - ) - concrete_notebook_paths = [] - ignored_directories = [] - for notebook in notebooks: - notebook = os.path.abspath(notebook) - if not os.path.exists(notebook): - raise ValueError( - f"Specified notebook path {repr(notebook)} does not exist." - ) - is_dir = os.path.isdir(notebook) - if not is_dir: - if not notebook.endswith(".ipynb"): - raise ValueError( - f"Specified notebook path {notebook} is not a notebook. Notebooks must have a .ipynb extension." - ) - concrete_notebook_paths.append(notebook) - - # non-recursively scan for notebooks in the given directory - if is_dir and not recursive: - for file_or_dir in os.listdir(notebook): - abs_path = os.path.join(notebook, file_or_dir) - if file_or_dir.endswith(".ipynb"): - concrete_notebook_paths.append(abs_path) - if os.path.isdir(abs_path): - ignored_directories.append(abs_path) - - if is_dir and recursive: - for root, _, files in os.walk(notebook): - for file_or_dir in files: - if file_or_dir.endswith(".ipynb"): - concrete_notebook_paths.append(os.path.join(root, file_or_dir)) - - if len(ignored_directories) > 0 and not recursive: - log.warn( - "Recursive flag is not set. 
Ignoring the following directories:\n {}".format( - "\n ".join(ignored_directories) - ) - ) - - num_notebooks = len(concrete_notebook_paths) - leveled_log = log.warning if num_notebooks == 0 else log.debug - leveled_log( - f"Found {num_notebooks} notebooks to process: {repr(concrete_notebook_paths)}" - ) - return concrete_notebook_paths diff --git a/docs/python/documentprojection/utils/parallelism.py b/docs/python/documentprojection/utils/parallelism.py deleted file mode 100644 index bfe5c328b3..0000000000 --- a/docs/python/documentprojection/utils/parallelism.py +++ /dev/null @@ -1,32 +0,0 @@ -import os -import concurrent.futures -import threading -from typing import List -from tqdm import tqdm - -_global_lock = threading.Lock() - -_locks = {} - - -def process_in_parallel(func, data: List): - results = [] - with tqdm(total=len(data)) as progress: - with concurrent.futures.ThreadPoolExecutor( - max_workers=os.cpu_count() - ) as executor: - for result in executor.map(func, data): - progress.update() - results.append(result) - return results - - -def get_global_lock(): - return _global_lock - - -def get_lock(key): - with _global_lock: - if key not in _locks: - _locks[key] = threading.Lock() - return _locks[key] diff --git a/docs/python/documentprojection/utils/reflection.py b/docs/python/documentprojection/utils/reflection.py deleted file mode 100644 index 7dc817a6f0..0000000000 --- a/docs/python/documentprojection/utils/reflection.py +++ /dev/null @@ -1,79 +0,0 @@ -import sys -import inspect -import importlib -import inspect -import os -import sys -from pathlib import Path -import pathlib - -from ..utils.logging import get_log - -log = get_log(__name__) - - -def get_subclasses(module, class_): - # Get the directory of the current module - current_module_dir = os.path.dirname(sys.modules[module].__file__) - - # Get all the python files in the current module directory - files = [f for f in os.listdir(current_module_dir) if f.endswith(".py")] - - # Dynamically import all modules in the current package - modules = [ - importlib.import_module("." 
+ f[:-3], module) - for f in files - if not f.startswith("__") - ] - - # Get all members of each imported module - all_members = [inspect.getmembers(module) for module in modules] - all_members = [item[1] for sublist in all_members for item in sublist] - - # Filter out only the classes that are children of the Channel parent class - return [ - m - for m in all_members - if inspect.isclass(m) and issubclass(m, class_) and m != class_ - ] - - -def insert_current_module_into_syspath(cwd): - current_file_path = Path(__file__) - current_directory = current_file_path.parent.parent.parent - import_path = os.path.relpath(current_directory.resolve(), cwd) - sys.path.insert(0, import_path) - - -def get_channels_from_dir(dir_, cwd): - log.info(f"Importing channels from {dir_} with cwd {cwd}") - insert_current_module_into_syspath(cwd) - files = [ - file.absolute() - for file in pathlib.Path(dir_).glob("**/*.py") - if not file.absolute().name.startswith("__") - ] - modules = [] - for file_path in files: - module_name = os.path.basename(file_path.resolve()).replace(".py", "") - spec = importlib.util.spec_from_file_location(module_name, file_path) - module = importlib.util.module_from_spec(spec) - modules.append(module) - sys.modules[module_name] = module - spec.loader.exec_module(module) - - log.info(f"found extra modules: {modules}") - - # Get all members of each imported module - all_members = [inspect.getmembers(module) for module in modules] - all_members = [item[1] for sublist in all_members for item in sublist] - - from documentprojection.framework.objects import Channel - - channels = [ - m - for m in all_members - if inspect.isclass(m) and issubclass(m, Channel) and m != Channel - ] - - return channels diff --git a/docs/python/synapseml_channels/website.py b/docs/python/synapseml_channels/website.py deleted file mode 100644 index bd2693dc8e..0000000000 --- a/docs/python/synapseml_channels/website.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -import re - -from documentprojection.framework import * -from documentprojection.utils.logging import get_log -from documentprojection.framework.markdown import MarkdownFormatter -from documentprojection.utils.parallelism import get_lock - -log = get_log(__name__) - - -def get_project_root() -> str: - """Returns project root folder.""" - # root of parent module - filepath = Path(os.path.abspath(os.path.join(os.getcwd(), __file__))) - return os.path.abspath(filepath.parent.parent.parent.parent.parent) - - -class WebsiteDoc(Document): - def __init__(self, content, metadata): - self.content = content - self.metadata = metadata - - -class WebsiteFormatter(MarkdownFormatter): - def __init__(self, config: ChannelMetadata): - self.config = config - - def clean_markdown(self, markdown: str) -> str: - markdown = re.sub(r"style=\"[\S ]*?\"", "", markdown) - markdown = re.sub(r"", "", markdown) - return markdown - - def get_header(self, notebook: Notebook) -> str: - filename = os.path.basename(notebook.path).replace(".ipynb", "") - return f"---\ntitle: {filename}\nhide_title: true\nstatus: stable\n---" - - def get_metadata(self, notebook: Notebook) -> DocumentMetadata: - feature_dir = os.path.basename(os.path.dirname(notebook.path)) - file_name = os.path.basename(notebook.path).replace("ipynb", "md") - website_path = os.path.join( - self.config.project_root, - "website", - "docs", - "features", - feature_dir, - file_name, - ) - return DocumentMetadata(notebook.path, website_path) - - -class WebsitePublisher(Publisher): - def publish(self, document: Document) -> bool: - 
dir_name = os.path.dirname(document.metadata.target_path) - with get_lock(dir_name): - if not os.path.exists(dir_name): - os.makedirs(dir_name) - with open(document.metadata.target_path, "w", encoding="utf-8") as f: - f.write(document.content) - - # TODO: run orchestrate yarn - - -class WebsiteChannel(Channel): - def __init__(self, config: ChannelMetadata): - self.formatter = WebsiteFormatter(config) - self.publisher = WebsitePublisher() diff --git a/environment.yml b/environment.yml index 8879472629..5c61ae0378 100644 --- a/environment.yml +++ b/environment.yml @@ -46,3 +46,8 @@ dependencies: - openai==0.27.5 - black==22.3.0 - black[jupyter]==22.3.0 + - mistletoe + - pypandoc + - markdownify + - traitlets + diff --git a/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/BasePartitionTask.scala b/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/BasePartitionTask.scala index 030dcfc317..6dccaa84f6 100644 --- a/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/BasePartitionTask.scala +++ b/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/BasePartitionTask.scala @@ -324,6 +324,9 @@ abstract class BasePartitionTask extends Serializable with Logging { s" shouldExecuteTraining: $shouldExecuteTraining, isEmptyPartition: $isEmptyPartition") val shouldCalcValidationDataset = trainingCtx.sharedState.validationDatasetWorker.getOrElse(-1) == taskId + if (trainingCtx.hasValidationData) + log.info(s"Validation data found. Task: $taskId, PartId: $partitionId. Main task: $mainExecutorWorkerId" + + s" shouldCalcValidationDataset: $shouldCalcValidationDataset, isEmptyPartition: $isEmptyPartition") PartitionTaskContext(trainingCtx, partitionId, diff --git a/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/NetworkManager.scala b/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/NetworkManager.scala index 197a8e4208..4644d49e2b 100644 --- a/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/NetworkManager.scala +++ b/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/NetworkManager.scala @@ -163,22 +163,25 @@ object NetworkManager { // and a list of partition ids in this executor. 
val lightGbmMachineList = driverInput.readLine() val partitionsByExecutorStr = driverInput.readLine() - val executorPartitionIds: Array[Int] = - parseExecutorPartitionList(partitionsByExecutorStr, taskStatus.executorId) - log.info(s"task $taskId, partition $partitionId received nodes for network init: '$lightGbmMachineList'") log.info(s"task $taskId, partition $partitionId received partition topology: '$partitionsByExecutorStr'") + log.info(s"task $taskId, partition $partitionId received nodes for network init: '$lightGbmMachineList'") + val executorPartitionIds: Array[Int] = + parseExecutorPartitionList(partitionsByExecutorStr, taskStatus.executorId, log) NetworkTopologyInfo(lightGbmMachineList, executorPartitionIds, localListenPort) }.get }.get } - private def parseExecutorPartitionList(partitionsByExecutorStr: String, executorId: String): Array[Int] = { + private def parseExecutorPartitionList(partitionsByExecutorStr: String, + executorId: String, + log: Logger): Array[Int] = { // extract this executors partition ids as an array, from a string that is formatter like this: // executor1=partition1,partition2:executor2=partition3,partition4 val partitionsByExecutor = partitionsByExecutorStr.split(":") val executorListStr = partitionsByExecutor.find(line => line.startsWith(executorId + "=")) if (executorListStr.isEmpty) - throw new Exception(s"Could not find partitions for executor $executorListStr. List: $partitionsByExecutorStr") + throw new Exception(s"Could not find partitions for executor $executorId. List: $partitionsByExecutorStr") + log.info(s"executor $executorId received partitions: '$executorListStr'") val partitionList = executorListStr.get.split("=")(1) partitionList.split(",").map(str => str.toInt).sorted } diff --git a/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/StreamingPartitionTask.scala b/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/StreamingPartitionTask.scala index f04517f6e7..98cac95f51 100644 --- a/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/StreamingPartitionTask.scala +++ b/lightgbm/src/main/scala/com/microsoft/azure/synapse/ml/lightgbm/StreamingPartitionTask.scala @@ -106,7 +106,7 @@ class StreamingPartitionTask extends BasePartitionTask { if (!shouldExecuteTraining && !isEmptyPartition) ctx.sharedState().incrementDataPrepDoneSignal(log) // First dataset to reach here calculates the validation Dataset if needed - if (ctx.hasValidationData) { + if (ctx.hasValidationData && !isEmptyPartition) { ctx.sharedState().linkValidationDatasetWorker() } } @@ -174,15 +174,12 @@ class StreamingPartitionTask extends BasePartitionTask { val partitionRowCount = ctx.trainingCtx.partitionCounts.get(ctx.partitionId).toInt val partitionRowOffset = ctx.streamingPartitionOffset val isSparse = ctx.sharedState.isSparse.get - log.info(s"Inserting rows into training Dataset from partition ${ctx.partitionId}, " + + log.debug(s"Inserting rows into training Dataset from partition ${ctx.partitionId}, " + s"size $partitionRowCount, offset: $partitionRowOffset, sparse: $isSparse, threadId: ${ctx.threadIndex}") val dataset = ctx.sharedState.datasetState.streamingDataset.get val stopIndex = partitionRowOffset + partitionRowCount insertRowsIntoDataset(ctx, dataset, inputRows, partitionRowOffset, stopIndex, ctx.threadIndex) - - log.info(s"Part ${ctx.partitionId}: inserted $partitionRowCount partition ${ctx.partitionId} " + - s"rows into shared training dataset at offset $partitionRowOffset") } private def insertRowsIntoDataset(ctx: 
PartitionTaskContext, @@ -213,9 +210,7 @@ class StreamingPartitionTask extends BasePartitionTask { if (maxBatchSize == 0) 0 else loadOneDenseMicroBatchBuffer(state, inputRows, 0, maxBatchSize) if (count > 0) { - log.info(s"Part ${state.ctx.partitionId}: Pushing $count dense rows at $startIndex, will stop at $stopIndex") if (state.hasInitialScores && state.microBatchSize != count && state.numInitScoreClasses > 1) { - log.info(s"Part ${state.ctx.partitionId}: Adjusting $count initial scores") (1 until state.numInitScoreClasses).foreach { i => (0 until count).foreach { j => { val score = state.initScoreBuffer.getItem(i * state.microBatchSize + j) @@ -253,7 +248,6 @@ class StreamingPartitionTask extends BasePartitionTask { if (microBatchRowCount > 0) { // If we have only a partial micro-batch, and we have multi-class initial scores (i.e. numClass > 1), // we need to re-coalesce the data since it was stored column-wise based on original microBatchSize - log.info(s"Part ${state.ctx.partitionId}: Pushing $microBatchRowCount sparse rows at $startIndex") if (state.hasInitialScores && state.microBatchSize != microBatchRowCount && state.numInitScoreClasses > 1) { (1 until state.numInitScoreClasses).foreach { i => // TODO make this shared (0 until microBatchRowCount).foreach { j => { @@ -279,8 +273,6 @@ class StreamingPartitionTask extends BasePartitionTask { // might be more rows, so continue with tail recursion at next index pushSparseMicroBatches(state, inputRows, startIndex + microBatchRowCount, stopIndex) - } else { - log.info(s"LightGBM pushed $startIndex in partition ${state.ctx.partitionId}") } } diff --git a/notebooks/features/cognitive_services/CognitiveServices - Multivariate Anomaly Detection.ipynb b/notebooks/features/cognitive_services/CognitiveServices - Multivariate Anomaly Detection.ipynb deleted file mode 100644 index c8a1bfc4ac..0000000000 --- a/notebooks/features/cognitive_services/CognitiveServices - Multivariate Anomaly Detection.ipynb +++ /dev/null @@ -1,1140 +0,0 @@ -{ - "cells": [ - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Recipe: Cognitive Services - Multivariate Anomaly Detection \n", - "This recipe shows how you can use SynapseML and Azure Cognitive Services on Apache Spark for multivariate anomaly detection. Multivariate anomaly detection allows for the detection of anomalies among many variables or time series, taking into account all the inter-correlations and dependencies between the different variables. In this scenario, we use SynapseML to train a model for multivariate anomaly detection using the Azure Cognitive Services, and we then use to the model to infer multivariate anomalies within a dataset containing synthetic measurements from three IoT sensors. \n", - "\n", - "To learn more about the Anomaly Detector Cognitive Service please refer to [ this documentation page](https://docs.microsoft.com/en-us/azure/cognitive-services/anomaly-detector/). \n", - "\n", - "### Prerequisites\n", - "- An Azure subscription - [Create one for free](https://azure.microsoft.com/en-us/free/)\n", - "\n", - "### Setup\n", - "#### Create an Anomaly Detector resource\n", - "Follow the instructions below to create an `Anomaly Detector` resource using the Azure portal or alternatively, you can also use the Azure CLI to create this resource.\n", - "\n", - "- In the Azure Portal, click `Create` in your resource group, and then type `Anomaly Detector`. 
Click on the Anomaly Detector resource.\n", - "- Give the resource a name, and ideally use the same region as the rest of your resource group. Use the default options for the rest, and then click `Review + Create` and then `Create`.\n", - "- Once the Anomaly Detector resource is created, open it and click on the `Keys and Endpoints` panel on the left. Copy the key for the Anomaly Detector resource into the `ANOMALY_API_KEY` environment variable, or store it in the `anomalyKey` variable in the cell below.\n", - "\n", - "#### Create a Storage Account resource\n", - "In order to save intermediate data, you will need to create an Azure Blob Storage Account. Within that storage account, create a container for storing the intermediate data. Make note of the container name, and copy the connection string to that container. You will need this later to populate the `containerName` variable and the `BLOB_CONNECTION_STRING` environment variable." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Enter your service keys\n", - "Let's start by setting up the environment variables for our service keys. The next cell sets the `ANOMALY_API_KEY` and the `BLOB_CONNECTION_STRING` environment variables based on the values stored in our Azure Key Vault. If you are running this in your own environment, make sure you set these environment variables before you proceed." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
" - ] - }, - "metadata": { - "application/vnd.databricks.v1+output": { - "addedWidgets": {}, - "arguments": {}, - "data": "
", - "datasetInfos": [], - "metadata": {}, - "removedWidgets": [], - "type": "html" - } - }, - "output_type": "display_data" - } - ], - "source": [ - "import os\n", - "from pyspark.sql import SparkSession\n", - "from synapse.ml.core.platform import find_secret\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, lets read the `ANOMALY_API_KEY` and `BLOB_CONNECTION_STRING` environment variables and set the `containerName` and `location` variables." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
" - ] - }, - "metadata": { - "application/vnd.databricks.v1+output": { - "addedWidgets": {}, - "arguments": {}, - "data": "
", - "datasetInfos": [], - "metadata": {}, - "removedWidgets": [], - "type": "html" - } - }, - "output_type": "display_data" - } - ], - "source": [ - "# An Anomaly Dectector subscription key\n", - "anomalyKey = find_secret(\"anomaly-api-key\")\n", - "# Your storage account name\n", - "storageName = \"anomalydetectiontest\"\n", - "# A connection string to your blob storage account\n", - "storageKey = find_secret(\"madtest-storage-key\")\n", - "# A place to save intermediate MVAD results\n", - "intermediateSaveDir = (\n", - " \"wasbs://madtest@anomalydetectiontest.blob.core.windows.net/intermediateData\"\n", - ")\n", - "# The location of the anomaly detector resource that you created\n", - "location = \"westus2\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "First we will connect to our storage account so that anomaly detector can save intermediate results there:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false, - "pycharm": { - "name": "#%%\n" - } - }, - "outputs": [], - "source": [ - "spark.sparkContext._jsc.hadoopConfiguration().set(\n", - " f\"fs.azure.account.key.{storageName}.blob.core.windows.net\", storageKey\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "collapsed": false - }, - "source": [ - "Let's import all the necessary modules." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "201891b5-7ec3-4350-bdfa-306a265d2b44", - "showTitle": false, - "title": "" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
" - ] - }, - "metadata": { - "application/vnd.databricks.v1+output": { - "addedWidgets": {}, - "arguments": {}, - "data": "
", - "datasetInfos": [], - "metadata": {}, - "removedWidgets": [], - "type": "html" - } - }, - "output_type": "display_data" - } - ], - "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "\n", - "import pyspark\n", - "from pyspark.sql.functions import col\n", - "from pyspark.sql.functions import lit\n", - "from pyspark.sql.types import DoubleType\n", - "import matplotlib.pyplot as plt\n", - "\n", - "import synapse.ml\n", - "from synapse.ml.cognitive import *" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now, let's read our sample data into a Spark DataFrame." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "58080b22-fff1-463b-ad80-0639d475ec89", - "showTitle": false, - "title": "" - } - }, - "outputs": [], - "source": [ - "df = (\n", - " spark.read.format(\"csv\")\n", - " .option(\"header\", \"true\")\n", - " .load(\"wasbs://publicwasb@mmlspark.blob.core.windows.net/MVAD/sample.csv\")\n", - ")\n", - "\n", - "df = (\n", - " df.withColumn(\"sensor_1\", col(\"sensor_1\").cast(DoubleType()))\n", - " .withColumn(\"sensor_2\", col(\"sensor_2\").cast(DoubleType()))\n", - " .withColumn(\"sensor_3\", col(\"sensor_3\").cast(DoubleType()))\n", - ")\n", - "\n", - "# Let's inspect the dataframe:\n", - "df.show(5)" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "e9bd6780-dcd1-4ee6-8116-eb2b4c6950c9", - "showTitle": false, - "title": "" - } - }, - "source": [ - "We can now create an `estimator` object, which will be used to train our model. In the cell below, we specify the start and end times for the training data. We also specify the input columns to use, and the name of the column that contains the timestamps. Finally, we specify the number of data points to use in the anomaly detection sliding window, and we set the connection string to the Azure Blob Storage Account. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "38beb5f0-8a46-439e-886f-3ffd06066e8c", - "showTitle": false, - "title": "" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
" - ] - }, - "metadata": { - "application/vnd.databricks.v1+output": { - "addedWidgets": {}, - "arguments": {}, - "data": "
", - "datasetInfos": [], - "metadata": {}, - "removedWidgets": [], - "type": "html" - } - }, - "output_type": "display_data" - } - ], - "source": [ - "trainingStartTime = \"2020-06-01T12:00:00Z\"\n", - "trainingEndTime = \"2020-07-02T17:55:00Z\"\n", - "timestampColumn = \"timestamp\"\n", - "inputColumns = [\"sensor_1\", \"sensor_2\", \"sensor_3\"]\n", - "\n", - "estimator = (\n", - " SimpleFitMultivariateAnomaly()\n", - " .setSubscriptionKey(anomalyKey)\n", - " .setLocation(location)\n", - " .setStartTime(trainingStartTime)\n", - " .setEndTime(trainingEndTime)\n", - " .setIntermediateSaveDir(intermediateSaveDir)\n", - " .setTimestampCol(timestampColumn)\n", - " .setInputCols(inputColumns)\n", - " .setSlidingWindow(200)\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Now that we have created the `estimator`, let's fit it to the data:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "820249ea-8520-458e-9365-ad15e8d3583e", - "showTitle": false, - "title": "" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
" - ] - }, - "metadata": { - "application/vnd.databricks.v1+output": { - "addedWidgets": {}, - "arguments": {}, - "data": "
", - "datasetInfos": [], - "metadata": {}, - "removedWidgets": [], - "type": "html" - } - }, - "output_type": "display_data" - } - ], - "source": [ - "model = estimator.fit(df)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Once the training is done, we can now use the model for inference. The code in the next cell specifies the start and end times for the data we would like to detect the anomalies in. It will then show the results." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "89b54ad2-3474-4e1e-a9c7-829e703831d0", - "showTitle": false, - "title": "" - } - }, - "outputs": [], - "source": [ - "inferenceStartTime = \"2020-07-02T18:00:00Z\"\n", - "inferenceEndTime = \"2020-07-06T05:15:00Z\"\n", - "\n", - "result = (\n", - " model.setStartTime(inferenceStartTime)\n", - " .setEndTime(inferenceEndTime)\n", - " .setOutputCol(\"results\")\n", - " .setErrorCol(\"errors\")\n", - " .setInputCols(inputColumns)\n", - " .setTimestampCol(timestampColumn)\n", - " .transform(df)\n", - ")\n", - "\n", - "result.show(5)" - ] - }, - { - "attachments": {}, - "cell_type": "markdown", - "metadata": {}, - "source": [ - "When we called `.show(5)` in the previous cell, it showed us the first five rows in the dataframe. The results were all `null` because they were not inside the inference window.\n", - "\n", - "To show the results only for the inferred data, lets select the columns we need. We can then order the rows in the dataframe by ascending order, and filter the result to only show the rows that are in the range of the inference window. In our case `inferenceEndTime` is the same as the last row in the dataframe, so can ignore that. \n", - "\n", - "Finally, to be able to better plot the results, lets convert the Spark dataframe to a Pandas dataframe.\n", - "\n", - "This is what the next cell does:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "18c9be87-c4e7-4221-9135-b80b3788c43e", - "showTitle": false, - "title": "" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
[cell output: a toPandas UserWarning that Arrow optimization failed because the 'contributors' column has an unsupported type (ArrayType(StructType(contributionScore: DoubleType, variable: StringType))) and conversion fell back to the non-Arrow path, followed by a rendered preview of the resulting dataframe: 1000 rows × 7 columns (timestamp, sensor_1, sensor_2, sensor_3, contributors, isAnomaly, severity), where only the 2020-07-02T18:05:00Z row is flagged as an anomaly (severity 0.06478) with non-null contributors.]
", - "datasetInfos": [], - "metadata": {}, - "removedWidgets": [], - "textData": null, - "type": "htmlSandbox" - } - }, - "output_type": "display_data" - } - ], - "source": [ - "rdf = (\n", - " result.select(\n", - " \"timestamp\",\n", - " *inputColumns,\n", - " \"results.interpretation\",\n", - " \"isAnomaly\",\n", - " \"results.severity\"\n", - " )\n", - " .orderBy(\"timestamp\", ascending=True)\n", - " .filter(col(\"timestamp\") >= lit(inferenceStartTime))\n", - " .toPandas()\n", - ")\n", - "\n", - "rdf" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Let's now format the `interpretation` column that stores the contribution score from each sensor to the detected anomalies. The next cell formats this data, and splits the contribution score of each sensor into its own column." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "5b4e072f-e0e6-4362-a321-bbfae41dea0c", - "showTitle": false, - "title": "" - } - }, - "outputs": [ - { - "data": { - "text/html": [ - "\n", - "
[cell output Out[9]: a rendered preview of the reshaped dataframe: 1000 rows × 9 columns (timestamp, sensor_1, sensor_2, sensor_3, isAnomaly, severity, series_0, series_1, series_2), where the per-sensor contribution scores in series_0, series_1, and series_2 are non-zero only for the anomalous 2020-07-02T18:05:00Z row.]
", - "datasetInfos": [], - "metadata": {}, - "removedWidgets": [], - "textData": null, - "type": "htmlSandbox" - } - }, - "output_type": "display_data" - } - ], - "source": [ - "def parse(x):\n", - " if len(x) > 0:\n", - " return dict([item[:2] for item in x])\n", - " else:\n", - " return {\"sensor_1\": 0, \"sensor_2\": 0, \"sensor_3\": 0}\n", - "\n", - "\n", - "rdf[\"contributors\"] = rdf[\"interpretation\"].apply(parse)\n", - "rdf = pd.concat(\n", - " [\n", - " rdf.drop([\"contributors\"], axis=1),\n", - " pd.json_normalize(rdf[\"contributors\"]).rename(\n", - " columns={\n", - " \"sensor_1\": \"series_1\",\n", - " \"sensor_2\": \"series_2\",\n", - " \"sensor_3\": \"series_3\",\n", - " }\n", - " ),\n", - " ],\n", - " axis=1,\n", - ")\n", - "rdf" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "67943277-ef55-4a84-a478-0e89dbf33d6a", - "showTitle": false, - "title": "" - } - }, - "source": [ - "Great! We now have the contribution scores of sensors 1, 2, and 3 in the `series_0`, `series_1`, and `series_2` columns respectively. \n", - "\n", - "Let's run the next cell to plot the results. The `minSeverity` parameter in the first line specifies the minimum severity of the anomalies to be plotted. " - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "5b259f82-9e91-4034-b5f9-4a2bc49a59ef", - "showTitle": false, - "title": "" - } - }, - "outputs": [], - "source": [ - "minSeverity = 0.1\n", - "\n", - "\n", - "####### Main Figure #######\n", - "plt.figure(figsize=(23, 8))\n", - "plt.plot(\n", - " rdf[\"timestamp\"],\n", - " rdf[\"sensor_1\"],\n", - " color=\"tab:orange\",\n", - " linestyle=\"solid\",\n", - " linewidth=2,\n", - " label=\"sensor_1\",\n", - ")\n", - "plt.plot(\n", - " rdf[\"timestamp\"],\n", - " rdf[\"sensor_2\"],\n", - " color=\"tab:green\",\n", - " linestyle=\"solid\",\n", - " linewidth=2,\n", - " label=\"sensor_2\",\n", - ")\n", - "plt.plot(\n", - " rdf[\"timestamp\"],\n", - " rdf[\"sensor_3\"],\n", - " color=\"tab:blue\",\n", - " linestyle=\"solid\",\n", - " linewidth=2,\n", - " label=\"sensor_3\",\n", - ")\n", - "plt.grid(axis=\"y\")\n", - "plt.tick_params(axis=\"x\", which=\"both\", bottom=False, labelbottom=False)\n", - "plt.legend()\n", - "\n", - "anoms = list(rdf[\"severity\"] >= minSeverity)\n", - "_, _, ymin, ymax = plt.axis()\n", - "plt.vlines(np.where(anoms), ymin=ymin, ymax=ymax, color=\"r\", alpha=0.8)\n", - "\n", - "plt.legend()\n", - "plt.title(\n", - " \"A plot of the values from the three sensors with the detected anomalies highlighted in red.\"\n", - ")\n", - "plt.show()\n", - "\n", - "####### Severity Figure #######\n", - "plt.figure(figsize=(23, 1))\n", - "plt.tick_params(axis=\"x\", which=\"both\", bottom=False, labelbottom=False)\n", - "plt.plot(\n", - " rdf[\"timestamp\"],\n", - " rdf[\"severity\"],\n", - " color=\"black\",\n", - " linestyle=\"solid\",\n", - " linewidth=2,\n", - " label=\"Severity score\",\n", - ")\n", - "plt.plot(\n", - " rdf[\"timestamp\"],\n", - " [minSeverity] * len(rdf[\"severity\"]),\n", - " color=\"red\",\n", - " linestyle=\"dotted\",\n", - " linewidth=1,\n", - " label=\"minSeverity\",\n", - ")\n", - "plt.grid(axis=\"y\")\n", - "plt.legend()\n", - "plt.ylim([0, 1])\n", - "plt.title(\"Severity of the detected anomalies\")\n", - "plt.show()\n", - "\n", - "####### Contributors Figure #######\n", - "plt.figure(figsize=(23, 1))\n", - 
"plt.tick_params(axis=\"x\", which=\"both\", bottom=False, labelbottom=False)\n", - "plt.bar(\n", - " rdf[\"timestamp\"], rdf[\"series_1\"], width=2, color=\"tab:orange\", label=\"sensor_1\"\n", - ")\n", - "plt.bar(\n", - " rdf[\"timestamp\"],\n", - " rdf[\"series_2\"],\n", - " width=2,\n", - " color=\"tab:green\",\n", - " label=\"sensor_2\",\n", - " bottom=rdf[\"series_1\"],\n", - ")\n", - "plt.bar(\n", - " rdf[\"timestamp\"],\n", - " rdf[\"series_3\"],\n", - " width=2,\n", - " color=\"tab:blue\",\n", - " label=\"sensor_3\",\n", - " bottom=rdf[\"series_1\"] + rdf[\"series_2\"],\n", - ")\n", - "plt.grid(axis=\"y\")\n", - "plt.legend()\n", - "plt.ylim([0, 1])\n", - "plt.title(\"The contribution of each sensor to the detected anomaly\")\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "" - ] - }, - { - "cell_type": "markdown", - "metadata": { - "application/vnd.databricks.v1+cell": { - "inputWidgets": {}, - "nuid": "d999ebc4-320b-45ab-9196-f3067e06ccd5", - "showTitle": false, - "title": "" - } - }, - "source": [ - "The plots above show the raw data from the sensors (inside the inference window) in orange, green, and blue. The red vertical lines in the first figure show the detected anomalies that have a severity greater than or equal to `minSeverity`. \n", - "\n", - "The second plot shows the severity score of all the detected anomalies, with the `minSeverity` threshold shown in the dotted red line.\n", - "\n", - "Finally, the last plot shows the contribution of the data from each sensor to the detected anomalies. This helps us diagnose and understand the most likely cause of each anomaly." - ] - } - ], - "metadata": { - "application/vnd.databricks.v1+notebook": { - "dashboards": [], - "language": "python", - "notebookMetadata": { - "pythonIndentUnit": 4 - }, - "notebookName": "sample_mvad_notebook", - "notebookOrigID": 595270988434496, - "widgets": {} - }, - "kernelspec": { - "display_name": "Python 3.8.5 ('base')", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.5" - }, - "vscode": { - "interpreter": { - "hash": "601a75c4c141f401603984f1538447337114e368c54c4d5b589ea94315afdca2" - } - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/notebooks/features/regression/Regression - Flight Delays.ipynb b/notebooks/features/regression/Regression - Flight Delays.ipynb deleted file mode 100644 index b28777756b..0000000000 --- a/notebooks/features/regression/Regression - Flight Delays.ipynb +++ /dev/null @@ -1,210 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Regression - Flight Delays\n", - "\n", - "In this example, we run a linear regression on the *Flight Delay* dataset to predict the delay times.\n", - "\n", - "We demonstrate how to use the `TrainRegressor` and the `ComputePerInstanceStatistics` APIs.\n", - "\n", - "First, import the packages." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from pyspark.sql import SparkSession\n", - "\n", - "# Bootstrap Spark Session\n", - "spark = SparkSession.builder.getOrCreate()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import numpy as np\n", - "import pandas as pd\n", - "import synapse.ml" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Next, import the CSV dataset." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "flightDelay = spark.read.parquet(\n", - " \"wasbs://publicwasb@mmlspark.blob.core.windows.net/On_Time_Performance_2012_9.parquet\"\n", - ")\n", - "# print some basic info\n", - "print(\"records read: \" + str(flightDelay.count()))\n", - "print(\"Schema: \")\n", - "flightDelay.printSchema()\n", - "flightDelay.limit(10).toPandas()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Split the dataset into train and test sets." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "train, test = flightDelay.randomSplit([0.75, 0.25])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Train a regressor on dataset with `l-bfgs`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from synapse.ml.train import TrainRegressor, TrainedRegressorModel\n", - "from pyspark.ml.regression import LinearRegression\n", - "from pyspark.ml.feature import StringIndexer\n", - "\n", - "# Convert columns to categorical\n", - "catCols = [\"Carrier\", \"DepTimeBlk\", \"ArrTimeBlk\"]\n", - "trainCat = train\n", - "testCat = test\n", - "for catCol in catCols:\n", - " simodel = StringIndexer(inputCol=catCol, outputCol=catCol + \"Tmp\").fit(train)\n", - " trainCat = (\n", - " simodel.transform(trainCat)\n", - " .drop(catCol)\n", - " .withColumnRenamed(catCol + \"Tmp\", catCol)\n", - " )\n", - " testCat = (\n", - " simodel.transform(testCat)\n", - " .drop(catCol)\n", - " .withColumnRenamed(catCol + \"Tmp\", catCol)\n", - " )\n", - "lr = LinearRegression().setRegParam(0.1).setElasticNetParam(0.3)\n", - "model = TrainRegressor(model=lr, labelCol=\"ArrDelay\").fit(trainCat)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Save, load, or Score the regressor on the test data." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from synapse.ml.core.platform import *\n", - "\n", - "if running_on_synapse():\n", - " model_name = \"/models/flightDelayModel.mml\"\n", - "elif running_on_synapse_internal():\n", - " model_name = \"Files/models/flightDelayModel.mml\"\n", - "elif running_on_databricks():\n", - " model_name = \"dbfs:/flightDelayModel.mml\"\n", - "else:\n", - " model_name = \"/tmp/flightDelayModel.mml\"\n", - "\n", - "model.write().overwrite().save(model_name)\n", - "flightDelayModel = TrainedRegressorModel.load(model_name)\n", - "\n", - "scoredData = flightDelayModel.transform(testCat)\n", - "scoredData.limit(10).toPandas()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Compute model metrics against the entire scored dataset" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from synapse.ml.train import ComputeModelStatistics\n", - "\n", - "metrics = ComputeModelStatistics().transform(scoredData)\n", - "metrics.toPandas()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, compute and show per-instance statistics, demonstrating the usage\n", - "of `ComputePerInstanceStatistics`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from synapse.ml.train import ComputePerInstanceStatistics\n", - "\n", - "evalPerInstance = ComputePerInstanceStatistics().transform(scoredData)\n", - "evalPerInstance.select(\"ArrDelay\", \"prediction\", \"L1_loss\", \"L2_loss\").limit(\n", - " 10\n", - ").toPandas()" - ] - } - ], - "metadata": { - "anaconda-cloud": {}, - "kernelspec": { - "display_name": "Python [default]", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.3" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/pipeline.yaml b/pipeline.yaml index 0d373d4124..09f083512a 100644 --- a/pipeline.yaml +++ b/pipeline.yaml @@ -10,9 +10,7 @@ trigger: - README.md - CONTRIBUTORS.md - SECURITY.md - - docs/* - CODEOWNERS - - .github pr: branches: @@ -21,13 +19,14 @@ pr: paths: exclude: - README.md - - docs/* + - CONTRIBUTORS.md + - SECURITY.md - CODEOWNERS - - .github schedules: - cron: "0 0 * * *" displayName: Daily midnight build + always: true branches: include: - master @@ -94,7 +93,6 @@ jobs: source activate synapseml sbt packagePython sbt publishBlob publishDocs publishR publishPython uploadNotebooks - sbt publishSigned sbt genBuildInfo echo "##vso[task.uploadsummary]$(pwd)/target/Build.md" displayName: Publish Artifacts diff --git a/src/test/scala/com/microsoft/azure/synapse/ml/core/test/fuzzing/FuzzingTest.scala b/src/test/scala/com/microsoft/azure/synapse/ml/core/test/fuzzing/FuzzingTest.scala index e9a9ba0354..23e9dba612 100644 --- a/src/test/scala/com/microsoft/azure/synapse/ml/core/test/fuzzing/FuzzingTest.scala +++ b/src/test/scala/com/microsoft/azure/synapse/ml/core/test/fuzzing/FuzzingTest.scala @@ -382,56 +382,6 @@ class FuzzingTest extends TestBase { } } - test("Scan codebase for secrets") { - val excludedFiles = List( - ".png", - ".jpg", - ".jpeg") - val excludedDirs = List( - ".git", - ".idea", - "target", - ".docusaurus", - "node_modules", - 
s"website${File.separator}build"
-    )
-
-    val regexps: List[Regex] = using(Source.fromURL(Secrets.SecretRegexpFile)) { s =>
-      s.getLines().toList.map(_.r)
-    }.get
-
-    val allFiles = Files.walk(BuildInfo.baseDirectory.getParentFile.toPath)
-      .iterator().asScala.map(_.toFile)
-      .filterNot(f => excludedDirs.exists(dir => f.toString.contains(dir)))
-      .toList
-
-    val nameIssues = allFiles.flatMap {
-      case f if regexps.flatMap(_.findFirstMatchIn(f.toString)).nonEmpty =>
-        Some(s"Bad file name: ${f.toString}")
-      case _ =>
-        None
-    }
-    val contentsIssue = allFiles.filter(_.isFile)
-      .filterNot(f => excludedFiles.exists(end => f.toString.endsWith(end)))
-      .flatMap { f =>
-        println(f)
-        try {
-          val lines = using(Source.fromFile(f)) { s => s.getLines().toList }.get
-          lines.zipWithIndex.flatMap { case (l, i) =>
-            if (regexps.flatMap(_.findFirstMatchIn(l)).nonEmpty) {
-              Some(s"Line $i of file ${f.toString} contains secrets")
-            } else {
-              None
-            }
-          }
-        } catch {
-          case _: MalformedInputException => List()
-        }
-      }
-    val allIssues = nameIssues ++ contentsIssue
-    assert(allIssues.isEmpty, allIssues.mkString("\n"))
-  }
-
   private def assertOrLog(condition: Boolean, hint: String = "", disableFailure: Boolean = disableFailure): Unit = {
     if (disableFailure && !condition) println(hint)
diff --git a/start b/start
index 3c1805ce7f..b8ba0e8621 100644
--- a/start
+++ b/start
@@ -3,7 +3,7 @@
 export OPENMPI_VERSION="3.1.2"
 export SPARK_VERSION="3.3.1"
 export HADOOP_VERSION="2.7"
-export SYNAPSEML_VERSION="0.11.1" # Binder compatibility version
+export SYNAPSEML_VERSION="0.11.2" # Binder compatibility version

 echo "Beginning Spark Session..."
 exec "$@"
diff --git a/tools/docgen/README.md b/tools/docgen/README.md
new file mode 100644
index 0000000000..fad893969b
--- /dev/null
+++ b/tools/docgen/README.md
@@ -0,0 +1,81 @@
+# Doc generation pipeline onboarding - Fabric channel
+
+Please edit the rst file to meet the Fabric doc requirements.
+
+## Set manifest.yaml
+
+Write a manifest file with the filenames and metadata:
+```
+channels:
+  - name: docgen.channels.FabricChannel
+    input_dir: path to input folder
+    output_dir: path to output folder
+    notebooks:
+      - path: path/under/input/dir/filename1.rst
+        metadata:
+          title: title 1
+          description: description 1
+          ms.topic: eg:overview
+          ms.custom: build-2023
+          ms.reviewer: reviewers' Microsoft alias
+          author: authors' github usernames
+          ms.author: authors' Microsoft alias
+      - path: path/under/input/dir/filename2.ipynb
+        metadata:
+          title: title 2
+          description: description 2
+          ms.topic: eg:overview
+          ms.custom: build-2023
+          ms.reviewer: reviewers' Microsoft alias
+          author: authors' github usernames
+          ms.author: authors' Microsoft alias
+```
+
+## Run the tool
+
+```bash
+cd tools/docgen
+pip install -e .
+
+python -m docgen --manifest docgen-manifest.yaml
+```
+
+## Modify input files
+
+### Image alt text
+
+Please add alt text to all images to meet the Fabric doc requirements.
+#### rst file
+For each image, add alt text.
+
+eg:
+
+```
+.. image::
+    media/an-example.png
+```
+
+Change it to
+```
+.. 
image:: + media/an-example.png + :alt: the-alt-text-you-want-for this image +``` + +#### Notebook file +Set image url in Notebook (Markdown format): +``` +![image-alt-text](image_url) +``` + +### Remove Locale information from URLs +Please remove all locale information from urls from https://docs.microsoft.com and https://learn.microsoft.com +eg: + +``` +https://learn.microsoft.com/en-us/fabric/onelake/onelake-overview +``` +Change it to +``` +https://learn.microsoft.com/fabric/onelake/onelake-overview +``` diff --git a/docs/python/LICENSE.txt b/tools/docgen/docgen/LICENSE.txt similarity index 100% rename from docs/python/LICENSE.txt rename to tools/docgen/docgen/LICENSE.txt diff --git a/docs/python/MANIFEST.in b/tools/docgen/docgen/MANIFEST.in similarity index 100% rename from docs/python/MANIFEST.in rename to tools/docgen/docgen/MANIFEST.in diff --git a/docs/python/__init__.py b/tools/docgen/docgen/__init__.py similarity index 100% rename from docs/python/__init__.py rename to tools/docgen/docgen/__init__.py diff --git a/tools/docgen/docgen/__main__.py b/tools/docgen/docgen/__main__.py new file mode 100644 index 0000000000..a7f1166142 --- /dev/null +++ b/tools/docgen/docgen/__main__.py @@ -0,0 +1,34 @@ +import argparse +import yaml +from docgen.core import DocumentProcessor +import importlib + + +def instantiate_channel(channel_yml): + name = channel_yml["name"] + module_name, class_name = name.rsplit(".", 1) + + print(f"Instantiating {class_name} from module {module_name}") + clazz = getattr(importlib.import_module(module_name), class_name) + channel_yml.pop("name") + return clazz(**channel_yml) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description="Document Projection Pipeline") + parser.add_argument( + "--manifest", + type=str, + help="A manifest file with the configuration of the pipeline", + default="manifest.yaml", + ) + args = parser.parse_args() + print("Executing with args: {}".format(args)) + + with open(args.manifest, "r") as f: + parsed_manifest = yaml.safe_load(f) + print("Found Manifest:") + print(parsed_manifest) + + channels = [instantiate_channel(c) for c in parsed_manifest["channels"]] + DocumentProcessor(channels).run() diff --git a/tools/docgen/docgen/channels.py b/tools/docgen/docgen/channels.py new file mode 100644 index 0000000000..de1b79e02c --- /dev/null +++ b/tools/docgen/docgen/channels.py @@ -0,0 +1,331 @@ +import os +import pathlib +import re +import shutil +import warnings +from datetime import datetime +from os.path import basename, dirname +from typing import List +from urllib.parse import urlparse + +import markdown +import pypandoc +import requests +from bs4 import BeautifulSoup +from docgen.core import Channel, ParallelChannel +from markdownify import ATX, MarkdownConverter +from nbconvert import MarkdownExporter +from nbformat import read +from traitlets.config import Config + + +class WebsiteChannel(ParallelChannel): + def __init__(self, input_dir: str, output_dir: str): + self.input_dir = input_dir + self.output_dir = output_dir + + def list_input_files(self) -> List[str]: + return list(pathlib.Path(self.input_dir).rglob("*")) + + def process(self, input_file: str) -> (): + print(f"Processing {input_file} for website") + output_file = os.path.join( + self.output_dir, os.path.relpath(input_file, self.input_dir) + ) + if str(input_file).endswith(".ipynb"): + output_file = str(output_file).replace(".ipynb", ".md") + parsed = read(input_file, as_version=4) + markdown, resources = MarkdownExporter().from_notebook_node(parsed) + + 
markdown = re.sub(r"style=\"[\S ]*?\"", "", markdown) + markdown = re.sub(r"", "", markdown) + title = basename(input_file).replace(".ipynb", "") + markdown = f"---\ntitle: {title}\nhide_title: true\nstatus: stable\n---\n{markdown}" + + os.makedirs(dirname(output_file), exist_ok=True) + with open(output_file, "w+", encoding="utf-8") as f: + f.write(markdown) + else: + if os.path.isdir(input_file): + os.makedirs(output_file, exist_ok=True) + else: + os.makedirs(dirname(output_file), exist_ok=True) + shutil.copy(input_file, output_file) + + +class FabricChannel(Channel): + def __init__(self, input_dir: str, output_dir: str, notebooks: List[dict]): + self.input_dir = input_dir + self.output_dir = output_dir + self.notebooks = notebooks + self.hide_tag = "hide-synapse-internal" + self.media_dir = os.path.join(self.output_dir, "media") + + def list_input_files(self) -> List[str]: + return [n["path"] for n in self.notebooks] + + def _sentence_to_snake(self, path: str): + return ( + path.lower() + .replace(" - ", "-") + .replace(" ", "-") + .replace(",", "") + .replace(".ipynb", "") + .replace(".rst", "") + ) + + def _is_valid_url(self, url): + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except: + return False + + def _replace_img_tag(self, img_tag, img_path_rel): + img_tag.replace_with( + f':::image type="content" source="{img_path_rel}" ' + f'alt-text="{img_tag.get("alt", "placeholder alt text")}":::' + ) + + def _download_and_replace_images( + self, + html_soup, + resources, + output_folder, + relative_to, + notebook_path, + get_image_from_local=False, + ): + output_folder = output_folder.replace("/", os.sep) + os.makedirs(output_folder, exist_ok=True) + + if resources: + # resources converted from notebook + resources_img, i = [], 0 + for img_filename, content in resources.get("outputs", {}).items(): + img_path = os.path.join(output_folder, img_filename.replace("_", "-")) + with open(img_path, "wb") as img_file: + img_file.write(content) + img_path_rel = os.path.relpath(img_path, relative_to).replace( + os.sep, "/" + ) + resources_img.append(img_path_rel) + + img_tags = html_soup.find_all("img") + for img_tag in img_tags: + img_loc = img_tag["src"] + + if self._is_valid_url(img_loc): + # downloaded image + response = requests.get(img_loc) + if response.status_code == 200: + img_filename = self._sentence_to_snake(img_loc.split("/")[-1]) + img_path = os.path.join(output_folder, img_filename) + with open(img_path, "wb") as img_file: + img_file.write(response.content) + img_path_rel = os.path.relpath(img_path, relative_to).replace( + os.sep, "/" + ) + img_tag["src"] = img_path_rel + else: + raise ValueError(f"Could not download image from {img_loc}") + + elif get_image_from_local: + # process local images + img_filename = self._sentence_to_snake(img_loc.split("/")[-1]).replace( + "_", "-" + ) + file_folder = "/".join( + notebook_path.split("/")[:-1] + ) # path read from manifest file + img_input_path = os.path.join( + self.input_dir, file_folder, img_loc + ).replace("/", os.sep) + if not os.path.exists(img_input_path): + raise ValueError(f"Could not get image from {img_loc}") + img_path = os.path.join(output_folder, img_filename) + img_path_rel = os.path.relpath(img_path, relative_to).replace( + os.sep, "/" + ) + shutil.copy(img_input_path, img_path) + + else: + # process image got from notebook resources + img_path_rel = resources_img[i] + img_tag["src"] = img_path_rel + i += 1 + + self._replace_img_tag(img_tag, img_path_rel) + + return html_soup + + def 
_validate_metadata(self, metadata): + required_metadata = [ + "author", + "description", + "ms.author", + "ms.topic", + "title", + ] + for req in required_metadata: + assert ( + req in metadata.keys() + ), f"{req} is required metadata, please add it to manifest file" + + def _generate_metadata_header(self, metadata): + """ + take a file and the authors name, generate metadata + metadata requirements: https://learn.microsoft.com/contribute/metadata + Azure Doc require MS authors and contributors need to make content contributions through the private repository + so the content can be staged and validated by the current validation rules. (Jan 4th, 2023) + """ + if "ms.date" not in metadata: + update_date = datetime.today().strftime("%m/%d/%Y") + metadata["ms.date"] = update_date + else: + warnings.warn( + "ms.date is set in manifest file, the date won't be automatically updated. " + "to update date automatically, remove ms.date from manifest file" + ) + formatted_list = ( + ["---"] + + ["{k}: {v}".format(k=k, v=v) for k, v in metadata.items()] + + ["---\n"] + ) + return "\n".join(formatted_list) + + def _remove_content(self, text): + patterns_to_remove = [ + "https://docs.microsoft.com", + "https://learn.microsoft.com", + ] + for pattern in patterns_to_remove: + text = re.sub(pattern, "", text) + return text + + def _read_rst(self, rst_file_path): + try: + extra_args = ["--wrap=none"] + html_string = pypandoc.convert_file( + rst_file_path, "html", format="rst", extra_args=extra_args + ) + return html_string + except Exception as e: + print("Error converting the RST file to Markdown:", e) + return None + + def _convert_to_markdown_links(self, parsed_html): + for link in parsed_html.find_all("a", href=True): + href = link["href"] + if not self._is_valid_url(href) and ".md" not in href: + split_href = href.split("#") + split_href[0] += ".md" + new_href = "#".join(split_href) + link["href"] = new_href + return parsed_html + + def process(self, input_file: str, index: int) -> (): + print(f"Processing {input_file} for fabric") + output_file = os.path.join(self.output_dir, input_file) + output_img_dir = self.media_dir + "/" + self._sentence_to_snake(input_file) + full_input_file = os.path.join(self.input_dir, input_file) + notebook_path = self.notebooks[index]["path"] + metadata = self.notebooks[index]["metadata"] + self._validate_metadata(metadata) + + def callback(el): + if el.contents[0].has_attr("class"): + return ( + el.contents[0]["class"][0].split("-")[-1] + if len(el.contents) >= 1 + else None + ) + else: + return el["class"][0] if el.has_attr("class") else None + + def convert_soup_to_md(soup, **options): + return MarkdownConverter(**options).convert_soup(soup) + + if str(input_file).endswith(".rst"): + output_file = self._sentence_to_snake( + str(output_file).replace(".rst", ".md") + ) + html = self._read_rst(full_input_file) + parsed_html = markdown.markdown( + html, + extensions=[ + "markdown.extensions.tables", + "markdown.extensions.fenced_code", + ], + ) + parsed_html = BeautifulSoup(parsed_html, features="html.parser") + parsed_html = self._download_and_replace_images( + parsed_html, + None, + output_img_dir, + os.path.dirname(output_file), + notebook_path, + True, + ) + parsed_html = self._convert_to_markdown_links(parsed_html) + + elif str(input_file).endswith(".ipynb"): + output_file = self._sentence_to_snake( + str(output_file).replace(".ipynb", ".md") + ) + parsed = read(full_input_file, as_version=4) + + c = Config() + c.TagRemovePreprocessor.remove_cell_tags = 
(self.hide_tag,) + c.TagRemovePreprocessor.enabled = True + c.MarkdownExporter.preprocessors = [ + "nbconvert.preprocessors.TagRemovePreprocessor" + ] + md, resources = MarkdownExporter(config=c).from_notebook_node(parsed) + + html = markdown.markdown( + md, + extensions=[ + "markdown.extensions.tables", + "markdown.extensions.fenced_code", + ], + ) + parsed_html = BeautifulSoup(html) + # Download images and place them in media directory while updating their links + parsed_html = self._download_and_replace_images( + parsed_html, + resources, + output_img_dir, + os.path.dirname(output_file), + None, + False, + ) + + # Remove StatementMeta + for element in parsed_html.find_all( + text=re.compile("StatementMeta\(.*?Available\)") + ): + element.extract() + warnings.warn( + f"Found StatementMeta in {input_file}, please check if you want it in the notebook.", + UserWarning, + ) + + # Remove extra CSS styling info + for style_tag in parsed_html.find_all("style"): + style_tag.extract() + + # Convert from HTML to MD + new_md = convert_soup_to_md( + parsed_html, + code_language_callback=callback, + heading_style=ATX, + escape_underscores=False, + ) + # Post processing + new_md = f"{self._generate_metadata_header(metadata)}\n{new_md}" + output_md = self._remove_content(new_md) + + os.makedirs(dirname(output_file), exist_ok=True) + with open(output_file, "w+", encoding="utf-8") as f: + f.write(output_md) diff --git a/tools/docgen/docgen/core.py b/tools/docgen/docgen/core.py new file mode 100644 index 0000000000..452eee2554 --- /dev/null +++ b/tools/docgen/docgen/core.py @@ -0,0 +1,37 @@ +import multiprocessing +from abc import ABC, abstractmethod +from typing import List + + +class Channel(ABC): + @abstractmethod + def process(self, input_file: str) -> (): + pass + + @abstractmethod + def list_input_files(self) -> List[str]: + pass + + def run(self) -> (): + for index, input_file in enumerate(self.list_input_files()): + self.process(input_file, index) + + +class ParallelChannel(Channel): + def run(self) -> (): + with multiprocessing.Pool() as pool: + pool.map(self.process, self.list_input_files()) + + +class DocumentProcessor: + def __init__(self, channels: List[Channel]): + self.channels = channels + + def run(self) -> None: + print(f"Running DocumentProcessor on {self.channels}") + if len(self.channels) == 0: + raise ValueError("No channels selected.") + + for channel in self.channels: + print(f"Running Channel: {self.channels}") + channel.run() diff --git a/tools/docgen/docgen/manifest.yaml b/tools/docgen/docgen/manifest.yaml new file mode 100644 index 0000000000..96848562c6 --- /dev/null +++ b/tools/docgen/docgen/manifest.yaml @@ -0,0 +1,135 @@ +channels: + - name: "channels.WebsiteChannel" + input_dir: "../../../docs/" + output_dir: "../../../website/docs/" + - name: channels.FabricChannel + input_dir: ../../../docs/ + output_dir: ../../../target/fabric-docs-pr/ + notebooks: + - path: Explore Algorithms/AI Services/Multivariate Anomaly Detection.ipynb + metadata: + title: Analyze time series + description: Use SynapseML and Azure Cognitive Services for multivariate anomaly detection. + ms.topic: overview + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/AI Services/Overview.ipynb + metadata: + title: Cognitive Services in Azure Synapse Analytics + description: Enrich your data with artificial intelligence (AI) in Azure Synapse Analytics using pretrained models from Azure Cognitive Services. 
+ ms.topic: overview + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/Anomaly Detection/Quickstart - Isolation Forests.ipynb + metadata: + title: Outlier and Anomaly Detection + description: Use SynapseML on Apache Spark for multivariate anomaly detection with Isolation Forest model. + ms.topic: overview + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/Causal Inference/Quickstart - Measure Causal Effects.ipynb + metadata: + title: Causal Structure + description: Causal Structure + ms.topic: overview + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/Classification/Quickstart - SparkML vs SynapseML.ipynb + filename: classification-before-and-after-synapseml + metadata: + title: Classification - before and after SynapseML + description: Perform the same classification task with and without SynapseML. + ms.topic: how-to + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/Deep Learning/Quickstart - Fine-tune a Text Classifier.ipynb + metadata: + title: Train a Text Classifier + description: Train a Text Classifier + ms.topic: overview + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/Deep Learning/Quickstart - ONNX Model Inference.ipynb + filename: onnx-overview + metadata: + title: ONNX - Inference on Spark + description: Use SynapseML to build a LightGBM model, convert it to ONNX format, then perform inference. + ms.topic: how-to + ms.custom: build-2023 + ms.reviewer: larryfr + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/Hyperparameter Tuning/Quickstart - Random Search.ipynb + metadata: + title: Hyperparameter tuning + description: Identify the best combination of hyperparameters for your chosen classifiers with SynapseML. + ms.topic: overview + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/LightGBM/Quickstart - Classification, Ranking, and Regression.ipynb + metadata: + title: LightGBM Overview + description: build LightGBM model with SynapseML + ms.topic: overview + ms.reviewer: mopeakande + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/OpenAI/OpenAI.ipynb + metadata: + title: Azure OpenAI for big data + description: Use Azure OpenAI service to solve a large number of natural language tasks through prompting the completion API. + ms.topic: how-to + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/OpenAI/Quickstart - Understand and Search Forms.ipynb + metadata: + title: Build a Search Engine + description: Build a custom search engine and question-answering system with SynapseML. + ms.topic: overview + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: JessicaXYWang + - path: Explore Algorithms/Other Algorithms/Quickstart - Exploring Art Across Cultures.ipynb + filename: conditional-k-nearest-neighbors-exploring-art + metadata: + title: Conditional KNN Exploring Art Across Cultures + description: A guideline for match-finding via k-nearest-neighbors. 
+ ms.topic: how-to + ms.custom: build-2023 + ms.reviewer: larryfr + author: JessicaXYWang + ms.author: jessiwang + - path: Explore Algorithms/Responsible AI/Tabular Explainers.ipynb + metadata: + title: Interpretability - Tabular SHAP explainer + description: Use Kernel SHAP to explain a tabular classification model. + ms.topic: overview + ms.custom: build-2023 + ms.reviewer: jessiwang + author: JessicaXYWang + ms.author: jessiwang + - path: Get Started/Quickstart - Your First Models.ipynb + metadata: + title: SynapseMl first model + description: A quick introduction to building your first machine learning model with SynapseML. + ms.topic: how-to + ms.custom: build-2023 + ms.reviewer: mopeakande + author: JessicaXYWang + ms.author: jessiwang \ No newline at end of file diff --git a/docs/python/setup.py b/tools/docgen/setup.py similarity index 76% rename from docs/python/setup.py rename to tools/docgen/setup.py index edf2a06679..7d1fe844fc 100644 --- a/docs/python/setup.py +++ b/tools/docgen/setup.py @@ -5,13 +5,13 @@ from setuptools import setup, find_packages setup( - name="documentprojection", + name="docgen", + packages=["docgen"], version=0.1, description="Synapse Machine Learning Documentation Pipeline", long_description="SynapseML contains Microsoft's open source " + "contributions to the Apache Spark ecosystem", license="MIT", - packages=find_packages(), url="https://github.com/Microsoft/SynapseML", author="Microsoft", author_email="synapseml-support@microsoft.com", @@ -25,9 +25,16 @@ "Programming Language :: Python :: 3", ], zip_safe=True, - package_data={ - "documentprojection": ["../LICENSE.txt", "../README.txt", "./utils/*.ipynb"] - }, + package_data={"docgen": ["../LICENSE.txt", "../README.txt"]}, python_requires=">=3.8.8", - install_requires=["nbformat", "nbconvert", "pathlib", "argparse"], + install_requires=[ + "nbformat", + "nbconvert", + "pathlib", + "argparse", + "pypandoc", + "markdownify", + "markdown", + "traitlets", + ], ) diff --git a/tools/docker/demo/Dockerfile b/tools/docker/demo/Dockerfile index aa8cc81ce6..c0eeabbfd3 100644 --- a/tools/docker/demo/Dockerfile +++ b/tools/docker/demo/Dockerfile @@ -1,6 +1,6 @@ FROM mcr.microsoft.com/oss/mirror/docker.io/library/ubuntu:20.04 -ARG SYNAPSEML_VERSION=0.11.1 +ARG SYNAPSEML_VERSION=0.11.2 ARG DEBIAN_FRONTEND=noninteractive ENV SPARK_VERSION=3.3.1 @@ -60,8 +60,8 @@ RUN jupyter-notebook --generate-config \ # Copy the init script for jupyter startup. COPY tools/docker/demo/init_notebook.py /root/.ipython/profile_default/startup/init_notebook.py -COPY notebooks notebooks -WORKDIR notebooks/features +COPY docs docs +WORKDIR docs # Jupyter Notebook UI EXPOSE 8888 diff --git a/tools/docker/demo/README.md b/tools/docker/demo/README.md index ca0a74bb4f..2c091234d1 100644 --- a/tools/docker/demo/README.md +++ b/tools/docker/demo/README.md @@ -15,9 +15,9 @@ docker build . --build-arg SYNAPSEML_VERSION= -f tools/docker eg. -For building image with SynapseML version 0.11.1, run: +For building image with SynapseML version 0.11.2, run: ``` -docker build . --build-arg SYNAPSEML_VERSION=0.11.1 -f tools/docker/demo/Dockerfile -t synapseml:0.11.1 +docker build . 
--build-arg SYNAPSEML_VERSION=0.11.2 -f tools/docker/demo/Dockerfile -t synapseml:0.11.2 ``` # Run the image diff --git a/tools/docker/demo/init_notebook.py b/tools/docker/demo/init_notebook.py index 643463c36e..8b9f359de0 100644 --- a/tools/docker/demo/init_notebook.py +++ b/tools/docker/demo/init_notebook.py @@ -27,7 +27,7 @@ ( "spark.jars.packages", "com.microsoft.azure:synapseml_2.12:" - + os.getenv("SYNAPSEML_VERSION", "0.11.1") + + os.getenv("SYNAPSEML_VERSION", "0.11.2") + ",org.apache.hadoop:hadoop-azure:2.7.0,org.apache.hadoop:hadoop-common:2.7.0,com.microsoft.azure:azure-storage:2.0.0", ), ( diff --git a/tools/docker/minimal/Dockerfile b/tools/docker/minimal/Dockerfile index daa06a0ed3..9d9845051b 100644 --- a/tools/docker/minimal/Dockerfile +++ b/tools/docker/minimal/Dockerfile @@ -1,6 +1,6 @@ FROM mcr.microsoft.com/oss/mirror/docker.io/library/ubuntu:20.04 -ARG SYNAPSEML_VERSION=0.11.1 +ARG SYNAPSEML_VERSION=0.11.2 ARG DEBIAN_FRONTEND=noninteractive ENV SPARK_VERSION=3.3.1 diff --git a/website/.gitignore b/website/.gitignore index 3e53ef5c00..de06993cba 100644 --- a/website/.gitignore +++ b/website/.gitignore @@ -9,31 +9,7 @@ .cache-loader # Converted markdowns -/docs/features/* -!/docs/features/causal_inference -/docs/features/causal_inference/* -!/docs/features/causal_inference/about.md -!/docs/features/lightgbm -/docs/features/lightgbm/* -!/docs/features/lightgbm/about.md -!/docs/features/onnx -/docs/features/onnx/* -!/docs/features/onnx/about.md -!/docs/features/responsible_ai -/docs/features/responsible_ai/* -!/docs/features/responsible_ai/Data Balance Analysis.md -!/docs/features/responsible_ai/Model Interpretation on Spark.md -!/docs/features/simple_deep_learning -/docs/features/simple_deep_learning/* -!/docs/features/simple_deep_learning/about.md -!/docs/features/simple_deep_learning/installation.md -!/docs/features/spark_serving -/docs/features/spark_serving/* -!/docs/features/spark_serving/about.md -!/docs/features/vw -/docs/features/vw/* -!/docs/features/vw/about.md -/docs/features/hyperparameter_tuning/* +/docs/* # Misc .DS_Store diff --git a/website/blog/overview.md b/website/blog/overview.md index ca7dc68689..09ea6eb811 100644 --- a/website/blog/overview.md +++ b/website/blog/overview.md @@ -1,15 +1,15 @@ --- -title: "Overview" -description: "SynapseML Overview" -keywords: [ - "SynapseML", -] +title: What is SynapseML? +sidebar_label: What is SynapseML? +hide_title: true --- -Synapse Machine Learning expands the distributed computing framework [Apache Spark](https://github.com/apache/spark) in several new directions. SynapseML adds several machine learning frameworks to the SparkML Ecosystem, including [LightGBM](/docs/features/lightgbm/about), [Vowpal Wabbit](/docs/features/vw/about), [OpenCV](https://opencv.org/), [Isolation Forest](https://github.com/linkedin/isolation-forest), and the [Microsoft Cognitive Toolkit (CNTK)](https://www.microsoft.com/en-us/research/product/cognitive-toolkit/). These tools allow users to craft powerful and highly scalable models that span multiple ML ecosystems. +import useBaseUrl from "@docusaurus/useBaseUrl"; - +# What is SynapseML? -SynapseML also brings new networking capabilities to the Spark ecosystem. With the HTTP on Spark project, users can embed any web service into their SparkML models and use their Spark clusters for massive networking workflows. In this vein, SynapseML provides easy to use SparkML transformers for a wide variety of Azure Cognitive Services. 
Finally, the Spark Serving project enables high throughput, submillisecond latency web services, backed by your Spark cluster. +SynapseML (previously known as MMLSpark), is an open-source library that simplifies the creation of massively scalable machine learning (ML) pipelines. SynapseML provides simple, composable, and distributed APIs for a wide variety of different machine learning tasks such as text analytics, vision, anomaly detection, and many others. SynapseML is built on the [Apache Spark distributed computing framework](https://spark.apache.org/) and shares the same API as the [SparkML/MLLib library](https://spark.apache.org/mllib/), allowing you to seamlessly embed SynapseML models into existing Apache Spark workflows. -Visit the SynapseML GitHub repository to learn more. +With SynapseML, you can build scalable and intelligent systems to solve challenges in domains such as anomaly detection, computer vision, deep learning, text analytics, and others. SynapseML can train and evaluate models on single-node, multi-node, and elastically resizable clusters of computers. This lets you scale your work without wasting resources. SynapseML is usable across Python, R, Scala, Java, and .NET. Furthermore, its API abstracts over a wide variety of databases, file systems, and cloud data stores to simplify experiments no matter where data is located. + +SynapseML requires Scala 2.12, Spark 3.2+, and Python 3.8+. diff --git a/website/docs/about.md b/website/docs/about.md deleted file mode 100644 index 4bb619dbfa..0000000000 --- a/website/docs/about.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: SynapseML -sidebar_label: Introduction -hide_title: true ---- - -import useBaseUrl from "@docusaurus/useBaseUrl"; - -
- -# SynapseML - -SynapseML is an ecosystem of tools aimed towards expanding the distributed computing framework -[Apache Spark](https://github.com/apache/spark) in several new directions. -SynapseML adds many deep learning and data science tools to the Spark ecosystem, -including seamless integration of Spark Machine Learning pipelines with [Microsoft Cognitive Toolkit -(CNTK)](https://github.com/Microsoft/CNTK), [LightGBM](https://github.com/Microsoft/LightGBM) and -[OpenCV](http://www.opencv.org/). These tools enable powerful and highly scalable predictive and analytical models -for many types of datasources. - -SynapseML also brings new networking capabilities to the Spark Ecosystem. With the HTTP on Spark project, users -can embed **any** web service into their SparkML models. In this vein, SynapseML provides easy to use -SparkML transformers for a wide variety of [Azure Cognitive Services](https://azure.microsoft.com/en-us/services/cognitive-services/). For production grade deployment, the Spark Serving project enables high throughput, -submillisecond latency web services, backed by your Spark cluster. - -SynapseML requires Scala 2.12, Spark 3.2+, and Python 3.8+. -See the API documentation [for -Scala](https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/index.html#package) and [for -PySpark](https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/index.html). - -import Link from '@docusaurus/Link'; - -Get Started - -## Examples - -import NotebookExamples from "@theme/NotebookExamples"; - - - -## Explore our Features - -import FeatureCards from "@theme/FeatureCards"; - - - -## Papers - -- [Large Scale Intelligent Microservices](https://arxiv.org/abs/2009.08044) - -- [Conditional Image Retrieval](https://arxiv.org/abs/2007.07177) - -- [SynapseML: Unifying Machine Learning Ecosystems at Massive Scales](https://arxiv.org/abs/1810.08744) - -- [Flexible and Scalable Deep Learning with MMLSpark](https://arxiv.org/abs/1804.04031) diff --git a/website/docs/documentation/estimators/causal/_causalInferenceDML.md b/website/docs/documentation/estimators/causal/_causalInferenceDML.md deleted file mode 100644 index d39eb7b35f..0000000000 --- a/website/docs/documentation/estimators/causal/_causalInferenceDML.md +++ /dev/null @@ -1,100 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## DoubleMLEstimator - - - - - - -```python -from synapse.ml.causal import * -from pyspark.ml.classification import LogisticRegression -from pyspark.sql.types import StructType, StructField, DoubleType, IntegerType, BooleanType - -schema = StructType([ - StructField("Treatment", BooleanType()), - StructField("Outcome", BooleanType()), - StructField("col2", DoubleType()), - StructField("col3", DoubleType()), - StructField("col4", DoubleType()) - ]) - - -df = spark.createDataFrame([ - (False, True, 0.30, 0.66, 0.2), - (True, False, 0.38, 0.53, 1.5), - (False, True, 0.68, 0.98, 3.2), - (True, False, 0.15, 0.32, 6.6), - (False, True, 0.50, 0.65, 2.8), - (True, True, 0.40, 0.54, 3.7), - (False, True, 0.78, 0.97, 8.1), - (True, False, 0.12, 0.32, 10.2), - (False, True, 0.35, 0.63, 1.8), - (True, False, 0.45, 0.57, 4.3), - (False, True, 0.75, 0.97, 7.2), - (True, True, 0.16, 0.32, 11.7)], schema -) - -dml = (DoubleMLEstimator() - .setTreatmentCol("Treatment") - .setTreatmentModel(LogisticRegression()) - .setOutcomeCol("Outcome") - .setOutcomeModel(LogisticRegression()) - .setMaxIter(20)) - -dmlModel = dml.fit(df) 
-dmlModel.getAvgTreatmentEffect() -dmlModel.getConfidenceInterval() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.causal._ -import org.apache.spark.ml.classification.LogisticRegression - -val df = (Seq( - (false, true, 0.50, 0.60, 0), - (true, false, 0.40, 0.50, 1), - (false, true, 0.78, 0.99, 2), - (true, false, 0.12, 0.34, 3), - (false, true, 0.50, 0.60, 0), - (true, false, 0.40, 0.50, 1), - (false, true, 0.78, 0.99, 2), - (true, false, 0.12, 0.34, 3), - (false, false, 0.50, 0.60, 0), - (true, true, 0.40, 0.50, 1), - (false, true, 0.78, 0.99, 2), - (true, false, 0.12, 0.34, 3)) - .toDF("Treatment", "Outcome", "col2", "col3", "col4")) - -val dml = (new DoubleMLEstimator() - .setTreatmentCol("Treatment") - .setTreatmentModel(new LogisticRegression()) - .setOutcomeCol("Outcome") - .setOutcomeModel(new LogisticRegression()) - .setMaxIter(20)) - -val dmlModel = dml.fit(df) -dmlModel.getAvgTreatmentEffect -dmlModel.getConfidenceInterval -``` - - - - - diff --git a/website/docs/documentation/estimators/estimators_causal.md b/website/docs/documentation/estimators/estimators_causal.md deleted file mode 100644 index 80ae2e5aaf..0000000000 --- a/website/docs/documentation/estimators/estimators_causal.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Estimators - Causal -sidebar_label: Causal Inference -hide_title: true ---- - -# Causal Inference - -import DoubleMLEstimator, {toc as DoubleMLEstimatorTOC} from './causal/_causalInferenceDML.md'; - - - -export const toc = [...DoubleMLEstimatorTOC] diff --git a/website/docs/features/onnx/about.md b/website/docs/features/onnx/about.md deleted file mode 100644 index baec0d8e6c..0000000000 --- a/website/docs/features/onnx/about.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: ONNX model inferencing on Spark -hide_title: true -sidebar_label: About -description: Learn how to use the ONNX model transformer to run inference for an ONNX model on Spark. ---- - -# ONNX model inferencing on Spark - -## ONNX - -[ONNX](https://onnx.ai/) is an open format to represent both deep learning and traditional machine learning models. With ONNX, AI developers can more easily move models between state-of-the-art tools and choose the combination that is best for them. - -SynapseML now includes a Spark transformer to bring a trained ONNX model to Apache Spark, so you can run inference on your data with Spark's large-scale data processing power. - -## ONNXHub -Although you can use your own local model, many popular existing models are provided through the ONNXHub. You can use -a model's ONNXHub name (for example "MNIST") and download the bytes of the model, and some metadata about the model. You can also list -available models, optionally filtering by name or tags. - -```scala - // List models - val hub = new ONNXHub() - val models = hub.listModels(model = Some("mnist"), tags = Some(Seq("vision"))) - - // Retrieve and transform with a model - val info = hub.getModelInfo("resnet50") - val bytes = hub.load(name) - val model = new ONNXModel() - .setModelPayload(bytes) - .setFeedDict(Map("data" -> "features")) - .setFetchDict(Map("rawPrediction" -> "resnetv24_dense0_fwd")) - .setSoftMaxDict(Map("rawPrediction" -> "probability")) - .setArgMaxDict(Map("rawPrediction" -> "prediction")) - .setMiniBatchSize(1) - - val (probability, _) = model.transform({YOUR_DATAFRAME}) - .select("probability", "prediction") - .as[(Vector, Double)] - .head -``` - -## Usage - -1. 
Create a `com.microsoft.azure.synapse.ml.onnx.ONNXModel` object and use `setModelLocation` or `setModelPayload` to load the ONNX model. - - For example: - - ```scala - val onnx = new ONNXModel().setModelLocation("/path/to/model.onnx") - ``` - - Optionally, create the model from the ONNXHub. - - ```scala - val onnx = new ONNXModel().setModelPayload(hub.load("MNIST")) - ``` -2. Use ONNX visualization tool (for example, [Netron](https://netron.app/)) to inspect the ONNX model's input and output nodes. - - ![Screenshot that illustrates an ONNX model's input and output nodes](https://mmlspark.blob.core.windows.net/graphics/ONNXModelInputsOutputs.png) - -3. Set the parameters properly to the `ONNXModel` object. - - The `com.microsoft.azure.synapse.ml.onnx.ONNXModel` class provides a set of parameters to control the behavior of the inference. - - | Parameter | Description | Default Value | - |:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------| - | feedDict | Map the ONNX model's expected input node names to the input DataFrame's column names. Make sure the input DataFrame's column schema matches with the corresponding input's shape of the ONNX model. For example, an image classification model may have an input node of shape `[1, 3, 224, 224]` with type Float. It's assumed that the first dimension (1) is the batch size. Then the input DataFrame's corresponding column's type should be `ArrayType(ArrayType(ArrayType(FloatType)))`. | None | - | fetchDict | Map the output DataFrame's column names to the ONNX model's output node names. NOTE: If you put outputs that are intermediate in the model, transform will automatically slice at those outputs. See the section on [Slicing](#slicing). | None | - | miniBatcher | Specify the MiniBatcher to use. | `FixedMiniBatchTransformer` with batch size 10 | - | softMaxDict | A map between output DataFrame columns, where the value column will be computed from taking the softmax of the key column. If the 'rawPrediction' column contains logits outputs, then one can set softMaxDict to `Map("rawPrediction" -> "probability")` to obtain the probability outputs. | None | - | argMaxDict | A map between output DataFrame columns, where the value column will be computed from taking the argmax of the key column. This parameter can be used to convert probability or logits output to the predicted label. | None | - | deviceType | Specify a device type the model inference runs on. Supported types are: CPU or CUDA. If not specified, auto detection will be used. 
| None | - | optimizationLevel | Specify the [optimization level](https://onnxruntime.ai/docs/resources/graph-optimizations.html#graph-optimization-levels) for the ONNX graph optimizations. Supported values are: `NO_OPT`, `BASIC_OPT`, `EXTENDED_OPT`, `ALL_OPT`. | `ALL_OPT` | - -4. Call `transform` method to run inference on the input DataFrame. - -## Model Slicing -By default, an ONNX model is treated as a black box with inputs and outputs. -If you want to use intermediate nodes of a model, you can slice the model at particular nodes. Slicing will create a new model, -keeping only parts of the model that are needed for those nodes. This new model's outputs will be the outputs from -the intermediate nodes. You can save the sliced model and use it to transform just like any other ONNXModel. - -This slicing feature is used implicitly by the ImageFeaturizer, which uses ONNX models. The OnnxHub manifest entry for each model -includes which intermediate node outputs should be used for featurization, so the ImageFeaturizer will automatically slice at the correct nodes. - -The below example shows how to perform the slicing manually with a direct ONNXModel. - -```scala - // create a df: Dataframe with image data - val hub = new ONNXHub() - val info = hub.getModelInfo("resnet50") - val bytes = hub.load(name) - val intermediateOutputName = "resnetv24_pool1_fwd" - val slicedModel = new ONNXModel() - .setModelPayload(bytes) - .setFeedDict(Map("data" -> "features")) - .setFetchDict(Map("rawFeatures" -> intermediateOutputName)) // automatic slicing based on fetch dictionary - // -- or -- - // .sliceAtOutput(intermediateOutputName) // manual slicing - - val slicedModelDf = slicedModel.transform(df) -``` - -## Example - -- [Interpretability - Image Explainers](../../responsible_ai/Interpretability%20-%20Image%20Explainers) -- [ONNX - Inference on Spark](../ONNX%20-%20Inference%20on%20Spark) diff --git a/website/docs/features/responsible_ai/Model Interpretation on Spark.md b/website/docs/features/responsible_ai/Model Interpretation on Spark.md deleted file mode 100644 index 93dbc54cef..0000000000 --- a/website/docs/features/responsible_ai/Model Interpretation on Spark.md +++ /dev/null @@ -1,174 +0,0 @@ ---- -title: Model Interpretation on Spark -hide_title: true -sidebar_label: Model Interpretation on Spark ---- - -# Model Interpretation on Spark - -## Interpretable Machine Learning - -Interpretable Machine Learning helps developers, data scientists and business stakeholders in the organization gain a comprehensive understanding of their machine learning models. It can also be used to debug models, explain predictions and enable auditing to meet compliance with regulatory requirements. - -## Why run model interpretation on Spark - -Model-agnostic interpretation methods can be computationally expensive due to the multiple evaluations needed to compute the explanations. Model interpretation on Spark enables users to interpret a black-box model at massive scales with the Apache Spark™ distributed computing ecosystem. Various components support local interpretation for tabular, vector, image and text classification models, with two popular model-agnostic interpretation methods: [LIME] and [Kernel SHAP]. - -[LIME]: https://arxiv.org/abs/1602.04938 - -[Kernel SHAP]: https://arxiv.org/abs/1705.07874 - -## Usage - -Both LIME and Kernel SHAP are local interpretation methods. Local interpretation explains why does the model predict certain outcome for a given observation. 
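-
-As a quick preview of the workflow the rest of this page documents, here's a minimal
-PySpark sketch of a tabular Kernel SHAP explainer. It's only a sketch: the import path
-is assumed to be `synapse.ml.explainers`, and `model` (a fitted SparkML model), `df`
-(the observations to explain), `background_df` (a background dataset), and the column
-names are placeholders to replace with your own.
-
-```python
-from synapse.ml.explainers import TabularSHAP  # assumed import path for the PySpark explainers
-
-shap = (TabularSHAP()
-    .setModel(model)                      # the black-box model to explain
-    .setInputCols(["col1", "col2"])       # input columns fed to the model (placeholders)
-    .setBackgroundData(background_df)     # background data for the SHAP baseline
-    .setTargetCol("probability")          # explain the probability output...
-    .setTargetClasses([1])                # ...for class index 1
-    .setOutputCol("shapValues"))
-
-explained = shap.transform(df)            # adds a column of per-class SHAP value vectors
-```
-
-The parameter tables below describe each of these knobs (`model`, `inputCols`,
-`backgroundData`, `targetCol`, `targetClasses`, `outputCol`) in detail.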
- -Both explainers extends from `org.apache.spark.ml.Transformer`. After setting up the explainer parameters, simply call the `transform` function on a `DataFrame` of observations to interpret the model behavior on these observations. - -To see examples of model interpretability on Spark in action, take a look at these sample notebooks: - -- [Tabular SHAP explainer](../../../features/responsible_ai/Interpretability%20-%20Tabular%20SHAP%20explainer) -- [Image explainers](../../../features/responsible_ai/Interpretability%20-%20Image%20Explainers) -- [Text explainers](../../../features/responsible_ai/Interpretability%20-%20Text%20Explainers) - -| | Tabular models | Vector models | Image models | Text models | -|------------------------|-----------------------------|---------------------------|-------------------------|-----------------------| -| LIME explainers | [TabularLIME](#tabularlime) | [VectorLIME](#vectorlime) | [ImageLIME](#imagelime) | [TextLIME](#textlime) | -| Kernel SHAP explainers | [TabularSHAP](#tabularshap) | [VectorSHAP](#vectorshap) | [ImageSHAP](#imageshap) | [TextSHAP](#textshap) | - -### Common local explainer params - -All local explainers support the following params: - -| Param | Type | Default | Description | -|------------------|---------------|---------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| targetCol | `String` | "probability" | The column name of the prediction target to explain (i.e. the response variable). This is usually set to "prediction" for regression models and "probability" for probabilistic classification models. | -| targetClasses | `Array[Int]` | empty array | The indices of the classes for multinomial classification models. | -| targetClassesCol | `String` | | The name of the column that specifies the indices of the classes for multinomial classification models. | -| outputCol | `String` | | The name of the output column for interpretation results. | -| model | `Transformer` | | The model to be explained. | - -### Common LIME explainer params - -All LIME based explainers ([TabularLIME](#tabularlime), [VectorLIME](#vectorlime), [ImageLIME](#imagelime), [TextLIME](#textlime)) support the following params: - -| Param | Type | Default | Description | -|----------------|----------|---------------------------------|-----------------------------------------------------------| -| regularization | `Double` | 0 | Regularization param for the underlying lasso regression. | -| kernelWidth | `Double` | sqrt(number of features) * 0.75 | Kernel width for the exponential kernel. | -| numSamples | `Int` | 1000 | Number of samples to generate. | -| metricsCol | `String` | "r2" | Column name for fitting metrics. | - -### Common SHAP explainer params - -All Kernel SHAP based explainers ([TabularSHAP](#tabularshap), [VectorSHAP](#vectorshap), [ImageSHAP](#imageshap), [TextSHAP](#textshap)) support the following params: - -| Param | Type | Default | Description | -|------------|----------|---------------------------------|------------------------------------------------| -| infWeight | `Double` | 1E8 | The double value to represent infinite weight. | -| numSamples | `Int` | 2 * (number of features) + 2048 | Number of samples to generate. | -| metricsCol | `String` | "r2" | Column name for fitting metrics. 
| - -### Tabular model explainer params - -All tabular model explainers ([TabularLIME](#tabularlime), [TabularSHAP](#tabularshap)) support the following params: - -| Param | Type | Default | Description | -|----------------|-----------------|---------|--------------------------------------------------------------------------------------------------------------| -| inputCols | `Array[String]` | | The names of input columns to the black-box model. | -| backgroundData | `DataFrame` | | A dataframe containing background data. It must contain all the input columns needed by the black-box model. | - -### Vector model explainer params - -All vector model explainers ([VectorLIME](#vectorlime), [VectorSHAP](#vectorshap)) support the following params: - -| Param | Type | Default | Description | -|----------------|-------------|---------|----------------------------------------------------------------------------------------------------------------| -| inputCol | `String` | | The names of input vector column to the black-box model. | -| backgroundData | `DataFrame` | | A dataframe containing background data. It must contain the input vector column needed by the black-box model. | - -### Image model explainer params - -All image model explainers ([ImageLIME](#imagelime), [ImageSHAP](#imageshap)) support the following params: - -| Param | Type | Default | Description | -|---------------|----------|---------------|--------------------------------------------------------------------| -| inputCol | `String` | | The names of input image column to the black-box model. | -| cellSize | `Double` | 16 | Number that controls the size of the super-pixels. | -| modifier | `Double` | 130 | Controls the trade-off spatial and color distance of super-pixels. | -| superpixelCol | `String` | "superpixels" | The column holding the super-pixel decompositions. | - -### Text model explainer params - -All text model explainers ([TextLIME](#textlime), [TextSHAP](#textshap)) support the following params: - -| Param | Type | Default | Description | -|-----------|----------|----------|--------------------------------------------------------| -| inputCol | `String` | | The names of input text column to the black-box model. | -| tokensCol | `String` | "tokens" | The column holding the text tokens. | - -### `TabularLIME` - -| Param | Type | Default | Description | -|---------------------|-----------------|-------------|----------------------------------------------------------------------| -| categoricalFeatures | `Array[String]` | empty array | The name of columns that should be treated as categorical variables. | - -> For categorical features, `TabularLIME` creates new samples by drawing samples based on the value distribution from the background dataset. For numerical features, it creates new samples by drawing from a normal distribution with mean taken from the target value to be explained, and standard deviation taken from the background dataset. - -### `TabularSHAP` - -No additional params are supported. - -### `VectorLIME` - -No additional params are supported. - -> `VectorLIME` assumes all features are numerical, and categorical features are not supported in `VectorLIME`. - -### `VectorSHAP` - -No additional params are supported. - -### `ImageLIME` - -| Param | Type | Default | Description | -|------------------|----------|---------|----------------------------------------------------------| -| samplingFraction | `Double` | 0.7 | The fraction of super-pixels to keep on during sampling. 
| - -> `ImageLIME` creates new samples by randomly turning super-pixels on or off with probability of keeping on set to `SamplingFraction`. - -### `ImageSHAP` - -No additional params are supported. - -### `TextLIME` - -| Param | Type | Default | Description | -|------------------|----------|---------|---------------------------------------------------------| -| samplingFraction | `Double` | 0.7 | The fraction of word tokens to keep on during sampling. | - -> `TextLIME` creates new samples by randomly turning word tokens on or off with probability of keeping on set to `SamplingFraction`. - -### `TextSHAP` - -No additional params are supported. - -## Result interpretation - -### LIME explainers - -LIME explainers return an array of vectors, and each vector maps to a class being explained. Each component of the vector is the coefficient for the corresponding feature, super-pixel, or word token from the local surrogate model. - -- For categorical variables, super-pixels, or word tokens, the coefficient shows the average change in model outcome if this feature is unknown to the model, if the super-pixel is replaced with background color (black), or if the word token is replaced with empty string. -- For numeric variables, the coefficient shows the change in model outcome if the feature value is incremented by 1 unit. - -### SHAP explainers - -SHAP explainers return an array of vectors, and each vector maps to a class being explained. Each vector starts with the [base value](#base-value), and each following component of the vector is the Shapley value for each feature, super-pixel, or token. - -The base value and Shapley values are additive, and they should add up to the model output for the target observation. - -#### Base value - -- For tabular and vector models, the base value represents the mean outcome of the model for the background dataset. -- For image models, the base value represents the model outcome for a background (all black) image. -- For text models, the base value represents the model outcome for an empty string. diff --git a/website/docs/features/spark_serving/about.md b/website/docs/features/spark_serving/about.md deleted file mode 100644 index 1aaeadde49..0000000000 --- a/website/docs/features/spark_serving/about.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: Spark Serving -hide_title: true -sidebar_label: About ---- - - - -# Spark Serving - -### An Engine for Deploying Spark Jobs as Distributed Web Services - -- **Distributed**: Takes full advantage of Node, JVM, and thread level - parallelism that Spark is famous for. -- **Fast**: No single node bottlenecks, no round trips to Python. - Requests can be routed directly to and from worker JVMs through - network switches. Spin up a web service in a matter of seconds. -- **Low Latency**: When using continuous serving, - you can achieve latencies as low as 1 millisecond. -- **Deployable Anywhere**: Works anywhere that runs Spark such as - Databricks, HDInsight, AZTK, DSVMs, local, or on your own - cluster. Usable from Spark, PySpark, and SparklyR. -- **Lightweight**: No dependence on costly Kafka or - Kubernetes clusters. -- **Idiomatic**: Uses the same API as batch and structured streaming. -- **Flexible**: Spin up and manage several services on a single Spark - cluster. Synchronous and Asynchronous service management and - extensibility. Deploy any spark job that is expressible as a - structured streaming query. Use serving sources/sinks with other - Spark data sources/sinks for more complex deployments. 
- -## Usage - -### Jupyter Notebook Examples - -- [Deploy a classifier trained on the Adult Census Dataset](../SparkServing%20-%20Deploying%20a%20Classifier) -- More coming soon! - -### Spark Serving Hello World - -```python -import synapse.ml -import pyspark -from pyspark.sql.functions import udf, col, length -from pyspark.sql.types import * - -df = spark.readStream.server() \ - .address("localhost", 8888, "my_api") \ - .load() \ - .parseRequest(StructType().add("foo", StringType()).add("bar", IntegerType())) - -replies = df.withColumn("fooLength", length(col("foo")))\ - .makeReply("fooLength") - -server = replies\ - .writeStream \ - .server() \ - .replyTo("my_api") \ - .queryName("my_query") \ - .option("checkpointLocation", "file:///path/to/checkpoints") \ - .start() -``` - -### Deploying a Deep Network with the CNTKModel - -```python -import synapse.ml -from synapse.ml.cntk import CNTKModel -import pyspark -from pyspark.sql.functions import udf, col - -df = spark.readStream.server() \ - .address("localhost", 8888, "my_api") - .load() - .parseRequest() - -# See notebook examples for how to create and save several -# examples of CNTK models -network = CNTKModel.load("file:///path/to/my_cntkmodel.mml") - -transformed_df = network.transform(df).makeReply() - -server = transformed_df \ - .writeStream \ - .server() \ - .replyTo("my_api") \ - .queryName("my_query") \ - .option("checkpointLocation", "file:///path/to/checkpoints") \ - .start() -``` - -## Architecture - -Spark Serving adds special streaming sources and sinks to turn any -structured streaming job into a web service. Spark Serving comes -with two deployment options that vary based on what form of load balancing -is being used. - -In brief you can use: -`spark.readStream.server()`: For head node load balanced services -`spark.readStream.distributedServer()`: For custom load balanced services -`spark.readStream.continuousServer()`: For a custom load balanced, submillisecond-latency continuous server - -to create the various different serving dataframes and use the equivalent statements after `df.writeStream` -for replying to the web requests. - -### Head Node Load Balanced - -You can deploy head node load balancing with the `HTTPSource` and -`HTTPSink` classes. This mode spins up a queue on the head node, -distributes work across partitions, then collects response data back to -the head node. All HTTP requests are kept and replied to on the head -node. In both python and Scala these classes can be access by using -`spark.readStream.server()` after importing SynapseML. -This mode allows for more complex windowing, repartitioning, and -SQL operations. This option is also idea for rapid setup and testing, -as it doesn't require any further load balancing or network -switches. A diagram of this configuration can be seen in this image: - -

- -

-
-### Fully Distributed (Custom Load Balancer)
-
-You can configure Spark Serving for a custom load balancer using the
-`DistributedHTTPSource` and `DistributedHTTPSink` classes. This mode
-spins up a server on each executor JVM.
-In both Python and Scala, these classes can be accessed by using
-`spark.readStream.distributedServer()` after importing SynapseML.
-Each server feeds its executor's partitions in parallel. This mode is key for high
-throughput and low latency, as data doesn't need to be transferred to and from the
-head node. This deployment results in several web services that all
-route into the same Spark computation. You can deploy an external load
-balancer to unify the executors' services under a single IP address.
-Support for automatic load balancer management and deployment is
-targeted for the next release of SynapseML. A diagram of this
-configuration can be seen here:
-
-

- -

- -Queries that involve data movement across workers, such as a nontrivial -SQL join, need special consideration. The user must ensure that the -right machine replies to each request. One can route data back to the -originating partition with a broadcast join. In the future, request -routing will be automatically handled by the sink. - -### Sub-Millisecond Latency with Continuous Processing - -

- -

-
-Continuous processing can be enabled by hooking into the `HTTPSourceV2` class using:
-
-    spark.readStream.continuousServer()
-      ...
-
-In continuous serving, much like continuous streaming, you need to add a trigger to your write statement:
-
-    df.writeStream
-      .continuousServer()
-      .trigger(continuous="1 second")
-      ...
-
-The architecture is similar to the custom load balancer setup described earlier.
-More specifically, Spark will manage a web service on each partition.
-These web services can be unified using an Azure Load Balancer,
-a Kubernetes Service Endpoint, an Azure Application Gateway, or any other way to load balance a distributed service.
-It's currently the user's responsibility to unify these services as they see fit.
-In the future, we'll include options to dynamically spin up and manage a load balancer.
-
-#### Databricks Setup
-
-Databricks is a managed architecture, and it restricts
-all incoming traffic to the nodes of the cluster.
-If you create a web service in your Databricks cluster (head or worker nodes),
-your cluster can communicate with the service, but the outside world can't.
-However, in the future, Databricks will support Virtual Network Injection, so this problem won't arise.
-In the meantime, you must use SSH tunneling to forward the services to one or more gateway machines.
-A gateway can be any machine that accepts SSH traffic and requests.
-We have included settings to automatically configure this SSH tunneling for convenience.
-
-##### Linux Gateway Setup - Azure
-
-1. [Create a Linux VM using SSH](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/quick-create-portal)
-2. [Open ports 8000-9999 from the Azure portal](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/nsg-quickstart-portal)
-3. Open the port on the firewall on the VM
-   ```bash
-   firewall-cmd --zone=public --add-port=8000-10000/tcp --permanent
-   firewall-cmd --reload
-   echo "GatewayPorts yes" >> /etc/ssh/sshd_config
-   service ssh --full-restart
-   ```
-4. Add your private key to a private container in [Azure Storage Blob](https://docs.microsoft.com/en-us/azure/storage/common/storage-quickstart-create-account?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&tabs=portal).
-5. Generate a SAS link for your key and save it.
-6. Include the following parameters on your reader to configure the SSH tunneling:
-       serving_inputs = (spark.readStream.continuousServer()
-         .option("numPartitions", 1)
-         .option("forwarding.enabled", True)  # enable ssh forwarding to a gateway machine
-         .option("forwarding.username", "username")
-         .option("forwarding.sshHost", "ip or dns")
-         .option("forwarding.keySas", "SAS url from the previous step")
-         .address("localhost", 8904, "my_api")
-         .load())
-
-This setup makes your service require an extra network hop, which affects latency.
-It's important to pick a gateway that has good connectivity to your Spark cluster.
-For best performance and ease of configuration, we suggest using Spark Serving
-on an open cluster environment such as Kubernetes, Mesos, or Azure Batch.
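-
-Putting this section's pieces together, the tunneled reader from step 6 pairs with the
-continuous write trigger shown above roughly as follows. This is only a sketch: the
-gateway host, SAS URL, port, request schema, and checkpoint path are placeholders, and
-it assumes the `parseRequest`/`makeReply` helpers from the Hello World example also
-apply to the continuous server.
-
-```python
-from pyspark.sql.functions import col, length
-from pyspark.sql.types import StructType, StringType
-
-# Placeholder request schema with a single string field
-schema = StructType().add("foo", StringType())
-
-serving_inputs = (spark.readStream.continuousServer()
-    .option("numPartitions", 1)
-    .option("forwarding.enabled", True)  # tunnel through the gateway machine
-    .option("forwarding.username", "username")
-    .option("forwarding.sshHost", "ip or dns")
-    .option("forwarding.keySas", "SAS url from the previous step")
-    .address("localhost", 8904, "my_api")
-    .load()
-    .parseRequest(schema))
-
-# Trivial example logic: reply with the length of the "foo" field
-replies = serving_inputs.withColumn("fooLength", length(col("foo"))).makeReply("fooLength")
-
-server = (replies.writeStream
-    .continuousServer()
-    .trigger(continuous="1 second")  # continuous trigger, as described above
-    .replyTo("my_api")
-    .queryName("my_query")
-    .option("checkpointLocation", "file:///path/to/checkpoints")
-    .start())
-```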
- - -## Parameters - -| Parameter Name | Description | Necessary | Default Value | Applicable When | -| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | ------------- | ----------------------------------------------------------------------------------------------------- | -| host | The host to spin up a server on | Yes | | | -| port | The starting port when creating the web services. Web services will increment this port several times to find an open port. In the future, the flexibility of this param will be expanded | yes | | | -| name | The Path of the api a user would call. The format is `hostname:port/name` | yes | | | -| forwarding.enabled | Whether to forward the services to a gateway machine | no | false | When you need to forward services out of a protected network. Only Supported for Continuous Serving. | -| forwarding.username | the username to connect to on the remote host | no | | | -| forwarding.sshport | the port to ssh connect to | no | 22 | | -| forwarding.sshHost | the host of the gateway machine | no | | | -| forwarding.keySas | A Secure access link that can be used to automatically download the required ssh private key | no | | Sometimes more convenient than a directory | -| forwarding.keyDir | A directory on the machines holding the private key | no | "~/.ssh" | Useful if you can't send keys over the wire securely | diff --git a/website/docs/features/vw/about.md b/website/docs/features/vw/about.md deleted file mode 100644 index ac0f56ff2f..0000000000 --- a/website/docs/features/vw/about.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: VW -hide_title: true -sidebar_label: About ---- - - - -# VowpalWabbit on Apache Spark - -### Overview - -[VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) (VW) is a machine learning system that -pushes the frontier of machine learning with techniques such as online, hashing, allreduce, -reductions, learning2search, active, and interactive learning. -VowpalWabbit is a popular choice in ad-tech due to its speed and cost efficacy. -Furthermore it includes many advances in the area of reinforcement learning (for instance, contextual bandits). - -### Advantages of VowpalWabbit - -- **Composability**: VowpalWabbit models can be incorporated into existing - SparkML Pipelines, and used for batch, streaming, and serving workloads. -- **Small footprint**: VowpalWabbit memory consumption is rather small and can be controlled through '-b 18' or the setNumBits method. - This option determines the size of the model (2^18 * some_constant, in this example). -- **Feature Interactions**: Feature interactions (quadratic, cubic,... terms, for instance) are created on-the-fly within the most inner - learning loop in VW. - Interactions can be specified by using the -q parameter and passing the first character of the namespaces that should be _interacted_. - The VW namespace concept is mapped to Spark using columns. The column name is used as namespace name, thus one sparse or dense Spark ML vector corresponds to the features of a single namespace. - To allow passing of multiple namespaces, the VW estimator (classifier or regression) exposes a property called _additionalFeatures_. Users can pass an array of column names. -- **Simple deployment**: all native dependencies are packaged into a single jars (including boost and zlib). 
-- **VowpalWabbit command line arguments**: users can pass VW command line arguments to control the learning process. -- **VowpalWabbit binary models** To start the training, users can supply an initial VowpalWabbit model, which can be produced outside of - VW on Spark, by invoking _setInitialModel_ and passing the model as a byte array. Similarly, users can access the binary model by invoking - _getModel_ on the trained model object. -- **Java-based hashing** VW's version of murmur-hash was reimplemented in Java (praise to [JackDoe](https://github.com/jackdoe)) - providing a major performance improvement compared to passing input strings through JNI and hashing in C++. -- **Cross language** VowpalWabbit on Spark is available on Spark, PySpark, and SparklyR. - -### Limitations of VowpalWabbit on Spark - -- **Linux and CentOS only** The native binaries included with the published jar are built Linux and CentOS only. - We're working on creating a more portable version by statically linking Boost and lib C++. -- **Limited Parsing** Features implemented in the native VW parser (ngrams, skips, ...) are not yet implemented in - VowpalWabbitFeaturizer. - -### Usage - -In PySpark, you can run the `VowpalWabbitClassifier` via: - -```python -from synapse.ml.vw import VowpalWabbitClassifier -model = (VowpalWabbitClassifier(numPasses=5, args="--holdout_off --loss_function logistic") - .fit(train)) -``` - -Similarly, you can run the `VowpalWabbitRegressor`: - -```python -from synapse.ml.vw import VowpalWabbitRegressor -model = (VowpalWabbitRegressor(args="--holdout_off --loss_function quantile -q :: -l 0.1") - .fit(train)) -``` - -You can pass command line parameters to VW via the args parameter, as documented in the [VW Wiki](https://github.com/vowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments). - -For an end to end application, check out the VowpalWabbit [notebook -example](../Vowpal%20Wabbit%20-%20Overview). - -### Hyper-parameter tuning - -- Common parameters can also be set through methods enabling the use of SparkMLs ParamGridBuilder and CrossValidator ([example](https://github.com/Azure/mmlspark/blob/master/src/test/scala/com/microsoft/azure/synapse/ml/vw/VerifyVowpalWabbitClassifier.scala#L29)). If - the same parameters are passed through the _args_ property (for instance, args="-l 0.2" and setLearningRate(0.5)) the _args_ value will - take precedence. - parameter -* learningRate -* numPasses -* numBits -* l1 -* l2 -* powerT -* interactions -* ignoreNamespaces - -### Architecture - -VowpalWabbit on Spark uses an optimized JNI layer to efficiently support Spark. -Java bindings can be found in the [VW GitHub repo](https://github.com/VowpalWabbit/vowpal_wabbit/blob/master/java/src/main/c%2B%2B/jni_spark_vw_generated.h). - -VW's command line tool uses a two-thread architecture (1x parsing/hashing, 1x learning) for learning and inference. -To fluently embed VW into the Spark ML eco system, the following adaptions were made: - -- VW classifier/regressor operates on Spark's dense/sparse vectors - - Pro: best composability with existing Spark ML components. - - Cons: due to type restrictions (for example, feature indices are Java integers), the maximum model size is limited to 30 bits. One could overcome this restriction by adding type support to the classifier/regressor to directly operate on input features (strings, int, double, ...). 
-
-- VW hashing is separated out into the [VowpalWabbitFeaturizer](https://github.com/Azure/mmlspark/blob/master/src/test/scala/com/microsoft/azure/synapse/ml/vw/VerifyVowpalWabbitFeaturizer.scala#L34) transformer. It supports mapping a Spark DataFrame schema into VW's namespaces and sparse
-features.
-  - Pro: featurization can be scaled to many nodes and scales independently of distributed learning.
-  - Pro: hashed features can be cached and efficiently reused when performing hyper-parameter sweeps.
-  - Pro: featurization can be used for other Spark ML learning algorithms.
-  - Cons: due to type restrictions (for instance, sparse indices are Java integers), the hash space is limited to 30 bits.
-
-- VW multi-pass training can be enabled using the '--passes 4' argument or the setNumPasses method. The cache file is automatically named.
-  - Pro: simplified usage.
-  - Pro: certain algorithms (for example, l-bfgs) require a cache file when running in multi-pass mode.
-  - Cons: since the cache file resides in the Java temp directory, a bottleneck may arise depending on your node's I/O performance and the location of the temp directory.
-- VW distributed training is transparently set up and can be controlled through the input DataFrame's number of partitions.
-  Similar to LightGBM, all training instances must be running at the same time, so the maximum parallelism is restricted by the
-  number of executors available in the cluster. Under the hood, VW's built-in spanning tree functionality is used to coordinate _allreduce_.
-  Required parameters are automatically determined and supplied to VW. The spanning tree coordination process is run on the driver node.
-  - Pro: seamless parallelization.
-  - Cons: currently, barrier execution mode isn't implemented, so if one node crashes the complete job needs to be manually restarted.
diff --git a/website/docs/getting_started/first_example.md b/website/docs/getting_started/first_example.md
deleted file mode 100644
index 8d73dda6bf..0000000000
--- a/website/docs/getting_started/first_example.md
+++ /dev/null
@@ -1,66 +0,0 @@
----
-title: First Example
-description: Build machine learning applications using Microsoft Machine Learning for Apache Spark
----
-
-## Prerequisites
-
-- If you don't have an Azure subscription, [create a free account before you begin](https://azure.microsoft.com/free/).
-- [Azure Synapse Analytics workspace](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-create-workspace) with an Azure Data Lake Storage Gen2 storage account configured as the default storage. You need to be the _Storage Blob Data Contributor_ of the Data Lake Storage Gen2 file system that you work with.
-- A Spark pool in your Azure Synapse Analytics workspace. For details, see [Create a Spark pool in Azure Synapse](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-analyze-spark).
-- The pre-configuration steps described in the tutorial [Configure Cognitive Services in Azure Synapse](https://docs.microsoft.com/en-us/azure/synapse-analytics/machine-learning/tutorial-configure-cognitive-services-synapse).
-
-## Get started
-
-To get started, import synapse.ml and configure your service keys.
- -```python -import synapse.ml -from synapse.ml.cognitive import * -from notebookutils import mssparkutils - -# A general Cognitive Services key for Text Analytics and Computer Vision (or use separate keys that belong to each service) -cognitive_service_key = mssparkutils.credentials.getSecret("ADD_YOUR_KEY_VAULT_NAME", "ADD_YOUR_SERVICE_KEY","ADD_YOUR_KEY_VAULT_LINKED_SERVICE_NAME") -# A Bing Search v7 subscription key -bingsearch_service_key = mssparkutils.credentials.getSecret("ADD_YOUR_KEY_VAULT_NAME", "ADD_YOUR_BING_SEARCH_KEY","ADD_YOUR_KEY_VAULT_LINKED_SERVICE_NAME") -# An Anomaly Dectector subscription key -anomalydetector_key = mssparkutils.credentials.getSecret("ADD_YOUR_KEY_VAULT_NAME", "ADD_YOUR_ANOMALY_KEY","ADD_YOUR_KEY_VAULT_LINKED_SERVICE_NAME") - - -``` - -## Text analytics sample - -The [Text Analytics](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/) service provides several algorithms for extracting intelligent insights from text. For example, we can find the sentiment of given input text. The service will return a score between 0.0 and 1.0 where low scores indicate negative sentiment and high score indicates positive sentiment. This sample uses three simple sentences and returns the sentiment for each. - -```python -from pyspark.sql.functions import col - -# Create a dataframe that's tied to it's column names -df_sentences = spark.createDataFrame([ - ("I'm so happy today, it's sunny!", "en-US"), - ("this is a dog", "en-US"),s - ("I'm frustrated by this rush hour traffic!", "en-US") -], ["text", "language"]) - -# Run the Text Analytics service with options -sentiment = (TextSentiment() - .setTextCol("text") - .setLocation("eastasia") # Set the location of your cognitive service - .setSubscriptionKey(cognitive_service_key) - .setOutputCol("sentiment") - .setErrorCol("error") - .setLanguageCol("language")) - -# Show the results of your text query in a table format - -display(sentiment.transform(df_sentences).select("text", col("sentiment")[0].getItem("sentiment").alias("sentiment"))) -``` - -### Expected results - -| text | sentiment | -| ------------------------------------------ | --------- | -| I'm frustrated by this rush hour traffic! | negative | -| this is a dog | neutral | -| I'm so happy today, it's sunny! | positive | diff --git a/website/docs/getting_started/first_model.md b/website/docs/getting_started/first_model.md deleted file mode 100644 index b11797600f..0000000000 --- a/website/docs/getting_started/first_model.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: First Model -hide_title: true -description: First Model ---- - -# Your First Model - -In this example, we construct a basic classification model to predict a person's -income level given demographics data such as education level or marital status. -We also learn how to use Jupyter notebooks for developing and running the model. - -### Prerequisites - -- You've installed the SynapseML package, either as a Docker image or on a - Spark cluster, -- You have basic knowledge of Python language, -- You have basic understanding of machine learning concepts: training, testing, - classification. - -### Working with Jupyter Notebooks - -Once you have the SynapseML package installed, open Jupyter notebooks folder in -your web browser - -- Local Docker: `http://localhost:8888` -- Spark cluster: `https:///jupyter` - -Create a new notebook by selecting "New" -> "PySpark3". Let's also give the -notebook a friendlier name, _Adult Census Income Prediction_, by clicking the -title. 
- -### Importing Packages and Starting the Spark Application - -At this point, the notebook isn't running a Spark application yet. In the -first cell, let's import some needed packages - -```python -import numpy as np -import pandas as pd -``` - -Click the "run cell" button on the toolbar to start the application. After a -few moments, you should see the message "SparkSession available as 'spark'". -Now you're ready to start coding and running your application. - -### Reading in Data - -In a typical Spark application, you'd likely work with huge datasets stored on -distributed file system, such as HDFS. However, to keep this tutorial simple -and quick, we'll copy over a small dataset from a URL. We then read this data -into memory using Pandas CSV reader, and distribute the data as a Spark -DataFrame. Finally, we show the first 5 rows of the dataset. Copy the following -code to the next cell in your notebook, and run the cell. - -```python -dataFile = "AdultCensusIncome.csv" -import os, urllib -if not os.path.isfile(dataFile): - urllib.request.urlretrieve("https://mmlspark.azureedge.net/datasets/" + dataFile, dataFile) -data = spark.createDataFrame(pd.read_csv(dataFile, dtype={" hours-per-week": np.float64})) -data.show(5) -``` - -### Selecting Features and Splitting Data to Train and Test Sets - -Next, select some features to use in our model. You can try out different -features, but you should include `" income"` as it is the label column the model -is trying to predict. We then split the data into a `train` and `test` sets. - -```python -data = data.select([" education", " marital-status", " hours-per-week", " income"]) -train, test = data.randomSplit([0.75, 0.25], seed=123) -``` - -### Training a Model - -To train the classifier model, we use the `synapse.ml.TrainClassifier` class. It -takes in training data and a base SparkML classifier, maps the data into the -format expected by the base classifier algorithm, and fits a model. - -```python -from synapse.ml.train import TrainClassifier -from pyspark.ml.classification import LogisticRegression -model = TrainClassifier(model=LogisticRegression(), labelCol=" income").fit(train) -``` - -`TrainClassifier` implicitly handles string-valued columns and -binarizes the label column. - -### Scoring and Evaluating the Model - -Finally, let's score the model against the test set, and use -`synapse.ml.ComputeModelStatistics` class to compute metrics—accuracy, AUC, -precision, recall—from the scored data. - -```python -from synapse.ml.train import ComputeModelStatistics -prediction = model.transform(test) -metrics = ComputeModelStatistics().transform(prediction) -metrics.select('accuracy').show() -``` - -And that's it: you've build your first machine learning model using the SynapseML -package. 
For help on SynapseML classes and methods, you can use Python's help() -function, for example - -```python -help(synapse.ml.train.TrainClassifier) -``` - -Next, view our other tutorials to learn how to - -- Tune model parameters to find the best model -- Use SparkML pipelines to build a more complex model -- Use deep neural networks for image classification -- Use text analytics for document classification diff --git a/website/docs/mlflow/autologging.md b/website/docs/mlflow/autologging.md deleted file mode 100644 index 76149e72fb..0000000000 --- a/website/docs/mlflow/autologging.md +++ /dev/null @@ -1,84 +0,0 @@ ---- -title: SynapseML Autologging -description: SynapseML autologging ---- - -## Automatic Logging - -[MLflow automatic logging](https://www.mlflow.org/docs/latest/tracking.html#automatic-logging) allows you to log metrics, parameters, and models without the need for explicit log statements. -SynapseML supports autologging for every model in the library. - -To enable autologging for SynapseML: -1. Download this customized [log_model_allowlist file](https://mmlspark.blob.core.windows.net/publicwasb/log_model_allowlist.txt) and put it at a place that your code has access to. -For example: -* In Synapse `wasb://@.blob.core.windows.net/PATH_TO_YOUR/log_model_allowlist.txt` -* In Databricks `/dbfs/FileStore/PATH_TO_YOUR/log_model_allowlist.txt`. -2. Set spark configuration `spark.mlflow.pysparkml.autolog.logModelAllowlistFile` to the path of your `log_model_allowlist.txt` file. -3. Call `mlflow.pyspark.ml.autolog()` before your training code to enable autologging for all supported models. - -Note: -1. If you want to support autologging of PySpark models not present in the log_model_allowlist file, you can add such models to the file. -2. If you've enabled autologging, then don't write explicit `with mlflow.start_run()` as it might cause multiple runs for one single model or one run for multiple models. - - -## Configuration process in Databricks as an example - -1. Install latest MLflow via `%pip install mlflow -u` -2. Upload your customized `log_model_allowlist.txt` file to dbfs by clicking File/Upload Data button on Databricks UI. -3. Set Cluster Spark configuration following [this documentation](https://docs.microsoft.com/en-us/azure/databricks/clusters/configure#spark-configuration) -``` -spark.mlflow.pysparkml.autolog.logModelAllowlistFile /dbfs/FileStore/PATH_TO_YOUR/log_model_allowlist.txt -``` -4. Run the following line before your training code executes. -``` -mlflow.pyspark.ml.autolog() -``` -You can customize how autologging works by supplying appropriate [parameters](https://www.mlflow.org/docs/latest/python_api/mlflow.pyspark.ml.html#mlflow.pyspark.ml.autolog). - -5. To find your experiment's results via the `Experiments` tab of the MLFlow UI. 
- - -## Example for ConditionalKNNModel -```python -from pyspark.ml.linalg import Vectors -from synapse.ml.nn import * - -df = spark.createDataFrame([ - (Vectors.dense(2.0,2.0,2.0), "foo", 1), - (Vectors.dense(2.0,2.0,4.0), "foo", 3), - (Vectors.dense(2.0,2.0,6.0), "foo", 4), - (Vectors.dense(2.0,2.0,8.0), "foo", 3), - (Vectors.dense(2.0,2.0,10.0), "foo", 1), - (Vectors.dense(2.0,2.0,12.0), "foo", 2), - (Vectors.dense(2.0,2.0,14.0), "foo", 0), - (Vectors.dense(2.0,2.0,16.0), "foo", 1), - (Vectors.dense(2.0,2.0,18.0), "foo", 3), - (Vectors.dense(2.0,2.0,20.0), "foo", 0), - (Vectors.dense(2.0,4.0,2.0), "foo", 2), - (Vectors.dense(2.0,4.0,4.0), "foo", 4), - (Vectors.dense(2.0,4.0,6.0), "foo", 2), - (Vectors.dense(2.0,4.0,8.0), "foo", 2), - (Vectors.dense(2.0,4.0,10.0), "foo", 4), - (Vectors.dense(2.0,4.0,12.0), "foo", 3), - (Vectors.dense(2.0,4.0,14.0), "foo", 2), - (Vectors.dense(2.0,4.0,16.0), "foo", 1), - (Vectors.dense(2.0,4.0,18.0), "foo", 4), - (Vectors.dense(2.0,4.0,20.0), "foo", 4) -], ["features","values","labels"]) - -cnn = (ConditionalKNN().setOutputCol("prediction")) -cnnm = cnn.fit(df) - -test_df = spark.createDataFrame([ - (Vectors.dense(2.0,2.0,2.0), "foo", 1, [0, 1]), - (Vectors.dense(2.0,2.0,4.0), "foo", 4, [0, 1]), - (Vectors.dense(2.0,2.0,6.0), "foo", 2, [0, 1]), - (Vectors.dense(2.0,2.0,8.0), "foo", 4, [0, 1]), - (Vectors.dense(2.0,2.0,10.0), "foo", 4, [0, 1]) -], ["features","values","labels","conditioner"]) - -display(cnnm.transform(test_df)) -``` - -This code should log one run with a ConditionalKNNModel artifact and its parameters. - diff --git a/website/docs/mlflow/examples.md b/website/docs/mlflow/examples.md deleted file mode 100644 index f1745b3aeb..0000000000 --- a/website/docs/mlflow/examples.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Examples -description: Examples using SynapseML with MLflow ---- - -## Prerequisites - -If you're using Databricks, install mlflow with this command: -``` -# run this so that mlflow is installed on workers besides driver -%pip install mlflow -``` - -Install SynapseML based on the [installation guidance](../getting_started/installation.md). 
- -## API Reference - -* [mlflow.spark.save_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.save_model) -* [mlflow.spark.log_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.log_model) -* [mlflow.spark.load_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.load_model) -* [mlflow.log_metric](https://mlflow.org/docs/latest/python_api/mlflow.html#mlflow.log_metric) - -## LightGBMClassificationModel - -```python -import mlflow -from synapse.ml.featurize import Featurize -from synapse.ml.lightgbm import * -from synapse.ml.train import ComputeModelStatistics - -with mlflow.start_run(): - - feature_columns = ["Number of times pregnant","Plasma glucose concentration a 2 hours in an oral glucose tolerance test", - "Diastolic blood pressure (mm Hg)","Triceps skin fold thickness (mm)","2-Hour serum insulin (mu U/ml)", - "Body mass index (weight in kg/(height in m)^2)","Diabetes pedigree function","Age (years)"] - df = spark.createDataFrame([ - (0,131,66,40,0,34.3,0.196,22,1), - (7,194,68,28,0,35.9,0.745,41,1), - (3,139,54,0,0,25.6,0.402,22,1), - (6,134,70,23,130,35.4,0.542,29,1), - (9,124,70,33,402,35.4,0.282,34,0), - (0,93,100,39,72,43.4,1.021,35,0), - (4,110,76,20,100,28.4,0.118,27,0), - (2,127,58,24,275,27.7,1.6,25,0), - (0,104,64,37,64,33.6,0.51,22,1), - (2,120,54,0,0,26.8,0.455,27,0), - (7,178,84,0,0,39.9,0.331,41,1), - (2,88,58,26,16,28.4,0.766,22,0), - (1,91,64,24,0,29.2,0.192,21,0), - (10,101,76,48,180,32.9,0.171,63,0), - (5,73,60,0,0,26.8,0.268,27,0), - (3,158,70,30,328,35.5,0.344,35,1), - (2,105,75,0,0,23.3,0.56,53,0), - (12,84,72,31,0,29.7,0.297,46,1), - (9,119,80,35,0,29.0,0.263,29,1), - (6,93,50,30,64,28.7,0.356,23,0), - (1,126,60,0,0,30.1,0.349,47,1) - ], feature_columns+["labels"]).repartition(2) - - - featurize = (Featurize() - .setOutputCol("features") - .setInputCols(feature_columns) - .setOneHotEncodeCategoricals(True) - .setNumFeatures(4096)) - - df_trans = featurize.fit(df).transform(df) - - lightgbm_classifier = (LightGBMClassifier() - .setFeaturesCol("features") - .setRawPredictionCol("rawPrediction") - .setDefaultListenPort(12402) - .setNumLeaves(5) - .setNumIterations(10) - .setObjective("binary") - .setLabelCol("labels") - .setLeafPredictionCol("leafPrediction") - .setFeaturesShapCol("featuresShap")) - - lightgbm_model = lightgbm_classifier.fit(df_trans) - - # Use mlflow.spark.save_model to save the model to your path - mlflow.spark.save_model(lightgbm_model, "lightgbm_model") - # Use mlflow.spark.log_model to log the model if you have a connected mlflow service - mlflow.spark.log_model(lightgbm_model, "lightgbm_model") - - # Use mlflow.pyfunc.load_model to load model back as PyFuncModel and apply predict - prediction = mlflow.pyfunc.load_model("lightgbm_model").predict(df_trans.toPandas()) - prediction = list(map(str, prediction)) - mlflow.log_param("prediction", ",".join(prediction)) - - # Use mlflow.spark.load_model to load model back as PipelineModel and apply transform - predictions = mlflow.spark.load_model("lightgbm_model").transform(df_trans) - metrics = ComputeModelStatistics(evaluationMetric="classification", labelCol='labels', scoredLabelsCol='prediction').transform(predictions).collect() - mlflow.log_metric("accuracy", metrics[0]['accuracy']) -``` - -## Cognitive Services - -```python -import mlflow -from synapse.ml.cognitive import * - -with mlflow.start_run(): - - text_key = "YOUR_COG_SERVICE_SUBSCRIPTION_KEY" - df = spark.createDataFrame([ - ("I am so happy 
today, its sunny!", "en-US"), - ("I am frustrated by this rush hour traffic", "en-US"), - ("The cognitive services on spark aint bad", "en-US"), - ], ["text", "language"]) - - sentiment_model = (TextSentiment() - .setSubscriptionKey(text_key) - .setLocation("eastus") - .setTextCol("text") - .setOutputCol("prediction") - .setErrorCol("error") - .setLanguageCol("language")) - - display(sentiment_model.transform(df)) - - mlflow.spark.save_model(sentiment_model, "sentiment_model") - mlflow.spark.log_model(sentiment_model, "sentiment_model") - - output_df = mlflow.spark.load_model("sentiment_model").transform(df) - display(output_df) - - # In order to call the predict function successfully you need to specify the - # outputCol name as `prediction` - prediction = mlflow.pyfunc.load_model("sentiment_model").predict(df.toPandas()) - prediction = list(map(str, prediction)) - mlflow.log_param("prediction", ",".join(prediction)) -``` diff --git a/website/docs/mlflow/installation.md b/website/docs/mlflow/installation.md deleted file mode 100644 index ac67a23724..0000000000 --- a/website/docs/mlflow/installation.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: Mlflow Installation -description: install Mlflow on different environments ---- - -## Installation - -Install MLflow from PyPI via `pip install mlflow` - -MLflow requires `conda` to be on the `PATH` for the projects feature. - -Learn more about MLflow on their [GitHub page](https://github.com/mlflow/mlflow). - - -### Install Mlflow on Databricks - -If you're using Databricks, install Mlflow with this command: -``` -# run this so that Mlflow is installed on workers besides driver -%pip install mlflow -``` - -### Install Mlflow on Synapse -To log model with Mlflow, you need to create an Azure Machine Learning workspace and link it with your Synapse workspace. - -#### Create Azure Machine Learning Workspace - -Follow this document to create [AML workspace](https://learn.microsoft.com/en-us/azure/machine-learning/quickstart-create-resources#create-the-workspace). You don't need to create compute instance and compute clusters. - -#### Create an Azure ML Linked Service - - - -- In the Synapse workspace, go to **Manage** -> **External connections** -> **Linked services**, select **+ New** -- Select the workspace you want to log the model in and create the linked service. You need the **name of the linked service** to set up connection. - -#### Auth Synapse Workspace - - -- Go to the **Azure Machine Learning workspace** resource -> **access control (IAM)** -> **Role assignment**, select **+ Add**, choose **Add role assignment** -- Choose **contributor**, select next -- In members page, choose **Managed identity**, select **+ select members**. Under **managed identity**, choose Synapse workspace. Under **Select**, choose the workspace you run your experiment on. Click **Select**, **Review + assign**. - - -#### Use Mlflow in Synapse -Set up connection -```python - -#AML workspace authentication using linked service -from notebookutils.mssparkutils import azureML -linked_service_name = "YourLinkedServiceName" -ws = azureML.getWorkspace(linked_service_name) -mlflow.set_tracking_uri(ws.get_mlflow_tracking_uri()) - -#Set MLflow experiment.  -experiment_name = "synapse-mlflow-experiment" -mlflow.set_experiment(experiment_name)  -``` - -#### Alternative (Don't need Linked Service) -Once you create an AML workspace, you can obtain the MLflow tracking URL directly. The AML start page is where you can locate the MLflow tracking URL. 
- -You can set it tracking url with -```python -mlflow.set_tracking_uri("your mlflow tracking url") -``` diff --git a/website/docs/mlflow/introduction.md b/website/docs/mlflow/introduction.md deleted file mode 100644 index 8ed1077fbd..0000000000 --- a/website/docs/mlflow/introduction.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Introduction -description: MLflow support of SynapseML ---- - -## What is MLflow - -[MLflow](https://github.com/mlflow/mlflow) is a platform to streamline machine learning development, including tracking experiments, packaging code into reproducible runs, and sharing and deploying models. MLflow offers a set of lightweight APIs that can be used with any existing machine learning application or library, for instance TensorFlow, PyTorch, XGBoost, etc. It runs wherever you currently run ML code, for example, in notebooks, standalone applications or the cloud. MLflow's current components are: - -* [MLflow Tracking](https://mlflow.org/docs/latest/tracking.html): An API to log parameters, code, and results in machine learning experiments and compare them using an interactive UI. -* [MLflow Projects](https://mlflow.org/docs/latest/projects.html): A code packaging format for reproducible runs using Conda and Docker, so you can share your ML code with others. -* [MLflow Models](https://mlflow.org/docs/latest/models.html): A model packaging format and tools that let you easily deploy the same model from any ML library for both batch and real-time scoring. It supports platforms such as Docker, Apache Spark, Azure ML and AWS SageMaker. -* [MLflow Model Registry](https://mlflow.org/docs/latest/model-registry.html): A centralized model store, set of APIs, and UI, to collaboratively manage the full lifecycle of MLflow Models. diff --git a/website/docs/reference/contributing_guide.md b/website/docs/reference/contributing_guide.md deleted file mode 100644 index 341edbd548..0000000000 --- a/website/docs/reference/contributing_guide.md +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Contributing Guide -hide_title: true -sidebar_label: Contributing Guide -description: Contributing Guide ---- - -## Interested in contributing to SynapseML? We're excited to work with you. - -### You can contribute in many ways: - -- Use the library and give feedback: report bugs, request features. -- Add sample Jupyter notebooks, Python or Scala code examples, documentation - pages. -- Fix bugs and issues. -- Add new features, such as data transformations or machine learning algorithms. -- Review pull requests from other contributors. - -### How to contribute? - -You can give feedback, report bugs and request new features anytime by opening -an issue. Also, you can up-vote or comment on existing issues. - -If you want to add code, examples or documentation to the repository, follow -this process: - -#### Propose a contribution - -- Preferably, get started by tackling existing issues to get yourself acquainted - with the library source and the process. -- To ensure your contribution is a good fit and doesn't duplicate - on-going work, open an issue or comment on an existing issue. In it, discuss - your contribution and design. -- Any algorithm you're planning to contribute should be well known and accepted - for production use, and backed by research papers. -- Algorithms should be highly scalable and suitable for massive datasets. -- All contributions need to comply with the MIT License. Contributors external - to Microsoft need to sign CLA. 
- -#### Implement your contribution - -- Fork the SynapseML repository. -- Implement your algorithm in Scala, using our wrapper generation mechanism to - produce PySpark bindings. -- Use SparkML `PipelineStage`s so your algorithm can be used as a part of - pipeline. -- For parameters use `MMLParam`s. -- Implement model saving and loading by extending SparkML `MLReadable`. -- Use good Scala style. -- Binary dependencies should be on Maven Central. -- See this [pull request](https://github.com/Microsoft/SynapseML/pull/22) for an - example contribution. - -#### Implement tests - -- Set up build environment. Use a Linux machine or VM (we use Ubuntu, but other - distros should work too). -- Test your code locally. -- Add tests using ScalaTests. Unit tests are required. -- A sample notebook is required as an end-to-end test. - -#### Implement documentation - -- Add a [sample Jupyter notebook](https://github.com/microsoft/SynapseML/tree/master/notebooks) that shows the intended use - case of your algorithm, with instructions in step-by-step manner. (The same - notebook could be used for testing the code.) -- Add in-line ScalaDoc comments to your source code, to generate the [API - reference documentation](https://mmlspark.azureedge.net/docs/pyspark/) - -#### Open a pull request - -- In most cases, you should squash your commits into one. -- Open a pull request, and link it to the discussion issue you created earlier. -- A SynapseML core team member will trigger a build to test your changes. -- Fix any build failures. (The pull request will have comments from the build - with useful links.) -- Wait for code reviews from core team members and others. -- Fix issues found in code review and reiterate. - -#### Build and check-in - -- Wait for a core team member to merge your code in. -- Your feature will be available through a Docker image and script installation - in the next release, which typically happens around once a month. You can try - out your features sooner by using build artifacts for the version that has - your changes merged in (such versions end with a `.devN`). - -If in doubt about how to do something, see how it was done in existing code or -pull requests, and don't hesitate to ask. diff --git a/website/docs/reference/docker.md b/website/docs/reference/docker.md deleted file mode 100644 index 1d62b4074a..0000000000 --- a/website/docs/reference/docker.md +++ /dev/null @@ -1,292 +0,0 @@ ---- -title: Using the SynapseML Docker Image -sidebar_label: Docker Image -description: Using the SynapseML Docker Image ---- - -## Quickstart: install and run the Docker image - -Begin by installing [Docker for your OS][docker-products]. Then, to get the -SynapseML image and run it, open a terminal (PowerShell/cmd on Windows) and run - -```bash -docker run -it -p 8888:8888 mcr.microsoft.com/mmlspark/release -``` - -In your browser, go to —you'll see the Docker image -EULA, and once you accept it, the Jupyter notebook interface will start. To -skip this step, add `-e ACCEPT_EULA=yes` to the Docker command: - -```bash -docker run -it -p 8888:8888 -e ACCEPT_EULA=y mcr.microsoft.com/mmlspark/release -``` - -You can now select one of the sample notebooks and run it, or create your own. - -> Note: The EULA is needed only for running the SynapseML Docker image; the -> source code is released under the MIT license (see the [LICENSE](https://github.com/microsoft/SynapseML/blob/master/LICENSE) -> file). 
- -## Running a specific version - -In the preceding docker command, `mcr.microsoft.com/mmlspark/release` specifies the project and image name that you -want to run. There's another component implicit here: the _tsag_ (= -version) that you want to use. Specifying it explicitly looks like -`mcr.microsoft.com/mmlspark/release:0.11.1` for the `0.11.1` tag. - -Leaving `mcr.microsoft.com/mmlspark/release` by itself has an implicit `latest` tag, so it's -equivalent to `mcr.microsoft.com/mmlspark/release:latest`. The `latest` tag is identical to the -most recent stable SynapseML version. You can see the current [synapsemltags] on -our [Docker Hub repository][mmlspark-dockerhub]. - -## A more practical example - -The previous section had a rather simplistic command. A more complete command -that you'll probably want to use can look as follows: - -```bash -docker run -it --rm \ - -p 127.0.0.1:80:8888 \ - -v ~/myfiles:/notebooks/myfiles \ - mcr.microsoft.com/mmlspark/release:0.11.1 -``` - -In this example, backslashes are for readability; you -can enter the command as one long line if you like. In PowerShell, the `myfiles` local -path and line breaks looks a little different: - - docker run -it --rm ` - -p 127.0.0.1:80:8888 ` - -v C:\myfiles:/notebooks/myfiles ` - mcr.microsoft.com/mmlspark/release:0.11.1 - -Let's break this command and go over the meaning of each part: - -- **`-it`** - - This command uses a combination of `-i` and `-t` (which could also be specified as - `--interactive --tty`). Combining these two flags means that the - image is running interactively, which in this example means that you can see - messages that the server emits, and it also makes it possible to use - `Ctrl+C` to shut down the Jupyter notebook server. - -- **`--rm`** - - When Docker runs any image, it creates a _container_ to hold any further - filesystem data for files that were created or modified. If you ran the above - quickstart command, you can see the container that is left behind with `docker - container list -a`. You can reclaim such containers with `docker container rm - `, or reclaim all containers from stopped run with `docker container - prune`, or even more generally, reclaim all unused Docker resources with - `docker system prune`. - - Back to `--rm`: this flag tells Docker to discard the image when the image - exits, which means that any data created while - running the image is discarded when the run is done. But see the description - of the `-v` flag. - -- **`-e ACCEPT_EULA=y`** - - The `-e` flag is used to set environment variables in the running container. - In this case, we use it to bypass the EULA check. More flags can be - added for other variables, for example, you can add a `-e - MMLSPARK_JUPYTER_PORT=80` to change the port that the Jupyter server listens - to. - -- **`-p 127.0.0.1:80:8888`** - - The Jupyter server in the SynapseML image listens to port 8888, but that is - normally isolated from the actual network. Previously, we have used `-p - 8888:8888` to say that we want to map port 8888 (LHS) on our actual machine to - port 8888 (RHS) in the container. One problem with this is that `8888` might - be hard to remember, but a more serious problem is that your machine now - serves the Jupyter interface to any one on your network. 
- - This more complete example resolves these issues: we replaced `8888:8888` with - `80:8888` so HTTP port 80 goes to the container's running Jupyter (making just - work); and we also added a `127.0.0.1:` prefix to make the - Jupyter inteface available only from your own machine rather than the whole network. - - You can repeat this flag to forward additional ports similarly. For example, - you can expose some of the [Spark ports], for example: `-p 127.0.0.1:4040:4040`. - -- **`-v ~/myfiles:/notebooks/myfiles`** - - As described earlier, we're using `--rm` to remove the container when the run - exits, which is usually fine since pulling out files from these containers can - be a little complicated. Instead, we use the -v flag to map a directory from - your machine (the `~/myfiles` on the LHS) to a directory that is available - inside the running container. Any modifications to this directory that are - done by the Docker image are performed directly on the actual directory. - - The local directory follows the local filename conventions, so on - Windows you'd use a Windows-looking path. On Windows, you also need to share - the drive you want to use in the [Docker settings]. - - The path on the right side is used inside the container and it's therefore a - Linux path. The SynapseML image runs Jupyter in the `/notebooks` directory, so - it's a good place for making your files available conveniently. - - This flag can be used more than once, to make several directories available in - the running container. Both paths must be absolute, so if you want to specify - a path relatively, you can use something like `-v - $PWD/myfiles:/notebooks/myfiles`. - - With such directory sharing in place, you can create/edit notebooks, and code - in notebooks can use the shared directory for additional data, for example: - - ```python - data = spark.read.csv('myfiles/mydata.csv') - ... - model.write().overwrite().save('myfiles/myTrainedModel.mml') - ``` - -- **`mcr.microsoft.com/mmlspark/release:0.11.1`** - - Finally, this argument specifies an explicit version tag for the image that we want to - run. - -## Running the container as a server - -An alternative to running the Docker image interactively with `-it` is running -it in a "detached" mode, as a server, using the `-d` (or `--detach`) flag. -A second flag that may be useful here is `--name`, which gives a convenient -label to the running image: - -```bash -docker run -d --name my-synapseml ...flags... mcr.microsoft.com/mmlspark/release -``` - -When running in this mode, you can use - -- `docker stop my-synapseml`: to stop the image - -- `docker start my-synapseml`: to start it again - -- `docker logs my-synapseml`: to see the log output it produced - -## Running other commands in an active container - -Another useful `docker` command is `exec`, which runs a command in the context -of an _existing_ active container. To use it, you specify the container name -and the command to run. For example, with an already running detached container -named my-synapseml, you can use - -```bash -docker exec -it my-synapseml bash -``` - -to start a shell in the context of the server, roughly equivalent to starting a -terminal in the Jupyter interface. - -Other common Linux executables can be used, for example: - -```bash -docker exec -it my-synapseml top -docker exec my-synapseml ps auxw -``` - -(`ps` doesn't need `-it` since it's not an interactive command.) 
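If you didn't map a local directory with `-v`, you can still move individual files in and out of a running container with `docker cp`. The following is a minimal sketch, assuming a container named `my-synapseml`; the notebook and data file names are only illustrative.

```bash
# Copy a notebook out of the running container, and push a data file into it.
# The container name and both paths are examples, not fixed locations.
docker cp my-synapseml:/notebooks/MyNotebook.ipynb .
docker cp ./mydata.csv my-synapseml:/notebooks/mydata.csv
```

For anything beyond the occasional file, the `-v` directory mapping described earlier remains the more convenient option.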
- -These commands can be used with interactive containers too, and `--name` can be -used to make them easy to target. If you don't use `--name`, Docker assigns a -random name to the container; you can use `docker ps` to see it. You can -also get the container IDs to use instead of names. - -Remember that the command given to `docker exec` is running in the context of -the running container: you can only run executables that exist in the container, -and the run is subject to the same resource restrictions (FS/network access, -etc.) as the container. The SynapseML image is based on a rather basic Ubuntu -installation (the `ubuntu` image from Docker Hub). - -## Running other Spark executables - -`docker run` can accept another optional argument after the image name, -specifying an alternative executable to run instead of the default launcher that -fires up the Jupyter notebook server. Using this extra argument you can use the -Spark environment directly in the container: - -```bash -docker run -it ...flags... mcr.microsoft.com/mmlspark/release bash -``` - -This command starts the container with bash instead of Jupyter. This environment -has all of the Spark executables available in its `$PATH`. You still need to -specify the command-line flags that load the SynapseML package, but there are -convenient environment variables that hold the required package and repositories -to use: - -```bash -pyspark --repositories "$MML_M2REPOS" --packages "$MML_PACKAGE" --master "local[*]" -``` - -Many of the above listed flags are useful in this case too, such as mapping work -directories with `-v`. - -## Updating the SynapseML image - -New releases of SynapseML are published from time to time, and they include a new -Docker image. As an image consumer, you'll normally not notice such new -versions: `docker run` will download an image if a copy of it doesn't exist -locally, but if it does, then `docker run` will blindly run it, _without_ -checking for new tags that were pushed. - -Hence you need to explicitly tell Docker to check for a new version -and pull it if one exists. You do so with the `pull` command: - -```bash -docker pull mcr.microsoft.com/mmlspark/release -``` - -Since we didn't specify an explicit tag here, `docker` adds the implied -`:latest` tag, and checks the available `mcr.microsoft.com/mmlspark/release` image with this tag -on Docker Hub. When it finds a different image with this tag, it will fetch a -copy to your machine, changing the image that an unqualified -`mcr.microsoft.com/mmlspark/release` refers to. - -Docker normally knows only about the tags that it fetched, so if you've always -used `mcr.microsoft.com/mmlspark/release` to refer to the image without an explicit version tag, -then you wouldn't have the version-tagged image too. Once the tag is updated, -the previous version will still be in your system, only without any tag. Using -`docker images` to list the images in your system will now show you two images -for `mcr.microsoft.com/mmlspark/release`, one with a tag of `latest` and one with no tag, shown -as ``. Assuming that you don't have active containers (including detached -ones), `docker system prune` will remove this untagged image, reclaiming the -used space. - -If you've used an explicit version tag, then it will still exist after a new -pull, which means that you can continue using this version. If you -used an unqualified name first and then a version-tagged one, Docker will fetch -both tags. 
Only the second fetch is fast since it points to content that -was already loaded. In this case, doing a `pull` when there's a new version -will fetch the new `latest` tag and change its meaning to the newer version, but -the older version will still be available under its own version tag. - -Finally, if there are such version-tagged older versions that you want to get -rid of, you can use `docker images` to check the list of installed images and -their tags, and `docker rmi :` to remove the unwanted ones. - -## A note about security - -Executing code in a Docker container can be unsafe if the running user is -`root`. For this reason, the SynapseML image uses a proper username instead. If -you still want to run as root (for instance, if you want to `apt install` an -another ubuntu package), then you should use `--user root`. This mode can be useful -when combined with `docker exec` to perform administrative work while the image -continues to run as usual. - -## Further reading - -This text briefly covers some of the useful things that you can do with the -SynapseML Docker image (and other images in general). You can find much more -documentation [online](https://docs.docker.com/). - -[docker-products]: http://www.docker.com/products/overview/ - -[mmlspark tags]: https://hub.docker.com/r/microsoft/mmlspark/tags/ - -[mmlspark-dockerhub]: https://hub.docker.com/r/microsoft/mmlspark/ - -[Spark ports]: https://spark.apache.org/docs/latest/security.html#configuring-ports-for-network-security - -[Docker settings]: https://docs.docker.com/docker-for-windows/#docker-settings diff --git a/website/docs/reference/dotnet-setup.md b/website/docs/reference/dotnet-setup.md deleted file mode 100644 index 11d791f725..0000000000 --- a/website/docs/reference/dotnet-setup.md +++ /dev/null @@ -1,247 +0,0 @@ ---- -title: .NET setup -hide_title: true -sidebar_label: .NET setup -description: .NET setup and example for SynapseML ---- - -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; - - -# .NET setup and example for SynapseML - -## Installation - -### 1. Install .NET - -To start building .NET apps, you need to download and install the .NET SDK (Software Development Kit). - -Download and install the [.NET Core SDK](https://dotnet.microsoft.com/en-us/download/dotnet/3.1). -Installing the SDK adds the dotnet toolchain to your PATH. - -Once you've installed the .NET Core SDK, open a new command prompt or terminal. Then run `dotnet`. - -If the command runs and prints information about how to use dotnet, you can move to the next step. -If you receive a `'dotnet' is not recognized as an internal or external command` error, make sure -you opened a new command prompt or terminal before running the command. - -### 2. Install Java - -Install [Java 8.1](https://www.oracle.com/java/technologies/downloads/#java8) for Windows and macOS, -or [OpenJDK 8](https://openjdk.org/install/) for Ubuntu. - -Select the appropriate version for your operating system. For example, select jdk-8u201-windows-x64.exe -for a Windows x64 machine or jdk-8u231-macosx-x64.dmg for macOS. Then, use the command java to verify the installation. - -### 3. Install Apache Spark - -[Download and install Apache Spark](https://spark.apache.org/downloads.html) with version >= 3.2.0. -(SynapseML v0.11.1 only supports spark version >= 3.2.0) - -Extract downloaded zipped files (with 7-Zip app on Windows or `tar` on linux) and remember the location of -extracted files, we take `~/bin/spark-3.2.0-bin-hadoop3.2/` as an example here. 
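On Linux or macOS, the download-and-extract step can look like the sketch below. The archive URL is an assumption based on the Apache archive layout for Spark 3.2.0; if it has moved, use the link from the downloads page instead.

```bash
# Download and unpack Spark 3.2.0 into ~/bin (URL assumed from the Apache
# archive; substitute the link from the downloads page if it differs).
mkdir -p ~/bin && cd ~/bin
wget https://archive.apache.org/dist/spark/spark-3.2.0/spark-3.2.0-bin-hadoop3.2.tgz
tar -xzf spark-3.2.0-bin-hadoop3.2.tgz   # creates ~/bin/spark-3.2.0-bin-hadoop3.2/
```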
- -Run the following commands to set the environment variables used to locate Apache Spark. -On Windows, make sure to run the command prompt in administrator mode. - - - - setx /M HADOOP_HOME C:\bin\spark-3.2.0-bin-hadoop3.2\ - setx /M SPARK_HOME C:\bin\spark-3.2.0-bin-hadoop3.2\ - setx /M PATH "%PATH%;%HADOOP_HOME%;%SPARK_HOME%bin" # Warning: Don't run this if your path is already long as it will truncate your path to 1024 characters and potentially remove entries! - - - - - export SPARK_HOME=~/bin/spark-3.2.0-bin-hadoop3.2/ - export PATH="$SPARK_HOME/bin:$PATH" - source ~/.bashrc - - - - -Once you've installed everything and set your environment variables, open a **new** command prompt or terminal and run the following command: -```bash -spark-submit --version -``` -If the command runs and prints version information, you can move to the next step. - -If you receive a `'spark-submit' is not recognized as an internal or external command` error, make sure you opened a **new** command prompt. - -### 4. Install .NET for Apache Spark - -Download the [Microsoft.Spark.Worker](https://github.com/dotnet/spark/releases) **v2.1.1** release from the .NET for Apache Spark GitHub. -For example if you're on a Windows machine and plan to use .NET Core, download the Windows x64 netcoreapp3.1 release. - -Extract Microsoft.Spark.Worker and remember the location. - -### 5. Install WinUtils (Windows Only) - -.NET for Apache Spark requires WinUtils to be installed alongside Apache Spark. -[Download winutils.exe](https://github.com/steveloughran/winutils/blob/master/hadoop-3.0.0/bin/winutils.exe). -Then, copy WinUtils into C:\bin\spark-3.2.0-bin-hadoop3.2\bin. -:::note -If you're using a different version of Hadoop, select the version of WinUtils that's compatible with your version of Hadoop. You can see the Hadoop version at the end of your Spark install folder name. -::: - -### 6. Set DOTNET_WORKER_DIR and check dependencies - -Run one of the following commands to set the DOTNET_WORKER_DIR environment variable, which is used by .NET apps to locate .NET for Apache Spark -worker binaries. Make sure to replace with the directory where you downloaded and extracted the Microsoft.Spark.Worker. -On Windows, make sure to run the command prompt in administrator mode. - - - - - setx /M DOTNET_WORKER_DIR - - - - - export DOTNET_WORKER_DIR= - - - - -Finally, double-check that you can run `dotnet, java, spark-shell` from your command line before you move to the next section. - -## Write a .NET for SynapseML App - -### 1. Create a console app - -In your command prompt or terminal, run the following commands to create a new console application: -```powershell -dotnet new console -o SynapseMLApp -cd SynapseMLApp -``` -The `dotnet` command creates a new application of type console for you. The -o parameter creates a directory -named `SynapseMLApp` where your app is stored and populates it with the required files. -The `cd SynapseMLApp` command changes the directory to the app directory you created. - -### 2. Install NuGet package - -To use .NET for Apache Spark in an app, install the Microsoft.Spark package. -In your command prompt or terminal, run the following command: -```powershell -dotnet add package Microsoft.Spark --version 2.1.1 -``` -:::note -This tutorial uses Microsoft.Spark version 2.1.1 as SynapseML 0.11.1 depends on it. -Change to corresponding version if necessary. -::: - -To use SynapseML features in the app, install SynapseML.X package. -In this tutorial, we use SynapseML.Cognitive as an example. 
-In your command prompt or terminal, run the following command: -```powershell -# Update Nuget Config to include SynapseML Feed -dotnet nuget add source https://mmlspark.blob.core.windows.net/synapsemlnuget/index.json -n SynapseMLFeed -dotnet add package SynapseML.Cognitive --version 0.11.1 -``` -The `dotnet nuget add` command adds SynapseML's resolver to the source, so that our package can be found. - -### 3. Write your app -Open Program.cs in Visual Studio Code, or any text editor. Replace its contents with this code: -```csharp -using System; -using System.Collections.Generic; -using Synapse.ML.Cognitive; -using Microsoft.Spark.Sql; -using Microsoft.Spark.Sql.Types; - -namespace SynapseMLApp -{ - class Program - { static void Main(string[] args) - { - // Create Spark session - SparkSession spark = - SparkSession - .Builder() - .AppName("TextSentimentExample") - .GetOrCreate(); - - // Create DataFrame - DataFrame df = spark.CreateDataFrame( - new List - { - new GenericRow(new object[] {"I am so happy today, its sunny!", "en-US"}), - new GenericRow(new object[] {"I am frustrated by this rush hour traffic", "en-US"}), - new GenericRow(new object[] {"The cognitive services on spark aint bad", "en-US"}) - }, - new StructType(new List - { - new StructField("text", new StringType()), - new StructField("language", new StringType()) - }) - ); - - // Create TextSentiment - var model = new TextSentiment() - .SetSubscriptionKey("YOUR_SUBSCRIPTION_KEY") - .SetLocation("eastus") - .SetTextCol("text") - .SetOutputCol("sentiment") - .SetErrorCol("error") - .SetLanguageCol("language"); - - // Transform - var outputDF = model.Transform(df); - - // Display results - outputDF.Show(); - - // Stop Spark session - spark.Stop(); - } - } -} -``` -[SparkSession](https://docs.microsoft.com/en-us/dotnet/api/microsoft.spark.sql.sparksession?view=spark-dotnet) is the entrypoint -of Apache Spark applications, which manages the context and information of your application. A DataFrame is a way of organizing -data into a set of named columns. - -Create a [TextSentiment](https://mmlspark.blob.core.windows.net/docs/0.11.1/dotnet/classSynapse_1_1ML_1_1Cognitive_1_1TextSentiment.html) -instance, set corresponding subscription key and other configurations. Then, apply transformation to the dataframe, -which analyzes the sentiment based on each row, and stores result into output column. - -The result of the transformation is stored in another DataFrame. At this point, no operations have taken place because -.NET for Apache Spark lazily evaluates the data. The operation defined by the call to model.Transform doesn't execute until the Show method is called to display the contents of the transformed DataFrame to the console. Once you no longer need the Spark -session, use the Stop method to stop your session. - -### 4. Run your .NET App -Run the following command to build your application: -```powershell -dotnet build -``` -Navigate to your build output directory. For example, in Windows you could run `cd bin\Debug\net5.0`. -Use the spark-submit command to submit your application to run on Apache Spark. 
-```powershell -spark-submit --class org.apache.spark.deploy.dotnet.DotnetRunner --packages com.microsoft.azure:synapseml_2.12:0.11.1 --master local microsoft-spark-3-2_2.12-2.1.1.jar dotnet SynapseMLApp.dll -``` -`--packages com.microsoft.azure:synapseml_2.12:0.11.1` specifies the dependency on synapseml_2.12 version 0.11.1; -`microsoft-spark-3-2_2.12-2.1.1.jar` specifies Microsoft.Spark version 2.1.1 and Spark version 3.2 -:::note -This command assumes you have downloaded Apache Spark and added it to your PATH environment variable so that you can use spark-submit. -Otherwise, you'd have to use the full path (for example, C:\bin\apache-spark\bin\spark-submit or ~/spark/bin/spark-submit). -::: - -When your app runs, the sentiment analysis result is written to the console. -``` -+-----------------------------------------+--------+-----+--------------------------------------------------+ -| text|language|error| sentiment| -+-----------------------------------------+--------+-----+--------------------------------------------------+ -| I am so happy today, its sunny!| en-US| null|[{positive, null, {0.99, 0.0, 0.0}, [{I am so h...| -|I am frustrated by this rush hour traffic| en-US| null|[{negative, null, {0.0, 0.0, 0.99}, [{I am frus...| -| The cognitive services on spark aint bad| en-US| null|[{negative, null, {0.0, 0.01, 0.99}, [{The cogn...| -+-----------------------------------------+--------+-----+--------------------------------------------------+ -``` -Congratulations! You successfully authored and ran a .NET for SynapseML app. -Refer to the [developer docs](https://mmlspark.blob.core.windows.net/docs/0.11.1/dotnet/index.html) for API guidance. - -## Next - -* Refer to this [tutorial](https://docs.microsoft.com/en-us/dotnet/spark/tutorials/databricks-deployment) for deploying a .NET app to Databricks. -* You could download compatible [install-worker.sh](https://mmlspark.blob.core.windows.net/publicwasb/dotnet/install-worker.sh) -and [db-init.sh](https://mmlspark.blob.core.windows.net/publicwasb/dotnet/db-init.sh) files needed for deployment on Databricks. diff --git a/website/docs/reference/vagrant.md b/website/docs/reference/vagrant.md deleted file mode 100644 index 4d182a4f3f..0000000000 --- a/website/docs/reference/vagrant.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -title: Vagrant -hide_title: true -sidebar_label: Vagrant ---- - - -# Using the SynapseML Vagrant Image - -## Install Vagrant and Dependencies - -You'll need a few dependencies before we get started. These instructions are for using Vagrant on Windows OS. - -1. Ensure [Hyper-V](https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/) is enabled or install [VirtualBox](https://www.virtualbox.org/) -2. Install an X Server for Windows, [VcXsrv](https://sourceforge.net/projects/vcxsrv/) is a lightweight option. -3. Install the Vagrant version for your OS [here](https://www.vagrantup.com/downloads.html) - -## Build the Vagrant Image - -Start PowerShell as Administrator and go to the `synapseml/tools/vagrant` directory and run - - vagrant up - -_Note: you may need to select a network switch, try the Default Switch option if possible_ - -## Connect to the Vagrant Image - -First start the X-Window server (use 'XLaunch' if using VcXsrv). 
- -From the same directory (with PowerShell as Administrator) run - - $env:DISPLAY="localhost:0" - vagrant ssh -- -Y - - # now you can start IntelliJ and interact with the GUI - > idea - -## Stop the Vagrant Image - - vagrant halt - -## Further reading - -This guide covers the bare minimum for running a Vagrant image. For more information, see the [Vagrant Documentation](https://www.vagrantup.com/intro/index.html). diff --git a/website/docs/third-party-notices.txt b/website/docs/third-party-notices.txt deleted file mode 100644 index 58540ba262..0000000000 --- a/website/docs/third-party-notices.txt +++ /dev/null @@ -1,298 +0,0 @@ -================================================================================ -*** OpenCV -================================================================================ - -By downloading, copying, installing or using the software you agree to -this license. If you do not agree to this license, do not download, -install, copy or use the software. - - - License Agreement - For Open Source Computer Vision Library - (3-clause BSD License) - -Copyright (C) 2000-2016, Intel Corporation, all rights reserved. -Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved. -Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved. -Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved. -Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved. -Copyright (C) 2015-2016, Itseez Inc., all rights reserved. -Third party copyrights are property of their respective owners. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the names of the copyright holders nor the names of the contributors - may be used to endorse or promote products derived from this software - without specific prior written permission. - -This software is provided by the copyright holders and contributors "as -is" and any express or implied warranties, including, but not limited -to, the implied warranties of merchantability and fitness for a -particular purpose are disclaimed. In no event shall copyright holders -or contributors be liable for any direct, indirect, incidental, special, -exemplary, or consequential damages (including, but not limited to, -procurement of substitute goods or services; loss of use, data, or -profits; or business interruption) however caused and on any theory of -liability, whether in contract, strict liability, or tort (including -negligence or otherwise) arising in any way out of the use of this -software, even if advised of the possibility of such damage. - - - -================================================================================ -*** File with code "taken from" PCL library -================================================================================ - -Software License Agreement (BSD License) - -Point Cloud Library (PCL) - www.pointclouds.org -Copyright (c) 2009-2012, Willow Garage, Inc. -Copyright (c) 2012-, Open Perception, Inc. -Copyright (c) XXX, respective authors. - -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the copyright holder(s) nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER -OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -================================================================================ -*** KAZE -================================================================================ - -Copyright (c) 2012, Pablo Fernández Alcantarilla -All Rights Reserved - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - * Neither the name of the copyright holders nor the names of its - contributors may be used to endorse or promote products derived - from this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -================================================================================ -*** libwebp -================================================================================ - -Copyright (c) 2010, Google Inc. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - - * Neither the name of Google nor the names of its contributors may be - used to endorse or promote products derived from this software - without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -Additional IP Rights Grant (Patents) ------------------------------------- - -"These implementations" means the copyrightable works that implement the -WebM codecs distributed by Google as part of the WebM Project. - -Google hereby grants to you a perpetual, worldwide, non-exclusive, no-charge, -royalty-free, irrevocable (except as stated in this section) patent license to -make, have made, use, offer to sell, sell, import, transfer, and otherwise -run, modify and propagate the contents of these implementations of WebM, where -such license applies only to those patent claims, both currently owned by -Google and acquired in the future, licensable by Google that are necessarily -infringed by these implementations of WebM. This grant does not include claims -that would be infringed only as a consequence of further modification of these -implementations. If you or your agent or exclusive licensee institute or order -or agree to the institution of patent litigation or any other patent -enforcement activity against any entity (including a cross-claim or -counterclaim in a lawsuit) alleging that any of these implementations of WebM -or any code incorporated within any of these implementations of WebM -constitute direct or contributory patent infringement, or inducement of -patent infringement, then any patent rights granted to you under this License -for these implementations of WebM shall terminate as of the date such -litigation is filed." - - - -================================================================================ -*** File with code "based on" a message of Laurent Pinchart on the -*** video4linux mailing list -================================================================================ - -LEGAL ISSUES -============ - -In plain English: - -1. We don't promise that this software works. (But if you find any - bugs, please let us know!) -2. You can use this software for whatever you want. You don't have to - pay us. -3. You may not pretend that you wrote this software. 
If you use it in a - program, you must acknowledge somewhere in your documentation that - you've used the IJG code. - -In legalese: - -The authors make NO WARRANTY or representation, either express or -implied, with respect to this software, its quality, accuracy, -merchantability, or fitness for a particular purpose. This software is -provided "AS IS", and you, its user, assume the entire risk as to its -quality and accuracy. - -This software is copyright (C) 1991-2013, Thomas G. Lane, Guido -Vollbeding. All Rights Reserved except as specified below. - -Permission is hereby granted to use, copy, modify, and distribute this -software (or portions thereof) for any purpose, without fee, subject to -these conditions: -(1) If any part of the source code for this software is distributed, - then this README file must be included, with this copyright and - no-warranty notice unaltered; and any additions, deletions, or - changes to the original files must be clearly indicated in - accompanying documentation. -(2) If only executable code is distributed, then the accompanying - documentation must state that "this software is based in part on the - work of the Independent JPEG Group". -(3) Permission for use of this software is granted only if the user - accepts full responsibility for any undesirable consequences; the - authors accept NO LIABILITY for damages of any kind. - -These conditions apply to any software derived from or based on the IJG -code, not just to the unmodified library. If you use our work, you -ought to acknowledge us. - -Permission is NOT granted for the use of any IJG author's name or -company name in advertising or publicity relating to this software or -products derived from it. This software may be referred to only as "the -Independent JPEG Group's software". - -We specifically permit and encourage the use of this software as the -basis of commercial products, provided that all warranty or liability -claims are assumed by the product vendor. - -The Unix configuration script "configure" was produced with GNU -Autoconf. It is copyright by the Free Software Foundation but is freely -distributable. The same holds for its supporting scripts (config.guess, -config.sub, ltmain.sh). Another support script, install-sh, is -copyright by X Consortium but is also freely distributable. - -The IJG distribution formerly included code to read and write GIF files. -To avoid entanglement with the Unisys LZW patent, GIF reading support -has been removed altogether, and the GIF writer has been simplified to -produce "uncompressed GIFs". This technique does not use the LZW -algorithm; the resulting GIF files are larger than usual, but are -readable by all standard GIF decoders. - -We are required to state that - "The Graphics Interchange Format(c) is the Copyright property of - CompuServe Incorporated. GIF(sm) is a Service Mark property of - CompuServe Incorporated." 
- - - -================================================================================ -*** File with code copyright Yossi Rubner, as well as code copyright -*** MD-Mathematische Dienste GmbH -================================================================================ - - Copyright (c) 2002, - MD-Mathematische Dienste GmbH - Im Defdahl 5-10 - 44141 Dortmund - Germany - www.md-it.de - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - -Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. Redistributions -in binary form must reproduce the above copyright notice, this list of -conditions and the following disclaimer in the documentation and/or -other materials provided with the distribution. The name of Contributor -may not be used to endorse or promote products derived from this -software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS -IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED -TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A -PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF -THE POSSIBILITY OF SUCH DAMAGE. diff --git a/website/doctest.py b/website/doctest.py index 7ddfb615a9..6ab47b688d 100644 --- a/website/doctest.py +++ b/website/doctest.py @@ -58,7 +58,7 @@ def iterate_over_documentation(folder, version): def main(version): cur_path = os.getcwd() - folder = os.path.join(cur_path, "website", "docs", "documentation") + folder = os.path.join(cur_path, "docs", "Quick Examples") iterate_over_documentation(folder, version) os.chdir(folder) os.system( diff --git a/website/docusaurus.config.js b/website/docusaurus.config.js index cce12382cd..8de259a8bf 100644 --- a/website/docusaurus.config.js +++ b/website/docusaurus.config.js @@ -1,8 +1,7 @@ const math = require('remark-math') const katex = require('rehype-katex') const path = require('path'); -const { all_examples } = require('./src/plugins/examples'); -let version = "0.11.1"; +let version = "0.11.2"; module.exports = { title: 'SynapseML', @@ -14,8 +13,7 @@ module.exports = { projectName: 'SynapseML', trailingSlash: true, customFields: { - examples: all_examples(), - version: "0.11.1", + version: "0.11.2", }, stylesheets: [ { @@ -41,7 +39,7 @@ module.exports = { src: 'img/logo.svg', }, items: [ - { to: 'docs/about', label: 'Docs', position: 'left' }, + { to: 'docs/Overview', label: 'Docs', position: 'left' }, { to: 'blog', label: 'Blog', position: 'left' }, { to: 'videos', label: 'Videos', position: 'left' }, { @@ -86,19 +84,19 @@ module.exports = { items: [ { label: 'Installation', - to: 'docs/getting_started/installation', + to: 'docs/Get%20Started/Install%20SynapseML', }, { label: 'Getting Started', - to: 'docs/getting_started/first_example', + to: 'docs/Get%20Started/Quickstart%20-%20Your%20First%20Models', }, { label: 'Python API Reference', - to: 
'https://mmlspark.blob.core.windows.net/docs/0.11.1/pyspark/index.html', + to: 'https://mmlspark.blob.core.windows.net/docs/0.11.2/pyspark/index.html', }, { label: 'Scala API Reference', - to: 'https://mmlspark.blob.core.windows.net/docs/0.11.1/scala/index.html', + to: 'https://mmlspark.blob.core.windows.net/docs/0.11.2/scala/index.html', }, ], }, diff --git a/website/notebookconvert.py b/website/notebookconvert.py deleted file mode 100644 index 85afd69870..0000000000 --- a/website/notebookconvert.py +++ /dev/null @@ -1,65 +0,0 @@ -import io -import os -import re - - -def add_header_to_markdown(folder, md): - name = md[:-3] - with io.open(os.path.join(folder, md), "r+", encoding="utf-8") as f: - content = f.read() - f.truncate(0) - content = re.sub(r"style=\"[\S ]*?\"", "", content) - content = re.sub(r"", "", content) - f.seek(0, 0) - f.write( - "---\ntitle: {}\nhide_title: true\nstatus: stable\n---\n".format(name) - + content, - ) - f.close() - - -def convert_notebook_to_markdown(file_path, outputdir): - print("Converting {} into markdown".format(file_path)) - convert_cmd = 'jupyter nbconvert --output-dir="{}" --to markdown "{}"'.format( - outputdir, - file_path, - ) - os.system(convert_cmd) - print() - - -def convert_allnotebooks_in_folder(folder, outputdir): - - cur_folders = [folder] - output_dirs = [outputdir] - while cur_folders: - cur_dir = cur_folders.pop(0) - cur_output_dir = output_dirs.pop(0) - for file in os.listdir(cur_dir): - if os.path.isdir(os.path.join(cur_dir, file)): - cur_folders.append(os.path.join(cur_dir, file)) - output_dirs.append(os.path.join(cur_output_dir, file)) - else: - if not os.path.exists(cur_output_dir): - os.mkdir(cur_output_dir) - - md = file.replace(".ipynb", ".md") - if os.path.exists(os.path.join(cur_output_dir, md)): - os.remove(os.path.join(cur_output_dir, md)) - - convert_notebook_to_markdown( - os.path.join(cur_dir, file), - cur_output_dir, - ) - add_header_to_markdown(cur_output_dir, md) - - -def main(): - cur_path = os.getcwd() - folder = os.path.join(cur_path, "notebooks", "features") - outputdir = os.path.join(cur_path, "website", "docs", "features") - convert_allnotebooks_in_folder(folder, outputdir) - - -if __name__ == "__main__": - main() diff --git a/website/sidebars.js b/website/sidebars.js index de438af3da..c3ea247681 100644 --- a/website/sidebars.js +++ b/website/sidebars.js @@ -1,164 +1,193 @@ -const { listExamplePaths } = require('./src/plugins/examples'); - -let cs_pages = listExamplePaths("features", "cognitive_services"); -let gs_pages = listExamplePaths("features", "geospatial_services"); -let if_pages = listExamplePaths("features", "isolation_forest"); -let rai_pages = listExamplePaths("features", "responsible_ai"); -let onnx_pages = listExamplePaths("features", "onnx"); -let lgbm_pages = listExamplePaths("features", "lightgbm"); -let vw_pages = listExamplePaths("features", "vw"); -let ss_pages = listExamplePaths("features", "spark_serving"); -let ocv_pages = listExamplePaths("features", "opencv"); -let cls_pages = listExamplePaths("features", "classification"); -let reg_pages = listExamplePaths("features", "regression"); -let dl_pages = listExamplePaths("features", "simple_deep_learning"); -let ci_pages = listExamplePaths("features", "causal_inference"); -let hpt_pages = listExamplePaths("features", "hyperparameter_tuning"); -let other_pages = listExamplePaths("features", "other"); - module.exports = { - docs: [ - { - type: 'doc', - id: 'about', - }, - { - type: 'category', - label: 'Getting Started', - items: [ - 
'getting_started/installation', - 'getting_started/first_example', - 'getting_started/first_model', - 'getting_started/dotnet_example', - ], - }, - { - type: 'category', - label: 'Features', - items: [ - { - type: 'category', - label: 'Cognitive Services', - items: cs_pages, - }, + docs: [ { - type: 'category', - label: 'Isolation Forest', - items: if_pages, + type: 'doc', + id: 'Overview', + label: 'What is SynapseML?', }, { - type: 'category', - label: 'Geospatial Services', - items: gs_pages, + type: 'category', + label: 'Get Started', + items: [ + 'Get Started/Create a Spark Cluster', + 'Get Started/Install SynapseML', + 'Get Started/Set up Cognitive Services', + 'Get Started/Quickstart - Your First Models', + ], }, { - type: 'category', - label: 'Responsible AI', - items: rai_pages, - }, - { - type: 'category', - label: 'ONNX', - items: onnx_pages, - }, - { - type: 'category', - label: 'LightGBM', - items: lgbm_pages, - }, - { - type: 'category', - label: 'Vowpal Wabbit', - items: vw_pages, - }, - { - type: 'category', - label: 'Spark Serving', - items: ss_pages, - }, - { - type: 'category', - label: 'OpenCV', - items: ocv_pages, - }, - { - type: 'category', - label: 'Classification', - items: cls_pages, - }, - { - type: 'category', - label: 'Regression', - items: reg_pages, - }, - { - type: 'category', - label: 'Simple Deep Learning', - items: dl_pages, + type: 'category', + label: 'Explore Algorithms', + items: [ + { + type: 'category', + label: 'LightGBM', + items: [ + 'Explore Algorithms/LightGBM/Overview', + 'Explore Algorithms/LightGBM/Quickstart - Classification, Ranking, and Regression', + ], + }, + { + type: 'category', + label: 'AI Services', + items: [ + "Explore Algorithms/AI Services/Overview", + "Explore Algorithms/AI Services/Geospatial Services", + "Explore Algorithms/AI Services/Multivariate Anomaly Detection", + "Explore Algorithms/AI Services/Advanced Usage - Async, Batching, and Multi-Key", + "Explore Algorithms/AI Services/Quickstart - Analyze Celebrity Quotes", + "Explore Algorithms/AI Services/Quickstart - Analyze Text", + "Explore Algorithms/AI Services/Quickstart - Creare a Visual Search Engine", + "Explore Algorithms/AI Services/Quickstart - Create Audiobooks", + "Explore Algorithms/AI Services/Quickstart - Document Question and Answering with PDFs", + "Explore Algorithms/AI Services/Quickstart - Flooding Risk", + "Explore Algorithms/AI Services/Quickstart - Predictive Maintenance", + ], + }, + { + type: 'category', + label: 'OpenAI', + items: [ + "Explore Algorithms/OpenAI/Langchain", + "Explore Algorithms/OpenAI/OpenAI", + "Explore Algorithms/OpenAI/Quickstart - OpenAI Embedding", + "Explore Algorithms/OpenAI/Quickstart - Understand and Search Forms", + ], + }, + { + type: 'category', + label: 'Deep Learning', + items: [ + "Explore Algorithms/Deep Learning/Getting Started", + "Explore Algorithms/Deep Learning/ONNX", + "Explore Algorithms/Deep Learning/Distributed Training", + "Explore Algorithms/Deep Learning/Quickstart - Fine-tune a Text Classifier", + "Explore Algorithms/Deep Learning/Quickstart - Fine-tune a Vision Classifier", + "Explore Algorithms/Deep Learning/Quickstart - ONNX Model Inference", + "Explore Algorithms/Deep Learning/Quickstart - Transfer Learn for Image Classification", + ], + }, + { + type: 'category', + label: 'Responsible AI', + items: [ + "Explore Algorithms/Responsible AI/Interpreting Model Predictions", + "Explore Algorithms/Responsible AI/Tabular Explainers", + "Explore Algorithms/Responsible AI/Text Explainers", + "Explore 
Algorithms/Responsible AI/Image Explainers", + "Explore Algorithms/Responsible AI/PDP and ICE Explainers", + "Explore Algorithms/Responsible AI/Data Balance Analysis", + "Explore Algorithms/Responsible AI/Explanation Dashboard", + "Explore Algorithms/Responsible AI/Quickstart - Data Balance Analysis", + "Explore Algorithms/Responsible AI/Quickstart - Snow Leopard Detection", + ], + }, + + { + type: 'category', + label: 'Causal Inference', + items: [ + "Explore Algorithms/Causal Inference/Overview", + "Explore Algorithms/Causal Inference/Quickstart - Measure Causal Effects", + "Explore Algorithms/Causal Inference/Quickstart - Measure Heterogeneous Effects", + ], + }, + + { + type: 'category', + label: 'Classification', + items: [ + "Explore Algorithms/Classification/Quickstart - Train Classifier", + "Explore Algorithms/Classification/Quickstart - SparkML vs SynapseML", + "Explore Algorithms/Classification/Quickstart - Vowpal Wabbit on Tabular Data", + "Explore Algorithms/Classification/Quickstart - Vowpal Wabbit on Text Data", + ], + }, + { + type: 'category', + label: 'Regression', + items: [ + "Explore Algorithms/Regression/Quickstart - Data Cleaning", + "Explore Algorithms/Regression/Quickstart - Train Regressor", + "Explore Algorithms/Regression/Quickstart - Vowpal Wabbit and LightGBM", + ], + }, + { + type: 'category', + label: 'Anomaly Detection', + items: [ + "Explore Algorithms/Anomaly Detection/Quickstart - Isolation Forests", + ], + }, + { + type: 'category', + label: 'Hyperparameter Tuning', + items: [ + "Explore Algorithms/Hyperparameter Tuning/HyperOpt", + "Explore Algorithms/Hyperparameter Tuning/Quickstart - Random Search", + ], + }, + { + type: 'category', + label: 'OpenCV', + items: [ + "Explore Algorithms/OpenCV/Image Transformations", + ], + }, + { + type: 'category', + label: 'Vowpal Wabbit', + items: [ + "Explore Algorithms/Vowpal Wabbit/Overview", + "Explore Algorithms/Vowpal Wabbit/Multi-class classification", + "Explore Algorithms/Vowpal Wabbit/Contextual Bandits", + "Explore Algorithms/Vowpal Wabbit/Quickstart - Classification, Quantile Regression, and Regression", + "Explore Algorithms/Vowpal Wabbit/Quickstart - Classification using SparkML Vectors", + "Explore Algorithms/Vowpal Wabbit/Quickstart - Classification using VW-native Format", + ], + }, + { + type: 'category', + label: 'Other Algorithms', + items: [ + "Explore Algorithms/Other Algorithms/Smart Adaptive Recommendations", + "Explore Algorithms/Other Algorithms/Cyber ML", + "Explore Algorithms/Other Algorithms/Quickstart - Anomalous Access Detection", + "Explore Algorithms/Other Algorithms/Quickstart - Exploring Art Across Cultures", + ], + }, + + ], }, { - type: 'category', - label: 'Causal Inference', - items: ci_pages, + type: 'category', + label: 'Use with MLFlow', + items: [ + "Use with MLFlow/Overview", + "Use with MLFlow/Install", + "Use with MLFlow/Autologging", + ], }, { - type: 'category', - label: 'Hyperparameter Tuning', - items: hpt_pages, + type: 'category', + label: 'Deploy Models', + items: [ + "Deploy Models/Overview", + "Deploy Models/Quickstart - Deploying a Classifier", + ], }, { - type: 'category', - label: 'Other', - items: other_pages, - }, + type: 'category', + label: 'Reference', + items: [ + "Reference/Contributor Guide", + "Reference/Developer Setup", + "Reference/Docker Setup", + "Reference/R Setup", + "Reference/Dotnet Setup", + "Reference/Quickstart - LightGBM in Dotnet", - ], - }, - { - type: 'category', - label: 'Transformers', - items: [ - 
'documentation/transformers/transformers_cognitive', - 'documentation/transformers/transformers_core', - 'documentation/transformers/transformers_opencv', - 'documentation/transformers/transformers_vw', - 'documentation/transformers/transformers_deep_learning', - ], - }, - { - type: 'category', - label: 'Estimators', - items: [ - 'documentation/estimators/estimators_cognitive', - 'documentation/estimators/estimators_core', - 'documentation/estimators/estimators_lightgbm', - 'documentation/estimators/estimators_vw', - 'documentation/estimators/estimators_causal', - ], - }, - { - type: 'category', - label: 'MLflow', - items: [ - 'mlflow/introduction', - 'mlflow/installation', - 'mlflow/examples', - 'mlflow/autologging' - ], - }, - { - type: 'category', - label: 'Reference', - items: [ - 'reference/developer-readme', - 'reference/contributing_guide', - 'reference/docker', - 'reference/R-setup', - 'reference/dotnet-setup', - 'reference/SAR', - 'reference/cyber', - 'reference/vagrant', - ], - }, - ], + ], + }, + ], }; diff --git a/website/src/pages/index.js b/website/src/pages/index.js index 43c07bc511..4159858fc0 100644 --- a/website/src/pages/index.js +++ b/website/src/pages/index.js @@ -15,7 +15,7 @@ const snippets = [ { label: "Cognitive Services", further: - "docs/features/cognitive_services/CognitiveServices%20-%20Overview#text-analytics-sample", + "docs/Explore%20Algorithms/AI%20Services/Overview#text-analytics-sample", config: `from synapse.ml.cognitive import * sentiment_df = (TextSentiment() @@ -29,7 +29,7 @@ sentiment_df = (TextSentiment() }, { label: "Deep Learning", - further: "docs/features/onnx/ONNX%20-%20Inference%20on%20Spark", + further: "docs/Explore%20Algorithms/Deep%20Learning/ONNX", config: `from synapse.ml.onnx import * model_prediction_df = (ONNXModel() @@ -42,7 +42,7 @@ model_prediction_df = (ONNXModel() }, { label: "Responsible AI", - further: "docs/features/responsible_ai/Model%20Interpretation%20on%20Spark", + further: "docs/Explore%20Algorithms/Responsible%20AI/Interpreting%20Model%20Predictions", config: `from synapse.ml.explainers import * interpretation_df = (TabularSHAP() @@ -56,7 +56,7 @@ interpretation_df = (TabularSHAP() }, { label: "LightGBM", - further: "docs/features/lightgbm/about", + further: "docs/Explore%20Algorithms/LightGBM/Overview", config: `from synapse.ml.lightgbm import * quantile_df = (LightGBMRegressor() @@ -71,7 +71,7 @@ quantile_df = (LightGBMRegressor() { label: "OpenCV", further: - "docs/features/opencv/OpenCV%20-%20Pipeline%20Image%20Transformations", + "docs/Explore%20Algorithms/OpenCV/Image%20Transformations", config: `from synapse.ml.opencv import * image_df = (ImageTransformer() @@ -176,7 +176,7 @@ function Home() { "button button--outline button--primary button--lg", styles.getStarted )} - to={useBaseUrl("docs/getting_started/installation")} + to={useBaseUrl("docs/Get%20Started/Install%20SynapseML")} > Get Started @@ -275,7 +275,7 @@ function Home() { { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1-spark3.3", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2-spark3.3", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -290,7 +290,7 @@ function Home() { { "name": "synapseml", "conf": { - "spark.jars.packages": 
"com.microsoft.azure:synapseml_2.12:0.11.1,org.apache.spark:spark-avro_2.12:3.3.1", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2,org.apache.spark:spark-avro_2.12:3.3.1", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -309,7 +309,7 @@ function Home() { { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1-spark3.3", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2-spark3.3", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -324,7 +324,7 @@ function Home() { { "name": "synapseml", "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.1,org.apache.spark:spark-avro_2.12:3.3.1", + "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.11.2,org.apache.spark:spark-avro_2.12:3.3.1", "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", "spark.yarn.user.classpath.first": "true", @@ -339,9 +339,9 @@ function Home() { SynapseML can be conveniently installed on existing Spark clusters via the --packages option, examples: This can be used in other Spark contexts too. For example, you @@ -369,12 +369,12 @@ spark-submit --packages com.microsoft.azure:synapseml_2.12:0.11.1 MyApp.jar `}

For the coordinates:

Spark 3.3 Cluster: Spark 3.2 Cluster: with the resolver: @@ -392,7 +392,7 @@ spark-submit --packages com.microsoft.azure:synapseml_2.12:0.11.1 MyApp.jar `} notebooks. To get started with our example notebooks import the following databricks archive: @@ -430,7 +430,7 @@ spark-submit --packages com.microsoft.azure:synapseml_2.12:0.11.1 MyApp.jar `} To try out SynapseML with .NET, you should add SynapseML's assembly into reference: For detailed installation, please refer this{" "} - instruction. + instruction. diff --git a/website/src/plugins/examples/index.js b/website/src/plugins/examples/index.js deleted file mode 100644 index 40377b5760..0000000000 --- a/website/src/plugins/examples/index.js +++ /dev/null @@ -1,83 +0,0 @@ -const path = require("path"); -const fs = require("fs"); -const { parseMarkdownString } = require("@docusaurus/utils"); - -function examples(folder, type) { - return all_examples_for_type(folder, type).filter( - (c) => c.status != "deprecated" - ); -} - -function all_examples_for_type(folder, type) { - let examples = []; - let dir = path.join(__dirname, `../../../docs/${folder}/${type}`); - fs.readdirSync(dir).forEach(function (file) { - if (file.endsWith(".md")) { - let name = file.split(".").slice(0, -1).join("."); - let data = fs.readFileSync(path.join(dir, file)); - const { frontMatter } = parseMarkdownString(data); - frontMatter["name"] = name; - examples.push(frontMatter); - } - }); - return examples; -} - -function all_examples() { - let ex_links = [ - `features/cognitive_services/CognitiveServices - Overview.md`, - `features/classification/Classification - Adult Census.md`, - `features/cognitive_services/CognitiveServices - Overview.md`, - `features/geospatial_services/GeospatialServices - Overview.md`, - `features/other/ConditionalKNN - Exploring Art Across Cultures.md`, - `features/other/CyberML - Anomalous Access Detection.md`, - `features/responsible_ai/DataBalanceAnalysis - Adult Census Income.md`, - `features/responsible_ai/Interpretability - Image Explainers.md`, - `features/onnx/ONNX - Inference on Spark.md`, - `features/lightgbm/LightGBM - Overview.md`, - `features/vw/Vowpal Wabbit - Overview.md`, - ]; - let examples = []; - let dir = path.join(__dirname, `../../../docs`); - ex_links.forEach(function (url) { - let url_path = url.split(".").slice(0, -1).join("."); - let name = url_path.split("/").slice(-1)[0]; - let data = fs.readFileSync(path.join(dir, url)); - const { frontMatter } = parseMarkdownString(data); - frontMatter["url_path"] = url_path; - frontMatter["name"] = name; - examples.push(frontMatter); - }); - return examples; -} - -function listExamplePaths(folder, type) { - let paths = []; - let examples = all_examples_for_type(folder, type); - - examples - .filter((c) => c.status != "deprecated") - .sort() - .forEach(function (info) { - paths.push(`${folder}/${type}/${info.name}`); - }); - - let deprecatedPaths = examples - .filter((c) => c.status == "deprecated") - .map((c) => `${folder}/${type}/${c.name}`); - - if (deprecatedPaths.length > 0) { - paths.push({ - type: "category", - label: "Deprecated", - items: deprecatedPaths, - }); - } - - return paths; -} - -module.exports = { - all_examples: all_examples, - listExamplePaths: listExamplePaths, -}; diff --git a/website/src/theme/FeatureCards/index.js b/website/src/theme/FeatureCards/index.js deleted file mode 100644 index ebe0f03f53..0000000000 --- a/website/src/theme/FeatureCards/index.js +++ /dev/null @@ -1,110 +0,0 @@ -import React from "react"; - -import styles from "./styles.module.css"; 
-import useBaseUrl from "@docusaurus/useBaseUrl"; - -const features = [ - { - src: "/img/notebooks/cog_services_on_spark_2.svg", - title: "The Cognitive Services on Spark", - body: "Leverage the Microsoft Cognitive Services at unprecedented scales in your existing SparkML pipelines.", - footer: "Read the Paper", - burl: "https://arxiv.org/abs/1810.08744", - }, - { - src: "/img/notebooks/SparkServing3.svg", - title: "Stress Free Serving", - body: "Spark is well known for it's ability to switch between batch and streaming workloads by modifying a single line. \ - We push this concept even further and enable distributed web services with the same API as batch and streaming workloads.", - footer: "Learn More", - burl: "../features/spark_serving/about", - }, - { - src: "/img/notebooks/decision_tree_recolor.png", - title: "Lightning Fast Gradient Boosting", - body: "SynapseML adds GPU enabled gradient boosted machines from the popular framework LightGBM. \ - Users can mix and match frameworks in a single distributed environment and API.", - footer: "Try an Example", - burl: "../features/lightgbm/LightGBM%20-%20Overview", - }, - { - src: "/img/notebooks/vw-blue-dark-orange.svg", - title: "Fast and Sparse Text Analytics", - body: "Vowpal Wabbit on Spark enables new classes of workloads in scalable and performant text analytics", - footer: "Try an Example", - burl: "../features/vw/Vowpal%20Wabbit%20-%20Overview", - }, - { - src: "/img/notebooks/microservice_recolor.png", - title: "Distributed Microservices", - body: "SynapseML provides powerful and idiomatic tools to communicate with any HTTP endpoint service using Spark. \ - Users can now use Spark as a elastic micro-service orchestrator.", - footer: "Learn More", - burl: "../features/http/about", - }, - { - src: "/img/notebooks/LIME-1.svg", - title: "Large Scale Model Interpretability", - body: "Understand any image classifier with a distributed implementation of Local Interpretable Model Agnostic Explanations (LIME).", - footer: "Try an Example", - burl: "../features/responsible_ai/Interpretability%20-%20Image%20Explainers/", - }, - { - src: "/img/notebooks/cntk-1.svg", - title: "Scalable Deep Learning", - body: "SynapseML integrates the distributed computing framework Apache Spark with the flexible deep learning framework CNTK. \ - Enabling deep learning at unprecedented scales.", - footer: "Read the Paper", - burl: "https://arxiv.org/abs/1804.04031", - }, - { - src: "/img/multilingual.svg", - title: "Broad Language Support", - body: "SynapseML's API spans Scala, Python, Java, R, .NET and C# so you can integrate with any ecosystem.", - footer: "Try our PySpark Examples", - burl: "../features/CognitiveServices%20-%20Overview", - }, -]; - -function FeatureCards() { - - return ( - features && - features.length && ( -
- {features.map((props, idx) => ( - - ))} -
- ) - ); -} - -function FeatureCard({ src, title, body, footer, burl }) { - const srcUrl = useBaseUrl(src) - return ( -
- {/* card markup: image (srcUrl, alt "Image alt text"), heading {title}, text {body}, and a footer button labeled {footer} linking to {burl} */}
- ); -} - -export default FeatureCards; diff --git a/website/src/theme/FeatureCards/styles.module.css b/website/src/theme/FeatureCards/styles.module.css deleted file mode 100644 index 35a8973b8e..0000000000 --- a/website/src/theme/FeatureCards/styles.module.css +++ /dev/null @@ -1,51 +0,0 @@ -element { - --ifm-global-radius: 0.4rem; - --ifm-card-border-radius: calc(var(--ifm-global-radius)*2); -} - -.layout_grid_row { - display: grid; - grid-auto-columns: 1fr; - grid-template-rows: auto auto; - grid-template-columns: 1fr 1fr 1fr; - grid-column-gap: 32px; - grid-row-gap: 32px; -} - - -.feature_card { - display: flex; - flex-direction: column; - max-width: 500px; - text-align: center; - align-items: center; - padding: 4% 1%; - justify-content: center; - -webkit-box-pack: start; - -webkit-box-align: center; -} - -.card { - background-color: var(--ifm-background-surface-color); - color: var(--ifm-font-base-color); - border-radius: var(--ifm-card-border-radius); - box-shadow: 0 1px 2px 0 rgba(0,0,0,0.1); - display: flex; - flex-direction: column; - height: 100%; -} - -.card__image { - padding-top: 0; - display: inline-block; -} - -.card__body { - padding-bottom: 0; - padding: 1rem 1rem; -} - -.card__footer { - margin-top: auto; - padding: 1rem 1rem; -} diff --git a/website/versioned_docs/version-0.10.0/about.md b/website/versioned_docs/version-0.10.0/about.md deleted file mode 100644 index c36c2cb506..0000000000 --- a/website/versioned_docs/version-0.10.0/about.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: SynapseML -sidebar_label: Introduction -hide_title: true ---- - -import useBaseUrl from "@docusaurus/useBaseUrl"; - -
- -# SynapseML - -SynapseML is an ecosystem of tools aimed towards expanding the distributed computing framework -[Apache Spark](https://github.com/apache/spark) in several new directions. -SynapseML adds many deep learning and data science tools to the Spark ecosystem, -including seamless integration of Spark Machine Learning pipelines with [Microsoft Cognitive Toolkit -(CNTK)](https://github.com/Microsoft/CNTK), [LightGBM](https://github.com/Microsoft/LightGBM) and -[OpenCV](http://www.opencv.org/). These tools enable powerful and highly scalable predictive and analytical models -for many types of datasources. - -SynapseML also brings new networking capabilities to the Spark Ecosystem. With the HTTP on Spark project, users -can embed **any** web service into their SparkML models. In this vein, SynapseML provides easy to use -SparkML transformers for a wide variety of [Azure Cognitive Services](https://azure.microsoft.com/en-us/services/cognitive-services/). For production grade deployment, the Spark Serving project enables high throughput, -submillisecond latency web services, backed by your Spark cluster. - -SynapseML requires Scala 2.12, Spark 3.2+, and Python 3.8+. -See the API documentation [for -Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/index.html#package) and [for -PySpark](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/index.html). - -import Link from '@docusaurus/Link'; - -Get Started - -## Examples - -import NotebookExamples from "@theme/NotebookExamples"; - - - -## Explore our Features - -import FeatureCards from "@theme/FeatureCards"; - - - -## Papers - -- [Large Scale Intelligent Microservices](https://arxiv.org/abs/2009.08044) - -- [Conditional Image Retrieval](https://arxiv.org/abs/2007.07177) - -- [SynapseML: Unifying Machine Learning Ecosystems at Massive Scales](https://arxiv.org/abs/1810.08744) - -- [Flexible and Scalable Deep Learning with MMLSpark](https://arxiv.org/abs/1804.04031) diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/_LightGBM.md b/website/versioned_docs/version-0.10.0/documentation/estimators/_LightGBM.md deleted file mode 100644 index 2101c4e9ab..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/_LightGBM.md +++ /dev/null @@ -1,164 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## LightGBMClassifier - - - - - - -```python -from synapse.ml.lightgbm import * - -lgbmClassifier = (LightGBMClassifier() - .setFeaturesCol("features") - .setRawPredictionCol("rawPrediction") - .setDefaultListenPort(12402) - .setNumLeaves(5) - .setNumIterations(10) - .setObjective("binary") - .setLabelCol("labels") - .setLeafPredictionCol("leafPrediction") - .setFeaturesShapCol("featuresShap")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.lightgbm._ - -val lgbmClassifier = (new LightGBMClassifier() - .setFeaturesCol("features") - .setRawPredictionCol("rawPrediction") - .setDefaultListenPort(12402) - .setNumLeaves(5) - .setNumIterations(10) - .setObjective("binary") - .setLabelCol("labels") - .setLeafPredictionCol("leafPrediction") - .setFeaturesShapCol("featuresShap")) -``` - - - - - - - -## LightGBMRanker - - - - - - - - - -```python -from synapse.ml.lightgbm import * - -lgbmRanker = (LightGBMRanker() - .setLabelCol("labels") - .setFeaturesCol("features") - .setGroupCol("query") - .setDefaultListenPort(12402) - .setRepartitionByGroupingColumn(False) - .setNumLeaves(5) - 
.setNumIterations(10)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.lightgbm._ - -val lgbmRanker = (new LightGBMRanker() - .setLabelCol("labels") - .setFeaturesCol("features") - .setGroupCol("query") - .setDefaultListenPort(12402) - .setRepartitionByGroupingColumn(false) - .setNumLeaves(5) - .setNumIterations(10)) -``` - - - - - - - -## LightGBMRegressor - - - - - - - - - -```python -from synapse.ml.lightgbm import * - -lgbmRegressor = (LightGBMRegressor() - .setLabelCol("labels") - .setFeaturesCol("features") - .setDefaultListenPort(12402) - .setNumLeaves(5) - .setNumIterations(10)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.lightgbm._ - -val lgbmRegressor = (new LightGBMRegressor() - .setLabelCol("labels") - .setFeaturesCol("features") - .setDefaultListenPort(12402) - .setNumLeaves(5) - .setNumIterations(10)) -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/_VW.md b/website/versioned_docs/version-0.10.0/documentation/estimators/_VW.md deleted file mode 100644 index 87e438cd04..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/_VW.md +++ /dev/null @@ -1,112 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## VowpalWabbitRegressor - - - - - - -```python -from synapse.ml.vw import * - -vw = (VowpalWabbitRegressor() - .setLabelCol("Y1") - .setFeaturesCol("features") - .setPredictionCol("pred")) - -vwRegressor = (VowpalWabbitRegressor() - .setNumPasses(20) - .setPassThroughArgs("--holdout_off --loss_function quantile -q :: -l 0.1")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.vw._ - -val vw = (new VowpalWabbitRegressor() - .setLabelCol("Y1") - .setFeaturesCol("features") - .setPredictionCol("pred")) - -val vwRegressor = (new VowpalWabbitRegressor() - .setNumPasses(20) - .setPassThroughArgs("--holdout_off --loss_function quantile -q :: -l 0.1")) - -``` - - - - - - - -## VowpalWabbitContextualBandit - - - - - - - - - -```python -from synapse.ml.vw import * - -cb = (VowpalWabbitContextualBandit() - .setPassThroughArgs("--cb_explore_adf --epsilon 0.2 --quiet") - .setLabelCol("cost") - .setProbabilityCol("prob") - .setChosenActionCol("chosen_action") - .setSharedCol("shared_features") - .setFeaturesCol("action_features") - .setUseBarrierExecutionMode(False)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.vw._ - -val cb = (new VowpalWabbitContextualBandit() - .setPassThroughArgs("--cb_explore_adf --epsilon 0.2 --quiet") - .setLabelCol("cost") - .setProbabilityCol("prob") - .setChosenActionCol("chosen_action") - .setSharedCol("shared_features") - .setFeaturesCol("action_features") - .setUseBarrierExecutionMode(false)) - -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/cognitive/_MAD.md b/website/versioned_docs/version-0.10.0/documentation/estimators/cognitive/_MAD.md deleted file mode 100644 index 6602d2ab19..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/cognitive/_MAD.md +++ /dev/null @@ -1,105 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## FitMultivariateAnomaly - - - - - - -```python -from synapse.ml.cognitive import * - -anomalyKey = os.environ.get("ANOMALY_API_KEY", getSecret("anomaly-api-key")) -startTime = "2021-01-01T00:00:00Z" -endTime = "2021-01-03T01:59:00Z" -timestampColumn = "timestamp" 
-inputColumns = ["feature0", "feature1", "feature2"] -containerName = "madtest" -intermediateSaveDir = "intermediateData" -connectionString = os.environ.get("MADTEST_CONNECTION_STRING", getSecret("madtest-connection-string")) - -fitMultivariateAnomaly = (FitMultivariateAnomaly() - .setSubscriptionKey(anomalyKey) - .setLocation("westus2") - .setOutputCol("result") - .setStartTime(startTime) - .setEndTime(endTime) - .setContainerName(containerName) - .setIntermediateSaveDir(intermediateSaveDir) - .setTimestampCol(timestampColumn) - .setInputCols(inputColumns) - .setSlidingWindow(200) - .setConnectionString(connectionString)) - -# uncomment below for fitting your own dataframe -# model = fitMultivariateAnomaly.fit(df) -# fitMultivariateAnomaly.cleanUpIntermediateData() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ - -val startTime: String = "2021-01-01T00:00:00Z" -val endTime: String = "2021-01-02T12:00:00Z" -val timestampColumn: String = "timestamp" -val inputColumns: Array[String] = Array("feature0", "feature1", "feature2") -val containerName: String = "madtest" -val intermediateSaveDir: String = "intermediateData" -val anomalyKey = sys.env.getOrElse("ANOMALY_API_KEY", None) -val connectionString = sys.env.getOrElse("MADTEST_CONNECTION_STRING", None) - -val fitMultivariateAnomaly = (new FitMultivariateAnomaly() - .setSubscriptionKey(anomalyKey) - .setLocation("westus2") - .setOutputCol("result") - .setStartTime(startTime) - .setEndTime(endTime) - .setContainerName(containerName) - .setIntermediateSaveDir(intermediateSaveDir) - .setTimestampCol(timestampColumn) - .setInputCols(inputColumns) - .setSlidingWindow(200) - .setConnectionString(connectionString)) - -val df = (spark.read.format("csv") - .option("header", True) - .load("wasbs://datasets@mmlspark.blob.core.windows.net/MAD/mad_example.csv")) - -val model = fitMultivariateAnomaly.fit(df) - -val result = (model - .setStartTime(startTime) - .setEndTime(endTime) - .setOutputCol("result") - .setTimestampCol(timestampColumn) - .setInputCols(inputColumns) - .transform(df)) - -result.show() - -fitMultivariateAnomaly.cleanUpIntermediateData() -model.cleanUpIntermediateData() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_AutoML.md b/website/versioned_docs/version-0.10.0/documentation/estimators/core/_AutoML.md deleted file mode 100644 index 432d6cf5e8..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_AutoML.md +++ /dev/null @@ -1,214 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## AutoML - -### FindBestModel - - - - - - -```python -from synapse.ml.automl import * -from synapse.ml.train import * -from pyspark.ml.classification import RandomForestClassifier - -df = (spark.createDataFrame([ - (0, 2, 0.50, 0.60, 0), - (1, 3, 0.40, 0.50, 1), - (0, 4, 0.78, 0.99, 2), - (1, 5, 0.12, 0.34, 3), - (0, 1, 0.50, 0.60, 0), - (1, 3, 0.40, 0.50, 1), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3), - (0, 0, 0.50, 0.60, 0), - (1, 2, 0.40, 0.50, 1), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3) -], ["Label", "col1", "col2", "col3", "col4"])) - -# mocking models -randomForestClassifier = (TrainClassifier() - .setModel(RandomForestClassifier() - .setMaxBins(32) - .setMaxDepth(5) - .setMinInfoGain(0.0) - .setMinInstancesPerNode(1) - .setNumTrees(20) - .setSubsamplingRate(1.0) - .setSeed(0)) - .setFeaturesCol("mlfeatures") - .setLabelCol("Label")) -model = 
randomForestClassifier.fit(df) - -findBestModel = (FindBestModel() - .setModels([model, model]) - .setEvaluationMetric("accuracy")) -bestModel = findBestModel.fit(df) -bestModel.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.automl._ -import com.microsoft.azure.synapse.ml.train._ -import spark.implicits._ -import org.apache.spark.ml.Transformer - -val df = (Seq( - (0, 2, 0.50, 0.60, 0), - (1, 3, 0.40, 0.50, 1), - (0, 4, 0.78, 0.99, 2), - (1, 5, 0.12, 0.34, 3), - (0, 1, 0.50, 0.60, 0), - (1, 3, 0.40, 0.50, 1), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3), - (0, 0, 0.50, 0.60, 0), - (1, 2, 0.40, 0.50, 1), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3) - ).toDF("Label", "col1", "col2", "col3", "col4")) - -// mocking models -val randomForestClassifier = (new TrainClassifier() - .setModel( - new RandomForestClassifier() - .setMaxBins(32) - .setMaxDepth(5) - .setMinInfoGain(0.0) - .setMinInstancesPerNode(1) - .setNumTrees(20) - .setSubsamplingRate(1.0) - .setSeed(0L)) - .setFeaturesCol("mlfeatures") - .setLabelCol("Label")) -val model = randomForestClassifier.fit(df) - -val findBestModel = (new FindBestModel() - .setModels(Array(model.asInstanceOf[Transformer], model.asInstanceOf[Transformer])) - .setEvaluationMetric("accuracy")) -val bestModel = findBestModel.fit(df) -bestModel.transform(df).show() -``` - - - - - - - -### TuneHyperparameters - - - - - - - -```python -from synapse.ml.automl import * -from synapse.ml.train import * -from pyspark.ml.classification import LogisticRegression, RandomForestClassifier, GBTClassifier - - -df = (spark.createDataFrame([ - (0, 1, 1, 1, 1, 1, 1.0, 3, 1, 1), - (0, 1, 1, 1, 1, 2, 1.0, 1, 1, 1), - (0, 1, 1, 1, 1, 2, 1.0, 2, 1, 1), - (0, 1, 2, 3, 1, 2, 1.0, 3, 1, 1), - (0, 3, 1, 1, 1, 2, 1.0, 3, 1, 1) -], ["Label", "Clump_Thickness", "Uniformity_of_Cell_Size", - "Uniformity_of_Cell_Shape", "Marginal_Adhesion", "Single_Epithelial_Cell_Size", - "Bare_Nuclei", "Bland_Chromatin", "Normal_Nucleoli", "Mitoses"])) - -logReg = LogisticRegression() -randForest = RandomForestClassifier() -gbt = GBTClassifier() -smlmodels = [logReg, randForest, gbt] -mmlmodels = [TrainClassifier(model=model, labelCol="Label") for model in smlmodels] - -paramBuilder = (HyperparamBuilder() - .addHyperparam(logReg, logReg.regParam, RangeHyperParam(0.1, 0.3)) - .addHyperparam(randForest, randForest.numTrees, DiscreteHyperParam([5,10])) - .addHyperparam(randForest, randForest.maxDepth, DiscreteHyperParam([3,5])) - .addHyperparam(gbt, gbt.maxBins, RangeHyperParam(8,16)) - .addHyperparam(gbt, gbt.maxDepth, DiscreteHyperParam([3,5]))) -searchSpace = paramBuilder.build() -# The search space is a list of params to tuples of estimator and hyperparam -randomSpace = RandomSpace(searchSpace) - -bestModel = TuneHyperparameters( - evaluationMetric="accuracy", models=mmlmodels, numFolds=2, - numRuns=len(mmlmodels) * 2, parallelism=2, - paramSpace=randomSpace.space(), seed=0).fit(df) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.automl._ -import com.microsoft.azure.synapse.ml.train._ -import spark.implicits._ - -val logReg = new LogisticRegression() -val randForest = new RandomForestClassifier() -val gbt = new GBTClassifier() -val smlmodels = Seq(logReg, randForest, gbt) -val mmlmodels = smlmodels.map(model => new TrainClassifier().setModel(model).setLabelCol("Label")) - -val paramBuilder = new HyperparamBuilder() - .addHyperparam(logReg.regParam, new DoubleRangeHyperParam(0.1, 0.3)) - .addHyperparam(randForest.numTrees, new 
DiscreteHyperParam(List(5,10))) - .addHyperparam(randForest.maxDepth, new DiscreteHyperParam(List(3,5))) - .addHyperparam(gbt.maxBins, new IntRangeHyperParam(8,16)) -.addHyperparam(gbt.maxDepth, new DiscreteHyperParam(List(3,5))) -val searchSpace = paramBuilder.build() -val randomSpace = new RandomSpace(searchSpace) - -val dataset: DataFrame = Seq( - (0, 1, 1, 1, 1, 1, 1.0, 3, 1, 1), - (0, 1, 1, 1, 1, 2, 1.0, 1, 1, 1), - (0, 1, 1, 1, 1, 2, 1.0, 2, 1, 1), - (0, 1, 2, 3, 1, 2, 1.0, 3, 1, 1), - (0, 3, 1, 1, 1, 2, 1.0, 3, 1, 1)) - .toDF("Label", "Clump_Thickness", "Uniformity_of_Cell_Size", - "Uniformity_of_Cell_Shape", "Marginal_Adhesion", "Single_Epithelial_Cell_Size", - "Bare_Nuclei", "Bland_Chromatin", "Normal_Nucleoli", "Mitoses") - -val tuneHyperparameters = new TuneHyperparameters().setEvaluationMetric("accuracy") - .setModels(mmlmodels.toArray).setNumFolds(2).setNumRuns(mmlmodels.length * 2) - .setParallelism(1).setParamSpace(randomSpace).setSeed(0) -tuneHyperparameters.fit(dataset).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Featurize.md b/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Featurize.md deleted file mode 100644 index a45010fcc6..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Featurize.md +++ /dev/null @@ -1,332 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## Featurize - -### CleanMissingData - - - - - - -```python -from synapse.ml.featurize import * - -dataset = spark.createDataFrame([ - (0, 2, 0.50, 0.60, 0), - (1, 3, 0.40, None, None), - (0, 4, 0.78, 0.99, 2), - (1, 5, 0.12, 0.34, 3), - (0, 1, 0.50, 0.60, 0), - (None, None, None, None, None), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3), - (0, None, 0.50, 0.60, 0), - (1, 2, 0.40, 0.50, None), - (0, 3, None, 0.99, 2), - (1, 4, 0.12, 0.34, 3) -], ["col1", "col2", "col3", "col4", "col5"]) - -cmd = (CleanMissingData() - .setInputCols(dataset.columns) - .setOutputCols(dataset.columns) - .setCleaningMode("Mean")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize._ -import java.lang.{Boolean => JBoolean, Double => JDouble, Integer => JInt} -import spark.implicits._ - -def createMockDataset: DataFrame = { - Seq[(JInt, JInt, JDouble, JDouble, JInt)]( - (0, 2, 0.50, 0.60, 0), - (1, 3, 0.40, null, null), - (0, 4, 0.78, 0.99, 2), - (1, 5, 0.12, 0.34, 3), - (0, 1, 0.50, 0.60, 0), - (null, null, null, null, null), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3), - (0, null, 0.50, 0.60, 0), - (1, 2, 0.40, 0.50, null), - (0, 3, null, 0.99, 2), - (1, 4, 0.12, 0.34, 3)) - .toDF("col1", "col2", "col3", "col4", "col5") - } - -val dataset = createMockDataset -val cmd = (new CleanMissingData() - .setInputCols(dataset.columns) - .setOutputCols(dataset.columns) - .setCleaningMode("Mean")) -``` - - - - - - - -### CountSelector - - - - - - - - - -```python -from synapse.ml.featurize import * -from pyspark.ml.linalg import Vectors - -df = spark.createDataFrame([ - (Vectors.sparse(3, [(0, 1.0), (2, 2.0)]), Vectors.dense(1.0, 0.1, 0)), - (Vectors.sparse(3, [(0, 1.0), (2, 2.0)]), Vectors.dense(1.0, 0.1, 0)) -], ["col1", "col2"]) - -cs = CountSelector().setInputCol("col1").setOutputCol("col3") - -cs.fit(df).transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize._ -import org.apache.spark.ml.linalg.Vectors -import spark.implicits._ - -val df = Seq( - (Vectors.sparse(3, Seq((0, 
1.0), (2, 2.0))), Vectors.dense(1.0, 0.1, 0)), - (Vectors.sparse(3, Seq((0, 1.0), (2, 2.0))), Vectors.dense(1.0, 0.1, 0)) - ).toDF("col1", "col2") - -val cs = (new CountSelector() - .setInputCol("col1") - .setOutputCol("col3")) - -cs.fit(df).transform(df).show() -``` - - - - - - - -### Featurize - - - - - - - - - -```python -from synapse.ml.featurize import * - -dataset = spark.createDataFrame([ - (0, 2, 0.50, 0.60, "pokemon are everywhere"), - (1, 3, 0.40, 0.50, "they are in the woods"), - (0, 4, 0.78, 0.99, "they are in the water"), - (1, 5, 0.12, 0.34, "they are in the fields"), - (0, 3, 0.78, 0.99, "pokemon - gotta catch em all") -], ["Label", "col1", "col2", "col3"]) - -feat = (Featurize() - .setNumFeatures(10) - .setOutputCol("testColumn") - .setInputCols(["col1", "col2", "col3"]) - .setOneHotEncodeCategoricals(False)) - -feat.fit(dataset).transform(dataset).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize._ -import spark.implicits._ - -val dataset = Seq( - (0, 2, 0.50, 0.60, "pokemon are everywhere"), - (1, 3, 0.40, 0.50, "they are in the woods"), - (0, 4, 0.78, 0.99, "they are in the water"), - (1, 5, 0.12, 0.34, "they are in the fields"), - (0, 3, 0.78, 0.99, "pokemon - gotta catch em all")).toDF("Label", "col1", "col2", "col3") - -val featureColumns = dataset.columns.filter(_ != "Label") - -val feat = (new Featurize() - .setNumFeatures(10) - .setOutputCol("testColumn") - .setInputCols(featureColumns) - .setOneHotEncodeCategoricals(false)) - -feat.fit(dataset).transform(dataset).show() -``` - - - - - - - -### ValueIndexer - - - - - - - - - -```python -from synapse.ml.featurize import * - -df = spark.createDataFrame([ - (-3, 24, 0.32534, True, "piano"), - (1, 5, 5.67, False, "piano"), - (-3, 5, 0.32534, False, "guitar") -], ["int", "long", "double", "bool", "string"]) - -vi = ValueIndexer().setInputCol("string").setOutputCol("string_cat") - -vi.fit(df).transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize._ -import spark.implicits._ - -val df = Seq[(Int, Long, Double, Boolean, String)]( - (-3, 24L, 0.32534, true, "piano"), - (1, 5L, 5.67, false, "piano"), - (-3, 5L, 0.32534, false, "guitar")).toDF("int", "long", "double", "bool", "string") - -val vi = new ValueIndexer().setInputCol("string").setOutputCol("string_cat") - -vi.fit(df).transform(df).show() -``` - - - - - - -## Featurize Text - -### TextFeaturizer - - - - - - - - - -```python -from synapse.ml.featurize.text import * - -dfRaw = spark.createDataFrame([ - (0, "Hi I"), - (1, "I wish for snow today"), - (2, "we Cant go to the park, because of the snow!"), - (3, "") -], ["label", "sentence"]) - -tfRaw = (TextFeaturizer() - .setInputCol("sentence") - .setOutputCol("features") - .setNumFeatures(20)) - -tfRaw.fit(dfRaw).transform(dfRaw).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize.text._ -import spark.implicits._ - -val dfRaw = Seq((0, "Hi I"), - (1, "I wish for snow today"), - (2, "we Cant go to the park, because of the snow!"), - (3, "")).toDF("label", "sentence") - -val tfRaw = (new TextFeaturizer() - .setInputCol("sentence") - .setOutputCol("features") - .setNumFeatures(20)) - -tfRaw.fit(dfRaw).transform(dfRaw).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_IsolationForest.md b/website/versioned_docs/version-0.10.0/documentation/estimators/core/_IsolationForest.md deleted file mode 100644 index ae542bcf99..0000000000 --- 
a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_IsolationForest.md +++ /dev/null @@ -1,65 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Isolation Forest - -### IsolationForest - - - - - - -```python -from synapse.ml.isolationforest import * - -isolationForest = (IsolationForest() - .setNumEstimators(100) - .setBootstrap(False) - .setMaxSamples(256) - .setMaxFeatures(1.0) - .setFeaturesCol("features") - .setPredictionCol("predictedLabel") - .setScoreCol("outlierScore") - .setContamination(0.02) - .setContaminationError(0.02 * 0.01) - .setRandomSeed(1)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.isolationforest._ -import spark.implicits._ - -val isolationForest = (new IsolationForest() - .setNumEstimators(100) - .setBootstrap(false) - .setMaxSamples(256) - .setMaxFeatures(1.0) - .setFeaturesCol("features") - .setPredictionCol("predictedLabel") - .setScoreCol("outlierScore") - .setContamination(0.02) - .setContaminationError(0.02 * 0.01) - .setRandomSeed(1)) -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_NN.md b/website/versioned_docs/version-0.10.0/documentation/estimators/core/_NN.md deleted file mode 100644 index eb0f0243c1..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_NN.md +++ /dev/null @@ -1,92 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## NN - -### ConditionalKNN - - - - - - -```python -from synapse.ml.nn import * - -cknn = (ConditionalKNN() - .setOutputCol("matches") - .setFeaturesCol("features")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.nn._ -import spark.implicits._ - -val cknn = (new ConditionalKNN() - .setOutputCol("matches") - .setFeaturesCol("features")) -``` - - - - - - - -### KNN - - - - - - - - - -```python -from synapse.ml.nn import * - -knn = (KNN() - .setOutputCol("matches")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.nn._ -import spark.implicits._ - -val knn = (new KNN() - .setOutputCol("matches")) -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Recommendation.md b/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Recommendation.md deleted file mode 100644 index 98f9501736..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Recommendation.md +++ /dev/null @@ -1,379 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Recommendation - -### RecommendationIndexer, RankingEvaluator, RankingAdapter and RankingTrainValidationSplit - - - - - - -```python -from synapse.ml.recommendation import * -from pyspark.ml.recommendation import ALS -from pyspark.ml.tuning import * - -ratings = (spark.createDataFrame([ - ("11", "Movie 01", 2), - ("11", "Movie 03", 1), - ("11", "Movie 04", 5), - ("11", "Movie 05", 3), - ("11", "Movie 06", 4), - ("11", "Movie 07", 1), - ("11", "Movie 08", 5), - ("11", "Movie 09", 3), - ("22", "Movie 01", 4), - ("22", "Movie 02", 5), - ("22", "Movie 03", 1), - ("22", "Movie 05", 3), - ("22", "Movie 06", 3), - ("22", "Movie 07", 5), - ("22", "Movie 08", 1), - ("22", "Movie 10", 3), - ("33", "Movie 01", 4), - ("33", "Movie 03", 1), - ("33", "Movie 04", 5), - ("33", "Movie 05", 3), - ("33", "Movie 06", 4), - ("33", 
"Movie 08", 1), - ("33", "Movie 09", 5), - ("33", "Movie 10", 3), - ("44", "Movie 01", 4), - ("44", "Movie 02", 5), - ("44", "Movie 03", 1), - ("44", "Movie 05", 3), - ("44", "Movie 06", 4), - ("44", "Movie 07", 5), - ("44", "Movie 08", 1), - ("44", "Movie 10", 3) - ], ["customerIDOrg", "itemIDOrg", "rating"]) - .dropDuplicates() - .cache()) - -recommendationIndexer = (RecommendationIndexer() - .setUserInputCol("customerIDOrg") - .setUserOutputCol("customerID") - .setItemInputCol("itemIDOrg") - .setItemOutputCol("itemID") - .setRatingCol("rating")) - -transformedDf = (recommendationIndexer.fit(ratings) - .transform(ratings).cache()) - -als = (ALS() - .setNumUserBlocks(1) - .setNumItemBlocks(1) - .setUserCol("customerID") - .setItemCol("itemID") - .setRatingCol("rating") - .setSeed(0)) - -evaluator = (RankingEvaluator() - .setK(3) - .setNItems(10)) - -adapter = (RankingAdapter() - .setK(evaluator.getK()) - .setRecommender(als)) - -adapter.fit(transformedDf).transform(transformedDf).show() - -paramGrid = (ParamGridBuilder() - .addGrid(als.regParam, [1.0]) - .build()) - -tvRecommendationSplit = (RankingTrainValidationSplit() - .setEstimator(als) - .setEvaluator(evaluator) - .setEstimatorParamMaps(paramGrid) - .setTrainRatio(0.8) - .setUserCol(recommendationIndexer.getUserOutputCol()) - .setItemCol(recommendationIndexer.getItemOutputCol()) - .setRatingCol("rating")) - -tvRecommendationSplit.fit(transformedDf).transform(transformedDf).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.recommendation._ -import org.apache.spark.ml.recommendation.ALS -import org.apache.spark.ml.tuning._ -import spark.implicits._ - -val ratings = (Seq( - ("11", "Movie 01", 2), - ("11", "Movie 03", 1), - ("11", "Movie 04", 5), - ("11", "Movie 05", 3), - ("11", "Movie 06", 4), - ("11", "Movie 07", 1), - ("11", "Movie 08", 5), - ("11", "Movie 09", 3), - ("22", "Movie 01", 4), - ("22", "Movie 02", 5), - ("22", "Movie 03", 1), - ("22", "Movie 05", 3), - ("22", "Movie 06", 3), - ("22", "Movie 07", 5), - ("22", "Movie 08", 1), - ("22", "Movie 10", 3), - ("33", "Movie 01", 4), - ("33", "Movie 03", 1), - ("33", "Movie 04", 5), - ("33", "Movie 05", 3), - ("33", "Movie 06", 4), - ("33", "Movie 08", 1), - ("33", "Movie 09", 5), - ("33", "Movie 10", 3), - ("44", "Movie 01", 4), - ("44", "Movie 02", 5), - ("44", "Movie 03", 1), - ("44", "Movie 05", 3), - ("44", "Movie 06", 4), - ("44", "Movie 07", 5), - ("44", "Movie 08", 1), - ("44", "Movie 10", 3)) - .toDF("customerIDOrg", "itemIDOrg", "rating") - .dropDuplicates() - .cache()) - -val recommendationIndexer = (new RecommendationIndexer() - .setUserInputCol("customerIDOrg") - .setUserOutputCol("customerID") - .setItemInputCol("itemIDOrg") - .setItemOutputCol("itemID") - .setRatingCol("rating")) - -val transformedDf = (recommendationIndexer.fit(ratings) - .transform(ratings).cache()) - -val als = (new ALS() - .setNumUserBlocks(1) - .setNumItemBlocks(1) - .setUserCol("customerID") - .setItemCol("itemID") - .setRatingCol("rating") - .setSeed(0)) - -val evaluator = (new RankingEvaluator() - .setK(3) - .setNItems(10)) - -val adapter = (new RankingAdapter() - .setK(evaluator.getK) - .setRecommender(als)) - -adapter.fit(transformedDf).transform(transformedDf).show() - -val paramGrid = (new ParamGridBuilder() - .addGrid(als.regParam, Array(1.0)) - .build()) - -val tvRecommendationSplit = (new RankingTrainValidationSplit() - .setEstimator(als) - .setEvaluator(evaluator) - .setEstimatorParamMaps(paramGrid) - .setTrainRatio(0.8) - 
.setUserCol(recommendationIndexer.getUserOutputCol) - .setItemCol(recommendationIndexer.getItemOutputCol) - .setRatingCol("rating")) - -tvRecommendationSplit.fit(transformedDf).transform(transformedDf).show() -``` - - - - - - - - - - -### SAR - - - - - - - - - -```python -from synapse.ml.recommendation import * - -ratings = (spark.createDataFrame([ - ("11", "Movie 01", 2), - ("11", "Movie 03", 1), - ("11", "Movie 04", 5), - ("11", "Movie 05", 3), - ("11", "Movie 06", 4), - ("11", "Movie 07", 1), - ("11", "Movie 08", 5), - ("11", "Movie 09", 3), - ("22", "Movie 01", 4), - ("22", "Movie 02", 5), - ("22", "Movie 03", 1), - ("22", "Movie 05", 3), - ("22", "Movie 06", 3), - ("22", "Movie 07", 5), - ("22", "Movie 08", 1), - ("22", "Movie 10", 3), - ("33", "Movie 01", 4), - ("33", "Movie 03", 1), - ("33", "Movie 04", 5), - ("33", "Movie 05", 3), - ("33", "Movie 06", 4), - ("33", "Movie 08", 1), - ("33", "Movie 09", 5), - ("33", "Movie 10", 3), - ("44", "Movie 01", 4), - ("44", "Movie 02", 5), - ("44", "Movie 03", 1), - ("44", "Movie 05", 3), - ("44", "Movie 06", 4), - ("44", "Movie 07", 5), - ("44", "Movie 08", 1), - ("44", "Movie 10", 3) - ], ["customerIDOrg", "itemIDOrg", "rating"]) - .dropDuplicates() - .cache()) - -recommendationIndexer = (RecommendationIndexer() - .setUserInputCol("customerIDOrg") - .setUserOutputCol("customerID") - .setItemInputCol("itemIDOrg") - .setItemOutputCol("itemID") - .setRatingCol("rating")) - -algo = (SAR() - .setUserCol("customerID") - .setItemCol("itemID") - .setRatingCol("rating") - .setTimeCol("timestamp") - .setSupportThreshold(1) - .setSimilarityFunction("jacccard") - .setActivityTimeFormat("EEE MMM dd HH:mm:ss Z yyyy")) - -adapter = (RankingAdapter() - .setK(5) - .setRecommender(algo)) - -res1 = recommendationIndexer.fit(ratings).transform(ratings).cache() - -adapter.fit(res1).transform(res1).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.recommendation._ -import spark.implicits._ - -val ratings = (Seq( - ("11", "Movie 01", 2), - ("11", "Movie 03", 1), - ("11", "Movie 04", 5), - ("11", "Movie 05", 3), - ("11", "Movie 06", 4), - ("11", "Movie 07", 1), - ("11", "Movie 08", 5), - ("11", "Movie 09", 3), - ("22", "Movie 01", 4), - ("22", "Movie 02", 5), - ("22", "Movie 03", 1), - ("22", "Movie 05", 3), - ("22", "Movie 06", 3), - ("22", "Movie 07", 5), - ("22", "Movie 08", 1), - ("22", "Movie 10", 3), - ("33", "Movie 01", 4), - ("33", "Movie 03", 1), - ("33", "Movie 04", 5), - ("33", "Movie 05", 3), - ("33", "Movie 06", 4), - ("33", "Movie 08", 1), - ("33", "Movie 09", 5), - ("33", "Movie 10", 3), - ("44", "Movie 01", 4), - ("44", "Movie 02", 5), - ("44", "Movie 03", 1), - ("44", "Movie 05", 3), - ("44", "Movie 06", 4), - ("44", "Movie 07", 5), - ("44", "Movie 08", 1), - ("44", "Movie 10", 3)) - .toDF("customerIDOrg", "itemIDOrg", "rating") - .dropDuplicates() - .cache()) - -val recommendationIndexer = (new RecommendationIndexer() - .setUserInputCol("customerIDOrg") - .setUserOutputCol("customerID") - .setItemInputCol("itemIDOrg") - .setItemOutputCol("itemID") - .setRatingCol("rating")) - -val algo = (new SAR() - .setUserCol("customerID") - .setItemCol("itemID") - .setRatingCol("rating") - .setTimeCol("timestamp") - .setSupportThreshold(1) - .setSimilarityFunction("jacccard") - .setActivityTimeFormat("EEE MMM dd HH:mm:ss Z yyyy")) - -val adapter = (new RankingAdapter() - .setK(5) - .setRecommender(algo)) - -val res1 = recommendationIndexer.fit(ratings).transform(ratings).cache() - -adapter.fit(res1).transform(res1).show() -``` - - - - - diff 
--git a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Stages.md b/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Stages.md deleted file mode 100644 index 853a2a35d1..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Stages.md +++ /dev/null @@ -1,219 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Stages - -### ClassBalancer - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, 1.0, "Hi I"), - (1, 1.0, "I wish for snow today"), - (2, 2.0, "I wish for snow today"), - (3, 2.0, "I wish for snow today"), - (4, 2.0, "I wish for snow today"), - (5, 2.0, "I wish for snow today"), - (6, 0.0, "I wish for snow today"), - (7, 1.0, "I wish for snow today"), - (8, 0.0, "we Cant go to the park, because of the snow!"), - (9, 2.0, "") - ], ["index", "label", "sentence"])) - -cb = ClassBalancer().setInputCol("label") - -cb.fit(df).transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = Seq( - (0, 1.0, "Hi I"), - (1, 1.0, "I wish for snow today"), - (2, 2.0, "I wish for snow today"), - (3, 2.0, "I wish for snow today"), - (4, 2.0, "I wish for snow today"), - (5, 2.0, "I wish for snow today"), - (6, 0.0, "I wish for snow today"), - (7, 1.0, "I wish for snow today"), - (8, 0.0, "we Cant go to the park, because of the snow!"), - (9, 2.0, "")).toDF("index", "label", "sentence") - -val cb = new ClassBalancer().setInputCol("label") - -cb.fit(df).transform(df).show() -``` - - - - - - - -### MultiColumnAdapter - - - - - - - - - -```python -from synapse.ml.stages import * -from pyspark.ml.feature import Tokenizer - -df = (spark.createDataFrame([ - (0, "This is a test", "this is one too"), - (1, "could be a test", "bar"), - (2, "foo", "bar"), - (3, "foo", "maybe not") - ], ["label", "words1", "words2"])) - -stage1 = Tokenizer() -mca = (MultiColumnAdapter() - .setBaseStage(stage1) - .setInputCols(["words1", "words2"]) - .setOutputCols(["output1", "output2"])) - -mca.fit(df).transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ -import org.apache.spark.ml.feature.Tokenizer - -val df = (Seq( - (0, "This is a test", "this is one too"), - (1, "could be a test", "bar"), - (2, "foo", "bar"), - (3, "foo", "maybe not")) - .toDF("label", "words1", "words2")) - -val stage1 = new Tokenizer() -val mca = (new MultiColumnAdapter() - .setBaseStage(stage1) - .setInputCols(Array[String]("words1", "words2")) - .setOutputCols(Array[String]("output1", "output2"))) - -mca.fit(df).transform(df).show() -``` - - - - - - - -### Timer - - - - - - - - - -```python -from synapse.ml.stages import * -from pyspark.ml.feature import * - -df = (spark.createDataFrame([ - (0, "Hi I"), - (1, "I wish for snow today"), - (2, "we Cant go to the park, because of the snow!"), - (3, "") - ], ["label", "sentence"])) - -tok = (Tokenizer() - .setInputCol("sentence") - .setOutputCol("tokens")) - -df2 = Timer().setStage(tok).fit(df).transform(df) - -df3 = HashingTF().setInputCol("tokens").setOutputCol("hash").transform(df2) - -idf = IDF().setInputCol("hash").setOutputCol("idf") -timer = Timer().setStage(idf) - -timer.fit(df3).transform(df3).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ -import org.apache.spark.ml.feature._ - -val df = (Seq( - (0, "Hi I"), - (1, "I wish for snow today"), - (2, "we Cant go to the park, because of the 
snow!"), - (3, "") - ).toDF("label", "sentence")) - -val tok = (new Tokenizer() - .setInputCol("sentence") - .setOutputCol("tokens")) - -val df2 = new Timer().setStage(tok).fit(df).transform(df) - -val df3 = new HashingTF().setInputCol("tokens").setOutputCol("hash").transform(df2) - -val idf = new IDF().setInputCol("hash").setOutputCol("idf") -val timer = new Timer().setStage(idf) - -timer.fit(df3).transform(df3).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Train.md b/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Train.md deleted file mode 100644 index 46853e323a..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/core/_Train.md +++ /dev/null @@ -1,171 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Train - -### TrainClassifier - - - - - - -```python -from synapse.ml.train import * -from pyspark.ml.classification import LogisticRegression - -df = spark.createDataFrame([ - (0, 2, 0.50, 0.60, 0), - (1, 3, 0.40, 0.50, 1), - (0, 4, 0.78, 0.99, 2), - (1, 5, 0.12, 0.34, 3), - (0, 1, 0.50, 0.60, 0), - (1, 3, 0.40, 0.50, 1), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3), - (0, 0, 0.50, 0.60, 0), - (1, 2, 0.40, 0.50, 1), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3)], - ["Label", "col1", "col2", "col3", "col4"] -) - -tc = (TrainClassifier() - .setModel(LogisticRegression()) - .setLabelCol("Label")) - -tc.fit(df).transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.train._ -import org.apache.spark.ml.classification.LogisticRegression - -val df = (Seq( - (0, 2, 0.50, 0.60, 0), - (1, 3, 0.40, 0.50, 1), - (0, 4, 0.78, 0.99, 2), - (1, 5, 0.12, 0.34, 3), - (0, 1, 0.50, 0.60, 0), - (1, 3, 0.40, 0.50, 1), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3), - (0, 0, 0.50, 0.60, 0), - (1, 2, 0.40, 0.50, 1), - (0, 3, 0.78, 0.99, 2), - (1, 4, 0.12, 0.34, 3)) - .toDF("Label", "col1", "col2", "col3", "col4")) - -val tc = (new TrainClassifier() - .setModel(new LogisticRegression()) - .setLabelCol("Label")) - -tc.fit(df).transform(df).show() -``` - - - - - - - -### TrainRegressor - - - - - - - - - -```python -from synapse.ml.train import * -from pyspark.ml.regression import LinearRegression - -dataset = (spark.createDataFrame([ - (0.0, 2, 0.50, 0.60, 0.0), - (1.0, 3, 0.40, 0.50, 1.0), - (2.0, 4, 0.78, 0.99, 2.0), - (3.0, 5, 0.12, 0.34, 3.0), - (0.0, 1, 0.50, 0.60, 0.0), - (1.0, 3, 0.40, 0.50, 1.0), - (2.0, 3, 0.78, 0.99, 2.0), - (3.0, 4, 0.12, 0.34, 3.0), - (0.0, 0, 0.50, 0.60, 0.0), - (1.0, 2, 0.40, 0.50, 1.0), - (2.0, 3, 0.78, 0.99, 2.0), - (3.0, 4, 0.12, 0.34, 3.0)], - ["label", "col1", "col2", "col3", "col4"])) - -linearRegressor = (LinearRegression() - .setRegParam(0.3) - .setElasticNetParam(0.8)) -trainRegressor = (TrainRegressor() - .setModel(linearRegressor) - .setLabelCol("label")) - -trainRegressor.fit(dataset).transform(dataset).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.train._ -import org.apache.spark.ml.regression.LinearRegression - -val dataset = (spark.createDataFrame(Seq( - (0.0, 2, 0.50, 0.60, 0.0), - (1.0, 3, 0.40, 0.50, 1.0), - (2.0, 4, 0.78, 0.99, 2.0), - (3.0, 5, 0.12, 0.34, 3.0), - (0.0, 1, 0.50, 0.60, 0.0), - (1.0, 3, 0.40, 0.50, 1.0), - (2.0, 3, 0.78, 0.99, 2.0), - (3.0, 4, 0.12, 0.34, 3.0), - (0.0, 0, 0.50, 0.60, 0.0), - (1.0, 2, 0.40, 0.50, 1.0), - (2.0, 3, 0.78, 0.99, 2.0), - (3.0, 4, 0.12, 0.34, 3.0))) - .toDF("label", 
"col1", "col2", "col3", "col4")) - -val linearRegressor = (new LinearRegression() - .setRegParam(0.3) - .setElasticNetParam(0.8)) -val trainRegressor = (new TrainRegressor() - .setModel(linearRegressor) - .setLabelCol("label")) - -trainRegressor.fit(dataset).transform(dataset).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_cognitive.md b/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_cognitive.md deleted file mode 100644 index e5cd2ef5f2..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_cognitive.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Estimators - Cognitive -sidebar_label: Cognitive -hide_title: true ---- - - -import MAD, {toc as MADTOC} from './cognitive/_MAD.md'; - - - -export const toc = [...MADTOC] diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_core.md b/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_core.md deleted file mode 100644 index 8f32bcfd16..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_core.md +++ /dev/null @@ -1,42 +0,0 @@ ---- -title: Estimators - Core -sidebar_label: Core -hide_title: true ---- - - -import AutoML, {toc as AutoMLTOC} from './core/_AutoML.md'; - - - - -import Featurize, {toc as FeaturizeTOC} from './core/_Featurize.md'; - - - - -import IsolationForest, {toc as IsolationForestTOC} from './core/_IsolationForest.md'; - - - - -import NN, {toc as NNTOC} from './core/_NN.md'; - - - - -import Recommendation, {toc as RecommendationTOC} from './core/_Recommendation.md'; - - - - -import Stages, {toc as StagesTOC} from './core/_Stages.md'; - - - -import Train, {toc as TrainTOC} from './core/_Train.md'; - - - -export const toc = [...AutoMLTOC, ...FeaturizeTOC, ...IsolationForestTOC, -...NNTOC, ...RecommendationTOC, ...StagesTOC, ...TrainTOC] diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_lightgbm.md b/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_lightgbm.md deleted file mode 100644 index 2582e41dc6..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_lightgbm.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Estimators - LightGBM -sidebar_label: LightGBM -hide_title: true ---- - -# LightGBM - -import LightGBM, {toc as LightGBMTOC} from './_LightGBM.md'; - - - -export const toc = [...LightGBMTOC] diff --git a/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_vw.md b/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_vw.md deleted file mode 100644 index 80172ac798..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/estimators/estimators_vw.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Estimators - Vowpal Wabbit -sidebar_label: Vowpal Wabbit -hide_title: true ---- - -# Vowpal Wabbit - -import VW, {toc as VWTOC} from './_VW.md'; - - - -export const toc = [...VWTOC] diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/_OpenCV.md b/website/versioned_docs/version-0.10.0/documentation/transformers/_OpenCV.md deleted file mode 100644 index da354ee532..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/_OpenCV.md +++ /dev/null @@ -1,121 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## ImageTransformer - - - - - - 
-```python -from synapse.ml.opencv import * -from pyspark.sql.types import FloatType - -# images = (spark.read.format("image") -# .option("dropInvalid", True) -# .load("wasbs://publicwasb@mmlspark.blob.core.windows.net/explainers/images/david-lusvardi-dWcUncxocQY-unsplash.jpg")) - -it = (ImageTransformer(inputCol="image", outputCol="features") - .resize(224, True) - .centerCrop(height=224, width=224) - .normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], color_scale_factor = 1/255) - .setTensorElementType(FloatType())) - -# it.transform(images).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.opencv._ - -val images = (spark.read.format("image") - .option("dropInvalid", true) - .load("wasbs://publicwasb@mmlspark.blob.core.windows.net/explainers/images/david-lusvardi-dWcUncxocQY-unsplash.jpg")) - -val it = (new ImageTransformer() - .setOutputCol("out") - .resize(height = 15, width = 10)) - -it.transform(images).show() -``` - - - - - - - -## ImageSetAugmenter - - - - - - - - - -```python -from synapse.ml.opencv import * - -# images = (spark.read.format("image") -# .option("dropInvalid", True) -# .load("wasbs://publicwasb@mmlspark.blob.core.windows.net/explainers/images/david-lusvardi-dWcUncxocQY-unsplash.jpg")) - -isa = (ImageSetAugmenter() - .setInputCol("image") - .setOutputCol("augmented") - .setFlipLeftRight(True) - .setFlipUpDown(True)) - -# it.transform(images).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.opencv._ - -val images = (spark.read.format("image") - .option("dropInvalid", true) - .load("wasbs://publicwasb@mmlspark.blob.core.windows.net/explainers/images/david-lusvardi-dWcUncxocQY-unsplash.jpg")) - -val isa = (new ImageSetAugmenter() - .setInputCol("image") - .setOutputCol("augmented") - .setFlipLeftRight(true) - .setFlipUpDown(true)) - -isa.transform(images).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/_VW.md b/website/versioned_docs/version-0.10.0/documentation/transformers/_VW.md deleted file mode 100644 index e1465699dd..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/_VW.md +++ /dev/null @@ -1,297 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## VectorZipper - - - - - - -```python -from synapse.ml.vw import * - -df = spark.createDataFrame([ - ("action1_f", "action2_f"), - ("action1_f", "action2_f"), - ("action1_f", "action2_f"), - ("action1_f", "action2_f") -], ["action1", "action2"]) - -actionOneFeaturizer = (VowpalWabbitFeaturizer() - .setInputCols(["action1"]) - .setOutputCol("sequence_one")) - -actionTwoFeaturizer = (VowpalWabbitFeaturizer() - .setInputCols(["action2"]) - .setOutputCol("sequence_two")) - -seqDF = actionTwoFeaturizer.transform(actionOneFeaturizer.transform(df)) - -vectorZipper = (VectorZipper() - .setInputCols(["sequence_one", "sequence_two"]) - .setOutputCol("out")) - -vectorZipper.transform(seqDF).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.vw._ - -val df = (Seq( - ("action1_f", "action2_f"), - ("action1_f", "action2_f"), - ("action1_f", "action2_f"), - ("action1_f", "action2_f") - ).toDF("action1", "action2")) - -val actionOneFeaturizer = (new VowpalWabbitFeaturizer() - .setInputCols(Array("action1")) - .setOutputCol("sequence_one")) - -val actionTwoFeaturizer = (new VowpalWabbitFeaturizer() - .setInputCols(Array("action2")) - .setOutputCol("sequence_two")) - -val seqDF = 
actionTwoFeaturizer.transform(actionOneFeaturizer.transform(df)) - -val vectorZipper = (new VectorZipper() - .setInputCols(Array("sequence_one", "sequence_two")) - .setOutputCol("out")) - -vectorZipper.transform(seqDF).show() -``` - - - - -```csharp -using System; -using System.Collections.Generic; -using Synapse.ML.Vw; -using Microsoft.Spark.Sql; -using Microsoft.Spark.Sql.Types; - -namespace SynapseMLApp -{ - class Program - { - static void Main(string[] args) - { - SparkSession spark = - SparkSession - .Builder() - .AppName("Example") - .GetOrCreate(); - - DataFrame df = spark.CreateDataFrame( - new List - { - new GenericRow(new object[] {"action1_f", "action2_f"}), - new GenericRow(new object[] {"action1_f", "action2_f"}), - new GenericRow(new object[] {"action1_f", "action2_f"}), - new GenericRow(new object[] {"action1_f", "action2_f"}) - }, - new StructType(new List - { - new StructField("action1", new StringType()), - new StructField("action2", new StringType()) - }) - ); - - var actionOneFeaturizer = new VowpalWabbitFeaturizer() - .SetInputCols(new string[]{"action1"}) - .SetOutputCol("sequence_one"); - var actionTwoFeaturizer = new VowpalWabbitFeaturizer() - .SetInputCols(new string[]{"action2"}) - .SetOutputCol("sequence_two"); - var seqDF = actionTwoFeaturizer.Transform(actionOneFeaturizer.Transform(df)); - - var vectorZipper = new VectorZipper() - .SetInputCols(new string[]{"sequence_one", "sequence_two"}) - .SetOutputCol("out"); - vectorZipper.Transform(seqDF).Show(); - - spark.Stop(); - } - } -} -``` - - - - - - - -## VowpalWabbitClassifier - - - - - - - - - - -```python -from synapse.ml.vw import * - -vw = (VowpalWabbitClassifier() - .setNumBits(10) - .setLearningRate(3.1) - .setPowerT(0) - .setLabelConversion(False)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.vw._ - -val vw = (new VowpalWabbitClassifier() - .setNumBits(10) - .setLearningRate(3.1) - .setPowerT(0) - .setLabelConversion(false)) -``` - - - - - - - -## VowpalWabbitFeaturizer - - - - - - - - - - -```python -from synapse.ml.vw import * - -featurizer = (VowpalWabbitFeaturizer() - .setStringSplitInputCols(["in"]) - .setPreserveOrderNumBits(2) - .setNumBits(18) - .setPrefixStringsWithColumnName(False) - .setOutputCol("features")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.vw._ - -val featurizer = (new VowpalWabbitFeaturizer() - .setStringSplitInputCols(Array("in")) - .setPreserveOrderNumBits(2) - .setNumBits(18) - .setPrefixStringsWithColumnName(false) - .setOutputCol("features")) -``` - - - - - - - -## VowpalWabbitInteractions - - - - - - - - - - -```python -from synapse.ml.vw import * - -interactions = (VowpalWabbitInteractions() - .setInputCols(["v1"]) - .setOutputCol("out")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.vw._ -import org.apache.spark.ml.linalg._ - -case class Data(v1: Vector, v2: Vector, v3: Vector) - -val df = spark.createDataFrame(Seq(Data( - Vectors.dense(Array(1.0, 2.0, 3.0)), - Vectors.sparse(8, Array(5), Array(4.0)), - Vectors.sparse(11, Array(8, 9), Array(7.0, 8.0)) -))) - -val interactions = (new VowpalWabbitInteractions() - .setInputCols(Array("v1")) - .setOutputCol("out")) - -interactions.transform(df).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_AnomalyDetection.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_AnomalyDetection.md deleted file mode 100644 index b60a0edc71..0000000000 --- 
a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_AnomalyDetection.md +++ /dev/null @@ -1,319 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Anomaly Detection - -### DetectLastAnomaly - - - - - - -```python -from synapse.ml.cognitive import * -from pyspark.sql.functions import lit - -anomalyKey = os.environ.get("ANOMALY_API_KEY", getSecret("anomaly-api-key")) -df = (spark.createDataFrame([ - ("1972-01-01T00:00:00Z", 826.0), - ("1972-02-01T00:00:00Z", 799.0), - ("1972-03-01T00:00:00Z", 890.0), - ("1972-04-01T00:00:00Z", 900.0), - ("1972-05-01T00:00:00Z", 766.0), - ("1972-06-01T00:00:00Z", 805.0), - ("1972-07-01T00:00:00Z", 821.0), - ("1972-08-01T00:00:00Z", 20000.0), - ("1972-09-01T00:00:00Z", 883.0), - ("1972-10-01T00:00:00Z", 898.0), - ("1972-11-01T00:00:00Z", 957.0), - ("1972-12-01T00:00:00Z", 924.0), - ("1973-01-01T00:00:00Z", 881.0), - ("1973-02-01T00:00:00Z", 837.0), - ("1973-03-01T00:00:00Z", 90000.0) -], ["timestamp", "value"]) - .withColumn("group", lit(1)) - .withColumn("inputs", struct(col("timestamp"), col("value"))) - .groupBy(col("group")) - .agg(sort_array(collect_list(col("inputs"))).alias("inputs"))) - -dla = (DetectLastAnomaly() - .setSubscriptionKey(anomalyKey) - .setLocation("westus2") - .setOutputCol("anomalies") - .setSeriesCol("inputs") - .setGranularity("monthly") - .setErrorCol("errors")) - -dla.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.spark.sql.functions.{col, collect_list, lit, sort_array, struct} - -val anomalyKey = sys.env.getOrElse("ANOMALY_API_KEY", None) -val df = (Seq( - ("1972-01-01T00:00:00Z", 826.0), - ("1972-02-01T00:00:00Z", 799.0), - ("1972-03-01T00:00:00Z", 890.0), - ("1972-04-01T00:00:00Z", 900.0), - ("1972-05-01T00:00:00Z", 766.0), - ("1972-06-01T00:00:00Z", 805.0), - ("1972-07-01T00:00:00Z", 821.0), - ("1972-08-01T00:00:00Z", 20000.0), - ("1972-09-01T00:00:00Z", 883.0), - ("1972-10-01T00:00:00Z", 898.0), - ("1972-11-01T00:00:00Z", 957.0), - ("1972-12-01T00:00:00Z", 924.0), - ("1973-01-01T00:00:00Z", 881.0), - ("1973-02-01T00:00:00Z", 837.0), - ("1973-03-01T00:00:00Z", 90000.0) - ).toDF("timestamp","value") - .withColumn("group", lit(1)) - .withColumn("inputs", struct(col("timestamp"), col("value"))) - .groupBy(col("group")) - .agg(sort_array(collect_list(col("inputs"))).alias("inputs"))) - -val dla = (new DetectLastAnomaly() - .setSubscriptionKey(anomalyKey) - .setLocation("westus2") - .setOutputCol("anomalies") - .setSeriesCol("inputs") - .setGranularity("monthly") - .setErrorCol("errors")) - -dla.transform(df).show() -``` - - - - - - -### DetectAnomalies - - - - - - - - - -```python -from synapse.ml.cognitive import * - -anomalyKey = os.environ.get("ANOMALY_API_KEY", getSecret("anomaly-api-key")) -df = (spark.createDataFrame([ - ("1972-01-01T00:00:00Z", 826.0), - ("1972-02-01T00:00:00Z", 799.0), - ("1972-03-01T00:00:00Z", 890.0), - ("1972-04-01T00:00:00Z", 900.0), - ("1972-05-01T00:00:00Z", 766.0), - ("1972-06-01T00:00:00Z", 805.0), - ("1972-07-01T00:00:00Z", 821.0), - ("1972-08-01T00:00:00Z", 20000.0), - ("1972-09-01T00:00:00Z", 883.0), - ("1972-10-01T00:00:00Z", 898.0), - ("1972-11-01T00:00:00Z", 957.0), - ("1972-12-01T00:00:00Z", 924.0), - ("1973-01-01T00:00:00Z", 881.0), - ("1973-02-01T00:00:00Z", 837.0), - ("1973-03-01T00:00:00Z", 90000.0) -], ["timestamp", "value"]) - .withColumn("group", lit(1)) - .withColumn("inputs", 
struct(col("timestamp"), col("value"))) - .groupBy(col("group")) - .agg(sort_array(collect_list(col("inputs"))).alias("inputs"))) - -da = (DetectAnomalies() - .setSubscriptionKey(anomalyKey) - .setLocation("westus2") - .setOutputCol("anomalies") - .setSeriesCol("inputs") - .setGranularity("monthly")) - -da.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val anomalyKey = sys.env.getOrElse("ANOMALY_API_KEY", None) -val df = (Seq( - ("1972-01-01T00:00:00Z", 826.0), - ("1972-02-01T00:00:00Z", 799.0), - ("1972-03-01T00:00:00Z", 890.0), - ("1972-04-01T00:00:00Z", 900.0), - ("1972-05-01T00:00:00Z", 766.0), - ("1972-06-01T00:00:00Z", 805.0), - ("1972-07-01T00:00:00Z", 821.0), - ("1972-08-01T00:00:00Z", 20000.0), - ("1972-09-01T00:00:00Z", 883.0), - ("1972-10-01T00:00:00Z", 898.0), - ("1972-11-01T00:00:00Z", 957.0), - ("1972-12-01T00:00:00Z", 924.0), - ("1973-01-01T00:00:00Z", 881.0), - ("1973-02-01T00:00:00Z", 837.0), - ("1973-03-01T00:00:00Z", 90000.0) - ).toDF("timestamp","value") - .withColumn("group", lit(1)) - .withColumn("inputs", struct(col("timestamp"), col("value"))) - .groupBy(col("group")) - .agg(sort_array(collect_list(col("inputs"))).alias("inputs"))) - -val da = (new DetectAnomalies() - .setSubscriptionKey(anomalyKey) - .setLocation("westus2") - .setOutputCol("anomalies") - .setSeriesCol("inputs") - .setGranularity("monthly")) - -da.transform(df).show() -``` - - - - - - -### SimpleDetectAnomalies - - - - - - - - - -```python -from synapse.ml.cognitive import * - -anomalyKey = os.environ.get("ANOMALY_API_KEY", getSecret("anomaly-api-key")) -df = (spark.createDataFrame([ - ("1972-01-01T00:00:00Z", 826.0, 1.0), - ("1972-02-01T00:00:00Z", 799.0, 1.0), - ("1972-03-01T00:00:00Z", 890.0, 1.0), - ("1972-04-01T00:00:00Z", 900.0, 1.0), - ("1972-05-01T00:00:00Z", 766.0, 1.0), - ("1972-06-01T00:00:00Z", 805.0, 1.0), - ("1972-07-01T00:00:00Z", 821.0, 1.0), - ("1972-08-01T00:00:00Z", 20000.0, 1.0), - ("1972-09-01T00:00:00Z", 883.0, 1.0), - ("1972-10-01T00:00:00Z", 898.0, 1.0), - ("1972-11-01T00:00:00Z", 957.0, 1.0), - ("1972-12-01T00:00:00Z", 924.0, 1.0), - ("1973-01-01T00:00:00Z", 881.0, 1.0), - ("1973-02-01T00:00:00Z", 837.0, 1.0), - ("1973-03-01T00:00:00Z", 90000.0, 1.0), - ("1972-01-01T00:00:00Z", 826.0, 2.0), - ("1972-02-01T00:00:00Z", 799.0, 2.0), - ("1972-03-01T00:00:00Z", 890.0, 2.0), - ("1972-04-01T00:00:00Z", 900.0, 2.0), - ("1972-05-01T00:00:00Z", 766.0, 2.0), - ("1972-06-01T00:00:00Z", 805.0, 2.0), - ("1972-07-01T00:00:00Z", 821.0, 2.0), - ("1972-08-01T00:00:00Z", 20000.0, 2.0), - ("1972-09-01T00:00:00Z", 883.0, 2.0), - ("1972-10-01T00:00:00Z", 898.0, 2.0), - ("1972-11-01T00:00:00Z", 957.0, 2.0), - ("1972-12-01T00:00:00Z", 924.0, 2.0), - ("1973-01-01T00:00:00Z", 881.0, 2.0), - ("1973-02-01T00:00:00Z", 837.0, 2.0), - ("1973-03-01T00:00:00Z", 90000.0, 2.0) -], ["timestamp", "value", "group"])) - -sda = (SimpleDetectAnomalies() - .setSubscriptionKey(anomalyKey) - .setLocation("westus2") - .setOutputCol("anomalies") - .setGroupbyCol("group") - .setGranularity("monthly")) - -sda.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val anomalyKey = sys.env.getOrElse("ANOMALY_API_KEY", None) -val baseSeq = Seq( - ("1972-01-01T00:00:00Z", 826.0), - ("1972-02-01T00:00:00Z", 799.0), - ("1972-03-01T00:00:00Z", 890.0), - ("1972-04-01T00:00:00Z", 900.0), - ("1972-05-01T00:00:00Z", 766.0), - ("1972-06-01T00:00:00Z", 805.0), - ("1972-07-01T00:00:00Z", 
821.0), - ("1972-08-01T00:00:00Z", 20000.0), - ("1972-09-01T00:00:00Z", 883.0), - ("1972-10-01T00:00:00Z", 898.0), - ("1972-11-01T00:00:00Z", 957.0), - ("1972-12-01T00:00:00Z", 924.0), - ("1973-01-01T00:00:00Z", 881.0), - ("1973-02-01T00:00:00Z", 837.0), - ("1973-03-01T00:00:00Z", 9000.0) - ) -val df = (baseSeq.map(p => (p._1,p._2,1.0)) - .++(baseSeq.map(p => (p._1,p._2,2.0))) - .toDF("timestamp","value","group")) - -val sda = (new SimpleDetectAnomalies() - .setSubscriptionKey(anomalyKey) - .setLocation("westus2") - .setOutputCol("anomalies") - .setGroupbyCol("group") - .setGranularity("monthly")) - -sda.transform(df).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_AzureSearch.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_AzureSearch.md deleted file mode 100644 index c60d9f0ed9..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_AzureSearch.md +++ /dev/null @@ -1,153 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## Azure Search - -### AzureSearch - - - - - - -```python -from synapse.ml.cognitive import * - -azureSearchKey = os.environ.get("AZURE_SEARCH_KEY", getSecret("azure-search-key")) -testServiceName = "mmlspark-azure-search" - -indexName = "test-website" - -def createSimpleIndexJson(indexName): - json_str = """ - { - "name": "%s", - "fields": [ - { - "name": "id", - "type": "Edm.String", - "key": true, - "facetable": false - }, - { - "name": "fileName", - "type": "Edm.String", - "searchable": false, - "sortable": false, - "facetable": false - }, - { - "name": "text", - "type": "Edm.String", - "filterable": false, - "sortable": false, - "facetable": false - } - ] - } - """ - - return json_str % indexName - -df = (spark.createDataFrame([ - ("upload", "0", "file0", "text0"), - ("upload", "1", "file1", "text1"), - ("upload", "2", "file2", "text2"), - ("upload", "3", "file3", "text3") -], ["searchAction", "id", "fileName", "text"])) - -ad = (AddDocuments() - .setSubscriptionKey(azureSearchKey) - .setServiceName(testServiceName) - .setOutputCol("out") - .setErrorCol("err") - .setIndexName(indexName) - .setActionCol("searchAction")) - -ad.transform(df).show() - -AzureSearchWriter.writeToAzureSearch(df, - subscriptionKey=azureSearchKey, - actionCol="searchAction", - serviceName=testServiceName, - indexJson=createSimpleIndexJson(indexName)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val azureSearchKey = sys.env.getOrElse("AZURE_SEARCH_KEY", None) -val testServiceName = "mmlspark-azure-search" - -val indexName = "test-website" - -def createSimpleIndexJson(indexName: String) = { - s""" - |{ - | "name": "$indexName", - | "fields": [ - | { - | "name": "id", - | "type": "Edm.String", - | "key": true, - | "facetable": false - | }, - | { - | "name": "fileName", - | "type": "Edm.String", - | "searchable": false, - | "sortable": false, - | "facetable": false - | }, - | { - | "name": "text", - | "type": "Edm.String", - | "filterable": false, - | "sortable": false, - | "facetable": false - | } - | ] - | } - """.stripMargin -} - -val df = ((0 until 4) - .map(i => ("upload", s"$i", s"file$i", s"text$i")) - .toDF("searchAction", "id", "fileName", "text")) - -val ad = (new AddDocuments() - .setSubscriptionKey(azureSearchKey) - .setServiceName(testServiceName) - .setOutputCol("out") - .setErrorCol("err") - 
.setIndexName(indexName) - .setActionCol("searchAction")) - -ad.transform(df).show() - -AzureSearchWriter.write(df, - Map("subscriptionKey" -> azureSearchKey, - "actionCol" -> "searchAction", - "serviceName" -> testServiceName, - "indexJson" -> createSimpleIndexJson(indexName))) -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_BingImageSearch.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_BingImageSearch.md deleted file mode 100644 index aca039917d..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_BingImageSearch.md +++ /dev/null @@ -1,95 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## Bing Image Search - -### BingImageSearch - - - - - - -```python -from synapse.ml.cognitive import * - -bingSearchKey = os.environ.get("BING_SEARCH_KEY", getSecret("bing-search-key")) - -# Number of images Bing will return per query -imgsPerBatch = 10 -# A list of offsets, used to page into the search results -offsets = [(i*imgsPerBatch,) for i in range(100)] -# Since web content is our data, we create a dataframe with options on that data: offsets -bingParameters = spark.createDataFrame(offsets, ["offset"]) - -# Run the Bing Image Search service with our text query -bingSearch = (BingImageSearch() - .setSubscriptionKey(bingSearchKey) - .setOffsetCol("offset") - .setQuery("Martin Luther King Jr. quotes") - .setCount(imgsPerBatch) - .setOutputCol("images")) - -# Transformer that extracts and flattens the richly structured output of Bing Image Search into a simple URL column -getUrls = BingImageSearch.getUrlTransformer("images", "url") - -# This displays the full results returned -bingSearch.transform(bingParameters).show() - -# Since we have two services, they are put into a pipeline -pipeline = PipelineModel(stages=[bingSearch, getUrls]) - -# Show the results of your search: image URLs -pipeline.transform(bingParameters).show() - -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val bingSearchKey = sys.env.getOrElse("BING_SEARCH_KEY", None) - -// Number of images Bing will return per query -val imgsPerBatch = 10 -// A list of offsets, used to page into the search results -val offsets = (0 until 100).map(i => i*imgsPerBatch) -// Since web content is our data, we create a dataframe with options on that data: offsets -val bingParameters = Seq(offsets).toDF("offset") - -// Run the Bing Image Search service with our text query -val bingSearch = (new BingImageSearch() - .setSubscriptionKey(bingSearchKey) - .setOffsetCol("offset") - .setQuery("Martin Luther King Jr. 
quotes") - .setCount(imgsPerBatch) - .setOutputCol("images")) - -// Transformer that extracts and flattens the richly structured output of Bing Image Search into a simple URL column -val getUrls = BingImageSearch.getUrlTransformer("images", "url") - -// This displays the full results returned -bingSearch.transform(bingParameters).show() - -// Show the results of your search: image URLs -getUrls.transform(bingSearch.transform(bingParameters)).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_ComputerVision.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_ComputerVision.md deleted file mode 100644 index 17d56c5cdb..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_ComputerVision.md +++ /dev/null @@ -1,547 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Computer Vision - -### OCR - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) - -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test1.jpg", ), - ], ["url", ]) - -ocr = (OCR() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setDetectOrientation(True) - .setOutputCol("ocr")) - -ocr.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - "https://mmlspark.blob.core.windows.net/datasets/OCR/test1.jpg" - ).toDF("url") - - -val ocr = (new OCR() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setDetectOrientation(true) - .setOutputCol("ocr")) - -ocr.transform(df).show() -``` - - - - - - - -### AnalyzeImage - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test1.jpg", "en"), - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test2.png", None), - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test3.png", "en") - ], ["image", "language"]) - - -ai = (AnalyzeImage() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("image") - .setLanguageCol("language") - .setVisualFeatures(["Categories", "Tags", "Description", "Faces", "ImageType", "Color", "Adult", "Objects", "Brands"]) - .setDetails(["Celebrities", "Landmarks"]) - .setOutputCol("features")) - -ai.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test1.jpg", "en"), - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test2.png", null), - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test3.png", "en") - ).toDF("url", "language") - -val ai = (new AnalyzeImage() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setLanguageCol("language") - .setVisualFeatures(Seq("Categories", "Tags", "Description", "Faces", "ImageType", "Color", "Adult", "Objects", "Brands")) - .setDetails(Seq("Celebrities", "Landmarks")) - .setOutputCol("features")) - 
-ai.transform(df).select("url", "features").show() -``` - - - - - - - -### RecognizeText - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test1.jpg", ), - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test2.png", ), - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test3.png", ) - ], ["url", ]) - -rt = (RecognizeText() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setMode("Printed") - .setOutputCol("ocr") - .setConcurrency(5)) - -rt.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - "https://mmlspark.blob.core.windows.net/datasets/OCR/test1.jpg", - "https://mmlspark.blob.core.windows.net/datasets/OCR/test2.png", - "https://mmlspark.blob.core.windows.net/datasets/OCR/test3.png" - ).toDF("url") - -val rt = (new RecognizeText() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setMode("Printed") - .setOutputCol("ocr") - .setConcurrency(5)) - -rt.transform(df).show() -``` - - - - - - - -### ReadImage - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test1.jpg", ), - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test2.png", ), - ("https://mmlspark.blob.core.windows.net/datasets/OCR/test3.png", ) - ], ["url", ]) - -ri = (ReadImage() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("ocr") - .setConcurrency(5)) - -ri.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - "https://mmlspark.blob.core.windows.net/datasets/OCR/test1.jpg", - "https://mmlspark.blob.core.windows.net/datasets/OCR/test2.png", - "https://mmlspark.blob.core.windows.net/datasets/OCR/test3.png" - ).toDF("url") - -val ri = (new ReadImage() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("ocr") - .setConcurrency(5)) - -ri.transform(df).show() -``` - - - - - - - -### RecognizeDomainSpecificContent - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg", ) - ], ["url", ]) - -celeb = (RecognizeDomainSpecificContent() - .setSubscriptionKey(cognitiveKey) - .setModel("celebrities") - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("celebs")) - -celeb.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg" - ).toDF("url") - -val celeb = (new RecognizeDomainSpecificContent() - .setSubscriptionKey(cognitiveKey) - .setModel("celebrities") - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("celebs")) - -celeb.transform(df).show() -``` - 
- - - - - - -### GenerateThumbnails - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg", ) - ], ["url", ]) - -gt = (GenerateThumbnails() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setHeight(50) - .setWidth(50) - .setSmartCropping(True) - .setImageUrlCol("url") - .setOutputCol("thumbnails")) - -gt.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df: DataFrame = Seq( - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg" - ).toDF("url") - -val gt = (new GenerateThumbnails() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setHeight(50) - .setWidth(50) - .setSmartCropping(true) - .setImageUrlCol("url") - .setOutputCol("thumbnails")) - -gt.transform(df).show() -``` - - - - - - - -### TagImage - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg", ) - ], ["url", ]) - -ti = (TagImage() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("tags")) - -ti.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg" - ).toDF("url") - -val ti = (new TagImage() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("tags")) - -ti.transform(df).show() -``` - - - - - - - -### DescribeImage - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg", ) - ], ["url", ]) - -di = (DescribeImage() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setMaxCandidates(3) - .setImageUrlCol("url") - .setOutputCol("descriptions")) - -di.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg" - ).toDF("url") - -val di = (new DescribeImage() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setMaxCandidates(3) - .setImageUrlCol("url") - .setOutputCol("descriptions")) - -di.transform(df).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_Face.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_Face.md deleted file mode 100644 index 26961f8f7a..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_Face.md +++ /dev/null @@ -1,414 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Face - -### DetectFace - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = 
os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg",), -], ["url"]) - -face = (DetectFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("detected_faces") - .setReturnFaceId(True) - .setReturnFaceLandmarks(False) - .setReturnFaceAttributes(["age", "gender", "headPose", "smile", "facialHair", "glasses", "emotion", - "hair", "makeup", "occlusion", "accessories", "blur", "exposure", "noise"])) - -face.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df: DataFrame = Seq( - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg" - ).toDF("url") - -val face = (new DetectFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("face") - .setReturnFaceId(true) - .setReturnFaceLandmarks(true) - .setReturnFaceAttributes(Seq( - "age", "gender", "headPose", "smile", "facialHair", "glasses", "emotion", - "hair", "makeup", "occlusion", "accessories", "blur", "exposure", "noise"))) - -face.transform(df).show() -``` - - - - - - - -### FindSimilarFace - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg",), - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg",), - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test3.jpg",) -], ["url"]) - -detector = (DetectFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("detected_faces") - .setReturnFaceId(True) - .setReturnFaceLandmarks(False) - .setReturnFaceAttributes([])) - -faceIdDF = detector.transform(df).select("detected_faces").select(col("detected_faces").getItem(0).getItem("faceId").alias("id")) -faceIds = [row.asDict()['id'] for row in faceIdDF.collect()] - -findSimilar = (FindSimilarFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("similar") - .setFaceIdCol("id") - .setFaceIds(faceIds)) - -findSimilar.transform(faceIdDF).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df: DataFrame = Seq( - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg", - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg", - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test3.jpg" - ).toDF("url") -val detector = (new DetectFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("detected_faces") - .setReturnFaceId(true) - .setReturnFaceLandmarks(false) - .setReturnFaceAttributes(Seq())) - -val faceIdDF = (detector.transform(df) - .select(col("detected_faces").getItem(0).getItem("faceId").alias("id")) - .cache()) -val faceIds = faceIdDF.collect().map(row => row.getAs[String]("id")) - -val findSimilar = (new FindSimilarFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("similar") - .setFaceIdCol("id") - .setFaceIds(faceIds)) - -findSimilar.transform(faceIdDF).show() -``` - - - - - - - -### GroupFaces - - - - - - - - - -```python -from synapse.ml.cognitive import * - 
-cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg",), - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg",), - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test3.jpg",) -], ["url"]) - -detector = (DetectFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("detected_faces") - .setReturnFaceId(True) - .setReturnFaceLandmarks(False) - .setReturnFaceAttributes([])) - -faceIdDF = detector.transform(df).select("detected_faces").select(col("detected_faces").getItem(0).getItem("faceId").alias("id")) -faceIds = [row.asDict()['id'] for row in faceIdDF.collect()] - -group = (GroupFaces() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("grouping") - .setFaceIds(faceIds)) - -group.transform(faceIdDF).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df: DataFrame = Seq( - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg", - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg", - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test3.jpg" - ).toDF("url") -val detector = (new DetectFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("detected_faces") - .setReturnFaceId(true) - .setReturnFaceLandmarks(false) - .setReturnFaceAttributes(Seq())) - -val faceIdDF = (detector.transform(df) - .select(col("detected_faces").getItem(0).getItem("faceId").alias("id")) - .cache()) -val faceIds = faceIdDF.collect().map(row => row.getAs[String]("id")) - -val group = (new GroupFaces() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("grouping") - .setFaceIds(faceIds)) - -group.transform(faceIdDF).show() -``` - - - - - - - -### IdentifyFaces - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -pgId = "PUT_YOUR_PERSON_GROUP_ID" - -identifyFaces = (IdentifyFaces() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setFaceIdsCol("faces") - .setPersonGroupId(pgId) - .setOutputCol("identified_faces")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val pgId = "PUT_YOUR_PERSON_GROUP_ID" - -val identifyFaces = (new IdentifyFaces() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setFaceIdsCol("faces") - .setPersonGroupId(pgId) - .setOutputCol("identified_faces")) -``` - - - - - - - -### VerifyFaces - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg",), - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg",), - ("https://mmlspark.blob.core.windows.net/datasets/DSIR/test3.jpg",) -], ["url"]) - -detector = (DetectFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("detected_faces") - .setReturnFaceId(True) - .setReturnFaceLandmarks(False) - .setReturnFaceAttributes([])) - -faceIdDF = 
detector.transform(df).select("detected_faces").select(col("detected_faces").getItem(0).getItem("faceId").alias("faceId1")) -faceIdDF2 = faceIdDF.withColumn("faceId2", lit(faceIdDF.take(1)[0].asDict()['faceId1'])) - -verify = (VerifyFaces() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("same") - .setFaceId1Col("faceId1") - .setFaceId2Col("faceId2")) - -verify.transform(faceIdDF2).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df: DataFrame = Seq( - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg", - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg", - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test3.jpg" - ).toDF("url") - -val detector = (new DetectFace() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("detected_faces") - .setReturnFaceId(true) - .setReturnFaceLandmarks(false) - .setReturnFaceAttributes(Seq())) - -val faceIdDF = (detector.transform(df) - .select(col("detected_faces").getItem(0).getItem("faceId").alias("faceId1")) - .cache()) -val faceIdDF2 = faceIdDF.withColumn("faceId2", lit(faceIdDF.take(1).head.getString(0))) - -val verify = (new VerifyFaces() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("same") - .setFaceId1Col("faceId1") - .setFaceId2Col("faceId2")) - -verify.transform(faceIdDF2).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_FormRecognizer.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_FormRecognizer.md deleted file mode 100644 index d22967f59f..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_FormRecognizer.md +++ /dev/null @@ -1,616 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## Form Recognizer - -### AnalyzeLayout - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -imageDf = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/layout1.jpg",) -], ["source",]) - -analyzeLayout = (AnalyzeLayout() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("layout") - .setConcurrency(5)) - -(analyzeLayout.transform(imageDf) - .withColumn("lines", flatten(col("layout.analyzeResult.readResults.lines"))) - .withColumn("readLayout", col("lines.text")) - .withColumn("tables", flatten(col("layout.analyzeResult.pageResults.tables"))) - .withColumn("cells", flatten(col("tables.cells"))) - .withColumn("pageLayout", col("cells.text")) - .select("source", "readLayout", "pageLayout")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val imageDf = Seq( - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/layout1.jpg" - ).toDF("source") - -val analyzeLayout = (new AnalyzeLayout() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("layout") - .setConcurrency(5)) - -analyzeLayout.transform(imageDf).show() -``` - - - - - - - -### AnalyzeReceipts - - - - - - - - - -```python -from synapse.ml.cognitive import 
* - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -imageDf = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/receipt1.png",), - ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/receipt1.png",) -], ["image",]) - -analyzeReceipts = (AnalyzeReceipts() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("image") - .setOutputCol("receipts") - .setConcurrency(5)) - -analyzeReceipts.transform(imageDf).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val imageDf = Seq( - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/receipt1.png", - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/receipt1.png" - ).toDF("source") - -val analyzeReceipts = (new AnalyzeReceipts() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("receipts") - .setConcurrency(5)) - -analyzeReceipts.transform(imageDf).show() -``` - - - - - - - -### AnalyzeBusinessCards - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -imageDf = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/business_card.jpg",) -], ["source",]) - -analyzeBusinessCards = (AnalyzeBusinessCards() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("businessCards") - .setConcurrency(5)) - -analyzeBusinessCards.transform(imageDf).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val imageDf = Seq( - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/business_card.jpg" - ).toDF("source") - -val analyzeBusinessCards = (new AnalyzeBusinessCards() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("businessCards") - .setConcurrency(5)) - -analyzeBusinessCards.transform(imageDf).show() -``` - - - - - - - -### AnalyzeInvoices - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -imageDf = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/invoice2.png",) -], ["source",]) - -analyzeInvoices = (AnalyzeInvoices() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("invoices") - .setConcurrency(5)) - -(analyzeInvoices - .transform(imageDf) - .withColumn("documents", explode(col("invoices.analyzeResult.documentResults.fields"))) - .select("source", "documents")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val imageDf = Seq( - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/invoice2.png" - ).toDF("source") - -val analyzeInvoices = (new AnalyzeInvoices() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("invoices") - .setConcurrency(5)) - -analyzeInvoices.transform(imageD4).show() -``` - - - - - - - -### AnalyzeIDDocuments - - - - - - - -```python -from synapse.ml.cognitive import * - 
-cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -imageDf = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/id1.jpg",) -], ["source",]) - -analyzeIDDocuments = (AnalyzeIDDocuments() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("ids") - .setConcurrency(5)) - -(analyzeIDDocuments - .transform(imageDf) - .withColumn("documents", explode(col("ids.analyzeResult.documentResults.fields"))) - .select("source", "documents")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val imageDf = Seq( - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/id1.jpg" - ).toDF("source") - -val analyzeIDDocuments = (new AnalyzeIDDocuments() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("ids") - .setConcurrency(5)) - -analyzeIDDocuments.transform(imageDf).show() -``` - - - - - - - -### AnalyzeCustomModel - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -modelId = "02bc2f58-2beb-4ae3-84fb-08f011b2f7b8" # put your own modelId here -imageDf = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/invoice2.png",) -], ["source",]) - -analyzeCustomModel = (AnalyzeCustomModel() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setModelId(modelId) - .setImageUrlCol("source") - .setOutputCol("output") - .setConcurrency(5)) - -(analyzeCustomModel - .transform(imageDf) - .withColumn("keyValuePairs", flatten(col("output.analyzeResult.pageResults.keyValuePairs"))) - .withColumn("keys", col("keyValuePairs.key.text")) - .withColumn("values", col("keyValuePairs.value.text")) - .withColumn("keyValuePairs", create_map(lit("key"), col("keys"), lit("value"), col("values"))) - .select("source", "keyValuePairs")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val modelId = "02bc2f58-2beb-4ae3-84fb-08f011b2f7b8" // put your own modelId here -val imageDf = Seq( - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/invoice2.png" - ).toDF("source") - -val analyzeCustomModel = (new AnalyzeCustomModel() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setModelId(modelId) - .setImageUrlCol("source") - .setOutputCol("output") - .setConcurrency(5)) - -analyzeCustomModel.transform(imageDf).show() -``` - - - - - - - -### GetCustomModel - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -modelId = "02bc2f58-2beb-4ae3-84fb-08f011b2f7b8" # put your own modelId here -emptyDf = spark.createDataFrame([("",)]) - -getCustomModel = (GetCustomModel() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setModelId(modelId) - .setIncludeKeys(True) - .setOutputCol("model") - .setConcurrency(5)) - -(getCustomModel - .transform(emptyDf) - .withColumn("modelInfo", col("model.ModelInfo")) - .withColumn("trainResult", col("model.TrainResult")) - .select("modelInfo", "trainResult")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = 
sys.env.getOrElse("COGNITIVE_API_KEY", None) -val modelId = "02bc2f58-2beb-4ae3-84fb-08f011b2f7b8" // put your own modelId here -val emptyDf = Seq("").toDF() - -val getCustomModel = (new GetCustomModel() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setModelId(modelId) - .setIncludeKeys(true) - .setOutputCol("model") - .setConcurrency(5)) - -getCustomModel.transform(emptyDf).show() -``` - - - - - - - -### ListCustomModels - - - - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -emptyDf = spark.createDataFrame([("",)]) - -listCustomModels = (ListCustomModels() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOp("full") - .setOutputCol("models") - .setConcurrency(5)) - -(listCustomModels - .transform(emptyDf) - .withColumn("modelIds", col("models.modelList.modelId")) - .select("modelIds")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val emptyDf = Seq("").toDF() - -val listCustomModels = (new ListCustomModels() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOp("full") - .setOutputCol("models") - .setConcurrency(5)) - -listCustomModels.transform(emptyDf).show() -``` - - - - - - - -## Form Recognizer V3 - -### AnalyzeDocument - - - - - - -```python -from synapse.ml.cognitive import * - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -imageDf = spark.createDataFrame([ - ("https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/layout1.jpg",) -], ["source",]) - -analyzeDocument = (AnalyzeDocument() - # For supported prebuilt models, please go to documentation page for details - .setPrebuiltModelId("prebuilt-layout") - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("result") - .setConcurrency(5)) - -(analyzeDocument.transform(imageDf) - .withColumn("content", col("result.analyzeResult.content")) - .withColumn("cells", flatten(col("result.analyzeResult.tables.cells"))) - .withColumn("cells", col("cells.content")) - .select("source", "result", "content", "cells")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val imageDf = Seq( - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/layout1.jpg" - ).toDF("source") - -val analyzeDocument = (new AnalyzeDocument() - .setPrebuiltModelId("prebuilt-layout") - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("result") - .setConcurrency(5)) - -analyzeDocument.transform(imageDf).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_SpeechToText.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_SpeechToText.md deleted file mode 100644 index 52437be5fb..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_SpeechToText.md +++ /dev/null @@ -1,144 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## Speech To Text - -### SpeechToText - - - - - - -```python -from synapse.ml.cognitive import * -import requests - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", 
getSecret("cognitive-api-key")) -link = "https://mmlspark.blob.core.windows.net/datasets/Speech/audio2.wav" -audioBytes = requests.get(link).content -df = spark.createDataFrame([(audioBytes,) - ], ["audio"]) - -stt = (SpeechToText() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("text") - .setAudioDataCol("audio") - .setLanguage("en-US") - .setFormat("simple")) - -stt.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.commons.compress.utils.IOUtils -import java.net.URL - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val audioBytes = IOUtils.toByteArray(new URL("https://mmlspark.blob.core.windows.net/datasets/Speech/test1.wav").openStream()) - -val df: DataFrame = Seq( - Tuple1(audioBytes) - ).toDF("audio") - -val stt = (new SpeechToText() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("text") - .setAudioDataCol("audio") - .setLanguage("en-US") - .setFormat("simple")) - -stt.transform(df).show() -``` - - - - - - - -### SpeechToTextSDK - - - - - - - - - -```python -from synapse.ml.cognitive import * -import requests - -cognitiveKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([("https://mmlspark.blob.core.windows.net/datasets/Speech/audio2.wav",) - ], ["url"]) - -speech_to_text = (SpeechToTextSDK() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("text") - .setAudioDataCol("url") - .setLanguage("en-US") - .setProfanity("Masked")) - -speech_to_text.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.commons.compress.utils.IOUtils -import java.net.URL - -val cognitiveKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df: DataFrame = Seq( - "https://mmlspark.blob.core.windows.net/datasets/Speech/audio2.wav" - ).toDF("url") - -val speech_to_text = (new SpeechToTextSDK() - .setSubscriptionKey(cognitiveKey) - .setLocation("eastus") - .setOutputCol("text") - .setAudioDataCol("url") - .setLanguage("en-US") - .setProfanity("Masked")) - -speech_to_text.transform(df).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_TextAnalytics.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_TextAnalytics.md deleted file mode 100644 index fdaf94fb7d..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_TextAnalytics.md +++ /dev/null @@ -1,416 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## Text Analytics - -### EntityDetector - - - - - - -```python -from synapse.ml.cognitive import * - -textKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("1", "Microsoft released Windows 10"), - ("2", "In 1975, Bill Gates III and Paul Allen founded the company.") -], ["id", "text"]) - -entity = (EntityDetector() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguage("en") - .setOutputCol("replies") - .setErrorCol("error")) - -entity.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.spark.sql.functions.{col, flatten} - -val textKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - ("1", "Microsoft 
released Windows 10"), - ("2", "In 1975, Bill Gates III and Paul Allen founded the company.") - ).toDF("id", "text") - -val entity = (new EntityDetector() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguage("en") - .setOutputCol("replies")) - -entity.transform(df).show() -``` - - - - - - - -### KeyPhraseExtractor - - - - - - - -```python -from synapse.ml.cognitive import * - -textKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("en", "Hello world. This is some input text that I love."), - ("fr", "Bonjour tout le monde"), - ("es", "La carretera estaba atascada. Había mucho tráfico el día de ayer.") -], ["lang", "text"]) - -keyPhrase = (KeyPhraseExtractor() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguageCol("lang") - .setOutputCol("replies") - .setErrorCol("error")) - -keyPhrase.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val textKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - ("en", "Hello world. This is some input text that I love."), - ("fr", "Bonjour tout le monde"), - ("es", "La carretera estaba atascada. Había mucho tráfico el día de ayer."), - ("en", null) - ).toDF("lang", "text") - -val keyPhrase = (new KeyPhraseExtractor() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguageCol("lang") - .setOutputCol("replies")) - -keyPhrase.transform(df).show() -``` - - - - - - - -### LanguageDetector - - - - - - - - - -```python -from synapse.ml.cognitive import * - -textKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("Hello World",), - ("Bonjour tout le monde",), - ("La carretera estaba atascada. Había mucho tráfico el día de ayer.",), - ("你好",), - ("こんにちは",), - (":) :( :D",) -], ["text",]) - -language = (LanguageDetector() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setTextCol("text") - .setOutputCol("language") - .setErrorCol("error")) - -language.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val textKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - "Hello World", - "Bonjour tout le monde", - "La carretera estaba atascada. 
Había mucho tráfico el día de ayer.", - ":) :( :D" - ).toDF("text") - -val language = (new LanguageDetector() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setOutputCol("replies")) - -language.transform(df).show() -``` - - - - - - - -### NER - - - - - - - - - -```python -from synapse.ml.cognitive import * - -textKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("1", "en", "I had a wonderful trip to Seattle last week."), - ("2", "en", "I visited Space Needle 2 times.") -], ["id", "language", "text"]) - -ner = (NER() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguageCol("language") - .setOutputCol("replies") - .setErrorCol("error")) - -ner.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val textKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - ("1", "en", "I had a wonderful trip to Seattle last week."), - ("2", "en", "I visited Space Needle 2 times.") - ).toDF("id", "language", "text") - -val ner = (new NER() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguage("en") - .setOutputCol("response")) - -ner.transform(df).show() -``` - - - - - - - -### PII - - - - - - - - - -```python -from synapse.ml.cognitive import * - -textKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("1", "en", "My SSN is 859-98-0987"), - ("2", "en", - "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."), - ("3", "en", "Is 998.214.865-68 your Brazilian CPF number?") -], ["id", "language", "text"]) - -pii = (PII() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguage("en") - .setOutputCol("response")) - -pii.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val textKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - ("1", "en", "My SSN is 859-98-0987"), - ("2", "en", - "Your ABA number - 111000025 - is the first 9 digits in the lower left hand corner of your personal check."), - ("3", "en", "Is 998.214.865-68 your Brazilian CPF number?") - ).toDF("id", "language", "text") - -val pii = (new PII() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguage("en") - .setOutputCol("response")) - -pii.transform(df).show() -``` - - - - - - - -### TextSentiment - - - - - - - - - -```python -from synapse.ml.cognitive import * - -textKey = os.environ.get("COGNITIVE_API_KEY", getSecret("cognitive-api-key")) -df = spark.createDataFrame([ - ("I am so happy today, its sunny!", "en-US"), - ("I am frustrated by this rush hour traffic", "en-US"), - ("The cognitive services on spark aint bad", "en-US"), -], ["text", "language"]) - -sentiment = (TextSentiment() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setTextCol("text") - .setOutputCol("sentiment") - .setErrorCol("error") - .setLanguageCol("language")) - -sentiment.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val textKey = sys.env.getOrElse("COGNITIVE_API_KEY", None) -val df = Seq( - ("en", "Hello world. This is some input text that I love."), - ("fr", "Bonjour tout le monde"), - ("es", "La carretera estaba atascada. 
Había mucho tráfico el día de ayer."), - (null, "ich bin ein berliner"), - (null, null), - ("en", null) - ).toDF("lang", "text") - -val sentiment = (new TextSentiment() - .setSubscriptionKey(textKey) - .setLocation("eastus") - .setLanguageCol("lang") - .setModelVersion("latest") - .setShowStats(true) - .setOutputCol("replies")) - -sentiment.transform(df).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_Translator.md b/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_Translator.md deleted file mode 100644 index bfa3961ec8..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/cognitive/_Translator.md +++ /dev/null @@ -1,492 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## Translator - -### Translate - - - - - - -```python -from synapse.ml.cognitive import * - -translatorKey = os.environ.get("TRANSLATOR_KEY", getSecret("translator-key")) -df = spark.createDataFrame([ - (["Hello, what is your name?", "Bye"],) -], ["text",]) - -translate = (Translate() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setTextCol("text") - .setToLanguage(["zh-Hans", "fr"]) - .setOutputCol("translation") - .setConcurrency(5)) - -(translate - .transform(df) - .withColumn("translation", flatten(col("translation.translations"))) - .withColumn("translation", col("translation.text")) - .select("translation")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.spark.sql.functions.{col, flatten} - -val translatorKey = sys.env.getOrElse("TRANSLATOR_KEY", None) -val df = Seq(List("Hello, what is your name?", "Bye")).toDF("text") - -val translate = (new Translate() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setTextCol("text") - .setToLanguage(Seq("zh-Hans", "fr")) - .setOutputCol("translation") - .setConcurrency(5)) - -(translate - .transform(df) - .withColumn("translation", flatten(col("translation.translations"))) - .withColumn("translation", col("translation.text")) - .select("translation")).show() -``` - - - - - - - -### Transliterate - - - - - - - - - -```python -from synapse.ml.cognitive import * - -translatorKey = os.environ.get("TRANSLATOR_KEY", getSecret("translator-key")) -df = spark.createDataFrame([ - (["こんにちは", "さようなら"],) -], ["text",]) - -transliterate = (Transliterate() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setLanguage("ja") - .setFromScript("Jpan") - .setToScript("Latn") - .setTextCol("text") - .setOutputCol("result")) - -(transliterate - .transform(df) - .withColumn("text", col("result.text")) - .withColumn("script", col("result.script")) - .select("text", "script")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.spark.sql.functions.col - -val translatorKey = sys.env.getOrElse("TRANSLATOR_KEY", None) -val df = Seq(List("こんにちは", "さようなら")).toDF("text") - -val transliterate = (new Transliterate() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setLanguage("ja") - .setFromScript("Jpan") - .setToScript("Latn") - .setTextCol("text") - .setOutputCol("result")) - -(transliterate - .transform(df) - .withColumn("text", col("result.text")) - .withColumn("script", col("result.script")) - .select("text", "script")).show() -``` - - - - - - - -### Detect - - - - - - - - - -```python 
-from synapse.ml.cognitive import * - -translatorKey = os.environ.get("TRANSLATOR_KEY", getSecret("translator-key")) -df = spark.createDataFrame([ - (["Hello, what is your name?"],) -], ["text",]) - -detect = (Detect() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setTextCol("text") - .setOutputCol("result")) - -(detect - .transform(df) - .withColumn("language", col("result.language")) - .select("language")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.spark.sql.functions.col - -val translatorKey = sys.env.getOrElse("TRANSLATOR_KEY", None) -val df = Seq(List("Hello, what is your name?")).toDF("text") - -val detect = (new Detect() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setTextCol("text") - .setOutputCol("result")) - -(detect - .transform(df) - .withColumn("language", col("result.language")) - .select("language")).show() -``` - - - - - - - -### BreakSentence - - - - - - - - - -```python -from synapse.ml.cognitive import * - -translatorKey = os.environ.get("TRANSLATOR_KEY", getSecret("translator-key")) -df = spark.createDataFrame([ - (["Hello, what is your name?"],) -], ["text",]) - -breakSentence = (BreakSentence() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setTextCol("text") - .setOutputCol("result")) - -(breakSentence - .transform(df) - .withColumn("sentLen", flatten(col("result.sentLen"))) - .select("sentLen")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.spark.sql.functions.{col, flatten} - -val translatorKey = sys.env.getOrElse("TRANSLATOR_KEY", None) -val df = Seq(List("Hello, what is your name?")).toDF("text") - -val breakSentence = (new BreakSentence() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setTextCol("text") - .setOutputCol("result")) - -(breakSentence - .transform(df) - .withColumn("sentLen", flatten(col("result.sentLen"))) - .select("sentLen")).show() -``` - - - - - - - -### DictionaryLookup - - - - - - - - - -```python -from synapse.ml.cognitive import * - -translatorKey = os.environ.get("TRANSLATOR_KEY", getSecret("translator-key")) -df = spark.createDataFrame([ - (["fly"],) -], ["text",]) - -dictionaryLookup = (DictionaryLookup() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setFromLanguage("en") - .setToLanguage("es") - .setTextCol("text") - .setOutputCol("result")) - -(dictionaryLookup - .transform(df) - .withColumn("translations", flatten(col("result.translations"))) - .withColumn("normalizedTarget", col("translations.normalizedTarget")) - .select("normalizedTarget")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.spark.sql.functions.{col, flatten} - -val translatorKey = sys.env.getOrElse("TRANSLATOR_KEY", None) -val df = Seq(List("fly")).toDF("text") - -val dictionaryLookup = (new DictionaryLookup() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setFromLanguage("en") - .setToLanguage("es") - .setTextCol("text") - .setOutputCol("result")) - -(dictionaryLookup - .transform(df) - .withColumn("translations", flatten(col("result.translations"))) - .withColumn("normalizedTarget", col("translations.normalizedTarget")) - .select("normalizedTarget")).show() -``` - - - - - - - -### DictionaryExamples - - - - - - - - - -```python -from synapse.ml.cognitive import * - -translatorKey = os.environ.get("TRANSLATOR_KEY", 
getSecret("translator-key")) -df = (spark.createDataFrame([ - ("fly", "volar") -], ["text", "translation"]) - .withColumn("textAndTranslation", array(struct(col("text"), col("translation"))))) - -dictionaryExamples = (DictionaryExamples() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setFromLanguage("en") - .setToLanguage("es") - .setTextAndTranslationCol("textAndTranslation") - .setOutputCol("result")) - -(dictionaryExamples - .transform(df) - .withColumn("examples", flatten(col("result.examples"))) - .select("examples")).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ -import org.apache.spark.sql.functions.{col, flatten} - -val translatorKey = sys.env.getOrElse("TRANSLATOR_KEY", None) -val df = Seq(List(TextAndTranslation("fly", "volar"))).toDF("textAndTranslation") - -val dictionaryExamples = (new DictionaryExamples() - .setSubscriptionKey(translatorKey) - .setLocation("eastus") - .setFromLanguage("en") - .setToLanguage("es") - .setTextAndTranslationCol("textAndTranslation") - .setOutputCol("result")) - -(dictionaryExamples - .transform(df) - .withColumn("examples", flatten(col("result.examples"))) - .select("examples")).show() -``` - - - - - - - -### DocumentTranslator - - - - - - - - - -```python -from synapse.ml.cognitive import * - -translatorKey = os.environ.get("TRANSLATOR_KEY", getSecret("translator-key")) -translatorName = os.environ.get("TRANSLATOR_NAME", "mmlspark-translator") - -documentTranslator = (DocumentTranslator() - .setSubscriptionKey(translatorKey) - .setServiceName(translatorName) - .setSourceUrlCol("sourceUrl") - .setTargetsCol("targets") - .setOutputCol("translationStatus")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.cognitive._ -import spark.implicits._ - -val translatorKey = sys.env.getOrElse("TRANSLATOR_KEY", None) -val translatorName = sys.env.getOrElse("TRANSLATOR_NAME", None) - -val documentTranslator = (new DocumentTranslator() - .setSubscriptionKey(translatorKey) - .setServiceName(translatorName) - .setSourceUrlCol("sourceUrl") - .setTargetsCol("targets") - .setOutputCol("translationStatus")) -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Explainers.md b/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Explainers.md deleted file mode 100644 index 4798863e08..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Explainers.md +++ /dev/null @@ -1,574 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Explainers - -### ImageLIME - - - - - - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel - -model = ONNXModel() - -lime = (ImageLIME() - .setModel(model) - .setOutputCol("weights") - .setInputCol("image") - .setCellSize(150.0) - .setModifier(50.0) - .setNumSamples(500) - .setTargetCol("probability") - .setTargetClassesCol("top2pred") - .setSamplingFraction(0.7)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.explainers._ -import com.microsoft.azure.synapse.ml.onnx._ -import spark.implicits._ - -val model = (new ONNXModel()) - -val lime = (new ImageLIME() - .setModel(model) - .setOutputCol("weights") - .setInputCol("image") - .setCellSize(150.0) - .setModifier(50.0) - .setNumSamples(500) - .setTargetCol("probability") - .setTargetClassesCol("top2pred") - .setSamplingFraction(0.7)) -``` - - - - - - - -### ImageSHAP - - - 
- - - - - - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel - -model = ONNXModel() - -shap = ( - ImageSHAP() - .setModel(model) - .setOutputCol("shaps") - .setSuperpixelCol("superpixels") - .setInputCol("image") - .setCellSize(150.0) - .setModifier(50.0) - .setNumSamples(500) - .setTargetCol("probability") - .setTargetClassesCol("top2pred") -) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.explainers._ -import com.microsoft.azure.synapse.ml.onnx._ -import spark.implicits._ - -val model = (new ONNXModel()) - -val shap = (new ImageSHAP() - .setModel(model) - .setOutputCol("shaps") - .setSuperpixelCol("superpixels") - .setInputCol("image") - .setCellSize(150.0) - .setModifier(50.0) - .setNumSamples(500) - .setTargetCol("probability") - .setTargetClassesCol("top2pred") -)) -``` - - - - - - - -### TabularLIME - - - - - - - - - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel - -model = ONNXModel() -data = spark.createDataFrame([ - (-6.0, 0), - (-5.0, 0), - (5.0, 1), - (6.0, 1) -], ["col1", "label"]) - -lime = (TabularLIME() - .setModel(model) - .setInputCols(["col1"]) - .setOutputCol("weights") - .setBackgroundData(data) - .setKernelWidth(0.001) - .setNumSamples(1000) - .setTargetCol("probability") - .setTargetClasses([0, 1])) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.explainers._ -import com.microsoft.azure.synapse.ml.onnx._ -import spark.implicits._ - -val model = (new ONNXModel()) -val data = Seq( - (-6.0, 0), - (-5.0, 0), - (5.0, 1), - (6.0, 1) -).toDF("col1", "label") - -val lime = (new TabularLIME() - .setInputCols(Array("col1")) - .setOutputCol("weights") - .setBackgroundData(data) - .setKernelWidth(0.001) - .setNumSamples(1000) - .setModel(model) - .setTargetCol("probability") - .setTargetClasses(Array(0, 1))) -``` - - - - - - - -### TabularSHAP - - - - - - - - - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel - -model = ONNXModel() -data = spark.createDataFrame([ - (-5.0, "a", -5.0, 0), - (-5.0, "b", -5.0, 0), - (5.0, "a", 5.0, 1), - (5.0, "b", 5.0, 1) -]*100, ["col1", "label"]) - -shap = (TabularSHAP() - .setInputCols(["col1", "col2", "col3"]) - .setOutputCol("shapValues") - .setBackgroundData(data) - .setNumSamples(1000) - .setModel(model) - .setTargetCol("probability") - .setTargetClasses([1])) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.explainers._ -import com.microsoft.azure.synapse.ml.onnx._ -import spark.implicits._ - -val model = (new ONNXModel()) -val data = (1 to 100).flatMap(_ => Seq( - (-5d, "a", -5d, 0), - (-5d, "b", -5d, 0), - (5d, "a", 5d, 1), - (5d, "b", 5d, 1) - )).toDF("col1", "col2", "col3", "label") - -val shap = (new TabularSHAP() - .setInputCols(Array("col1", "col2", "col3")) - .setOutputCol("shapValues") - .setBackgroundData(data) - .setNumSamples(1000) - .setModel(model) - .setTargetCol("probability") - .setTargetClasses(Array(1))) -``` - - - - - - - -### TextLIME - - - - - - - - - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel - -model = ONNXModel() - -lime = (TextLIME() - .setModel(model) - .setInputCol("text") - .setTargetCol("prob") - .setTargetClasses([1]) - .setOutputCol("weights") - .setTokensCol("tokens") - .setSamplingFraction(0.7) - .setNumSamples(1000)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.explainers._ -import com.microsoft.azure.synapse.ml.onnx._ -import spark.implicits._ - -val model = (new ONNXModel()) - -val lime = 
(new TextLIME() - .setModel(model) - .setInputCol("text") - .setTargetCol("prob") - .setTargetClasses(Array(1)) - .setOutputCol("weights") - .setTokensCol("tokens") - .setSamplingFraction(0.7) - .setNumSamples(1000)) -``` - - - - - - - -### TextSHAP - - - - - - - - - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel - -model = ONNXModel() - -shap = (TextSHAP() - .setModel(model) - .setInputCol("text") - .setTargetCol("prob") - .setTargetClasses([1]) - .setOutputCol("weights") - .setTokensCol("tokens") - .setNumSamples(1000)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.explainers._ -import com.microsoft.azure.synapse.ml.onnx._ -import spark.implicits._ - -val model = (new ONNXModel()) - -val shap = (new TextSHAP() - .setModel(model) - .setInputCol("text") - .setTargetCol("prob") - .setTargetClasses(Array(1)) - .setOutputCol("weights") - .setTokensCol("tokens") - .setNumSamples(1000)) -``` - - - - - - - -### VectorLIME - - - - - - - - - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel - -model = ONNXModel() - -df = spark.createDataFrame([ - ([0.2729799734928408, -0.4637273304253777, 1.565593782147994], 4.541185129673482), - ([1.9511879801376864, 1.495644437589599, -0.4667847796501322], 0.19526424470709836) -]) - -lime = (VectorLIME() - .setModel(model) - .setBackgroundData(df) - .setInputCol("features") - .setTargetCol("label") - .setOutputCol("weights") - .setNumSamples(1000)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.explainers._ -import spark.implicits._ -import breeze.linalg.{*, DenseMatrix => BDM} -import breeze.stats.distributions.Rand -import org.apache.spark.ml.linalg.Vectors -import org.apache.spark.ml.regression.LinearRegression - -val d1 = 3 -val d2 = 1 -val coefficients: BDM[Double] = new BDM(d1, d2, Array(1.0, -1.0, 2.0)) - -val df = { - val nRows = 100 - val intercept: Double = math.random() - - val x: BDM[Double] = BDM.rand(nRows, d1, Rand.gaussian) - val y = x * coefficients + intercept - - val xRows = x(*, ::).iterator.toSeq.map(dv => Vectors.dense(dv.toArray)) - val yRows = y(*, ::).iterator.toSeq.map(dv => dv(0)) - xRows.zip(yRows).toDF("features", "label") - } - -val model: LinearRegressionModel = new LinearRegression().fit(df) - -val lime = (new VectorLIME() - .setModel(model) - .setBackgroundData(df) - .setInputCol("features") - .setTargetCol(model.getPredictionCol) - .setOutputCol("weights") - .setNumSamples(1000)) -``` - - - - - - - -### VectorSHAP - - - - - - - - - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel - -model = ONNXModel() - -shap = (VectorSHAP() - .setInputCol("features") - .setOutputCol("shapValues") - .setNumSamples(1000) - .setModel(model) - .setTargetCol("probability") - .setTargetClasses([1])) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.explainers._ -import spark.implicits._ -import breeze.linalg.{*, DenseMatrix => BDM} -import breeze.stats.distributions.RandBasis -import org.apache.spark.ml.classification.LogisticRegression -import org.apache.spark.ml.linalg.Vectors - -val randBasis = RandBasis.withSeed(123) -val m: BDM[Double] = BDM.rand[Double](1000, 5, randBasis.gaussian) -val l: BDV[Double] = m(*, ::).map { - row => - if (row(2) + row(3) > 0.5) 1d else 0d - } -val data = m(*, ::).iterator.zip(l.valuesIterator).map { - case (f, l) => (f.toSpark, l) - }.toSeq.toDF("features", "label") - -val model = new LogisticRegression() - .setFeaturesCol("features") - .setLabelCol("label") - 
.fit(data) - -val shap = (new VectorSHAP() - .setInputCol("features") - .setOutputCol("shapValues") - .setBackgroundData(data) - .setNumSamples(1000) - .setModel(model) - .setTargetCol("probability") - .setTargetClasses(Array(1)) - -val infer = Seq( - Tuple1(Vectors.dense(1d, 1d, 1d, 1d, 1d)) - ) toDF "features" -val predicted = model.transform(infer) -shap.transform(predicted).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Featurize.md b/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Featurize.md deleted file mode 100644 index 6b23cf6b48..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Featurize.md +++ /dev/null @@ -1,282 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Featurize - -### DataConversion - - - - - - -```python -from synapse.ml.featurize import * - -df = spark.createDataFrame([ - (True, 1, 2, 3, 4, 5.0, 6.0, "7", "8.0"), - (False, 9, 10, 11, 12, 14.5, 15.5, "16", "17.456"), - (True, -127, 345, 666, 1234, 18.91, 20.21, "100", "200.12345") -], ["bool", "byte", "short", "int", "long", "float", "double", "intstring", "doublestring"]) - -dc = (DataConversion() - .setCols(["byte"]) - .setConvertTo("boolean")) - -dc.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize._ -import spark.implicits._ - -val df = Seq( - (true: Boolean, 1: Byte, 2: Short, 3: Integer, 4: Long, 5.0F, 6.0, "7", "8.0"), - (false, 9: Byte, 10: Short, 11: Integer, 12: Long, 14.5F, 15.5, "16", "17.456"), - (true, -127: Byte, 345: Short, Short.MaxValue + 100, (Int.MaxValue).toLong + 100, 18.91F, 20.21, "100", "200.12345")) - .toDF("bool", "byte", "short", "int", "long", "float", "double", "intstring", "doublestring") - -val dc = (new DataConversion() - .setCols(Array("byte")) - .setConvertTo("boolean")) - -dc.transform(df).show() -``` - - - - - - - -### IndexToValue - - - - - - - - - -```python -from synapse.ml.featurize import * - -df = spark.createDataFrame([ - (-3, 24, 0.32534, True, "piano"), - (1, 5, 5.67, False, "piano"), - (-3, 5, 0.32534, False, "guitar") -], ["int", "long", "double", "bool", "string"]) - -df2 = ValueIndexer().setInputCol("string").setOutputCol("string_cat").fit(df).transform(df) - -itv = (IndexToValue() - .setInputCol("string_cat") - .setOutputCol("string_noncat")) - -itv.transform(df2).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize._ -import spark.implicits._ - -val df = Seq[(Int, Long, Double, Boolean, String)]( - (-3, 24L, 0.32534, true, "piano"), - (1, 5L, 5.67, false, "piano"), - (-3, 5L, 0.32534, false, "guitar")).toDF("int", "long", "double", "bool", "string") - -val df2 = new ValueIndexer().setInputCol("string").setOutputCol("string_cat").fit(df).transform(df) - -val itv = (new IndexToValue() - .setInputCol("string_cat") - .setOutputCol("string_noncat")) - -itv.transform(df2).show() -``` - - - - - - - -## Featurize Text - -### MultiNGram - - - - - - - - - -```python -from synapse.ml.featurize.text import * -from pyspark.ml.feature import Tokenizer - -dfRaw = spark.createDataFrame([ - (0, "Hi I"), - (1, "I wish for snow today"), - (2, "we Cant go to the park, because of the snow!"), - (3, ""), - (4, "1 2 3 4 5 6 7 8 9") -], ["label", "sentence"]) - -dfTok = (Tokenizer() - .setInputCol("sentence") - .setOutputCol("tokens") - .transform(dfRaw)) - -mng = (MultiNGram() - .setLengths([1, 3, 4]) - 
.setInputCol("tokens") - .setOutputCol("ngrams")) - -mng.transform(dfTok).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize.text._ -import org.apache.spark.ml.feature.Tokenizer -import spark.implicits._ - -val dfRaw = (Seq( - (0, "Hi I"), - (1, "I wish for snow today"), - (2, "we Cant go to the park, because of the snow!"), - (3, ""), - (4, (1 to 10).map(_.toString).mkString(" "))) - .toDF("label", "sentence")) - -val dfTok = (new Tokenizer() - .setInputCol("sentence") - .setOutputCol("tokens") - .transform(dfRaw)) - -val mng = (new MultiNGram() - .setLengths(Array(1, 3, 4)) - .setInputCol("tokens") - .setOutputCol("ngrams")) - -mng.transform(dfTok).show() -``` - - - - - - - -### PageSplitter - - - - - - - - - -```python -from synapse.ml.featurize.text import * - -df = spark.createDataFrame([ - ("words words words wornssaa ehewjkdiw weijnsikjn xnh", ), - ("s s s s s s", ), - ("hsjbhjhnskjhndwjnbvckjbnwkjwenbvfkjhbnwevkjhbnwejhkbnvjkhnbndjkbnd", ), - ("hsjbhjhnskjhndwjnbvckjbnwkjwenbvfkjhbnwevkjhbnwejhkbnvjkhnbndjkbnd 190872340870271091309831097813097130i3u709781", ), - ("", ), - (None, ) -], ["text"]) - -ps = (PageSplitter() - .setInputCol("text") - .setMaximumPageLength(20) - .setMinimumPageLength(10) - .setOutputCol("pages")) - -ps.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.featurize.text._ -import spark.implicits._ - -val df = Seq( - "words words words wornssaa ehewjkdiw weijnsikjn xnh", - "s s s s s s", - "hsjbhjhnskjhndwjnbvckjbnwkjwenbvfkjhbnwevkjhbnwejhkbnvjkhnbndjkbnd", - "hsjbhjhnskjhndwjnbvckjbnwkjwenbvfkjhbnwevkjhbnwejhkbnvjkhnbndjkbnd " + - "190872340870271091309831097813097130i3u709781", - "", - null - ).toDF("text") - -val ps = (new PageSplitter() - .setInputCol("text") - .setMaximumPageLength(20) - .setMinimumPageLength(10) - .setOutputCol("pages")) - -ps.transform(df).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_IO.md b/website/versioned_docs/version-0.10.0/documentation/transformers/core/_IO.md deleted file mode 100644 index 9f58bf38c8..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_IO.md +++ /dev/null @@ -1,341 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## IO - -### HTTPTransformer - - - - - - -```python -from synapse.ml.io.http import * -from pyspark.sql.functions import udf, col -from requests import Request - -def world_bank_request(country): - return Request("GET", "http://api.worldbank.org/v2/country/{}?format=json".format(country)) - -df = (spark.createDataFrame([("br",), ("usa",)], ["country"]) - .withColumn("request", http_udf(world_bank_request)(col("country")))) - -ht = (HTTPTransformer() - .setConcurrency(3) - .setInputCol("request") - .setOutputCol("response")) - -ht.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.io.http._ - -val ht = (new HTTPTransformer() - .setConcurrency(3) - .setInputCol("request") - .setOutputCol("response")) -``` - - - - - - - -### SimpleHTTPTransformer - - - - - - - - - -```python -from synapse.ml.io.http import * -from pyspark.sql.types import StringType, StructType - -sht = (SimpleHTTPTransformer() - .setInputCol("data") - .setOutputParser(JSONOutputParser() - .setDataType(StructType().add("blah", StringType()))) - .setUrl("PUT_YOUR_URL") - .setOutputCol("results") - .setConcurrency(3)) -``` - - - - -```scala -import 
com.microsoft.azure.synapse.ml.io.http._ -import org.apache.spark.sql.types.{StringType, StructType} - -val sht = (new SimpleHTTPTransformer() - .setInputCol("data") - .setOutputParser(new JSONOutputParser() - .setDataType(new StructType().add("blah", StringType))) - .setUrl("PUT_YOUR_URL") - .setOutputCol("results") - .setConcurrency(3)) -``` - - - - - - - -### JSONInputParser - - - - - - - - - -```python -from synapse.ml.io.http import * - -jsonIP = (JSONInputParser() - .setInputCol("data") - .setOutputCol("out") - .setUrl("PUT_YOUR_URL")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.io.http._ - -val jsonIP = (new JSONInputParser() - .setInputCol("data") - .setOutputCol("out") - .setUrl("PUT_YOUR_URL")) -``` - - - - - - - -### JSONOutputParser - - - - - - - - - -```python -from synapse.ml.io.http import * -from pyspark.sql.types import StringType, StructType - -jsonOP = (JSONOutputParser() - .setDataType(StructType().add("foo", StringType())) - .setInputCol("unparsedOutput") - .setOutputCol("parsedOutput")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.io.http._ -import org.apache.spark.sql.types.{StringType, StructType} - -val jsonOP = (new JSONOutputParser() - .setDataType(new StructType().add("foo", StringType)) - .setInputCol("unparsedOutput") - .setOutputCol("parsedOutput")) -``` - - - - - - - -### StringOutputParser - - - - - - - - - -```python -from synapse.ml.io.http import * - -sop = (StringOutputParser() - .setInputCol("unparsedOutput") - .setOutputCol("out")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.io.http._ - -val sop = (new StringOutputParser() - .setInputCol("unparsedOutput") - .setOutputCol("out")) -``` - - - - - - - -### CustomInputParser - - - - - - - - - -```python -from synapse.ml.io.http import * - -cip = (CustomInputParser() - .setInputCol("data") - .setOutputCol("out")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.io.http._ - -val cip = (new CustomInputParser() - .setInputCol("data") - .setOutputCol("out") - .setUDF({ x: Int => new HttpPost(s"http://$x") })) -``` - - - - - - - -### CustomOutputParser - - - - - - - - - -```python -from synapse.ml.io.http import * - -cop = (CustomOutputParser() - .setInputCol("unparsedOutput") - .setOutputCol("out")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.io.http._ - -val cop = (new CustomOutputParser() - .setInputCol("unparsedOutput") - .setOutputCol("out")) -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Image.md b/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Image.md deleted file mode 100644 index ec0162439f..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Image.md +++ /dev/null @@ -1,181 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Image - -### ResizeImageTransformer - - - - - - -```python -from synapse.ml.image import * - -# images = (spark.read.format("image") -# .option("dropInvalid", True) -# .load("wasbs://datasets@mmlspark.blob.core.windows.net/LIME/greyscale.jpg")) - -rit = (ResizeImageTransformer() - .setOutputCol("out") - .setHeight(15) - .setWidth(10)) - -# rit.transform(images).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.image._ -import spark.implicits._ - -// val images = (spark.read.format("image") -// .option("dropInvalid", true) -// 
.load("wasbs://datasets@mmlspark.blob.core.windows.net/LIME/greyscale.jpg")) - -val rit = (new ResizeImageTransformer() - .setOutputCol("out") - .setHeight(15) - .setWidth(10)) - -// rit.transform(images).show() -``` - - - - - - - -### UnrollImage - - - - - - - - - -```python -from synapse.ml.image import * -from azure.storage.blob import * - -# images = (spark.read.format("image") -# .option("dropInvalid", True) -# .load("wasbs://datasets@mmlspark.blob.core.windows.net/LIME/greyscale.jpg")) - -# rit = (ResizeImageTransformer() -# .setOutputCol("out") -# .setHeight(15) -# .setWidth(10)) - -# preprocessed = rit.transform(images) - -unroll = (UnrollImage() - .setInputCol("out") - .setOutputCol("final")) - -# unroll.transform(preprocessed).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.image._ -import spark.implicits._ - -val images = (spark.read.format("image") - .option("dropInvalid", true) - .load("wasbs://datasets@mmlspark.blob.core.windows.net/LIME/greyscale.jpg")) - -val rit = (new ResizeImageTransformer() - .setOutputCol("out") - .setHeight(15) - .setWidth(10)) - -val preprocessed = rit.transform(images) - -val unroll = (new UnrollImage() - .setInputCol(rit.getOutputCol) - .setOutputCol("final")) - -unroll.transform(preprocessed).show() -``` - - - - - - - -### UnrollBinaryImage - - - - - - - - - -```python -from synapse.ml.image import * - -unroll = (UnrollBinaryImage() - .setInputCol("input_col") - .setOutputCol("final")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.image._ -import spark.implicits._ - -val unroll = (new UnrollBinaryImage() - .setInputCol("input_col") - .setOutputCol("final")) - -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Stages.md b/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Stages.md deleted file mode 100644 index 0f86f35a6f..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Stages.md +++ /dev/null @@ -1,1038 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Stages - -### Cacher - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, "guitars", "drums"), - (1, "piano", "trumpet"), - (2, "bass", "cymbals"), - (3, "guitars", "drums"), - (4, "piano", "trumpet"), - (5, "bass", "cymbals"), - (6, "guitars", "drums"), - (7, "piano", "trumpet"), - (8, "bass", "cymbals"), - (9, "guitars", "drums"), - (10, "piano", "trumpet"), - (11, "bass", "cymbals") - ], ["numbers", "words", "more"])) - -cacher = Cacher() - -cacher.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = Seq( - (0, "guitars", "drums"), - (1, "piano", "trumpet"), - (2, "bass", "cymbals"), - (3, "guitars", "drums"), - (4, "piano", "trumpet"), - (5, "bass", "cymbals"), - (6, "guitars", "drums"), - (7, "piano", "trumpet"), - (8, "bass", "cymbals"), - (9, "guitars", "drums"), - (10, "piano", "trumpet"), - (11, "bass", "cymbals") - ).toDF("numbers", "words", "more") - -val cacher = new Cacher() - -cacher.transform(df).show() -``` - - - - - - - -### DropColumns - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, 0, "guitars", "drums", 1, True), - (1, 1, "piano", "trumpet", 2, False), - (2, 2, "bass", "cymbals", 3, True) - ], ["numbers", "doubles", "words", "more", "longs", "booleans"])) - -dc = DropColumns().setCols([]) - 
-dc.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - (0, 0.toDouble, "guitars", "drums", 1.toLong, true), - (1, 1.toDouble, "piano", "trumpet", 2.toLong, false), - (2, 2.toDouble, "bass", "cymbals", 3.toLong, true)) - .toDF("numbers", "doubles", "words", "more", "longs", "booleans")) - -val dc = new DropColumns().setCols(Array()) - -dc.transform(df).show() -``` - - - - - - - -### EnsembleByKey - - - - - - - - - -```python -from synapse.ml.stages import * -from pyspark.ml.feature import VectorAssembler - -scoreDF = (spark.createDataFrame([ - (0, "foo", 1.0, .1), - (1, "bar", 4.0, -2.0), - (1, "bar", 0.0, -3.0) - ], ["label1", "label2", "score1", "score2"])) - -va = VectorAssembler().setInputCols(["score1", "score2"]).setOutputCol("v1") -scoreDF2 = va.transform(scoreDF) - -ebk = EnsembleByKey().setKeys(["label1"]).setCols(["score1"]) - -ebk.transform(scoreDF2).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ -import org.apache.spark.ml.feature.VectorAssembler - -val scoreDF = (Seq( - (0, "foo", 1.0, .1), - (1, "bar", 4.0, -2.0), - (1, "bar", 0.0, -3.0)) - .toDF("label1", "label2", "score1", "score2")) - -val va = new VectorAssembler().setInputCols(Array("score1", "score2")).setOutputCol("v1") -val scoreDF2 = va.transform(scoreDF) - -val ebk = new EnsembleByKey().setKey("label1").setCol("score1") - -ebk.transform(scoreDF2).show() -``` - - - - - - - -### Explode - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, ["guitars", "drums"]), - (1, ["piano"]), - (2, []) - ], ["numbers", "words"])) - -explode = Explode().setInputCol("words").setOutputCol("exploded") - -explode.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - (0, Seq("guitars", "drums")), - (1, Seq("piano")), - (2, Seq())) - .toDF("numbers", "words")) - -val explode = new Explode().setInputCol("words").setOutputCol("exploded") - -explode.transform(df).show() -``` - - - - - - - -### Lambda - - - - - - - - - -```python -from synapse.ml.stages import * -from pyspark.sql.types import StringType, StructType - -df = (spark.createDataFrame([ - (0, 0.0, "guitars", "drums", 1, True), - (1, 1.0, "piano", "trumpet", 2, False), - (2, 2.0, "bass", "cymbals", 3, True) - ], ["numbers", "doubles", "words", "more", "longs", "booleans"])) - -def transformFunc(df): - return df.select("numbers") - -def transformSchemaFunc(schema): - return StructType([schema("numbers")]) - -l = (Lambda() - .setTransformFunc(transformFunc) - .setTransformSchemaFunc(transformSchemaFunc)) - -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ -import org.apache.spark.sql.types.{StringType, StructType} - -val df = (Seq( - (0, 0.toDouble, "guitars", "drums", 1.toLong, true), - (1, 1.toDouble, "piano", "trumpet", 2.toLong, false), - (2, 2.toDouble, "bass", "cymbals", 3.toLong, true)) - .toDF("numbers", "doubles", "words", "more", "longs", "booleans")) - -val lambda = (new Lambda() - .setTransform(df => df.select("numbers")) - .setTransformSchema(schema => new StructType(Array(schema("numbers"))))) - -lambda.transform(df).show() -``` - - - - - - - -### DynamicMiniBatchTransformer - - - - - - - - - -```python -from synapse.ml.stages import * -from pyspark.sql.types import StringType, StructType - -df = (spark.createDataFrame([(_, "foo") for _ in range(1, 11)], ["in1", "in2"])) - -dmbt = DynamicMiniBatchTransformer() - -dmbt.transform(df).show() -``` - - - - 
-```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (1 until 11).map(x => (x, "foo")).toDF("in1", "in2") - -val dmbt = new DynamicMiniBatchTransformer() - -dmbt.transform(df).show() -``` - - - - - - - -### FixedMiniBatchTransformer - - - - - - - - - -```python -from synapse.ml.stages import * - -fmbt = (FixedMiniBatchTransformer() - .setBuffered(True) - .setBatchSize(3)) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val fmbt = (new FixedMiniBatchTransformer() - .setBuffered(true) - .setBatchSize(3)) -``` - - - - - - - -### TimeIntervalMiniBatchTransformer - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([(_, "foo") for _ in range(1, 11)], ["in1", "in2"])) - -timbt = (TimeIntervalMiniBatchTransformer() - .setMillisToWait(1000) - .setMaxBatchSize(30)) - -timbt.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (1 until 11).map(x => (x, "foo")).toDF("in1", "in2") - -val timbt = (new TimeIntervalMiniBatchTransformer() - .setMillisToWait(1000) - .setMaxBatchSize(30)) - -timbt.transform(df).show() -``` - - - - - - - -### FlattenBatch - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([(_, "foo") for _ in range(1, 11)], ["in1", "in2"])) - -transDF = DynamicMiniBatchTransformer().transform(df) - -fb = FlattenBatch() - -fb.transform(transDF).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (1 until 11).map(x => (x, "foo")).toDF("in1", "in2") - -val transDF = new DynamicMiniBatchTransformer().transform(df) - -val fb = new FlattenBatch() - -fb.transform(transDF).show() -``` - - - - - - - -### RenameColumn - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, 0, "guitars", "drums", 1, True), - (1, 1, "piano", "trumpet", 2, False), - (2, 2, "bass", "cymbals", 3, True) -], ["numbers", "doubles", "words", "more", "longs", "booleans"])) - -rc = RenameColumn().setInputCol("words").setOutputCol("numbers") - -rc.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - (0, 0.toDouble, "guitars", "drums", 1.toLong, true), - (1, 1.toDouble, "piano", "trumpet", 2.toLong, false), - (2, 2.toDouble, "bass", "cymbals", 3.toLong, true)) - .toDF("numbers", "doubles", "words", "more", "longs", "booleans")) - -val rc = new RenameColumn().setInputCol("words").setOutputCol("numbers") - -rc.transform(df).show() -``` - - - - - - - -### Repartition - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, "guitars", "drums"), - (1, "piano", "trumpet"), - (2, "bass", "cymbals"), - (3, "guitars", "drums"), - (4, "piano", "trumpet"), - (5, "bass", "cymbals"), - (6, "guitars", "drums"), - (7, "piano", "trumpet"), - (8, "bass", "cymbals"), - (9, "guitars", "drums"), - (10, "piano", "trumpet"), - (11, "bass", "cymbals") -], ["numbers", "words", "more"])) - -repartition = Repartition().setN(1) - -repartition.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - (0, "guitars", "drums"), - (1, "piano", "trumpet"), - (2, "bass", "cymbals"), - (3, "guitars", "drums"), - (4, "piano", "trumpet"), - (5, "bass", "cymbals"), - (6, "guitars", "drums"), - (7, "piano", "trumpet"), - (8, "bass", "cymbals"), - (9, "guitars", "drums"), - (10, "piano", "trumpet"), - (11, "bass", "cymbals") - ).toDF("numbers", "words", "more")) 
- -val repartition = new Repartition().setN(1) - -repartition.transform(df).show() -``` - - - - - - - -### SelectColumns - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, 0.0, "guitars", "drums", 1, True), - (1, 1.0, "piano", "trumpet", 2, False), - (2, 2.0, "bass", "cymbals", 3, True) -], ["numbers", "words", "more"])) - -sc = SelectColumns().setCols(["words", "more"]) - -sc.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - (0, 0.toDouble, "guitars", "drums", 1.toLong, true), - (1, 1.toDouble, "piano", "trumpet", 2.toLong, false), - (2, 2.toDouble, "bass", "cymbals", 3.toLong, true)) - .toDF("numbers", "doubles", "words", "more", "longs", "booleans")) - -val sc = new SelectColumns().setCols(Array("words", "more")) - -sc.transform(df).show() -``` - - - - - - - -### StratifiedRepartition - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, "Blue", 2), - (0, "Red", 2), - (0, "Green", 2), - (1, "Purple", 2), - (1, "Orange", 2), - (1, "Indigo", 2), - (2, "Violet", 2), - (2, "Black", 2), - (2, "White", 2), - (3, "Gray", 2), - (3, "Yellow", 2), - (3, "Cerulean", 2) -], ["values", "colors", "const"])) - -sr = StratifiedRepartition().setLabelCol("values").setMode("equal") -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - (0, "Blue", 2), - (0, "Red", 2), - (0, "Green", 2), - (1, "Purple", 2), - (1, "Orange", 2), - (1, "Indigo", 2), - (2, "Violet", 2), - (2, "Black", 2), - (2, "White", 2), - (3, "Gray", 2), - (3, "Yellow", 2), - (3, "Cerulean", 2) - ).toDF("values", "colors", "const")) - -val sr = new StratifiedRepartition().setLabelCol("values").setMode("equal") - -sr.transform(df).show() -``` - - - - - - - -### SummarizeData - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - (0, 0.0, "guitars", "drums", 1, True), - (1, 1.0, "piano", "trumpet", 2, False), - (2, 2.0, "bass", "cymbals", 3, True) -], ["numbers", "doubles", "words", "more", "longs", "booleans"])) - -summary = SummarizeData() - -summary.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - (0, 0.toDouble, "guitars", "drums", 1.toLong, true), - (1, 1.toDouble, "piano", "trumpet", 2.toLong, false), - (2, 2.toDouble, "bass", "cymbals", 3.toLong, true)) - .toDF("numbers", "doubles", "words", "more", "longs", "booleans")) - -val summary = new SummarizeData() - -summary.transform(df).show() -``` - - - - - - - -### TextPreprocessor - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - ("The happy sad boy drank sap", ), - ("The hater sad doy drank sap", ), - ("foo", ), - ("The hater sad doy aABc0123456789Zz_", ) -], ["words1"])) - -testMap = {"happy": "sad", "hater": "sap", - "sad": "sap", "sad doy": "sap"} - -textPreprocessor = (TextPreprocessor() - .setNormFunc("lowerCase") - .setMap(testMap) - .setInputCol("words1") - .setOutputCol("out")) - -textPreprocessor.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - ("The happy sad boy drank sap", ), - ("The hater sad doy drank sap", ), - ("foo", ), - ("The hater sad doy aABc0123456789Zz_", )) - .toDF("words1")) - -val testMap = Map[String, String] ( - "happy" -> "sad", - "hater" -> "sap", - "sad" -> "sap", - "sad doy" -> "sap" - ) - -val textPreprocessor = (new TextPreprocessor() - 
.setNormFunc("lowerCase") - .setMap(testMap) - .setInputCol("words1") - .setOutputCol("out")) - -textPreprocessor.transform(df).show() -``` - - - - - - - -### UDFTransformer - - - - - - - - - -```python -from synapse.ml.stages import * -from pyspark.sql.functions import udf - -df = (spark.createDataFrame([ - (0, 0.0, "guitars", "drums", 1, True), - (1, 1.0, "piano", "trumpet", 2, False), - (2, 2.0, "bass", "cymbals", 3, True) -], ["numbers", "doubles", "words", "more", "longs", "booleans"])) - -stringToIntegerUDF = udf(lambda x: 1) - -udfTransformer = (UDFTransformer() - .setUDF(stringToIntegerUDF) - .setInputCol("numbers") - .setOutputCol("out")) - -udfTransformer.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ -import org.apache.spark.sql.functions.udf - -val df = (Seq( - (0, 0.toDouble, "guitars", "drums", 1.toLong, true), - (1, 1.toDouble, "piano", "trumpet", 2.toLong, false), - (2, 2.toDouble, "bass", "cymbals", 3.toLong, true)) - .toDF("numbers", "doubles", "words", "more", "longs", "booleans")) - -val stringToIntegerUDF = udf((_: String) => 1) - -val udfTransformer = (new UDFTransformer() - .setUDF(stringToIntegerUDF) - .setInputCol("numbers") - .setOutputCol("out")) - -udfTransformer.transform(df).show() -``` - - - - - - - -### UnicodeNormalize - - - - - - - - - -```python -from synapse.ml.stages import * - -df = (spark.createDataFrame([ - ("Schön", 1), - ("Scho\u0308n", 1), - (None, 1) -], ["words1", "dummy"])) - -unicodeNormalize = (UnicodeNormalize() - .setForm("NFC") - .setInputCol("words1") - .setOutputCol("norm1")) - -unicodeNormalize.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.stages._ - -val df = (Seq( - ("Schön", 1), - ("Scho\u0308n", 1), - (null, 1)) - .toDF("words1", "dummy")) - -val unicodeNormalize = (new UnicodeNormalize() - .setForm("NFC") - .setInputCol("words1") - .setOutputCol("norm1")) - -unicodeNormalize.transform(df).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_SuperpixelTransformer.md b/website/versioned_docs/version-0.10.0/documentation/transformers/core/_SuperpixelTransformer.md deleted file mode 100644 index 64d3bac3de..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_SuperpixelTransformer.md +++ /dev/null @@ -1,46 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## LIME - -### SuperpixelTransformer - - - - - - -```python -from synapse.ml.lime import * - -spt = (SuperpixelTransformer() - .setInputCol("images")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.lime._ - -val spt = (new SuperpixelTransformer() - .setInputCol("images")) -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Train.md b/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Train.md deleted file mode 100644 index 8717abddcd..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/core/_Train.md +++ /dev/null @@ -1,151 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - - - -## Train - -### ComputeModelStatistics - - - - - - -```python -from synapse.ml.train import * -from numpy import random - -df = spark.createDataFrame( - [(random.rand(), random.rand()) for _ in range(4096)], ["label", "prediction"] -) - -cms = 
(ComputeModelStatistics() - .setLabelCol("label") - .setScoredLabelsCol("prediction") - .setEvaluationMetric("classification")) - -cms.transform(df).show() -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.train._ -import scala.util.Random - -val rand = new Random(1337) -val df = (Seq.fill(4096)(rand.nextDouble()) - .zip(Seq.fill(4096)(rand.nextDouble())) - .toDF("label", "prediction")) - -val cms = (new ComputeModelStatistics() - .setLabelCol("label") - .setScoredLabelsCol("prediction") - .setEvaluationMetric("classification")) - -cms.transform(df).show() -``` - - - - - - - -### ComputePerInstanceStatistics - - - - - - - - - -```python -from synapse.ml.train import * - -cps = (ComputePerInstanceStatistics() - .setLabelCol("label") - .setScoredLabelsCol("LogRegScoredLabelsCol") - .setScoresCol("LogRegScoresCol") - .setScoredProbabilitiesCol("LogRegProbCol") - .setEvaluationMetric("classification")) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.train._ -import org.apache.spark.ml.classification.LogisticRegression -import org.apache.spark.ml.feature.FastVectorAssembler - -val logisticRegression = (new LogisticRegression() - .setRegParam(0.3) - .setElasticNetParam(0.8) - .setMaxIter(10) - .setLabelCol("label") - .setPredictionCol("LogRegScoredLabelsCol") - .setRawPredictionCol("LogRegScoresCol") - .setProbabilityCol("LogRegProbCol") - .setFeaturesCol("features")) - -val dataset = spark.createDataFrame(Seq( - (0.0, 2, 0.50, 0.60, 0.0), - (1.0, 3, 0.40, 0.50, 1.0), - (2.0, 4, 0.78, 0.99, 2.0), - (3.0, 5, 0.12, 0.34, 3.0), - (0.0, 1, 0.50, 0.60, 0.0), - (1.0, 3, 0.40, 0.50, 1.0), - (2.0, 3, 0.78, 0.99, 2.0), - (3.0, 4, 0.12, 0.34, 3.0), - (0.0, 0, 0.50, 0.60, 0.0), - (1.0, 2, 0.40, 0.50, 1.0), - (2.0, 3, 0.78, 0.99, 2.0), - (3.0, 4, 0.12, 0.34, 3.0))) - .toDF("label", "col1", "col2", "col3", "prediction") - -val assembler = (new FastVectorAssembler() - .setInputCols(Array("col1", "col2", "col3")) - .setOutputCol("features")) -val assembledDataset = assembler.transform(dataset) -val model = logisticRegression.fit(assembledDataset) -val scoredData = model.transform(assembledDataset) - -val cps = (new ComputePerInstanceStatistics() - .setLabelCol("label") - .setScoredLabelsCol("LogRegScoredLabelsCol") - .setScoresCol("LogRegScoresCol") - .setScoredProbabilitiesCol("LogRegProbCol") - .setEvaluationMetric("classification")) - -cps.transform(scoredData).show() -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/deep_learning/_ONNXModel.md b/website/versioned_docs/version-0.10.0/documentation/transformers/deep_learning/_ONNXModel.md deleted file mode 100644 index cefea79300..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/deep_learning/_ONNXModel.md +++ /dev/null @@ -1,46 +0,0 @@ -import Tabs from '@theme/Tabs'; -import TabItem from '@theme/TabItem'; -import DocTable from "@theme/DocumentationTable"; - - -## ONNXModel - - - - -```py -from synapse.ml.onnx import ONNXModel - -model_path = "PUT_YOUR_MODEL_PATH" -onnx_ml = (ONNXModel() - .setModelLocation(model_path) - .setFeedDict({"float_input": "features"}) - .setFetchDict({"prediction": "output_label", "rawProbability": "output_probability"})) -``` - - - - -```scala -import com.microsoft.azure.synapse.ml.onnx._ - -val model_path = "PUT_YOUR_MODEL_PATH" -val onnx_ml = (new ONNXModel() - .setModelLocation(model_path) - .setFeedDict(Map("float_input" -> "features")) - .setFetchDict(Map("prediction" -> "output_label", "rawProbability" -> 
"output_probability"))) -``` - - - - - diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_cognitive.md b/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_cognitive.md deleted file mode 100644 index 0cd76ed4e9..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_cognitive.md +++ /dev/null @@ -1,55 +0,0 @@ ---- -title: Transformers - Cognitive -sidebar_label: Cognitive -hide_title: true ---- - - -import TextAnalytics, {toc as TextAnalyticsTOC} from './cognitive/_TextAnalytics.md'; - - - - -import Translator, {toc as TranslatorTOC} from './cognitive/_Translator.md'; - - - - -import ComputerVision, {toc as ComputerVisionTOC} from './cognitive/_ComputerVision.md'; - - - - -import FormRecognizer, {toc as FormRecognizerTOC} from './cognitive/_FormRecognizer.md'; - - - - -import AnomalyDetection, {toc as AnomalyDetectionTOC} from './cognitive/_AnomalyDetection.md'; - - - - -import Face, {toc as FaceTOC} from './cognitive/_Face.md'; - - - - -import SpeechToText, {toc as SpeechToTextTOC} from './cognitive/_SpeechToText.md'; - - - - -import AzureSearch, {toc as AzureSearchTOC} from './cognitive/_AzureSearch.md'; - - - - -import BingImageSearch, {toc as BingImageSearchTOC} from './cognitive/_BingImageSearch.md'; - - - - -export const toc = [...TextAnalyticsTOC, ...TranslatorTOC, ...ComputerVisionTOC, -...FormRecognizerTOC, ...AnomalyDetectionTOC, ...FaceTOC, ...SpeechToTextTOC, -...AzureSearchTOC, ...BingImageSearchTOC] diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_core.md b/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_core.md deleted file mode 100644 index 8fd5d5afe3..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_core.md +++ /dev/null @@ -1,43 +0,0 @@ ---- -title: Transformers - Core -sidebar_label: Core -hide_title: true ---- - - -import Explainers, {toc as ExplainersTOC} from './core/_Explainers.md'; - - - - -import Featurize, {toc as FeaturizeTOC} from './core/_Featurize.md'; - - - - -import Image, {toc as ImageTOC} from './core/_Image.md'; - - - - -import IO, {toc as IOTOC} from './core/_IO.md'; - - - - -import SuperpixelTransformer, {toc as LIMETOC} from './core/_SuperpixelTransformer.md'; - - - - -import Stages, {toc as StagesTOC} from './core/_Stages.md'; - - - - -import Train, {toc as TrainTOC} from './core/_Train.md'; - - - -export const toc = [...ExplainersTOC, ...FeaturizeTOC, ...ImageTOC, -...IOTOC, ...LIMETOC, ...StagesTOC, ...TrainTOC] diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_deep_learning.md b/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_deep_learning.md deleted file mode 100644 index c02ff1e621..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_deep_learning.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Deep Learning -sidebar_label: Deep Learning ---- - -import ONNXModel, {toc as ONNXModelTOC} from './deep_learning/_ONNXModel.md'; - - - -export const toc = [...ONNXModelTOC] diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_opencv.md b/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_opencv.md deleted file mode 100644 index fb5ce7f173..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_opencv.md +++ /dev/null @@ 
-1,13 +0,0 @@ ---- -title: Transformers - OpenCV -sidebar_label: OpenCV -hide_title: true ---- - -# OpenCV - -import OpenCV, {toc as OpenCVTOC} from './_OpenCV.md'; - - - -export const toc = [...OpenCVTOC] diff --git a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_vw.md b/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_vw.md deleted file mode 100644 index 019e7d2f56..0000000000 --- a/website/versioned_docs/version-0.10.0/documentation/transformers/transformers_vw.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Transformers - Vowpal Wabbit -sidebar_label: Vowpal Wabbit -hide_title: true ---- - -# Vowpal Wabbit - -import VW, {toc as VWTOC} from './_VW.md'; - - - -export const toc = [...VWTOC] diff --git a/website/versioned_docs/version-0.10.0/features/classification/Classification - Adult Census with Vowpal Wabbit.md b/website/versioned_docs/version-0.10.0/features/classification/Classification - Adult Census with Vowpal Wabbit.md deleted file mode 100644 index ce79bb5369..0000000000 --- a/website/versioned_docs/version-0.10.0/features/classification/Classification - Adult Census with Vowpal Wabbit.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: Classification - Adult Census with Vowpal Wabbit -hide_title: true -status: stable ---- -# Classification - Adult Census using Vowpal Wabbit in SynapseML - -In this example, we predict incomes from the *Adult Census* dataset using Vowpal Wabbit (VW) classifier in SynapseML. -First, we read the data and split it into train and test sets as in this [example](https://github.com/Microsoft/SynapseML/blob/master/notebooks/Classification%20-%20Adult%20Census.ipynb -). - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -data = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet" -) -data = data.select(["education", "marital-status", "hours-per-week", "income"]) -train, test = data.randomSplit([0.75, 0.25], seed=123) -train.limit(10).toPandas() -``` - -Next, we define a pipeline that includes feature engineering and training of a VW classifier. We use a featurizer provided by VW that hashes the feature names. -Note that VW expects classification labels being -1 or 1. Thus, the income category is mapped to this space before feeding training data into the pipeline. - - -```python -from pyspark.sql.functions import when, col -from pyspark.ml import Pipeline -from synapse.ml.vw import VowpalWabbitFeaturizer, VowpalWabbitClassifier - -# Define classification label -train = ( - train.withColumn("label", when(col("income").contains("<"), 0.0).otherwise(1.0)) - .repartition(1) - .cache() -) -print(train.count()) - -# Specify featurizer -vw_featurizer = VowpalWabbitFeaturizer( - inputCols=["education", "marital-status", "hours-per-week"], outputCol="features" -) - -# Define VW classification model -args = "--loss_function=logistic --quiet --holdout_off" -vw_model = VowpalWabbitClassifier( - featuresCol="features", labelCol="label", passThroughArgs=args, numPasses=10 -) - -# Create a pipeline -vw_pipeline = Pipeline(stages=[vw_featurizer, vw_model]) -``` - -Then, we are ready to train the model by fitting the pipeline with the training data. - - -```python -# Train the model -vw_trained = vw_pipeline.fit(train) -``` - -After the model is trained, we apply it to predict the income of each sample in the test set. 
- - -```python -# Making predictions -test = test.withColumn("label", when(col("income").contains("<"), 0.0).otherwise(1.0)) -prediction = vw_trained.transform(test) -prediction.limit(10).toPandas() -``` - -Finally, we evaluate the model's performance using the `ComputeModelStatistics` function, which computes the confusion matrix, accuracy, precision, recall, and AUC by default for classification models. - - -```python -from synapse.ml.train import ComputeModelStatistics - -metrics = ComputeModelStatistics( - evaluationMetric="classification", labelCol="label", scoredLabelsCol="prediction" -).transform(prediction) -metrics.toPandas() -``` diff --git a/website/versioned_docs/version-0.10.0/features/classification/Classification - Adult Census.md b/website/versioned_docs/version-0.10.0/features/classification/Classification - Adult Census.md deleted file mode 100644 index ac0fff7e01..0000000000 --- a/website/versioned_docs/version-0.10.0/features/classification/Classification - Adult Census.md +++ /dev/null @@ -1,67 +0,0 @@ ---- -title: Classification - Adult Census -hide_title: true -status: stable ---- -## Classification - Adult Census - -In this example, we try to predict incomes from the *Adult Census* dataset. - -First, we import the packages (use `help(synapse)` to view contents), - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -import numpy as np -import pandas as pd -``` - -Now let's read the data and split it into train and test sets: - - -```python -data = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet" -) -data = data.select(["education", "marital-status", "hours-per-week", "income"]) -train, test = data.randomSplit([0.75, 0.25], seed=123) -train.limit(10).toPandas() -``` - -`TrainClassifier` can be used to initialize and fit a model; it wraps SparkML classifiers. -You can use `help(synapse.ml.train.TrainClassifier)` to view the different parameters. - -Note that it implicitly converts the data into the format expected by the algorithm: it tokenizes -and hashes strings, one-hot encodes categorical variables, assembles the features into a vector, -and so on. The parameter `numFeatures` controls the number of hashed features. - - -```python -from synapse.ml.train import TrainClassifier -from pyspark.ml.classification import LogisticRegression - -model = TrainClassifier( - model=LogisticRegression(), labelCol="income", numFeatures=256 -).fit(train) -``` - -Finally, we save the model so it can be used in a scoring program. - - -```python -if os.environ.get("AZURE_SERVICE", None) != "Microsoft.ProjectArcadia": - model.write().overwrite().save("dbfs:/AdultCensus.mml") -else: - model.write().overwrite().save( - "abfss://synapse@mmlsparkeuap.dfs.core.windows.net/models/AdultCensus.mml" - ) -``` diff --git a/website/versioned_docs/version-0.10.0/features/classification/Classification - Before and After SynapseML.md b/website/versioned_docs/version-0.10.0/features/classification/Classification - Before and After SynapseML.md deleted file mode 100644 index dcfbcc562f..0000000000 --- a/website/versioned_docs/version-0.10.0/features/classification/Classification - Before and After SynapseML.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -title: Classification - Before and After SynapseML -hide_title: true -status: stable ---- -## Classification - Before and After SynapseML - -### 1. Introduction - -
- -In this tutorial, we perform the same classification task in two -different ways: once using plain **`pyspark`** and once using the -**`synapseml`** library. The two methods yield the same performance, -but one of the two libraries is drastically simpler to use and iterate -on (can you guess which one?). - -The task is simple: Predict whether a user's review of a book sold on -Amazon is good (rating > 3) or bad based on the text of the review. We -accomplish this by training LogisticRegression learners with different -hyperparameters and choosing the best model. - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - -### 2. Read the data - -We download and read in the data. We show a sample below: - - -```python -rawData = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/BookReviewsFromAmazon10K.parquet" -) -rawData.show(5) -``` - -### 3. Extract more features and process data - -Real data, however, is more complex than the above dataset. It is common -for a dataset to have features of multiple types: text, numeric, -categorical. To illustrate how difficult it is to work with these -datasets, we add two numerical features to the dataset: the **word -count** of the review and the **mean word length**. - - -```python -from pyspark.sql.functions import udf -from pyspark.sql.types import * - - -def wordCount(s): - return len(s.split()) - - -def wordLength(s): - import numpy as np - - ss = [len(w) for w in s.split()] - return round(float(np.mean(ss)), 2) - - -wordLengthUDF = udf(wordLength, DoubleType()) -wordCountUDF = udf(wordCount, IntegerType()) -``` - - -```python -from synapse.ml.stages import UDFTransformer - -wordLength = "wordLength" -wordCount = "wordCount" -wordLengthTransformer = UDFTransformer( - inputCol="text", outputCol=wordLength, udf=wordLengthUDF -) -wordCountTransformer = UDFTransformer( - inputCol="text", outputCol=wordCount, udf=wordCountUDF -) -``` - - -```python -from pyspark.ml import Pipeline - -data = ( - Pipeline(stages=[wordLengthTransformer, wordCountTransformer]) - .fit(rawData) - .transform(rawData) - .withColumn("label", rawData["rating"] > 3) - .drop("rating") -) -``` - - -```python -data.show(5) -``` - -### 4a. Classify using pyspark - -To choose the best LogisticRegression classifier using the `pyspark` -library, we need to *explicitly* perform the following steps: - -1. Process the features: - * Tokenize the text column - * Hash the tokenized column into a fixed-size vector using `HashingTF` - * Merge the numeric features with the vector in the step above -2. Process the label column: cast it into the proper type. -3. Train multiple LogisticRegression algorithms on the `train` dataset - with different hyperparameters -4. Compute the area under the ROC curve for each of the trained models - and select the model with the highest metric as computed on the - `test` dataset -5. Evaluate the best model on the `validation` set - -As you can see below, there is a lot of work involved and a lot of -steps where something can go wrong!
- - -```python -from pyspark.ml.feature import Tokenizer, HashingTF -from pyspark.ml.feature import VectorAssembler - -# Featurize text column -tokenizer = Tokenizer(inputCol="text", outputCol="tokenizedText") -numFeatures = 10000 -hashingScheme = HashingTF( - inputCol="tokenizedText", outputCol="TextFeatures", numFeatures=numFeatures -) -tokenizedData = tokenizer.transform(data) -featurizedData = hashingScheme.transform(tokenizedData) - -# Merge text and numeric features in one feature column -featureColumnsArray = ["TextFeatures", "wordCount", "wordLength"] -assembler = VectorAssembler(inputCols=featureColumnsArray, outputCol="features") -assembledData = assembler.transform(featurizedData) - -# Select only columns of interest -# Convert rating column from boolean to int -processedData = assembledData.select("label", "features").withColumn( - "label", assembledData.label.cast(IntegerType()) -) -``` - - -```python -from pyspark.ml.evaluation import BinaryClassificationEvaluator -from pyspark.ml.classification import LogisticRegression - -# Prepare data for learning -train, test, validation = processedData.randomSplit([0.60, 0.20, 0.20], seed=123) - -# Train the models on the 'train' data -lrHyperParams = [0.05, 0.1, 0.2, 0.4] -logisticRegressions = [ - LogisticRegression(regParam=hyperParam) for hyperParam in lrHyperParams -] -evaluator = BinaryClassificationEvaluator( - rawPredictionCol="rawPrediction", metricName="areaUnderROC" -) -metrics = [] -models = [] - -# Select the best model -for learner in logisticRegressions: - model = learner.fit(train) - models.append(model) - scoredData = model.transform(test) - metrics.append(evaluator.evaluate(scoredData)) -bestMetric = max(metrics) -bestModel = models[metrics.index(bestMetric)] - -# Get AUC on the validation dataset -scoredVal = bestModel.transform(validation) -print(evaluator.evaluate(scoredVal)) -``` - -### 4b. Classify using synapseml - -Life is a lot simpler when using `synapseml`! - -1. The **`TrainClassifier`** Estimator featurizes the data internally, - as long as the columns selected in the `train`, `test`, and `validation` - datasets represent the features - -2. The **`FindBestModel`** Estimator finds the best model from a pool of - trained models by finding the model that performs best on the `test` - dataset given the specified metric - -3. 
The **`ComputeModelStatistics`** Transformer computes several
-   metrics on a scored dataset (in our case, the `validation` dataset)
-   in a single pass
-
-
-```python
-from synapse.ml.train import TrainClassifier, ComputeModelStatistics
-from synapse.ml.automl import FindBestModel
-
-# Prepare data for learning
-train, test, validation = data.randomSplit([0.60, 0.20, 0.20], seed=123)
-
-# Train the models on the 'train' data
-lrHyperParams = [0.05, 0.1, 0.2, 0.4]
-logisticRegressions = [
-    LogisticRegression(regParam=hyperParam) for hyperParam in lrHyperParams
-]
-lrmodels = [
-    TrainClassifier(model=lrm, labelCol="label", numFeatures=10000).fit(train)
-    for lrm in logisticRegressions
-]
-
-# Select the best model
-bestModel = FindBestModel(evaluationMetric="AUC", models=lrmodels).fit(test)
-
-
-# Get AUC on the validation dataset
-predictions = bestModel.transform(validation)
-metrics = ComputeModelStatistics().transform(predictions)
-print(
-    "Best model's AUC on validation set = "
-    + "{0:.2f}%".format(metrics.first()["AUC"] * 100)
-)
-```
diff --git a/website/versioned_docs/version-0.10.0/features/classification/Classification - Twitter Sentiment with Vowpal Wabbit.md b/website/versioned_docs/version-0.10.0/features/classification/Classification - Twitter Sentiment with Vowpal Wabbit.md
deleted file mode 100644
index c597f737f8..0000000000
--- a/website/versioned_docs/version-0.10.0/features/classification/Classification - Twitter Sentiment with Vowpal Wabbit.md
+++ /dev/null
@@ -1,225 +0,0 @@
----
-title: Classification - Twitter Sentiment with Vowpal Wabbit
-hide_title: true
-status: stable
----
-# Twitter Sentiment Classification using Vowpal Wabbit in SynapseML
-
-In this example, we show how to build a sentiment classification model using Vowpal Wabbit (VW) in SynapseML. The dataset we use to train and evaluate the model is the [Sentiment140](http://help.sentiment140.com/for-students/?source=post_page---------------------------) Twitter data. First, we import a few packages that we need.
- - -```python -import os -import re -import urllib.request -import numpy as np -import pandas as pd -from zipfile import ZipFile -from bs4 import BeautifulSoup -from pyspark.sql.functions import udf, rand, when, col -from pyspark.sql.types import StructType, StructField, DoubleType, StringType -from pyspark.ml import Pipeline -from pyspark.ml.feature import CountVectorizer, RegexTokenizer -from synapse.ml.vw import VowpalWabbitClassifier -from synapse.ml.train import ComputeModelStatistics -from pyspark.mllib.evaluation import BinaryClassificationMetrics -import matplotlib.pyplot as plt -``` - - -```python -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -# URL to download the sentiment140 dataset and data file names -DATA_URL = "https://mmlspark.blob.core.windows.net/publicwasb/twittersentimenttrainingandtestdata.zip" -TRAIN_FILENAME = "training.1600000.processed.noemoticon.csv" -TEST_FILENAME = "testdata.manual.2009.06.14.csv" -# Folder for storing the downloaded data -DATA_FOLDER = "data" -# Data column names -COL_NAMES = ["label", "id", "date", "query_string", "user", "text"] -# Text encoding type of the data -ENCODING = "iso-8859-1" -``` - -## Data Preparation - -We use [Sentiment140](http://help.sentiment140.com/for-students/?source=post_page---------------------------) twitter data which originated from a Standford research project to train and evaluate VW classification model on Spark. The same dataset has been used in a previous [Azure Machine Learning sample](https://github.com/Azure-Samples/MachineLearningSamples-TwitterSentimentPrediction) on twitter sentiment prediction. Before using the data to build the classification model, we first download and clean up the data. - - -```python -def download_data(url, data_folder=DATA_FOLDER, filename="downloaded_data.zip"): - """Download and extract data from url""" - - data_dir = "./" + DATA_FOLDER - if not os.path.exists(data_dir): - os.makedirs(data_dir) - downloaded_filepath = os.path.join(data_dir, filename) - print("Downloading data...") - urllib.request.urlretrieve(url, downloaded_filepath) - print("Extracting data...") - zipfile = ZipFile(downloaded_filepath) - zipfile.extractall(data_dir) - zipfile.close() - print("Finished data downloading and extraction.") - - -download_data(DATA_URL) -``` - -Let's read the training data into a Spark DataFrame. - - -```python -df_train = pd.read_csv( - os.path.join(".", DATA_FOLDER, TRAIN_FILENAME), - header=None, - names=COL_NAMES, - encoding=ENCODING, -) -df_train = spark.createDataFrame(df_train, verifySchema=False) -``` - -We can take a look at the training data and check how many samples it has. We should see that there are 1.6 million samples in the training data. There are 6 fields in the training data: -* label: the sentiment of the tweet (0.0 = negative, 2.0 = neutral, 4.0 = positive) -* id: the id of the tweet -* date: the date of the tweet -* query_string: The query used to extract the data. If there is no query, then this value is NO_QUERY. -* user: the user that tweeted -* text: the text of the tweet - - -```python -df_train.limit(10).toPandas() -``` - - -```python -print("Number of training samples: ", df_train.count()) -``` - -Before training the model, we randomly permute the data to mix negative and positive samples. This is helpful for properly training online learning algorithms like VW. 
To speed up model training, we use a subset of the data to train the model. If training with the full training set, typically you will see better performance of the model on the test set. - - -```python -df_train = ( - df_train.orderBy(rand()) - .limit(100000) - .withColumn("label", when(col("label") > 0, 1.0).otherwise(0.0)) - .select(["label", "text"]) -) -``` - -## VW SynapseML Training - -Now we are ready to define a pipeline which consists of feture engineering steps and the VW model. - - -```python -# Specify featurizers -tokenizer = RegexTokenizer(inputCol="text", outputCol="words") - -count_vectorizer = CountVectorizer(inputCol="words", outputCol="features") - -# Define VW classification model -args = "--loss_function=logistic --quiet --holdout_off" -vw_model = VowpalWabbitClassifier( - featuresCol="features", labelCol="label", passThroughArgs=args, numPasses=10 -) - -# Create a pipeline -vw_pipeline = Pipeline(stages=[tokenizer, count_vectorizer, vw_model]) -``` - -With the prepared training data, we can fit the model pipeline as follows. - - -```python -vw_trained = vw_pipeline.fit(df_train) -``` - -## Model Performance Evaluation - -After training the model, we evaluate the performance of the model using the test set which is manually labeled. - - -```python -df_test = pd.read_csv( - os.path.join(".", DATA_FOLDER, TEST_FILENAME), - header=None, - names=COL_NAMES, - encoding=ENCODING, -) -df_test = spark.createDataFrame(df_test, verifySchema=False) -``` - -We only use positive and negative tweets in the test set to evaluate the model, since our model is a binary classification model trained with only positive and negative tweets. - - -```python -print("Number of test samples before filtering: ", df_test.count()) -df_test = ( - df_test.filter(col("label") != 2.0) - .withColumn("label", when(col("label") > 0, 1.0).otherwise(0.0)) - .select(["label", "text"]) -) -print("Number of test samples after filtering: ", df_test.count()) -``` - - -```python -# Make predictions -predictions = vw_trained.transform(df_test) -predictions.limit(10).toPandas() -``` - - -```python -# Compute model performance metrics -metrics = ComputeModelStatistics( - evaluationMetric="classification", labelCol="label", scoredLabelsCol="prediction" -).transform(predictions) -metrics.toPandas() -``` - - -```python -# Utility class for plotting ROC curve (https://stackoverflow.com/questions/52847408/pyspark-extract-roc-curve) -class CurveMetrics(BinaryClassificationMetrics): - def __init__(self, *args): - super(CurveMetrics, self).__init__(*args) - - def get_curve(self, method): - rdd = getattr(self._java_model, method)().toJavaRDD() - points = [] - for row in rdd.collect(): - points += [(float(row._1()), float(row._2()))] - return points - - -preds = predictions.select("label", "probability").rdd.map( - lambda row: (float(row["probability"][1]), float(row["label"])) -) -roc_points = CurveMetrics(preds).get_curve("roc") - -# Plot ROC curve -fig = plt.figure() -x_val = [x[0] for x in roc_points] -y_val = [x[1] for x in roc_points] -plt.title("ROC curve on test set") -plt.xlabel("False positive rate") -plt.ylabel("True positive rate") -plt.plot(x_val, y_val) -# Use display() if you're on Azure Databricks or you can do plt.show() -plt.show() -``` - -You should see an ROC curve like the following after the above cell is executed. 
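Beyond evaluating on the labeled test set, it can also be useful to sanity-check the trained pipeline on a few hand-written examples. The sketch below is illustrative only: the example tweets are invented, and it simply reuses the `vw_trained` pipeline fitted above.

```python
# Score a few ad-hoc example tweets with the trained pipeline (illustrative only)
sample_tweets = spark.createDataFrame(
    [
        ("I love this phone, the battery lasts forever!",),
        ("Worst customer service I have ever experienced.",),
    ],
    ["text"],
)

# The pipeline tokenizes and vectorizes each row, then VW scores it:
# prediction 1.0 = positive sentiment, 0.0 = negative sentiment
sample_preds = vw_trained.transform(sample_tweets)
sample_preds.select("text", "prediction").show(truncate=False)
```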
- - diff --git a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Analyze Text.md b/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Analyze Text.md deleted file mode 100644 index 0bc5819e50..0000000000 --- a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Analyze Text.md +++ /dev/null @@ -1,85 +0,0 @@ ---- -title: CognitiveServices - Analyze Text -hide_title: true -status: stable ---- -# Cognitive Services - Analyze Text - - - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["TEXT_API_KEY"] = getSecret("mmlspark-build-keys", "cognitive-api-key") - from notebookutils.visualization import display - -# put your service keys here -key = os.environ["TEXT_API_KEY"] -location = "eastus" -``` - - -```python -df = spark.createDataFrame( - data=[ - ["en", "Hello Seattle"], - ["en", "There once was a dog who lived in London and thought she was a human"], - ], - schema=["language", "text"], -) -``` - - -```python -display(df) -``` - - -```python -from synapse.ml.cognitive import * - -text_analyze = ( - TextAnalyze() - .setLocation(location) - .setSubscriptionKey(key) - .setTextCol("text") - .setOutputCol("textAnalysis") - .setErrorCol("error") - .setLanguageCol("language") - # set the tasks to perform - .setEntityRecognitionTasks([{"parameters": {"model-version": "latest"}}]) - .setKeyPhraseExtractionTasks([{"parameters": {"model-version": "latest"}}]) - # Uncomment these lines to add more tasks - # .setEntityRecognitionPiiTasks([{"parameters": { "model-version": "latest"}}]) - # .setEntityLinkingTasks([{"parameters": { "model-version": "latest"}}]) - # .setSentimentAnalysisTasks([{"parameters": { "model-version": "latest"}}]) -) - -df_results = text_analyze.transform(df) -``` - - -```python -display(df_results) -``` - - -```python -from pyspark.sql.functions import col - -# reformat and display for easier viewing -display( - df_results.select( - "language", "text", "error", col("textAnalysis").getItem(0) - ).select( # we are not batching so only have a single result - "language", "text", "error", "textAnalysis[0].*" - ) # explode the Text Analytics tasks into columns -) -``` diff --git a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Celebrity Quote Analysis.md b/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Celebrity Quote Analysis.md deleted file mode 100644 index edfbdf3821..0000000000 --- a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Celebrity Quote Analysis.md +++ /dev/null @@ -1,170 +0,0 @@ ---- -title: CognitiveServices - Celebrity Quote Analysis -hide_title: true -status: stable ---- -# Celebrity Quote Analysis with The Cognitive Services on Spark - - - - -```python -from synapse.ml.cognitive import * -from pyspark.ml import PipelineModel -from pyspark.sql.functions import col, udf -from pyspark.ml.feature import SQLTransformer -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["VISION_API_KEY"] = getSecret("mmlspark-build-keys", "cognitive-api-key") - 
os.environ["TEXT_API_KEY"] = getSecret("mmlspark-build-keys", "cognitive-api-key") - os.environ["BING_IMAGE_SEARCH_KEY"] = getSecret( - "mmlspark-build-keys", "bing-search-key" - ) - -# put your service keys here -TEXT_API_KEY = os.environ["TEXT_API_KEY"] -VISION_API_KEY = os.environ["VISION_API_KEY"] -BING_IMAGE_SEARCH_KEY = os.environ["BING_IMAGE_SEARCH_KEY"] -``` - -### Extracting celebrity quote images using Bing Image Search on Spark - -Here we define two Transformers to extract celebrity quote images. - - - - -```python -imgsPerBatch = 10 # the number of images Bing will return for each query -offsets = [ - (i * imgsPerBatch,) for i in range(100) -] # A list of offsets, used to page into the search results -bingParameters = spark.createDataFrame(offsets, ["offset"]) - -bingSearch = ( - BingImageSearch() - .setSubscriptionKey(BING_IMAGE_SEARCH_KEY) - .setOffsetCol("offset") - .setQuery("celebrity quotes") - .setCount(imgsPerBatch) - .setOutputCol("images") -) - -# Transformer to that extracts and flattens the richly structured output of Bing Image Search into a simple URL column -getUrls = BingImageSearch.getUrlTransformer("images", "url") -``` - -### Recognizing Images of Celebrities -This block identifies the name of the celebrities for each of the images returned by the Bing Image Search. - - - - -```python -celebs = ( - RecognizeDomainSpecificContent() - .setSubscriptionKey(VISION_API_KEY) - .setModel("celebrities") - .setUrl("https://eastus.api.cognitive.microsoft.com/vision/v2.0/") - .setImageUrlCol("url") - .setOutputCol("celebs") -) - -# Extract the first celebrity we see from the structured response -firstCeleb = SQLTransformer( - statement="SELECT *, celebs.result.celebrities[0].name as firstCeleb FROM __THIS__" -) -``` - -### Reading the quote from the image. -This stage performs OCR on the images to recognize the quotes. 
- - - - -```python -from synapse.ml.stages import UDFTransformer - -recognizeText = ( - RecognizeText() - .setSubscriptionKey(VISION_API_KEY) - .setUrl("https://eastus.api.cognitive.microsoft.com/vision/v2.0/recognizeText") - .setImageUrlCol("url") - .setMode("Printed") - .setOutputCol("ocr") - .setConcurrency(5) -) - - -def getTextFunction(ocrRow): - if ocrRow is None: - return None - return "\n".join([line.text for line in ocrRow.recognitionResult.lines]) - - -# this transformer wil extract a simpler string from the structured output of recognize text -getText = ( - UDFTransformer() - .setUDF(udf(getTextFunction)) - .setInputCol("ocr") - .setOutputCol("text") -) -``` - -### Understanding the Sentiment of the Quote - - - - -```python -sentimentTransformer = ( - TextSentiment() - .setTextCol("text") - .setUrl("https://eastus.api.cognitive.microsoft.com/text/analytics/v3.0/sentiment") - .setSubscriptionKey(TEXT_API_KEY) - .setOutputCol("sentiment") -) - -# Extract the sentiment score from the API response body -getSentiment = SQLTransformer( - statement="SELECT *, sentiment[0].sentiment as sentimentLabel FROM __THIS__" -) -``` - -### Tying it all together - -Now that we have built the stages of our pipeline its time to chain them together into a single model that can be used to process batches of incoming data - - - - -```python -from synapse.ml.stages import SelectColumns - -# Select the final coulmns -cleanupColumns = SelectColumns().setCols( - ["url", "firstCeleb", "text", "sentimentLabel"] -) - -celebrityQuoteAnalysis = PipelineModel( - stages=[ - bingSearch, - getUrls, - celebs, - firstCeleb, - recognizeText, - getText, - sentimentTransformer, - getSentiment, - cleanupColumns, - ] -) - -celebrityQuoteAnalysis.transform(bingParameters).show(5) -``` diff --git a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Create a Multilingual Search Engine from Forms.md b/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Create a Multilingual Search Engine from Forms.md deleted file mode 100644 index 1293223bc2..0000000000 --- a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Create a Multilingual Search Engine from Forms.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: CognitiveServices - Create a Multilingual Search Engine from Forms -hide_title: true -status: stable ---- -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["VISION_API_KEY"] = getSecret("mmlspark-build-keys", "cognitive-api-key") - os.environ["AZURE_SEARCH_KEY"] = getSecret( - "mmlspark-build-keys", "azure-search-key" - ) - os.environ["TRANSLATOR_KEY"] = getSecret("mmlspark-build-keys", "translator-key") - from notebookutils.visualization import display - - -key = os.environ["VISION_API_KEY"] -search_key = os.environ["AZURE_SEARCH_KEY"] -translator_key = os.environ["TRANSLATOR_KEY"] - -search_service = "mmlspark-azure-search" -search_index = "form-demo-index" -``` - - -```python -from pyspark.sql.functions import udf -from pyspark.sql.types import StringType - - -def blob_to_url(blob): - [prefix, postfix] = blob.split("@") - container = prefix.split("/")[-1] - split_postfix = postfix.split("/") - account = split_postfix[0] - filepath = "/".join(split_postfix[1:]) - return "https://{}/{}/{}".format(account, container, 
filepath) - - -df2 = ( - spark.read.format("binaryFile") - .load("wasbs://ignite2021@mmlsparkdemo.blob.core.windows.net/form_subset/*") - .select("path") - .limit(10) - .select(udf(blob_to_url, StringType())("path").alias("url")) - .cache() -) -``` - - -```python -display(df2) -``` - - - - -```python -from synapse.ml.cognitive import AnalyzeInvoices - -analyzed_df = ( - AnalyzeInvoices() - .setSubscriptionKey(key) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("invoices") - .setErrorCol("errors") - .setConcurrency(5) - .transform(df2) - .cache() -) -``` - - -```python -display(analyzed_df) -``` - - -```python -from synapse.ml.cognitive import FormOntologyLearner - -organized_df = ( - FormOntologyLearner() - .setInputCol("invoices") - .setOutputCol("extracted") - .fit(analyzed_df) - .transform(analyzed_df) - .select("url", "extracted.*") - .cache() -) -``` - - -```python -display(organized_df) -``` - - -```python -from pyspark.sql.functions import explode, col - -itemized_df = ( - organized_df.select("*", explode(col("Items")).alias("Item")) - .drop("Items") - .select("Item.*", "*") - .drop("Item") -) -``` - - -```python -display(itemized_df) -``` - - -```python -display(itemized_df.where(col("ProductCode") == 48)) -``` - - -```python -from synapse.ml.cognitive import Translate - -translated_df = ( - Translate() - .setSubscriptionKey(translator_key) - .setLocation("eastus") - .setTextCol("Description") - .setErrorCol("TranslationError") - .setOutputCol("output") - .setToLanguage(["zh-Hans", "fr", "ru", "cy"]) - .setConcurrency(5) - .transform(itemized_df) - .withColumn("Translations", col("output.translations")[0]) - .drop("output", "TranslationError") - .cache() -) -``` - - -```python -display(translated_df) -``` - - -```python -from synapse.ml.cognitive import * -from pyspark.sql.functions import monotonically_increasing_id, lit - -( - translated_df.withColumn("DocID", monotonically_increasing_id().cast("string")) - .withColumn("SearchAction", lit("upload")) - .writeToAzureSearch( - subscriptionKey=search_key, - actionCol="SearchAction", - serviceName=search_service, - indexName=search_index, - keyCol="DocID", - ) -) -``` - - -```python -import requests - -url = "https://{}.search.windows.net/indexes/{}/docs/search?api-version=2019-05-06".format( - search_service, search_index -) -requests.post(url, json={"search": "door"}, headers={"api-key": search_key}).json() -``` - - -```python - -``` diff --git a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Multivariate Anomaly Detection.md b/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Multivariate Anomaly Detection.md deleted file mode 100644 index b434a50236..0000000000 --- a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Multivariate Anomaly Detection.md +++ /dev/null @@ -1,630 +0,0 @@ ---- -title: CognitiveServices - Multivariate Anomaly Detection -hide_title: true -status: stable ---- -# Recipe: Cognitive Services - Multivariate Anomaly Detection -This recipe shows how you can use SynapseML and Azure Cognitive Services on Apache Spark for multivariate anomaly detection. Multivariate anomaly detection allows for the detection of anomalies among many variables or timeseries, taking into account all the inter-correlations and dependencies between the different variables. 
In this scenario, we use SynapseML to train a model for multivariate anomaly detection with Azure Cognitive Services, and we then use the model to infer multivariate anomalies within a dataset containing synthetic measurements from three IoT sensors.
-
-To learn more about the Anomaly Detector Cognitive Service, refer to [this documentation page](https://docs.microsoft.com/en-us/azure/cognitive-services/anomaly-detector/).
-
-### Prerequisites
-- An Azure subscription - [Create one for free](https://azure.microsoft.com/en-us/free/)
-
-### Setup
-#### Create an Anomaly Detector resource
-Follow the instructions below to create an `Anomaly Detector` resource using the Azure portal. Alternatively, you can use the Azure CLI to create this resource.
-
-- In the Azure Portal, click `Create` in your resource group, and then type `Anomaly Detector`. Click on the Anomaly Detector resource.
-- Give the resource a name, and ideally use the same region as the rest of your resource group. Use the default options for the rest, and then click `Review + Create` and then `Create`.
-- Once the Anomaly Detector resource is created, open it and click on the `Keys and Endpoints` panel on the left. Copy the key for the Anomaly Detector resource into the `ANOMALY_API_KEY` environment variable, or store it in the `anomalyKey` variable in the cell below.
-
-#### Create a Storage Account resource
-To save intermediate data, you will need to create an Azure Blob Storage account. Within that storage account, create a container for storing the intermediate data. Make note of the container name, and copy the connection string to that container. You will need it later to populate the `containerName` variable and the `BLOB_CONNECTION_STRING` environment variable.
-
-#### Enter your service keys
-Let's start by setting up the environment variables for our service keys. The next cell sets the `ANOMALY_API_KEY` and the `BLOB_CONNECTION_STRING` environment variables based on the values stored in our Azure Key Vault. If you are running this in your own environment, make sure you set these environment variables before you proceed.
-
-
-```python
-import os
-
-if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia":
-    from pyspark.sql import SparkSession
-
-    spark = SparkSession.builder.getOrCreate()
-    from notebookutils.mssparkutils.credentials import getSecret
-
-    os.environ["ANOMALY_API_KEY"] = getSecret("mmlspark-build-keys", "anomaly-api-key")
-    os.environ["BLOB_CONNECTION_STRING"] = getSecret(
-        "mmlspark-build-keys", "madtest-connection-string"
-    )
-```
-
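If you aren't running on Synapse and don't have a Key Vault wired up, one option is to set the two environment variables yourself before running the rest of the notebook. This is a minimal sketch; the key and connection string values below are placeholders you would replace with your own Anomaly Detector key and Blob Storage connection string.

```python
import os

# Placeholders only -- substitute the key from your Anomaly Detector resource
# and the connection string from your storage account's "Access keys" page.
os.environ.setdefault("ANOMALY_API_KEY", "<your-anomaly-detector-key>")
os.environ.setdefault(
    "BLOB_CONNECTION_STRING",
    "DefaultEndpointsProtocol=https;AccountName=<account>;AccountKey=<key>;EndpointSuffix=core.windows.net",
)
```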
-
-Now, let's read the `ANOMALY_API_KEY` and `BLOB_CONNECTION_STRING` environment variables and set the `containerName` and `location` variables.
-
-
-```python
-# An Anomaly Detector subscription key
-anomalyKey = os.environ["ANOMALY_API_KEY"]
-# A connection string to your blob storage account
-connectionString = os.environ["BLOB_CONNECTION_STRING"]
-# The name of the container where you will store intermediate data
-containerName = "intermediate-data-container-name"
-# The location of the anomaly detector resource that you created
-location = "westus2"
-```
-
- - -Let's import all the necessary modules. - - -```python -import numpy as np -import pandas as pd - -import pyspark -from pyspark.sql.functions import col -from pyspark.sql.functions import lit -from pyspark.sql.types import DoubleType -import matplotlib.pyplot as plt - -import synapse.ml -from synapse.ml.cognitive import * -``` - - - -
- - -Now, let's read our sample data into a Spark DataFrame. - - -```python -df = ( - spark.read.format("csv") - .option("header", "true") - .load("wasbs://publicwasb@mmlspark.blob.core.windows.net/MVAD/sample.csv") -) - -df = ( - df.withColumn("sensor_1", col("sensor_1").cast(DoubleType())) - .withColumn("sensor_2", col("sensor_2").cast(DoubleType())) - .withColumn("sensor_3", col("sensor_3").cast(DoubleType())) -) - -# Let's inspect the dataframe: -df.show(5) -``` - -We can now create an `estimator` object, which will be used to train our model. In the cell below, we specify the start and end times for the training data. We also specify the input columns to use, and the name of the column that contains the timestamps. Finally, we specify the number of data points to use in the anomaly detection sliding window, and we set the connection string to the Azure Blob Storage Account. - - -```python -trainingStartTime = "2020-06-01T12:00:00Z" -trainingEndTime = "2020-07-02T17:55:00Z" -intermediateSaveDir = "intermediateData" -timestampColumn = "timestamp" -inputColumns = ["sensor_1", "sensor_2", "sensor_3"] - -estimator = ( - FitMultivariateAnomaly() - .setSubscriptionKey(anomalyKey) - .setLocation(location) - .setStartTime(trainingStartTime) - .setEndTime(trainingEndTime) - .setContainerName(containerName) - .setIntermediateSaveDir(intermediateSaveDir) - .setTimestampCol(timestampColumn) - .setInputCols(inputColumns) - .setSlidingWindow(200) - .setConnectionString(connectionString) -) -``` - - - -
- - -Now that we have created the `estimator`, let's fit it to the data: - - -```python -model = estimator.fit(df) -``` - - - -
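Training can take a while, so you may want to persist the fitted model rather than refitting it in every session. Most SynapseML stages are standard Spark ML models; assuming the fitted multivariate anomaly model supports Spark ML's writer interface in your SynapseML version (worth verifying), a sketch would look like the following, with a placeholder output path:

```python
# Persist the fitted model so it can be reloaded later (path is a placeholder;
# assumes the model implements Spark ML's MLWritable interface)
model_path = "abfss://<container>@<account>.dfs.core.windows.net/models/mvad-sensor-model"
model.write().overwrite().save(model_path)
```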
-
-Once the training is done, we can use the model for inference. The code in the next cell specifies the start and end times for the data we would like to detect the anomalies in. It will then show the results.
-
-
-```python
-inferenceStartTime = "2020-07-02T18:00:00Z"
-inferenceEndTime = "2020-07-06T05:15:00Z"
-
-result = (
-    model.setStartTime(inferenceStartTime)
-    .setEndTime(inferenceEndTime)
-    .setOutputCol("results")
-    .setErrorCol("errors")
-    .setInputCols(inputColumns)
-    .setTimestampCol(timestampColumn)
-    .transform(df)
-)
-
-result.show(5)
-```
-
-When we called `.show(5)` in the previous cell, it showed us the first five rows in the dataframe. The results were all `null` because they were not inside the inference window.
-
-To show the results only for the inferred data, let's select the columns we need. We can then order the rows in the dataframe in ascending order, and filter the result to only show the rows that are in the range of the inference window. In our case `inferenceEndTime` is the same as the last row in the dataframe, so we can ignore that.
-
-Finally, to be able to better plot the results, let's convert the Spark dataframe to a Pandas dataframe.
-
-This is what the next cell does:
-
-
-```python
-rdf = (
-    result.select(
-        "timestamp",
-        *inputColumns,
-        "results.contributors",
-        "results.isAnomaly",
-        "results.severity"
-    )
-    .orderBy("timestamp", ascending=True)
-    .filter(col("timestamp") >= lit(inferenceStartTime))
-    .toPandas()
-)
-
-rdf
-```
(You may see a `UserWarning` from `toPandas` noting that the Arrow optimization fell back to the non-Arrow code path because the nested `contributors` column type isn't supported by Arrow; the conversion still succeeds.)

|     | timestamp            | sensor_1  | sensor_2 | sensor_3  | contributors                                       | isAnomaly | severity |
|-----|----------------------|-----------|----------|-----------|----------------------------------------------------|-----------|----------|
| 0   | 2020-07-02T18:00:00Z | 1.069680  | 0.393173 | 3.129125  | None                                                | False     | 0.00000  |
| 1   | 2020-07-02T18:05:00Z | 0.932784  | 0.214959 | 3.077339  | [(0.5516611337661743, series_1), (0.3133429884...  | True      | 0.06478  |
| 2   | 2020-07-02T18:10:00Z | 1.012214  | 0.466037 | 2.909561  | None                                                | False     | 0.00000  |
| 3   | 2020-07-02T18:15:00Z | 1.122182  | 0.398438 | 3.029489  | None                                                | False     | 0.00000  |
| 4   | 2020-07-02T18:20:00Z | 1.091310  | 0.282137 | 2.948016  | None                                                | False     | 0.00000  |
| ... | ...                  | ...       | ...      | ...       | ...                                                 | ...       | ...      |
| 995 | 2020-07-06T04:55:00Z | -0.443438 | 0.768980 | -0.710800 | None                                                | False     | 0.00000  |
| 996 | 2020-07-06T05:00:00Z | -0.529400 | 0.822140 | -0.944681 | None                                                | False     | 0.00000  |
| 997 | 2020-07-06T05:05:00Z | -0.377911 | 0.738591 | -0.871468 | None                                                | False     | 0.00000  |
| 998 | 2020-07-06T05:10:00Z | -0.501993 | 0.727775 | -0.786263 | None                                                | False     | 0.00000  |
| 999 | 2020-07-06T05:15:00Z | -0.404138 | 0.806980 | -0.883521 | None                                                | False     | 0.00000  |

1000 rows × 7 columns
- - -Let's now format the `contributors` column that stores the contribution score from each sensor to the detected anomalies. The next cell formats this data, and splits the contribution score of each sensor into its own column. - - -```python -def parse(x): - if type(x) is list: - return dict([item[::-1] for item in x]) - else: - return {"series_0": 0, "series_1": 0, "series_2": 0} - - -rdf["contributors"] = rdf["contributors"].apply(parse) -rdf = pd.concat( - [rdf.drop(["contributors"], axis=1), pd.json_normalize(rdf["contributors"])], axis=1 -) -rdf -``` - - - -
|     | timestamp            | sensor_1  | sensor_2 | sensor_3  | isAnomaly | severity | series_0 | series_1 | series_2 |
|-----|----------------------|-----------|----------|-----------|-----------|----------|----------|----------|----------|
| 0   | 2020-07-02T18:00:00Z | 1.069680  | 0.393173 | 3.129125  | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |
| 1   | 2020-07-02T18:05:00Z | 0.932784  | 0.214959 | 3.077339  | True      | 0.06478  | 0.313343 | 0.551661 | 0.134996 |
| 2   | 2020-07-02T18:10:00Z | 1.012214  | 0.466037 | 2.909561  | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |
| 3   | 2020-07-02T18:15:00Z | 1.122182  | 0.398438 | 3.029489  | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |
| 4   | 2020-07-02T18:20:00Z | 1.091310  | 0.282137 | 2.948016  | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |
| ... | ...                  | ...       | ...      | ...       | ...       | ...      | ...      | ...      | ...      |
| 995 | 2020-07-06T04:55:00Z | -0.443438 | 0.768980 | -0.710800 | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |
| 996 | 2020-07-06T05:00:00Z | -0.529400 | 0.822140 | -0.944681 | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |
| 997 | 2020-07-06T05:05:00Z | -0.377911 | 0.738591 | -0.871468 | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |
| 998 | 2020-07-06T05:10:00Z | -0.501993 | 0.727775 | -0.786263 | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |
| 999 | 2020-07-06T05:15:00Z | -0.404138 | 0.806980 | -0.883521 | False     | 0.00000  | 0.000000 | 0.000000 | 0.000000 |

1000 rows × 9 columns
- - -Great! We now have the contribution scores of sensors 1, 2, and 3 in the `series_0`, `series_1`, and `series_2` columns respectively. - -Let's run the next cell to plot the results. The `minSeverity` parameter in the first line specifies the minimum severity of the anomalies to be plotted. - - -```python -minSeverity = 0.1 - - -####### Main Figure ####### -plt.figure(figsize=(23, 8)) -plt.plot( - rdf["timestamp"], - rdf["sensor_1"], - color="tab:orange", - line, - linewidth=2, - label="sensor_1", -) -plt.plot( - rdf["timestamp"], - rdf["sensor_2"], - color="tab:green", - line, - linewidth=2, - label="sensor_2", -) -plt.plot( - rdf["timestamp"], - rdf["sensor_3"], - color="tab:blue", - line, - linewidth=2, - label="sensor_3", -) -plt.grid(axis="y") -plt.tick_params(axis="x", which="both", bottom=False, labelbottom=False) -plt.legend() - -anoms = list(rdf["severity"] >= minSeverity) -_, _, ymin, ymax = plt.axis() -plt.vlines(np.where(anoms), ymin=ymin, ymax=ymax, color="r", alpha=0.8) - -plt.legend() -plt.title( - "A plot of the values from the three sensors with the detected anomalies highlighted in red." -) -plt.show() - -####### Severity Figure ####### -plt.figure(figsize=(23, 1)) -plt.tick_params(axis="x", which="both", bottom=False, labelbottom=False) -plt.plot( - rdf["timestamp"], - rdf["severity"], - color="black", - line, - linewidth=2, - label="Severity score", -) -plt.plot( - rdf["timestamp"], - [minSeverity] * len(rdf["severity"]), - color="red", - line, - linewidth=1, - label="minSeverity", -) -plt.grid(axis="y") -plt.legend() -plt.ylim([0, 1]) -plt.title("Severity of the detected anomalies") -plt.show() - -####### Contributors Figure ####### -plt.figure(figsize=(23, 1)) -plt.tick_params(axis="x", which="both", bottom=False, labelbottom=False) -plt.bar( - rdf["timestamp"], rdf["series_0"], width=2, color="tab:orange", label="sensor_1" -) -plt.bar( - rdf["timestamp"], - rdf["series_1"], - width=2, - color="tab:green", - label="sensor_2", - bottom=rdf["series_0"], -) -plt.bar( - rdf["timestamp"], - rdf["series_2"], - width=2, - color="tab:blue", - label="sensor_3", - bottom=rdf["series_0"] + rdf["series_1"], -) -plt.grid(axis="y") -plt.legend() -plt.ylim([0, 1]) -plt.title("The contribution of each sensor to the detected anomaly") -plt.show() -``` - - - -The plots above show the raw data from the sensors (inside the inference window) in orange, green, and blue. The red vertical lines in the first figure show the detected anomalies that have a severity greater than or equal to `minSeverity`. - -The second plot shows the severity score of all the detected anomalies, with the `minSeverity` threshold shown in the dotted red line. - -Finally, the last plot shows the contribution of the data from each sensor to the detected anomalies. This helps us diagnose and understand the most likely cause of each anomaly. diff --git a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - OpenAI.md b/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - OpenAI.md deleted file mode 100644 index 9366e724c3..0000000000 --- a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - OpenAI.md +++ /dev/null @@ -1,181 +0,0 @@ ---- -title: CognitiveServices - OpenAI -hide_title: true -status: stable ---- -# Cognitive Services - OpenAI - -Large language models are capable of successfully completing multiple downstream tasks with little training data required from users. 
This is because these models are already trained using enormous amounts of text. The 175 billion-parameter GPT-3 model for example, can generate text and even code given a short prompt containing instructions. - -While large models are becoming more powerful, more multimodal, and relatively cheaper to train, inferencing also needs to scale to handle larger volume of requests from customers. Using SynapseML, customers can now leverage enterprise grade models from Azure OpenAI Service to apply advanced language models on data stored in Azure Synapse Analytics. - -SynapseML is an open source library with a set of consistent APIs that integrate with a number of deep learning and data science tools, including Azure OpenAI. The OpenAI project itself maintains a [great tool](https://github.com/openai/openai-quickstart-node) for experimenting with GPT-3 to get an idea of how it works. SynapseML's integration with Azure OpenAI provides a simple and intuitive coding interface that can be called from Scala, Python or R. It is intended for use in industrial-grade applications, but it is also flexible enough to nimbly handle the demands of consumer website. - -This tutorial walks you through a couple steps you need to perform to integrate Azure OpenAI Services to Azure SynapseML and how to apply the large language models available in Azure OpenAI at a distributed scale. - -First, set up some administrative details. - - -``` -import os - -service_name = "M3Test11" -deployment_name = "text-davinci-001" -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["OPENAI_API_KEY"] = getSecret("mmlspark-build-keys", "openai-api-key") - from notebookutils.visualization import display - -# put your service keys here -key = os.environ["OPENAI_API_KEY"] -location = "eastus" -assert key is not None and service_name is not None -``` - -Next, create a dataframe consisting of a series of rows, with one prompt per row. Each prompt is followed by a comma and then ensconsed in a set of parentheses. This format forms a tuple. Then add a string to identify the column containing the prompts. - - -``` -# Create or load a dataframe of text, can load directly from adls or other databases - -df = spark.createDataFrame( - [ - ("Once upon a time",), - ("Hello my name is",), - ("The best code is code thats",), - ("The meaning of life is",), - ] -).toDF("prompt") -``` - -To set up the completion interaction with the OpenAI service, create an `OpenAICompletion` object. Set `MaxTokens` to 200. A token is around 4 characters, and this limit applies to the some of the prompt and the result. Set the prompt column with the same name used to identify the prompt column in the dataframe. - - -``` -from synapse.ml.cognitive import OpenAICompletion - -completion = ( - OpenAICompletion() - .setSubscriptionKey(key) - .setDeploymentName(deployment_name) - .setUrl("https://{}.openai.azure.com/".format(service_name)) - .setMaxTokens(200) - .setPromptCol("prompt") - .setOutputCol("completions") -) -``` - -Now that you have the dataframe and the completion object, you can obtain the prompt completions. - - -``` -# Map the dataframe through OpenAI -completed_df = completion.transform(df).cache() -``` - -And display them. 
- - -``` -from pyspark.sql.functions import col - -display(completed_df.select(col("prompt"), col("completions.choices.text"))) -``` - -The method above makes several requests to the service, one for each prompt. To complete multiple prompts in a single request, use batch mode. First, in the OpenAICompletion object, instead of setting the Prompt column to "Prompt", specify "batchPrompt" for the BatchPrompt column. - -The method used above makes several requests to the service, one for each prompt. To complete multiple prompts in a single request, use batch mode. - -To do so, create a dataframe with a list of prompts per row. - -In the `OpenAICompletion` object, rather than setting the `prompt` column, set the `batchPrompt` column instead. - -In the call to `transform` a request will then be made per row. Since there are multiple prompts in a single row, each request will be sent with all prompts in that row. The results will contain a row for each row in the request. - -Note that as of this writing there is currently a limit of 20 prompts in a single request, as well as a hard limit of 2048 "tokens", or approximately 1500 words. - - -``` -df = spark.createDataFrame( - [ - (["The time has come", "Pleased to", "Today stocks", "Here's to"],), - (["The only thing", "Ask not what", "Every litter", "I am"],), - ] -).toDF("batchPrompt") - -batchCompletion = ( - OpenAICompletion() - .setSubscriptionKey(key) - .setDeploymentName(deployment_name) - .setUrl("https://{}.openai.azure.com/".format(service_name)) - .setMaxTokens(200) - .setBatchPromptCol("batchPrompt") - .setOutputCol("completions") -) - -completed_df = batchCompletion.transform(df).cache() -display(completed_df.select(col("batchPrompt"), col("completions.choices.text"))) -``` - -If your data is in column format, you can transpose it to row format using SynapseML's `FixedMiniBatcherTransformer`, along with help from Spark's `coalesce` method. - - -``` -from pyspark.sql.types import StringType -from synapse.ml.stages import FixedMiniBatchTransformer - -df = spark.createDataFrame( - ["This land is", "If I had a", "How many roads", "You can get anything"], - StringType(), -).toDF("batchPrompt") - -# Force a single partition -df = df.coalesce(1) - -df = FixedMiniBatchTransformer(batchSize=4, buffered=False).transform(df) - -completed_df = batchCompletion.transform(df).cache() -display(completed_df.select(col("batchPrompt"), col("completions.choices.text"))) -``` - -You can try your hand at translation. - - -``` -df = spark.createDataFrame( - [ - ("Japanese: Ookina hako\nEnglish: Big box\nJapanese: Midori tako\nEnglish:",), - ( - "French: Quel heure et il au Montreal?\nEnglish: What time is it in Montreal?\nFrench: Ou est le poulet?\nEnglish:", - ), - ] -).toDF("prompt") - -completed_df = completion.transform(df).cache() -display(completed_df.select(col("prompt"), col("completions.choices.text"))) -``` - -You can prompt for general knowledge. 
- - -``` -df = spark.createDataFrame( - [ - ( - "Q: Where is the Grand Canyon?\nA: The Grand Canyon is in Arizona.\n\nQ: What is the weight of the Burj Khalifa in kilograms?\nA:", - ) - ] -).toDF("prompt") - -completed_df = completion.transform(df).cache() -display(completed_df.select(col("prompt"), col("completions.choices.text"))) -``` - - -``` - -``` diff --git a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Overview.md b/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Overview.md deleted file mode 100644 index 55ddef3efd..0000000000 --- a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Overview.md +++ /dev/null @@ -1,540 +0,0 @@ ---- -title: CognitiveServices - Overview -hide_title: true -status: stable ---- - - -# Cognitive Services - -[Azure Cognitive Services](https://azure.microsoft.com/en-us/services/cognitive-services/) are a suite of APIs, SDKs, and services available to help developers build intelligent applications without having direct AI or data science skills or knowledge by enabling developers to easily add cognitive features into their applications. The goal of Azure Cognitive Services is to help developers create applications that can see, hear, speak, understand, and even begin to reason. The catalog of services within Azure Cognitive Services can be categorized into five main pillars - Vision, Speech, Language, Web Search, and Decision. - -## Usage - -### Vision -[**Computer Vision**](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/) -- Describe: provides description of an image in human readable language ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/DescribeImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DescribeImage)) -- Analyze (color, image type, face, adult/racy content): analyzes visual features of an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeImage)) -- OCR: reads text from an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/OCR.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.OCR)) -- Recognize Text: reads text from an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/RecognizeText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.RecognizeText)) -- Thumbnail: generates a thumbnail of user-specified size from the image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/GenerateThumbnails.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.GenerateThumbnails)) -- Recognize domain-specific content: recognizes domain-specific content (celebrity, landmark) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/RecognizeDomainSpecificContent.html), 
[Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.RecognizeDomainSpecificContent)) -- Tag: identifies list of words that are relevant to the in0put image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/TagImage.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.TagImage)) - -[**Face**](https://azure.microsoft.com/en-us/services/cognitive-services/face/) -- Detect: detects human faces in an image ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/DetectFace.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DetectFace)) -- Verify: verifies whether two faces belong to a same person, or a face belongs to a person ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/VerifyFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.VerifyFaces)) -- Identify: finds the closest matches of the specific query person face from a person group ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/IdentifyFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.IdentifyFaces)) -- Find similar: finds similar faces to the query face in a face list ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/FindSimilarFace.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.FindSimilarFace)) -- Group: divides a group of faces into disjoint groups based on similarity ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/GroupFaces.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.GroupFaces)) - -### Speech -[**Speech Services**](https://azure.microsoft.com/en-us/services/cognitive-services/speech-services/) -- Speech-to-text: transcribes audio streams ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/SpeechToText.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.SpeechToText)) -- Conversation Transcription: transcribes audio streams into live transcripts with identified speakers. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/ConversationTranscription.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.ConversationTranscription)) -- Text to Speech: Converts text to realistic audio ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/TextToSpeech.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.TextToSpeech)) - - -### Language -[**Text Analytics**](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/) -- Language detection: detects language of the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/LanguageDetector.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.LanguageDetector)) -- Key phrase extraction: identifies the key talking points in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/KeyPhraseExtractor.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.KeyPhraseExtractor)) -- Named entity recognition: identifies known entities and general named entities in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/NER.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.NER)) -- Sentiment analysis: returns a score betwee 0 and 1 indicating the sentiment in the input text ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/TextSentiment.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.TextSentiment)) -- Healthcare Entity Extraction: Extracts medical entities and relationships from text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/HealthcareSDK.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.HealthcareSDK)) - - -### Translation -[**Translator**](https://azure.microsoft.com/en-us/services/cognitive-services/translator/) -- Translate: Translates text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/Translate.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.Translate)) -- Transliterate: Converts text in one language from one script to another script. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/Transliterate.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.Transliterate)) -- Detect: Identifies the language of a piece of text. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/Detect.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.Detect)) -- BreakSentence: Identifies the positioning of sentence boundaries in a piece of text. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/BreakSentence.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.BreakSentence)) -- Dictionary Lookup: Provides alternative translations for a word and a small number of idiomatic phrases. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/DictionaryLookup.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DictionaryLookup)) -- Dictionary Examples: Provides examples that show how terms in the dictionary are used in context. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/DictionaryExamples.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DictionaryExamples)) -- Document Translation: Translates documents across all supported languages and dialects while preserving document structure and data format. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/DocumentTranslator.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DocumentTranslator)) - -### Form Recognizer -[**Form Recognizer**](https://azure.microsoft.com/en-us/services/form-recognizer/) -- Analyze Layout: Extract text and layout information from a given document. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeLayout.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeLayout)) -- Analyze Receipts: Detects and extracts data from receipts using optical character recognition (OCR) and our receipt model, enabling you to easily extract structured data from receipts such as merchant name, merchant phone number, transaction date, transaction total, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeReceipts.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeReceipts)) -- Analyze Business Cards: Detects and extracts data from business cards using optical character recognition (OCR) and our business card model, enabling you to easily extract structured data from business cards such as contact names, company names, phone numbers, emails, and more. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeBusinessCards.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeBusinessCards)) -- Analyze Invoices: Detects and extracts data from invoices using optical character recognition (OCR) and our invoice understanding deep learning models, enabling you to easily extract structured data from invoices such as customer, vendor, invoice ID, invoice due date, total, invoice amount due, tax amount, ship to, bill to, line items and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeInvoices.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeInvoices)) -- Analyze ID Documents: Detects and extracts data from identification documents using optical character recognition (OCR) and our ID document model, enabling you to easily extract structured data from ID documents such as first name, last name, date of birth, document number, and more. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeIDDocuments.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeIDDocuments)) -- Analyze Custom Form: Extracts information from forms (PDFs and images) into structured data based on a model created from a set of representative training forms. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/AnalyzeCustomModel.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AnalyzeCustomModel)) -- Get Custom Model: Get detailed information about a custom model. ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/GetCustomModel.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/ListCustomModels.html)) -- List Custom Models: Get information about all custom models. 
([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/ListCustomModels.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.ListCustomModels)) - -### Decision -[**Anomaly Detector**](https://azure.microsoft.com/en-us/services/cognitive-services/anomaly-detector/) -- Anomaly status of latest point: generates a model using preceding points and determines whether the latest point is anomalous ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/DetectLastAnomaly.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DetectLastAnomaly)) -- Find anomalies: generates a model using an entire series and finds anomalies in the series ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/DetectAnomalies.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.DetectAnomalies)) - -### Search -- [Bing Image search](https://azure.microsoft.com/en-us/services/cognitive-services/bing-image-search-api/) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/com/microsoft/azure/synapse/ml/cognitive/BingImageSearch.html), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.BingImageSearch)) -- [Azure Cognitive search](https://docs.microsoft.com/en-us/azure/search/search-what-is-azure-search) ([Scala](https://mmlspark.blob.core.windows.net/docs/0.10.0/scala/index.html#com.microsoft.azure.synapse.ml.cognitive.AzureSearchWriter$), [Python](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.cognitive.html#module-synapse.ml.cognitive.AzureSearchWriter)) - -## Prerequisites - -1. Follow the steps in [Getting started](https://docs.microsoft.com/en-us/azure/cognitive-services/big-data/getting-started) to set up your Azure Databricks and Cognitive Services environment. This tutorial shows you how to install SynapseML and how to create your Spark cluster in Databricks. -1. After you create a new notebook in Azure Databricks, copy the **Shared code** below and paste into a new cell in your notebook. -1. Choose a service sample, below, and copy paste it into a second new cell in your notebook. -1. Replace any of the service subscription key placeholders with your own key. -1. Choose the run button (triangle icon) in the upper right corner of the cell, then select **Run Cell**. -1. View results in a table below the cell. 
- -## Shared code - -To get started, we'll need to add this code to the project: - - -```python -from pyspark.sql.functions import udf, col -from synapse.ml.io.http import HTTPTransformer, http_udf -from requests import Request -from pyspark.sql.functions import lit -from pyspark.ml import PipelineModel -from pyspark.sql.functions import col -import os -``` - - -```python -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["ANOMALY_API_KEY"] = getSecret( - "mmlspark-build-keys", "cognitive-api-key" - ) - os.environ["COGNITIVE_SERVICE_KEY"] = getSecret( - "mmlspark-build-keys", "cognitive-api-key" - ) - os.environ["BING_IMAGE_SEARCH_KEY"] = getSecret( - "mmlspark-build-keys", "bing-search-key" - ) - os.environ["TRANSLATOR_KEY"] = getSecret("mmlspark-build-keys", "translator-key") - os.environ["AZURE_SEARCH_KEY"] = getSecret( - "mmlspark-build-keys", "azure-search-key" - ) - from notebookutils.visualization import display -``` - - -```python -from synapse.ml.cognitive import * - -# A general Cognitive Services key for Text Analytics, Computer Vision and Form Recognizer (or use separate keys that belong to each service) -service_key = os.environ["COGNITIVE_SERVICE_KEY"] -# A Bing Search v7 subscription key -bing_search_key = os.environ["BING_IMAGE_SEARCH_KEY"] -# An Anomaly Dectector subscription key -anomaly_key = os.environ["ANOMALY_API_KEY"] -# A Translator subscription key -translator_key = os.environ["TRANSLATOR_KEY"] -# An Azure search key -search_key = os.environ["AZURE_SEARCH_KEY"] -``` - -## Text Analytics sample - -The [Text Analytics](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/) service provides several algorithms for extracting intelligent insights from text. For example, we can find the sentiment of given input text. The service will return a score between 0.0 and 1.0 where low scores indicate negative sentiment and high score indicates positive sentiment. This sample uses three simple sentences and returns the sentiment for each. - - -```python -# Create a dataframe that's tied to it's column names -df = spark.createDataFrame( - [ - ("I am so happy today, its sunny!", "en-US"), - ("I am frustrated by this rush hour traffic", "en-US"), - ("The cognitive services on spark aint bad", "en-US"), - ], - ["text", "language"], -) - -# Run the Text Analytics service with options -sentiment = ( - TextSentiment() - .setTextCol("text") - .setLocation("eastus") - .setSubscriptionKey(service_key) - .setOutputCol("sentiment") - .setErrorCol("error") - .setLanguageCol("language") -) - -# Show the results of your text query in a table format -display( - sentiment.transform(df).select( - "text", col("sentiment")[0].getItem("sentiment").alias("sentiment") - ) -) -``` - -## Text Analytics for Health Sample - -The [Text Analytics for Heatlth Service](https://docs.microsoft.com/en-us/azure/cognitive-services/language-service/text-analytics-for-health/overview?tabs=ner) extracts and labels relevant medical information from unstructured texts such as doctor's notes, discharge summaries, clinical documents, and electronic health records. 
- - -```python -df = spark.createDataFrame( - [ - ("20mg of ibuprofen twice a day",), - ("1tsp of Tylenol every 4 hours",), - ("6-drops of Vitamin B-12 every evening",), - ], - ["text"], -) - -healthcare = ( - HealthcareSDK() - .setSubscriptionKey(service_key) - .setLocation("eastus") - .setLanguage("en") - .setOutputCol("response") -) - -display(healthcare.transform(df)) -``` - -## Translator sample -[Translator](https://azure.microsoft.com/en-us/services/cognitive-services/translator/) is a cloud-based machine translation service and is part of the Azure Cognitive Services family of cognitive APIs used to build intelligent apps. Translator is easy to integrate in your applications, websites, tools, and solutions. It allows you to add multi-language user experiences in 90 languages and dialects and can be used for text translation with any operating system. In this sample, we do a simple text translation by providing the sentences you want to translate and target languages you want to translate to. - - -```python -from pyspark.sql.functions import col, flatten - -# Create a dataframe including sentences you want to translate -df = spark.createDataFrame( - [(["Hello, what is your name?", "Bye"],)], - [ - "text", - ], -) - -# Run the Translator service with options -translate = ( - Translate() - .setSubscriptionKey(translator_key) - .setLocation("eastus") - .setTextCol("text") - .setToLanguage(["zh-Hans"]) - .setOutputCol("translation") -) - -# Show the results of the translation. -display( - translate.transform(df) - .withColumn("translation", flatten(col("translation.translations"))) - .withColumn("translation", col("translation.text")) - .select("translation") -) -``` - -## Form Recognizer sample -[Form Recognizer](https://azure.microsoft.com/en-us/services/form-recognizer/) is a part of Azure Applied AI Services that lets you build automated data processing software using machine learning technology. Identify and extract text, key/value pairs, selection marks, tables, and structure from your documents—the service outputs structured data that includes the relationships in the original file, bounding boxes, confidence and more. In this sample, we analyze a business card image and extract its information into structured data. - - -```python -from pyspark.sql.functions import col, explode - -# Create a dataframe containing the source files -imageDf = spark.createDataFrame( - [ - ( - "https://mmlspark.blob.core.windows.net/datasets/FormRecognizer/business_card.jpg", - ) - ], - [ - "source", - ], -) - -# Run the Form Recognizer service -analyzeBusinessCards = ( - AnalyzeBusinessCards() - .setSubscriptionKey(service_key) - .setLocation("eastus") - .setImageUrlCol("source") - .setOutputCol("businessCards") -) - -# Show the results of recognition. -display( - analyzeBusinessCards.transform(imageDf) - .withColumn( - "documents", explode(col("businessCards.analyzeResult.documentResults.fields")) - ) - .select("source", "documents") -) -``` - -## Computer Vision sample - -[Computer Vision](https://azure.microsoft.com/en-us/services/cognitive-services/computer-vision/) analyzes images to identify structure such as faces, objects, and natural-language descriptions. In this sample, we tag a list of images. Tags are one-word descriptions of things in the image like recognizable objects, people, scenery, and actions. 
- - -```python -# Create a dataframe with the image URLs -df = spark.createDataFrame( - [ - ( - "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/objects.jpg", - ), - ( - "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/dog.jpg", - ), - ( - "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/house.jpg", - ), - ], - [ - "image", - ], -) - -# Run the Computer Vision service. Analyze Image extracts infortmation from/about the images. -analysis = ( - AnalyzeImage() - .setLocation("eastus") - .setSubscriptionKey(service_key) - .setVisualFeatures( - ["Categories", "Color", "Description", "Faces", "Objects", "Tags"] - ) - .setOutputCol("analysis_results") - .setImageUrlCol("image") - .setErrorCol("error") -) - -# Show the results of what you wanted to pull out of the images. -display(analysis.transform(df).select("image", "analysis_results.description.tags")) -``` - -## Bing Image Search sample - -[Bing Image Search](https://azure.microsoft.com/en-us/services/cognitive-services/bing-image-search-api/) searches the web to retrieve images related to a user's natural language query. In this sample, we use a text query that looks for images with quotes. It returns a list of image URLs that contain photos related to our query. - - -```python -# Number of images Bing will return per query -imgsPerBatch = 10 -# A list of offsets, used to page into the search results -offsets = [(i * imgsPerBatch,) for i in range(100)] -# Since web content is our data, we create a dataframe with options on that data: offsets -bingParameters = spark.createDataFrame(offsets, ["offset"]) - -# Run the Bing Image Search service with our text query -bingSearch = ( - BingImageSearch() - .setSubscriptionKey(bing_search_key) - .setOffsetCol("offset") - .setQuery("Martin Luther King Jr. quotes") - .setCount(imgsPerBatch) - .setOutputCol("images") -) - -# Transformer that extracts and flattens the richly structured output of Bing Image Search into a simple URL column -getUrls = BingImageSearch.getUrlTransformer("images", "url") - -# This displays the full results returned, uncomment to use -# display(bingSearch.transform(bingParameters)) - -# Since we have two services, they are put into a pipeline -pipeline = PipelineModel(stages=[bingSearch, getUrls]) - -# Show the results of your search: image URLs -display(pipeline.transform(bingParameters)) -``` - -## Speech-to-Text sample -The [Speech-to-text](https://azure.microsoft.com/en-us/services/cognitive-services/speech-services/) service converts streams or files of spoken audio to text. In this sample, we transcribe one audio file. 
- - -```python -# Create a dataframe with our audio URLs, tied to the column called "url" -df = spark.createDataFrame( - [("https://mmlspark.blob.core.windows.net/datasets/Speech/audio2.wav",)], ["url"] -) - -# Run the Speech-to-text service to translate the audio into text -speech_to_text = ( - SpeechToTextSDK() - .setSubscriptionKey(service_key) - .setLocation("eastus") - .setOutputCol("text") - .setAudioDataCol("url") - .setLanguage("en-US") - .setProfanity("Masked") -) - -# Show the results of the translation -display(speech_to_text.transform(df).select("url", "text.DisplayText")) -``` - -## Text-to-Speech sample -[Text to speech](https://azure.microsoft.com/en-us/services/cognitive-services/text-to-speech/#overview) is a service that allows one to build apps and services that speak naturally, choosing from more than 270 neural voices across 119 languages and variants. - - -```python -from synapse.ml.cognitive import TextToSpeech - -# Create a dataframe with text and an output file location -df = spark.createDataFrame( - [ - ( - "Reading out lod is fun! Check out aka.ms/spark for more information", - "dbfs:/output.mp3", - ) - ], - ["text", "output_file"], -) - -tts = ( - TextToSpeech() - .setSubscriptionKey(service_key) - .setTextCol("text") - .setLocation("eastus") - .setVoiceName("en-US-JennyNeural") - .setOutputFileCol("output_file") -) - -# Check to make sure there were no errors during audio creation -display(tts.transform(df)) -``` - -## Anomaly Detector sample - -[Anomaly Detector](https://azure.microsoft.com/en-us/services/cognitive-services/anomaly-detector/) is great for detecting irregularities in your time series data. In this sample, we use the service to find anomalies in the entire time series. - - -```python -# Create a dataframe with the point data that Anomaly Detector requires -df = spark.createDataFrame( - [ - ("1972-01-01T00:00:00Z", 826.0), - ("1972-02-01T00:00:00Z", 799.0), - ("1972-03-01T00:00:00Z", 890.0), - ("1972-04-01T00:00:00Z", 900.0), - ("1972-05-01T00:00:00Z", 766.0), - ("1972-06-01T00:00:00Z", 805.0), - ("1972-07-01T00:00:00Z", 821.0), - ("1972-08-01T00:00:00Z", 20000.0), - ("1972-09-01T00:00:00Z", 883.0), - ("1972-10-01T00:00:00Z", 898.0), - ("1972-11-01T00:00:00Z", 957.0), - ("1972-12-01T00:00:00Z", 924.0), - ("1973-01-01T00:00:00Z", 881.0), - ("1973-02-01T00:00:00Z", 837.0), - ("1973-03-01T00:00:00Z", 9000.0), - ], - ["timestamp", "value"], -).withColumn("group", lit("series1")) - -# Run the Anomaly Detector service to look for irregular data -anamoly_detector = ( - SimpleDetectAnomalies() - .setSubscriptionKey(anomaly_key) - .setLocation("eastus") - .setTimestampCol("timestamp") - .setValueCol("value") - .setOutputCol("anomalies") - .setGroupbyCol("group") - .setGranularity("monthly") -) - -# Show the full results of the analysis with the anomalies marked as "True" -display( - anamoly_detector.transform(df).select("timestamp", "value", "anomalies.isAnomaly") -) -``` - -## Arbitrary web APIs - -With HTTP on Spark, any web service can be used in your big data pipeline. In this example, we use the [World Bank API](http://api.worldbank.org/v2/country/) to get information about various countries around the world. 
- - -```python -# Use any requests from the python requests library - - -def world_bank_request(country): - return Request( - "GET", "http://api.worldbank.org/v2/country/{}?format=json".format(country) - ) - - -# Create a dataframe with spcificies which countries we want data on -df = spark.createDataFrame([("br",), ("usa",)], ["country"]).withColumn( - "request", http_udf(world_bank_request)(col("country")) -) - -# Much faster for big data because of the concurrency :) -client = ( - HTTPTransformer().setConcurrency(3).setInputCol("request").setOutputCol("response") -) - -# Get the body of the response - - -def get_response_body(resp): - return resp.entity.content.decode() - - -# Show the details of the country data returned -display( - client.transform(df).select( - "country", udf(get_response_body)(col("response")).alias("response") - ) -) -``` - -## Azure Cognitive search sample - -In this example, we show how you can enrich data using Cognitive Skills and write to an Azure Search Index using SynapseML. - - -```python -search_service = "mmlspark-azure-search" -search_index = "test-33467690" - -df = spark.createDataFrame( - [ - ( - "upload", - "0", - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test1.jpg", - ), - ( - "upload", - "1", - "https://mmlspark.blob.core.windows.net/datasets/DSIR/test2.jpg", - ), - ], - ["searchAction", "id", "url"], -) - -tdf = ( - AnalyzeImage() - .setSubscriptionKey(service_key) - .setLocation("eastus") - .setImageUrlCol("url") - .setOutputCol("analyzed") - .setErrorCol("errors") - .setVisualFeatures( - ["Categories", "Tags", "Description", "Faces", "ImageType", "Color", "Adult"] - ) - .transform(df) - .select("*", "analyzed.*") - .drop("errors", "analyzed") -) - -tdf.writeToAzureSearch( - subscriptionKey=search_key, - actionCol="searchAction", - serviceName=search_service, - indexName=search_index, - keyCol="id", -) -``` diff --git a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Predictive Maintenance.md b/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Predictive Maintenance.md deleted file mode 100644 index c9e1248b87..0000000000 --- a/website/versioned_docs/version-0.10.0/features/cognitive_services/CognitiveServices - Predictive Maintenance.md +++ /dev/null @@ -1,193 +0,0 @@ ---- -title: CognitiveServices - Predictive Maintenance -hide_title: true -status: stable ---- -# Recipe: Predictive maintenance with the Cognitive Services for Big Data - -This recipe shows how you can use Azure Synapse Analytics and Cognitive Services on Apache Spark for predictive maintenance of IoT devices. We'll follow along with the [CosmosDB and Synapse Link](https://github.com/Azure-Samples/cosmosdb-synapse-link-samples) sample. To keep things simple, in this recipe we'll read the data straight from a CSV file rather than getting streamed data through CosmosDB and Synapse Link. We strongly encourage you to look over the Synapse Link sample. - -## Hypothetical scenario - -The hypothetical scenario is a Power Plant, where IoT devices are monitoring [steam turbines](https://en.wikipedia.org/wiki/Steam_turbine). The IoTSignals collection has Revolutions per minute (RPM) and Megawatts (MW) data for each turbine. Signals from steam turbines are being analyzed and anomalous signals are detected. - -There could be outliers in the data in random frequency. In those situations, RPM values will go up and MW output will go down, for circuit protection. 
The idea is to see the data varying at the same time, but with different signals. - -## Prerequisites - -* An Azure subscription - [Create one for free](https://azure.microsoft.com/en-us/free/) -* [Azure Synapse workspace](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-create-workspace) configured with a [serverless Apache Spark pool](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-analyze-spark) - -## Setup - -### Create an Anomaly Detector resource - -Azure Cognitive Services are represented by Azure resources that you subscribe to. Create a resource for Translator using the [Azure portal](https://docs.microsoft.com/en-us/azure/cognitive-services/cognitive-services-apis-create-account?tabs=multiservice%2Clinux) or [Azure CLI](https://docs.microsoft.com/en-us/azure/cognitive-services/cognitive-services-apis-create-account-cli?tabs=linux). You can also: - -- View an existing resource in the [Azure portal](https://portal.azure.com/). - -Make note of the endpoint and the key for this resource, you'll need it in this guide. - -## Enter your service keys - -Let's start by adding your key and location. - - -``` -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["ANOMALY_API_KEY"] = getSecret( - "mmlspark-build-keys", "cognitive-api-key" - ) - -service_key = os.environ["ANOMALY_API_KEY"] # Paste your anomaly detector key here -location = "westus2" # Paste your anomaly detector location here - -assert service_key is not None -``` - -## Read data into a DataFrame - -Next, let's read the IoTSignals file into a DataFrame. Open a new notebook in your Synapse workspace and create a DataFrame from the file. - - -``` -df_signals = spark.read.csv( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/iot/IoTSignals.csv", - header=True, - inferSchema=True, -) -``` - -### Run anomaly detection using Cognitive Services on Spark - -The goal is to find instances where the signals from the IoT devices were outputting anomalous values so that we can see when something is going wrong and do predictive maintenance. To do that, let's use Anomaly Detector on Spark: - - -``` -from pyspark.sql.functions import col, struct -from synapse.ml.cognitive import SimpleDetectAnomalies -from synapse.ml.core.spark import FluentAPI - -detector = ( - SimpleDetectAnomalies() - .setSubscriptionKey(service_key) - .setLocation(location) - .setOutputCol("anomalies") - .setGroupbyCol("grouping") - .setSensitivity(95) - .setGranularity("secondly") -) - -df_anomaly = ( - df_signals.where(col("unitSymbol") == "RPM") - .withColumn("timestamp", col("dateTime").cast("string")) - .withColumn("value", col("measureValue").cast("double")) - .withColumn("grouping", struct("deviceId")) - .mlTransform(detector) -).cache() - -df_anomaly.createOrReplaceTempView("df_anomaly") -``` - -Let's take a look at the data: - - -``` -df_anomaly.select("timestamp", "value", "deviceId", "anomalies.isAnomaly").show(3) -``` - -This cell should yield a result that looks like: - -| timestamp | value | deviceId | isAnomaly | -|:--------------------|--------:|:-----------|:------------| -| 2020-05-01 18:33:51 | 3174 | dev-7 | False | -| 2020-05-01 18:33:52 | 2976 | dev-7 | False | -| 2020-05-01 18:33:53 | 2714 | dev-7 | False | - -## Visualize anomalies for one of the devices - -IoTSignals.csv has signals from multiple IoT devices. 
We'll focus on a specific device and visualize anomalous outputs from the device. - - -``` -df_anomaly_single_device = spark.sql( - """ -select - timestamp, - measureValue, - anomalies.expectedValue, - anomalies.expectedValue + anomalies.upperMargin as expectedUpperValue, - anomalies.expectedValue - anomalies.lowerMargin as expectedLowerValue, - case when anomalies.isAnomaly=true then 1 else 0 end as isAnomaly -from - df_anomaly -where deviceid = 'dev-1' and timestamp < '2020-04-29' -order by timestamp -limit 200""" -) -``` - -Now that we have created a dataframe that represents the anomalies for a particular device, we can visualize these anomalies: - - -``` -import matplotlib.pyplot as plt -from pyspark.sql.functions import col - -adf = df_anomaly_single_device.toPandas() -adf_subset = df_anomaly_single_device.where(col("isAnomaly") == 1).toPandas() - -plt.figure(figsize=(23, 8)) -plt.plot( - adf["timestamp"], - adf["expectedUpperValue"], - color="darkred", - linestyle="solid", - linewidth=0.25, - label="UpperMargin", -) -plt.plot( - adf["timestamp"], - adf["expectedValue"], - color="darkgreen", - linestyle="solid", - linewidth=2, - label="Expected Value", -) -plt.plot( - adf["timestamp"], - adf["measureValue"], - "b", - color="royalblue", - linestyle="solid", - linewidth=2, - label="Actual", -) -plt.plot( - adf["timestamp"], - adf["expectedLowerValue"], - color="black", - linestyle="solid", - linewidth=0.25, - label="Lower Margin", -) -plt.plot(adf_subset["timestamp"], adf_subset["measureValue"], "ro", label="Anomaly") -plt.legend() -plt.title("RPM Anomalies with Confidence Intervals") -plt.show() -``` - -If successful, your output will look like this: - -![Anomaly Detector Plot](https://github.com/MicrosoftDocs/azure-docs/raw/master/articles/cognitive-services/big-data/media/anomaly-output.png) - -## Next steps - -Learn how to do predictive maintenance at scale with Azure Cognitive Services, Azure Synapse Analytics, and Azure CosmosDB. For more information, see the full sample on [GitHub](https://github.com/Azure-Samples/cosmosdb-synapse-link-samples). diff --git a/website/versioned_docs/version-0.10.0/features/geospatial_services/GeospatialServices - Flooding Risk.md b/website/versioned_docs/version-0.10.0/features/geospatial_services/GeospatialServices - Flooding Risk.md deleted file mode 100644 index 7745a26091..0000000000 --- a/website/versioned_docs/version-0.10.0/features/geospatial_services/GeospatialServices - Flooding Risk.md +++ /dev/null @@ -1,198 +0,0 @@ ---- -title: GeospatialServices - Flooding Risk -hide_title: true -status: stable ---- -# Visualizing Customer addresses on a flood plain - -King County (WA) publishes flood plain data as well as tax parcel data. We can take the addresses in the tax parcel data and use the geocoder to calculate their coordinates. Using these coordinates and the flood plain data, we can enrich our dataset with a flag indicating whether the house is in a flood zone or not. - -The following data has been sourced from King County's Open data portal. [_Link_](https://data.kingcounty.gov/) -1. [Address Data](https://mmlspark.blob.core.windows.net/publicwasb/maps/KingCountyAddress.csv) -1. [Flood plains](https://mmlspark.blob.core.windows.net/publicwasb/maps/KingCountyFloodPlains.geojson) - -For this demonstration, please follow the instructions on setting up your Azure Maps account from the overview notebook. - -## Prerequisites -1.
Upload the flood plains data as map data to your creator resource - - -```python -import os -import json -import time -import requests -from requests.adapters import HTTPAdapter -from requests.packages.urllib3.util.retry import Retry - -# Configure more resiliant requests to stop flakiness -retry_strategy = Retry( - total=3, - status_forcelist=[429, 500, 502, 503, 504], - method_whitelist=["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"], -) -adapter = HTTPAdapter(max_retries=retry_strategy) -http = requests.Session() -http.mount("https://", adapter) -http.mount("http://", adapter) - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["AZURE_MAPS_KEY"] = getSecret("mmlspark-build-keys", "azuremaps-api-key") - from notebookutils.visualization import display - - -# Azure Maps account key -azureMapsKey = os.environ["AZURE_MAPS_KEY"] # Replace this with your azure maps key - -# Creator Geo prefix -# for this example, assuming that the creator resource is created in `EAST US 2`. -atlas_geo_prefix = "us" - -# Load flood plains data -flood_plain_geojson = http.get( - "https://mmlspark.blob.core.windows.net/publicwasb/maps/KingCountyFloodPlains.geojson" -).content - -# Upload this flood plains data to your maps/creator account. This is a Long-Running async operation and takes approximately 15~30 seconds to complete -r = http.post( - f"https://{atlas_geo_prefix}.atlas.microsoft.com/mapData/upload?api-version=1.0&dataFormat=geojson&subscription-key={azureMapsKey}", - json=json.loads(flood_plain_geojson), -) - -# Poll for resource upload completion -resource_location = r.headers.get("location") -for _ in range(20): - resource = json.loads( - http.get(f"{resource_location}&subscription-key={azureMapsKey}").content - ) - status = resource["status"].lower() - if status == "running": - time.sleep(5) # wait in a polling loop - elif status == "succeeded": - break - else: - raise ValueError("Unknown status {}".format(status)) - -# Once the above operation returns a HTTP 201, get the user_data_id of the flood plains data, you uploaded to your map account. -user_data_id_resource_url = resource["resourceLocation"] -user_data_id = json.loads( - http.get(f"{user_data_id_resource_url}&subscription-key={azureMapsKey}").content -)["udid"] -``` - -Now that we have the flood plains data setup in our maps account, we can use the `CheckPointInPolygon` function to check if a location `(lat,lon)` coordinate is in a flood zone. - -### Load address data: - - -```python -data = spark.read.option("header", "true").csv( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/maps/KingCountyAddress.csv" -) - -# Visualize incoming schema -print("Schema:") -data.printSchema() - -# Choose a subset of the data for this example -subset_data = data.limit(50) -display(subset_data) -``` - -### Wire-up the Address Geocoder - -We will use the address geocoder to enrich the dataset with location coordinates of the addresses. 
- - -```python -from pyspark.sql.functions import col -from synapse.ml.cognitive import * -from synapse.ml.stages import FixedMiniBatchTransformer, FlattenBatch -from synapse.ml.geospatial import * - - -def extract_location_fields(df): - # Use this function to select only lat/lon columns into the dataframe - return df.select( - col("*"), - col("output.response.results") - .getItem(0) - .getField("position") - .getField("lat") - .alias("Latitude"), - col("output.response.results") - .getItem(0) - .getField("position") - .getField("lon") - .alias("Longitude"), - ).drop("output") - - -# Azure Maps geocoder to enhance the dataframe with location data -geocoder = ( - AddressGeocoder() - .setSubscriptionKey(azureMapsKey) - .setAddressCol("FullAddress") - .setOutputCol("output") -) - -# Set up a fixed mini batch transformer to geocode addresses -batched_dataframe = geocoder.transform( - FixedMiniBatchTransformer().setBatchSize(10).transform(subset_data.coalesce(1)) -) -geocoded_addresses = extract_location_fields( - FlattenBatch().transform(batched_dataframe) -) - -# Display the results -display(geocoded_addresses) -``` - -Now that we have geocoded the addresses, we can now use the `CheckPointInPolygon` function to check if a property is in a flood zone or not. - -### Setup Check Point In Polygon - - -```python -def extract_point_in_polygon_result_fields(df): - # Use this function to select only lat/lon columns into the dataframe - return df.select( - col("*"), - col("output.result.pointInPolygons").alias("In Polygon"), - col("output.result.intersectingGeometries").alias("Intersecting Polygons"), - ).drop("output") - - -check_point_in_polygon = ( - CheckPointInPolygon() - .setSubscriptionKey(azureMapsKey) - .setGeography(atlas_geo_prefix) - .setUserDataIdentifier(user_data_id) - .setLatitudeCol("Latitude") - .setLongitudeCol("Longitude") - .setOutputCol("output") -) - - -flood_plain_addresses = extract_point_in_polygon_result_fields( - check_point_in_polygon.transform(geocoded_addresses) -) - -# Display the results -display(flood_plain_addresses) -``` - -### Cleanup Uploaded User Data (Optional) -You can (optionally) delete the uploaded geojson polygon. - - -```python -res = http.delete( - f"https://{atlas_geo_prefix}.atlas.microsoft.com/mapData/{user_data_id}?api-version=1.0&subscription-key={azureMapsKey}" -) -``` diff --git a/website/versioned_docs/version-0.10.0/features/geospatial_services/GeospatialServices - Overview.md b/website/versioned_docs/version-0.10.0/features/geospatial_services/GeospatialServices - Overview.md deleted file mode 100644 index 6aa98af78c..0000000000 --- a/website/versioned_docs/version-0.10.0/features/geospatial_services/GeospatialServices - Overview.md +++ /dev/null @@ -1,291 +0,0 @@ ---- -title: GeospatialServices - Overview -hide_title: true -status: stable ---- - - -# Azure Maps Geospatial Services - -[Microsoft Azure Maps ](https://azure.microsoft.com/en-us/services/azure-maps/) provides developers from all industries with powerful geospatial capabilities. Those geospatial capabilities are packed with the freshest mapping data. Azure Maps is available for web, mobile (iOS and Android), Microsoft Power BI, Microsoft Power Apps and Microsoft Synapse. Azure Maps is an Open API compliant set of REST APIs. The following are only a high-level overview of the services which Azure Maps offers - Maps, Search, Routing, Traffic, Weather, Time Zones, Geolocation, Geofencing, Map Data, Creator, and Spatial Operations. 
- -## Usage - -### Geocode addresses -[**Address Geocoding**](https://docs.microsoft.com/en-us/rest/api/maps/search/post-search-address-batch) The Search Address Batch API sends batches of queries to Search Address API using just a single API call. This API geocodes text addresses or partial addresses and the geocoding search index will be queried for everything above the street level data. **Note** that the geocoder is very tolerant of typos and incomplete addresses. It will also handle everything from exact street addresses or street or intersections as well as higher level geographies such as city centers, counties, states etc. - -### Reverse Geocode Coordinates -[**Reverse Geocoding**](https://docs.microsoft.com/en-us/rest/api/maps/search/post-search-address-reverse-batch) The Search Address Reverse Batch API sends batches of queries to Search Address Reverse API using just a single API call. This API takes in location coordinates and translates them into human readable street addresses. Most often this is needed in tracking applications where you receive a GPS feed from the device or asset and wish to know what address where the coordinate is located. - -### Get Point In Polygon -[**Get Point in Polygon**](https://docs.microsoft.com/en-us/rest/api/maps/spatial/get-point-in-polygon) This API returns a boolean value indicating whether a point is inside a set of polygons. The set of polygons can we pre-created by using the [**Data Upload API**](https://docs.microsoft.com/en-us/rest/api/maps/data/upload-preview) referenced by a unique udid. - -## Prerequisites - -1. Sign into the [Azure Portal](https://portal.azure.com) and create an Azure Maps account by following these [instructions](https://docs.microsoft.com/en-us/azure/azure-maps/how-to-manage-account-keys#create-a-new-account). -1. Once the Maps account is created, provision a Maps Creator Resource by following these [instructions](https://docs.microsoft.com/en-us/azure/azure-maps/how-to-manage-creator#create-creator-resource). Creator is a [geographically scoped service](https://docs.microsoft.com/en-us/azure/azure-maps/creator-geographic-scope). Pick appropriate location while provisioning the creator resource. -1. Follow these [instructions](https://docs.microsoft.com/en-us/azure/cognitive-services/big-data/getting-started#create-an-apache-spark-cluster) to set up your Azure Databricks environment and install SynapseML. -1. After you create a new notebook in Azure Databricks, copy the **Shared code** below and paste into a new cell in your notebook. -1. Choose a service sample, below, and copy paste it into a second new cell in your notebook. -1. Replace the `AZUREMAPS_API_KEY` placeholders with your own [Maps account key](https://docs.microsoft.com/en-us/azure/azure-maps/how-to-manage-authentication#view-authentication-details). -1. Choose the run button (triangle icon) in the upper right corner of the cell, then select **Run Cell**. -1. View results in a table below the cell. 
- -## Shared code - -To get started, we'll need to add this code to the project: - - -```python -from pyspark.sql.functions import udf, col -from pyspark.sql.types import StructType, StructField, DoubleType -from pyspark.sql.functions import lit -from pyspark.ml import PipelineModel -from pyspark.sql.functions import col -import os -import requests -from requests.adapters import HTTPAdapter -from requests.packages.urllib3.util.retry import Retry - -# Configure more resiliant requests to stop flakiness -retry_strategy = Retry( - total=3, - status_forcelist=[429, 500, 502, 503, 504], - method_whitelist=["HEAD", "GET", "PUT", "DELETE", "OPTIONS", "TRACE"], -) -adapter = HTTPAdapter(max_retries=retry_strategy) -http = requests.Session() -http.mount("https://", adapter) -http.mount("http://", adapter) -``` - - -```python -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["AZURE_MAPS_KEY"] = getSecret("mmlspark-build-keys", "azuremaps-api-key") - from notebookutils.visualization import display -``` - - -```python -from synapse.ml.cognitive import * -from synapse.ml.geospatial import * - -# An Azure Maps account key -azureMapsKey = os.environ["AZURE_MAPS_KEY"] -``` - -## Geocoding sample - -The azure maps geocoder sends batches of queries to the [Search Address API](https://docs.microsoft.com/en-us/rest/api/maps/search/getsearchaddress). The API limits the batch size to 10000 queries per request. - - -```python -from synapse.ml.stages import FixedMiniBatchTransformer, FlattenBatch - -df = spark.createDataFrame( - [ - ("One, Microsoft Way, Redmond",), - ("400 Broad St, Seattle",), - ("350 5th Ave, New York",), - ("Pike Pl, Seattle",), - ("Champ de Mars, 5 Avenue Anatole France, 75007 Paris",), - ], - [ - "address", - ], -) - - -def extract_location_fields(df): - # Use this function to select only lat/lon columns into the dataframe - return df.select( - col("*"), - col("output.response.results") - .getItem(0) - .getField("position") - .getField("lat") - .alias("Latitude"), - col("output.response.results") - .getItem(0) - .getField("position") - .getField("lon") - .alias("Longitude"), - ).drop("output") - - -# Run the Azure Maps geocoder to enhance the data with location data -geocoder = ( - AddressGeocoder() - .setSubscriptionKey(azureMapsKey) - .setAddressCol("address") - .setOutputCol("output") -) - -# Show the results of your text query in a table format -display( - extract_location_fields( - geocoder.transform(FixedMiniBatchTransformer().setBatchSize(10).transform(df)) - ) -) -``` - -## Reverse Geocoding sample - -The azure maps reverse geocoder sends batches of queries to the [Search Address Reverse API](https://docs.microsoft.com/en-us/rest/api/maps/search/get-search-address-reverse) using just a single API call. 
The API allows the caller to batch up to 10,000 queries per request. - - -```python -# Create a dataframe that's tied to its column names -df = spark.createDataFrame( - ( - ( - (48.858561, 2.294911), - (47.639765, -122.127896), - (47.621028, -122.348170), - (47.734012, -122.102737), - ) - ), - StructType([StructField("lat", DoubleType()), StructField("lon", DoubleType())]), -) - -# Run the Azure Maps reverse geocoder to enhance the data with address data -rev_geocoder = ( - ReverseAddressGeocoder() - .setSubscriptionKey(azureMapsKey) - .setLatitudeCol("lat") - .setLongitudeCol("lon") - .setOutputCol("output") -) - -# Show the results of the reverse geocode in a table format - -display( - rev_geocoder.transform(FixedMiniBatchTransformer().setBatchSize(10).transform(df)) - .select( - col("*"), - col("output.response.addresses") - .getItem(0) - .getField("address") - .getField("freeformAddress") - .alias("Address"), - col("output.response.addresses") - .getItem(0) - .getField("address") - .getField("country") - .alias("Country"), - ) - .drop("output") -) -``` - -## Check Point In Polygon sample - -This API returns a boolean value indicating whether a point is inside a set of polygons. The polygon can be added to your creator account using the [**Data Upload API**](https://docs.microsoft.com/en-us/rest/api/maps/data/upload-preview). The API then returns a unique udid to reference the polygon. - -### Set up geojson polygons in your Azure Maps creator account - -Based on where the creator resource was provisioned, we need to prefix the appropriate geography code to the Azure Maps URL. In this example, the assumption is that the creator resource was provisioned in the `East US 2` location, and hence we pick `us` as our geo prefix. - - -```python -import time -import json - -# Choose a geography you want your data to reside in.
-# Allowed values -# us => North American datacenters -# eu -> European datacenters -url_geo_prefix = "us" - -# Upload a geojson with polygons in them -r = http.post( - f"https://{url_geo_prefix}.atlas.microsoft.com/mapData/upload?api-version=1.0&dataFormat=geojson&subscription-key={azureMapsKey}", - json={ - "type": "FeatureCollection", - "features": [ - { - "type": "Feature", - "properties": {"geometryId": "test_geometry"}, - "geometry": { - "type": "Polygon", - "coordinates": [ - [ - [-122.14290618896484, 47.67856488312544], - [-122.03956604003906, 47.67856488312544], - [-122.03956604003906, 47.7483271435476], - [-122.14290618896484, 47.7483271435476], - [-122.14290618896484, 47.67856488312544], - ] - ], - }, - } - ], - }, -) - -long_running_operation = r.headers.get("location") -time.sleep(30) # Sometimes this may take upto 30 seconds -print(f"Status Code: {r.status_code}, Long Running Operation: {long_running_operation}") -# This Operation completes in approximately 5 ~ 15 seconds -user_data_id_resource_url = json.loads( - http.get(f"{long_running_operation}&subscription-key={azureMapsKey}").content -)["resourceLocation"] -user_data_id = json.loads( - http.get(f"{user_data_id_resource_url}&subscription-key={azureMapsKey}").content -)["udid"] -``` - -### Use the function to check if point is in polygon - - -```python -# Create a dataframe that's tied to it's column names -df = spark.createDataFrame( - ( - ( - (48.858561, 2.294911), - (47.639765, -122.127896), - (47.621028, -122.348170), - (47.734012, -122.102737), - ) - ), - StructType([StructField("lat", DoubleType()), StructField("lon", DoubleType())]), -) - -# Run the Azure Maps geocoder to enhance the data with location data -check_point_in_polygon = ( - CheckPointInPolygon() - .setSubscriptionKey(azureMapsKey) - .setGeography(url_geo_prefix) - .setUserDataIdentifier(user_data_id) - .setLatitudeCol("lat") - .setLongitudeCol("lon") - .setOutputCol("output") -) - -# Show the results of your text query in a table format -display( - check_point_in_polygon.transform(df) - .select( - col("*"), - col("output.result.pointInPolygons").alias("In Polygon"), - col("output.result.intersectingGeometries").alias("Intersecting Polygons"), - ) - .drop("output") -) -``` - -### Cleanup - - -```python -res = http.delete( - f"https://{url_geo_prefix}.atlas.microsoft.com/mapData/{user_data_id}?api-version=1.0&subscription-key={azureMapsKey}" -) -``` diff --git a/website/versioned_docs/version-0.10.0/features/isolation_forest/IsolationForest - Multivariate Anomaly Detection.md b/website/versioned_docs/version-0.10.0/features/isolation_forest/IsolationForest - Multivariate Anomaly Detection.md deleted file mode 100644 index b004f4280e..0000000000 --- a/website/versioned_docs/version-0.10.0/features/isolation_forest/IsolationForest - Multivariate Anomaly Detection.md +++ /dev/null @@ -1,484 +0,0 @@ ---- -title: IsolationForest - Multivariate Anomaly Detection -hide_title: true -status: stable ---- -# Recipe: Multivariate Anomaly Detection with Isolation Forest -This recipe shows how you can use SynapseML on Apache Spark for multivariate anomaly detection. Multivariate anomaly detection allows for the detection of anomalies among many variables or timeseries, taking into account all the inter-correlations and dependencies between the different variables. 
In this scenario, we use SynapseML to train an Isolation Forest model for multivariate anomaly detection, and we then use to the trained model to infer multivariate anomalies within a dataset containing synthetic measurements from three IoT sensors. - -To learn more about the Isolation Forest model please refer to the original paper by [Liu _et al._](https://cs.nju.edu.cn/zhouzh/zhouzh.files/publication/icdm08b.pdf?q=isolation-forest). - -## Library imports - - -```python -import os -from IPython import get_ipython -from IPython.terminal.interactiveshell import TerminalInteractiveShell -import uuid -import mlflow -import matplotlib.pyplot as plt - -from pyspark.sql import functions as F -from pyspark.ml.feature import VectorAssembler -from pyspark.sql.types import * -from pyspark.ml import Pipeline - -from synapse.ml.isolationforest import * - -from synapse.ml.explainers import * -``` - - -```python -%matplotlib inline -``` - - -```python -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - shell = TerminalInteractiveShell.instance() - shell.define_macro("foo", """a,b=10,20""") - from notebookutils.visualization import display -``` - -## Input data - - -```python -# Table inputs -timestampColumn = "timestamp" # str: the name of the timestamp column in the table -inputCols = [ - "sensor_1", - "sensor_2", - "sensor_3", -] # list(str): the names of the input variables - -# Training Start time, and number of days to use for training: -trainingStartTime = ( - "2022-02-24T06:00:00Z" # datetime: datetime for when to start the training -) -trainingEndTime = ( - "2022-03-08T23:55:00Z" # datetime: datetime for when to end the training -) -inferenceStartTime = ( - "2022-03-09T09:30:00Z" # datetime: datetime for when to start the training -) -inferenceEndTime = ( - "2022-03-20T23:55:00Z" # datetime: datetime for when to end the training -) - -# Isolation Forest parameters -contamination = 0.021 -num_estimators = 100 -max_samples = 256 -max_features = 1.0 - -# MLFlow experiment -artifact_path = "isolationforest" -experiment_name = f"/Shared/isolation_forest_experiment-{str(uuid.uuid1())}/" -model_name = "isolation-forest-model" -model_version = 1 -``` - -## Read data - - -```python -df = ( - spark.read.format("csv") - .option("header", "true") - .load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/generated_sample_mvad_data.csv" - ) -) -``` - -cast columns to appropriate data types - - -```python -df = ( - df.orderBy(timestampColumn) - .withColumn("timestamp", F.date_format(timestampColumn, "yyyy-MM-dd'T'HH:mm:ss'Z'")) - .withColumn("sensor_1", F.col("sensor_1").cast(DoubleType())) - .withColumn("sensor_2", F.col("sensor_2").cast(DoubleType())) - .withColumn("sensor_3", F.col("sensor_3").cast(DoubleType())) - .drop("_c5") -) - -display(df) -``` - -## Training data preparation - - -```python -# filter to data with timestamps within the training window -df_train = df.filter( - (F.col(timestampColumn) >= trainingStartTime) - & (F.col(timestampColumn) <= trainingEndTime) -) -display(df_train) -``` - -## Test data preparation - - -```python -# filter to data with timestamps within the inference window -df_test = df.filter( - (F.col(timestampColumn) >= inferenceStartTime) - & (F.col(timestampColumn) <= inferenceEndTime) -) -display(df_test) -``` - -## Train Isolation Forest model - - -```python -isolationForest = ( - IsolationForest() - .setNumEstimators(num_estimators) - .setBootstrap(False) - 
.setMaxSamples(max_samples) - .setMaxFeatures(max_features) - .setFeaturesCol("features") - .setPredictionCol("predictedLabel") - .setScoreCol("outlierScore") - .setContamination(contamination) - .setContaminationError(0.01 * contamination) - .setRandomSeed(1) -) -``` - -Next, we create an ML pipeline to train the Isolation Forest model. We also demonstrate how to create an MLFlow experiment and register the trained model. - -Note that MLFlow model registration is strictly only required if accessing the trained model at a later time. For training the model, and performing inferencing in the same notebook, the model object model is sufficient. - - -```python -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - !pip install --upgrade sqlparse -``` - - -```python -mlflow.set_experiment(experiment_name) -with mlflow.start_run(): - va = VectorAssembler(inputCols=inputCols, outputCol="features") - pipeline = Pipeline(stages=[va, isolationForest]) - model = pipeline.fit(df_train) - mlflow.spark.log_model( - model, artifact_path=artifact_path, registered_model_name=model_name - ) -``` - -## Perform inferencing - -Load the trained Isolation Forest Model - - -```python -model_uri = f"models:/{model_name}/{model_version}" -model = mlflow.spark.load_model(model_uri) -``` - -Perform inferencing - - -```python -df_test_pred = model.transform(df_test) -display(df_test_pred) -``` - -## ML interpretability -In this section, we use ML interpretability tools to help unpack the contribution of each sensor to the detected anomalies at any point in time. - - -```python -# Here, we create a TabularSHAP explainer, set the input columns to all the features the model takes, specify the model and the target output column -# we are trying to explain. In this case, we are trying to explain the "outlierScore" output. -shap = TabularSHAP( - inputCols=inputCols, - outputCol="shapValues", - model=model, - targetCol="outlierScore", - backgroundData=F.broadcast(df_test), -) -``` - -Display the dataframe with `shapValues` column - - -```python -shap_df = shap.transform(df_test_pred) -display(shap_df) -``` - - -```python -# Define UDF -vec2array = udf(lambda vec: vec.toArray().tolist(), ArrayType(FloatType())) -``` - - -```python -# Here, we extract the SHAP values, the original features and the outlier score column. Then we convert it to a Pandas DataFrame for visualization. 
-# For each observation, the first element in the SHAP values vector is the base value (the mean output of the background dataset), -# and each of the following elements represents the SHAP values for each feature -shaps = ( - shap_df.withColumn("shapValues", vec2array(F.col("shapValues").getItem(0))) - .select( - ["shapValues", "outlierScore"] + inputCols + [timestampColumn, "prediction"] - ) - .withColumn("sensor_1_localimp", F.col("shapValues")[1]) - .withColumn("sensor_2_localimp", F.col("shapValues")[2]) - .withColumn("sensor_3_localimp", F.col("shapValues")[3]) -) -``` - - -```python -shaps_local = shaps.toPandas() -shaps_local -``` - -Retrieve local feature importances - - -```python -local_importance_values = shaps_local[["shapValues"]] -eval_data = shaps_local[inputCols] -``` - - -```python -# Removing the first element in the list of local importance values (this is the base value or mean output of the background dataset) -list_local_importance_values = local_importance_values.values.tolist() -converted_importance_values = [] -bias = [] -for classarray in list_local_importance_values: - for rowarray in classarray: - converted_list = rowarray.tolist() - bias.append(converted_list[0]) - # remove the bias from local importance values - del converted_list[0] - converted_importance_values.append(converted_list) -``` - -Next, install libraries required for ML Interpretability analysis - - -```python -!pip install --upgrade raiwidgets interpret-community -``` - - -```python -from interpret_community.adapter import ExplanationAdapter - -adapter = ExplanationAdapter(inputCols, classification=False) -global_explanation = adapter.create_global( - converted_importance_values, eval_data, expected_values=bias -) -``` - - -```python -# view the global importance values -global_explanation.global_importance_values -``` - - -```python -# view the local importance values -global_explanation.local_importance_values -``` - - -```python -# Defining a wrapper class with a predict method for creating the Explanation Dashboard - - -class wrapper(object): - def __init__(self, model): - self.model = model - - def predict(self, data): - sparkdata = spark.createDataFrame(data) - return ( - self.model.transform(sparkdata) - .select("outlierScore") - .toPandas() - .values.flatten() - .tolist() - ) -``` - -## Visualize results - -Visualize anomaly results and feature contribution scores (derived from local feature importance) - - -```python -def visualize(rdf): - anoms = list(rdf["prediction"] == 1) - - fig = plt.figure(figsize=(26, 12)) - - ax = fig.add_subplot(611) - ax.title.set_text(f"Multivariate Anomaly Detection Results") - ax.plot( - rdf[timestampColumn], - rdf["sensor_1"], - color="tab:orange", - linestyle="solid", - linewidth=2, - label="sensor_1", - ) - ax.grid(axis="y") - _, _, ymin, ymax = plt.axis() - ax.vlines( - rdf[timestampColumn][anoms], - ymin=ymin, - ymax=ymax, - color="tab:red", - alpha=0.2, - linewidth=6, - ) - ax.tick_params(axis="x", which="both", bottom=False, labelbottom=False) - ax.set_ylabel("sensor1_value") - ax.legend() - - ax = fig.add_subplot(612, sharex=ax) - ax.plot( - rdf[timestampColumn], - rdf["sensor_2"], - color="tab:green", - linestyle="solid", - linewidth=2, - label="sensor_2", - ) - ax.grid(axis="y") - _, _, ymin, ymax = plt.axis() - ax.vlines( - rdf[timestampColumn][anoms], - ymin=ymin, - ymax=ymax, - color="tab:red", - alpha=0.2, - linewidth=6, - ) - ax.tick_params(axis="x", which="both", bottom=False, labelbottom=False) - ax.set_ylabel("sensor2_value") - ax.legend() - - ax = fig.add_subplot(613, sharex=ax) - ax.plot( - rdf[timestampColumn], - rdf["sensor_3"], - color="tab:purple", - linestyle="solid", - linewidth=2, - label="sensor_3", - ) - ax.grid(axis="y") - _, _, ymin, ymax = plt.axis() - ax.vlines( - rdf[timestampColumn][anoms], - ymin=ymin, - ymax=ymax, - color="tab:red", - alpha=0.2, - linewidth=6, - ) - ax.tick_params(axis="x", which="both", bottom=False, labelbottom=False) - ax.set_ylabel("sensor3_value") - ax.legend() - - ax = fig.add_subplot(614, sharex=ax) - ax.tick_params(axis="x", which="both", bottom=False, labelbottom=False) - ax.plot( - rdf[timestampColumn], - rdf["outlierScore"], - color="black", - linestyle="solid", - linewidth=2, - label="Outlier score", - ) - ax.set_ylabel("outlier score") - ax.grid(axis="y") - ax.legend() - - ax = fig.add_subplot(615, sharex=ax) - ax.tick_params(axis="x", which="both", bottom=False, labelbottom=False) - ax.bar( - rdf[timestampColumn], - rdf["sensor_1_localimp"].abs(), - width=2, - color="tab:orange", - label="sensor_1", - ) - ax.bar( - rdf[timestampColumn], - rdf["sensor_2_localimp"].abs(), - width=2, - color="tab:green", - label="sensor_2", - bottom=rdf["sensor_1_localimp"].abs(), - ) - ax.bar( - rdf[timestampColumn], - rdf["sensor_3_localimp"].abs(), - width=2, - color="tab:purple", - label="sensor_3", - bottom=rdf["sensor_1_localimp"].abs() + rdf["sensor_2_localimp"].abs(), - ) - ax.set_ylabel("Contribution scores") - ax.grid(axis="y") - ax.legend() - - plt.show() -``` - - -```python -visualize(shaps_local) -``` - -When you run the cell above, you will see the following plots: - -![](https://mmlspark.blob.core.windows.net/graphics/notebooks/mvad_results_local_importances.jpg) - -- The first 3 plots above show the sensor time series data in the inference window, in orange, green, and purple. The red vertical lines show the detected anomalies (`prediction` = 1). -- The fourth plot shows the outlierScore of all the points, with the `minOutlierScore` threshold shown by the dotted red horizontal line. -- The last plot shows the contribution scores of each sensor to the `outlierScore` for that point. - -Plot aggregate feature importance - - -```python -plt.figure(figsize=(10, 7)) -plt.bar(inputCols, global_explanation.global_importance_values) -plt.ylabel("global importance values") -``` - -When you run the cell above, you will see the following global feature importance plot: - -![](https://mmlspark.blob.core.windows.net/graphics/notebooks/global_feature_importance.jpg) - -Visualize the explanation in the ExplanationDashboard from https://github.com/microsoft/responsible-ai-widgets. - - -```python -# View the model explanation in the ExplanationDashboard -from raiwidgets import ExplanationDashboard - -ExplanationDashboard(global_explanation, wrapper(model), dataset=eval_data) -``` diff --git a/website/versioned_docs/version-0.10.0/features/lightgbm/LightGBM - Overview.md b/website/versioned_docs/version-0.10.0/features/lightgbm/LightGBM - Overview.md deleted file mode 100644 index 7558ba1b3a..0000000000 --- a/website/versioned_docs/version-0.10.0/features/lightgbm/LightGBM - Overview.md +++ /dev/null @@ -1,303 +0,0 @@ ---- -title: LightGBM - Overview -hide_title: true -status: stable ---- -# LightGBM - -[LightGBM](https://github.com/Microsoft/LightGBM) is an open-source, -distributed, high-performance gradient boosting (GBDT, GBRT, GBM, or -MART) framework. This framework specializes in creating high-quality and -GPU enabled decision tree algorithms for ranking, classification, and -many other machine learning tasks.
LightGBM is part of Microsoft's -[DMTK](http://github.com/microsoft/dmtk) project. - -### Advantages of LightGBM - -- **Composability**: LightGBM models can be incorporated into existing - SparkML Pipelines, and used for batch, streaming, and serving - workloads. -- **Performance**: LightGBM on Spark is 10-30% faster than SparkML on - the Higgs dataset, and achieves a 15% increase in AUC. [Parallel - experiments](https://github.com/Microsoft/LightGBM/blob/master/docs/Experiments.rst#parallel-experiment) - have verified that LightGBM can achieve a linear speed-up by using - multiple machines for training in specific settings. -- **Functionality**: LightGBM offers a wide array of [tunable - parameters](https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst), - that one can use to customize their decision tree system. LightGBM on - Spark also supports new types of problems such as quantile regression. -- **Cross platform** LightGBM on Spark is available on Spark, PySpark, and SparklyR - -### LightGBM Usage: - -- LightGBMClassifier: used for building classification models. For example, to predict whether a company will bankrupt or not, we could build a binary classification model with LightGBMClassifier. -- LightGBMRegressor: used for building regression models. For example, to predict the house price, we could build a regression model with LightGBMRegressor. -- LightGBMRanker: used for building ranking models. For example, to predict website searching result relevance, we could build a ranking model with LightGBMRanker. - -## Bankruptcy Prediction with LightGBM Classifier - - - -In this example, we use LightGBM to build a classification model in order to predict bankruptcy. - -#### Read dataset - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - - -```python -df = ( - spark.read.format("csv") - .option("header", True) - .option("inferSchema", True) - .load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/company_bankruptcy_prediction_data.csv" - ) -) -# print dataset size -print("records read: " + str(df.count())) -print("Schema: ") -df.printSchema() -``` - - -```python -display(df) -``` - -#### Split the dataset into train and test - - -```python -train, test = df.randomSplit([0.85, 0.15], seed=1) -``` - -#### Add featurizer to convert features to vector - - -```python -from pyspark.ml.feature import VectorAssembler - -feature_cols = df.columns[1:] -featurizer = VectorAssembler(inputCols=feature_cols, outputCol="features") -train_data = featurizer.transform(train)["Bankrupt?", "features"] -test_data = featurizer.transform(test)["Bankrupt?", "features"] -``` - -#### Check if the data is unbalanced - - -```python -display(train_data.groupBy("Bankrupt?").count()) -``` - -#### Model Training - - -```python -from synapse.ml.lightgbm import LightGBMClassifier - -model = LightGBMClassifier( - objective="binary", featuresCol="features", labelCol="Bankrupt?", isUnbalance=True -) -``` - - -```python -model = model.fit(train_data) -``` - -By calling "saveNativeModel", it allows you to extract the underlying lightGBM model for fast deployment after you train on Spark. 
- - -```python -from synapse.ml.lightgbm import LightGBMClassificationModel - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - model.saveNativeModel("/models/lgbmclassifier.model") - model = LightGBMClassificationModel.loadNativeModelFromFile( - "/models/lgbmclassifier.model" - ) -else: - model.saveNativeModel("/lgbmclassifier.model") - model = LightGBMClassificationModel.loadNativeModelFromFile("/lgbmclassifier.model") -``` - -#### Feature Importances Visualization - - -```python -import pandas as pd -import matplotlib.pyplot as plt - -feature_importances = model.getFeatureImportances() -fi = pd.Series(feature_importances, index=feature_cols) -fi = fi.sort_values(ascending=True) -f_index = fi.index -f_values = fi.values - -# print feature importances -print("f_index:", f_index) -print("f_values:", f_values) - -# plot -x_index = list(range(len(fi))) -x_index = [x / len(fi) for x in x_index] -plt.rcParams["figure.figsize"] = (20, 20) -plt.barh( - x_index, f_values, height=0.028, align="center", color="tan", tick_label=f_index -) -plt.xlabel("importances") -plt.ylabel("features") -plt.show() -``` - -#### Model Prediction - - -```python -predictions = model.transform(test_data) -predictions.limit(10).toPandas() -``` - - -```python -from synapse.ml.train import ComputeModelStatistics - -metrics = ComputeModelStatistics( - evaluationMetric="classification", - labelCol="Bankrupt?", - scoredLabelsCol="prediction", -).transform(predictions) -display(metrics) -``` - -## Quantile Regression for Drug Discovery with LightGBMRegressor - - - -In this example, we show how to use LightGBM to build a simple regression model. - -#### Read dataset - - -```python -triazines = spark.read.format("libsvm").load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/triazines.scale.svmlight" -) -``` - - -```python -# print some basic info -print("records read: " + str(triazines.count())) -print("Schema: ") -triazines.printSchema() -display(triazines.limit(10)) -``` - -#### Split dataset into train and test - - -```python -train, test = triazines.randomSplit([0.85, 0.15], seed=1) -``` - -#### Model Training - - -```python -from synapse.ml.lightgbm import LightGBMRegressor - -model = LightGBMRegressor( - objective="quantile", alpha=0.2, learningRate=0.3, numLeaves=31 -).fit(train) -``` - - -```python -print(model.getFeatureImportances()) -``` - -#### Model Prediction - - -```python -scoredData = model.transform(test) -display(scoredData) -``` - - -```python -from synapse.ml.train import ComputeModelStatistics - -metrics = ComputeModelStatistics( - evaluationMetric="regression", labelCol="label", scoresCol="prediction" -).transform(scoredData) -display(metrics) -``` - -## LightGBM Ranker - -#### Read dataset - - -```python -df = spark.read.format("parquet").load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/lightGBMRanker_train.parquet" -) -# print some basic info -print("records read: " + str(df.count())) -print("Schema: ") -df.printSchema() -display(df.limit(10)) -``` - -#### Model Training - - -```python -from synapse.ml.lightgbm import LightGBMRanker - -features_col = "features" -query_col = "query" -label_col = "labels" -lgbm_ranker = LightGBMRanker( - labelCol=label_col, - featuresCol=features_col, - groupCol=query_col, - predictionCol="preds", - leafPredictionCol="leafPreds", - featuresShapCol="importances", - repartitionByGroupingColumn=True, - numLeaves=32, - numIterations=200, - evalAt=[1, 3, 5], - metric="ndcg", -) -``` - - -```python -lgbm_ranker_model = 
lgbm_ranker.fit(df) -``` - -#### Model Prediction - - -```python -dt = spark.read.format("parquet").load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/lightGBMRanker_test.parquet" -) -predictions = lgbm_ranker_model.transform(dt) -predictions.limit(10).toPandas() -``` diff --git a/website/versioned_docs/version-0.10.0/features/lightgbm/about.md b/website/versioned_docs/version-0.10.0/features/lightgbm/about.md deleted file mode 100644 index b32715d875..0000000000 --- a/website/versioned_docs/version-0.10.0/features/lightgbm/about.md +++ /dev/null @@ -1,143 +0,0 @@ ---- -title: LightGBM -hide_title: true -sidebar_label: About ---- - -# LightGBM on Apache Spark - -### LightGBM - -[LightGBM](https://github.com/Microsoft/LightGBM) is an open-source, -distributed, high-performance gradient boosting (GBDT, GBRT, GBM, or -MART) framework. This framework specializes in creating high-quality and -GPU enabled decision tree algorithms for ranking, classification, and -many other machine learning tasks. LightGBM is part of Microsoft's -[DMTK](http://github.com/microsoft/dmtk) project. - -### Advantages of LightGBM - -- **Composability**: LightGBM models can be incorporated into existing - SparkML Pipelines, and used for batch, streaming, and serving - workloads. -- **Performance**: LightGBM on Spark is 10-30% faster than SparkML on - the Higgs dataset, and achieves a 15% increase in AUC. [Parallel - experiments](https://github.com/Microsoft/LightGBM/blob/master/docs/Experiments.rst#parallel-experiment) - have verified that LightGBM can achieve a linear speed-up by using - multiple machines for training in specific settings. -- **Functionality**: LightGBM offers a wide array of [tunable - parameters](https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst), - that one can use to customize their decision tree system. LightGBM on - Spark also supports new types of problems such as quantile regression. -- **Cross platform** LightGBM on Spark is available on Spark, PySpark, and SparklyR - -### Usage - -In PySpark, you can run the `LightGBMClassifier` via: - -```python -from synapse.ml.lightgbm import LightGBMClassifier -model = LightGBMClassifier(learningRate=0.3, - numIterations=100, - numLeaves=31).fit(train) -``` - -Similarly, you can run the `LightGBMRegressor` by setting the -`application` and `alpha` parameters: - -```python -from synapse.ml.lightgbm import LightGBMRegressor -model = LightGBMRegressor(application='quantile', - alpha=0.3, - learningRate=0.3, - numIterations=100, - numLeaves=31).fit(train) -``` - -For an end to end application, check out the LightGBM [notebook -example](../LightGBM%20-%20Overview). - -### Arguments/Parameters - -SynapseML exposes getters/setters for many common LightGBM parameters. -In python, you can use property-value pairs, or in Scala use -fluent setters. Examples of both are shown in this section. - -```scala -import com.microsoft.azure.synapse.ml.lightgbm.LightGBMClassifier -val classifier = new LightGBMClassifier() - .setLearningRate(0.2) - .setNumLeaves(50) -``` - -LightGBM has far more parameters than SynapseML exposes. For cases where you -need to set some parameters that SynapseML doesn't expose a setter for, use -passThroughArgs. This argument is just a free string that you can use to add extra parameters -to the command SynapseML sends to configure LightGBM. 
- -In python: -```python -from synapse.ml.lightgbm import LightGBMClassifier -model = LightGBMClassifier(passThroughArgs="force_row_wise=true min_sum_hessian_in_leaf=2e-3", - numIterations=100, - numLeaves=31).fit(train) -``` - -In Scala: -```scala -import com.microsoft.azure.synapse.ml.lightgbm.LightGBMClassifier -val classifier = new LightGBMClassifier() - .setPassThroughArgs("force_row_wise=true min_sum_hessian_in_leaf=2e-3") - .setLearningRate(0.2) - .setNumLeaves(50) -``` - -For formatting options and specific argument documentation, see -[LightGBM docs](https://lightgbm.readthedocs.io/en/v3.3.2/Parameters.html). Some -parameters SynapseML will set specifically for the Spark distributed environment and -shouldn't be changed. Some parameters are for CLI mode only, and won't work within -Spark. - -You can mix passThroughArgs and explicit args, as shown in the example. SynapseML will -merge them to create one argument string to send to LightGBM. If you set a parameter in -both places, the passThroughArgs will take precedence. - -### Architecture - -LightGBM on Spark uses the Simple Wrapper and Interface Generator (SWIG) -to add Java support for LightGBM. These Java Binding use the Java Native -Interface call into the [distributed C++ -API](https://github.com/Microsoft/LightGBM/blob/master/include/LightGBM/c_api.h). - -We initialize LightGBM by calling -[`LGBM_NetworkInit`](https://github.com/Microsoft/LightGBM/blob/master/include/LightGBM/c_api.h) -with the Spark executors within a MapPartitions call. We then pass each -workers partitions into LightGBM to create the in-memory distributed -dataset for LightGBM. We can then train LightGBM to produce a model -that can then be used for inference. - -The `LightGBMClassifier` and `LightGBMRegressor` use the SparkML API, -inherit from the same base classes, integrate with SparkML pipelines, -and can be tuned with [SparkML's cross -validators](https://spark.apache.org/docs/latest/ml-tuning.html). - -Models built can be saved as SparkML pipeline with native LightGBM model -using `saveNativeModel()`. Additionally, they're fully compatible with [PMML](https://en.wikipedia.org/wiki/Predictive_Model_Markup_Language) and -can be converted to PMML format through the -[JPMML-SparkML-LightGBM](https://github.com/alipay/jpmml-sparkml-lightgbm) plugin. - -### Barrier Execution Mode - -By default LightGBM uses regular spark paradigm for launching tasks and communicates with the driver to coordinate task execution. -The driver thread aggregates all task host:port information and then communicates the full list back to the workers in order for NetworkInit to be called. -This procedure requires the driver to know how many tasks there are, and a mismatch between the expected number of tasks and the actual number will cause the initialization to deadlock. -To avoid this issue, use the `UseBarrierExecutionMode` flag, to use Apache Spark's `barrier()` stage to ensure all tasks execute at the same time. -Barrier execution mode simplifies the logic to aggregate `host:port` information across all tasks. -To use it in scala, you can call setUseBarrierExecutionMode(true), for example: - - val lgbm = new LightGBMClassifier() - .setLabelCol(labelColumn) - .setObjective(binaryObjective) - .setUseBarrierExecutionMode(true) - ... 
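In PySpark, the same flag is exposed as a regular parameter. A minimal sketch, assuming a prepared training DataFrame `train` with hypothetical `features` and `label` columns:

```python
from synapse.ml.lightgbm import LightGBMClassifier

# Hypothetical column names; substitute your own feature/label columns.
lgbm = LightGBMClassifier(
    featuresCol="features",
    labelCol="label",
    objective="binary",
    useBarrierExecutionMode=True,  # run all training tasks together in a barrier() stage
)
model = lgbm.fit(train)
```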
- diff --git a/website/versioned_docs/version-0.10.0/features/onnx/ONNX - Inference on Spark.md b/website/versioned_docs/version-0.10.0/features/onnx/ONNX - Inference on Spark.md deleted file mode 100644 index 85f14749ec..0000000000 --- a/website/versioned_docs/version-0.10.0/features/onnx/ONNX - Inference on Spark.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -title: ONNX - Inference on Spark -hide_title: true -status: stable ---- -## ONNX Inference on Spark - -In this example, we will train a LightGBM model, convert the model to ONNX format and use the converted model to infer some testing data on Spark. - -Python dependencies: - -- onnxmltools==1.7.0 -- lightgbm==3.2.1 - - -Load training data - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - - -```python -df = ( - spark.read.format("csv") - .option("header", True) - .option("inferSchema", True) - .load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/company_bankruptcy_prediction_data.csv" - ) -) - -display(df) -``` - -Use LightGBM to train a model - - -```python -from pyspark.ml.feature import VectorAssembler -from synapse.ml.lightgbm import LightGBMClassifier - -feature_cols = df.columns[1:] -featurizer = VectorAssembler(inputCols=feature_cols, outputCol="features") - -train_data = featurizer.transform(df)["Bankrupt?", "features"] - -model = ( - LightGBMClassifier(featuresCol="features", labelCol="Bankrupt?") - .setEarlyStoppingRound(300) - .setLambdaL1(0.5) - .setNumIterations(1000) - .setNumThreads(-1) - .setMaxDeltaStep(0.5) - .setNumLeaves(31) - .setMaxDepth(-1) - .setBaggingFraction(0.7) - .setFeatureFraction(0.7) - .setBaggingFreq(2) - .setObjective("binary") - .setIsUnbalance(True) - .setMinSumHessianInLeaf(20) - .setMinGainToSplit(0.01) -) - -model = model.fit(train_data) -``` - -Export the trained model to a LightGBM booster, convert it to ONNX format. - - -```python -import lightgbm as lgb -from lightgbm import Booster, LGBMClassifier - - -def convertModel(lgbm_model: LGBMClassifier or Booster, input_size: int) -> bytes: - from onnxmltools.convert import convert_lightgbm - from onnxconverter_common.data_types import FloatTensorType - - initial_types = [("input", FloatTensorType([-1, input_size]))] - onnx_model = convert_lightgbm( - lgbm_model, initial_types=initial_types, target_opset=9 - ) - return onnx_model.SerializeToString() - - -booster_model_str = model.getLightGBMBooster().modelStr().get() -booster = lgb.Booster(model_str=booster_model_str) -model_payload_ml = convertModel(booster, len(feature_cols)) -``` - -Load the ONNX payload into an `ONNXModel`, and inspect the model inputs and outputs. - - -```python -from synapse.ml.onnx import ONNXModel - -onnx_ml = ONNXModel().setModelPayload(model_payload_ml) - -print("Model inputs:" + str(onnx_ml.getModelInputs())) -print("Model outputs:" + str(onnx_ml.getModelOutputs())) -``` - -Map the model input to the input dataframe's column name (FeedDict), and map the output dataframe's column names to the model outputs (FetchDict). - - -```python -onnx_ml = ( - onnx_ml.setDeviceType("CPU") - .setFeedDict({"input": "features"}) - .setFetchDict({"probability": "probabilities", "prediction": "label"}) - .setMiniBatchSize(5000) -) -``` - -Create some testing data and transform the data through the ONNX model. 
- - -```python -from pyspark.ml.feature import VectorAssembler -import pandas as pd -import numpy as np - -n = 1000 * 1000 -m = 95 -test = np.random.rand(n, m) -testPdf = pd.DataFrame(test) -cols = list(map(str, testPdf.columns)) -testDf = spark.createDataFrame(testPdf) -testDf = testDf.union(testDf).repartition(200) -testDf = ( - VectorAssembler() - .setInputCols(cols) - .setOutputCol("features") - .transform(testDf) - .drop(*cols) - .cache() -) - -display(onnx_ml.transform(testDf)) -``` diff --git a/website/versioned_docs/version-0.10.0/features/onnx/about.md b/website/versioned_docs/version-0.10.0/features/onnx/about.md deleted file mode 100644 index cdff8551e5..0000000000 --- a/website/versioned_docs/version-0.10.0/features/onnx/about.md +++ /dev/null @@ -1,49 +0,0 @@ ---- -title: ONNX model inferencing on Spark -hide_title: true -sidebar_label: About -description: Learn how to use the ONNX model transformer to run inference for an ONNX model on Spark. ---- - -# ONNX model inferencing on Spark - -## ONNX - -[ONNX](https://onnx.ai/) is an open format to represent both deep learning and traditional machine learning models. With ONNX, AI developers can more easily move models between state-of-the-art tools and choose the combination that is best for them. - -SynapseML now includes a Spark transformer to bring a trained ONNX model to Apache Spark, so you can run inference on your data with Spark's large-scale data processing power. - -## Usage - -1. Create a `com.microsoft.azure.synapse.ml.onnx.ONNXModel` object and use `setModelLocation` or `setModelPayload` to load the ONNX model. - - For example: - - ```scala - val onnx = new ONNXModel().setModelLocation("/path/to/model.onnx") - ``` - -2. Use ONNX visualization tool (for example, [Netron](https://netron.app/)) to inspect the ONNX model's input and output nodes. - - ![Screenshot that illustrates an ONNX model's input and output nodes](https://mmlspark.blob.core.windows.net/graphics/ONNXModelInputsOutputs.png) - -3. Set the parameters properly to the `ONNXModel` object. - - The `com.microsoft.azure.synapse.ml.onnx.ONNXModel` class provides a set of parameters to control the behavior of the inference. - - | Parameter | Description | Default Value | - |:------------------|:------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------| - | feedDict | Map the ONNX model's expected input node names to the input DataFrame's column names. Make sure the input DataFrame's column schema matches with the corresponding input's shape of the ONNX model. For example, an image classification model may have an input node of shape `[1, 3, 224, 224]` with type Float. It is assumed that the first dimension (1) is the batch size. Then the input DataFrame's corresponding column's type should be `ArrayType(ArrayType(ArrayType(FloatType)))`. | None | - | fetchDict | Map the output DataFrame's column names to the ONNX model's output node names. | None | - | miniBatcher | Specify the MiniBatcher to use. 
| `FixedMiniBatchTransformer` with batch size 10 | - | softMaxDict | A map between output DataFrame columns, where the value column will be computed from taking the softmax of the key column. If the 'rawPrediction' column contains logits outputs, then one can set softMaxDict to `Map("rawPrediction" -> "probability")` to obtain the probability outputs. | None | - | argMaxDict | A map between output DataFrame columns, where the value column will be computed from taking the argmax of the key column. This can be used to convert probability or logits output to the predicted label. | None | - | deviceType | Specify a device type the model inference runs on. Supported types are: CPU or CUDA. If not specified, auto detection will be used. | None | - | optimizationLevel | Specify the [optimization level](https://onnxruntime.ai/docs/resources/graph-optimizations.html#graph-optimization-levels) for the ONNX graph optimizations. Supported values are: `NO_OPT`, `BASIC_OPT`, `EXTENDED_OPT`, `ALL_OPT`. | `ALL_OPT` | - -4. Call `transform` method to run inference on the input DataFrame. - -## Example - -- [Interpretability - Image Explainers](../../responsible_ai/Interpretability%20-%20Image%20Explainers) -- [ONNX - Inference on Spark](../ONNX%20-%20Inference%20on%20Spark) diff --git a/website/versioned_docs/version-0.10.0/features/opencv/OpenCV - Pipeline Image Transformations.md b/website/versioned_docs/version-0.10.0/features/opencv/OpenCV - Pipeline Image Transformations.md deleted file mode 100644 index f50ca88928..0000000000 --- a/website/versioned_docs/version-0.10.0/features/opencv/OpenCV - Pipeline Image Transformations.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -title: OpenCV - Pipeline Image Transformations -hide_title: true -status: stable ---- -## OpenCV - Pipeline Image Transformations - -This example shows how to manipulate the collection of images. -First, the images are downloaded to the local directory. -Second, they are copied to your cluster's attached HDFS. - -The images are loaded from the directory (for fast prototyping, consider loading a fraction of -images). Inside the dataframe, each image is a single field in the image column. The image has -sub-fields (path, height, width, OpenCV type and OpenCV bytes). - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - -import synapse.ml -import numpy as np -from synapse.ml.opencv import toNDArray -from synapse.ml.io import * - -imageDir = "wasbs://publicwasb@mmlspark.blob.core.windows.net/sampleImages" -images = spark.read.image().load(imageDir).cache() -images.printSchema() -print(images.count()) -``` - -We can also alternatively stream the images with a similiar api. -Check the [Structured Streaming Programming Guide](https://spark.apache.org/docs/latest/structured-streaming-programming-guide.html) -for more details on streaming. - - -```python -import time - -imageStream = spark.readStream.image().load(imageDir) -query = ( - imageStream.select("image.height") - .writeStream.format("memory") - .queryName("heights") - .start() -) -time.sleep(3) -print("Streaming query activity: {}".format(query.isActive)) -``` - -Wait a few seconds and then try querying for the images below. -Note that when streaming a directory of images that already exists it will -consume all images in a single batch. 
If one were to move images into the -directory, the streaming engine would pick up on them and send them as -another batch. - - -```python -heights = spark.sql("select * from heights") -print("Streamed {} heights".format(heights.count())) -``` - -After we have streamed the images we can stop the query: - - -```python -from py4j.protocol import Py4JJavaError - -try: - query.stop() -except Py4JJavaError as e: - print(e) -``` - -When collected from the *DataFrame*, the image data are stored in a *Row*, which is Spark's way -to represent structures (in the current example, each dataframe row has a single Image, which -itself is a Row). It is possible to address image fields by name and use `toNDArray()` helper -function to convert the image into numpy array for further manipulations. - - -```python -from PIL import Image -import matplotlib.pyplot as plt - -data = images.take(3) # take first three rows of the dataframe -im = data[2][0] # the image is in the first column of a given row - -print("image type: {}, number of fields: {}".format(type(im), len(im))) -print("image path: {}".format(im.origin)) -print("height: {}, width: {}, OpenCV type: {}".format(im.height, im.width, im.mode)) - -arr = toNDArray(im) # convert to numpy array -print(images.count()) -plt.imshow(Image.fromarray(arr, "RGB")) # display the image inside notebook -``` - -Use `ImageTransformer` for the basic image manipulation: resizing, cropping, etc. -Internally, operations are pipelined and backed by OpenCV implementation. - - -```python -from synapse.ml.opencv import ImageTransformer - -tr = ( - ImageTransformer() # images are resized and then cropped - .setOutputCol("transformed") - .resize(size=(200, 200)) - .crop(0, 0, height=180, width=180) -) - -small = tr.transform(images).select("transformed") - -im = small.take(3)[2][0] # take third image -plt.imshow(Image.fromarray(toNDArray(im), "RGB")) # display the image inside notebook -``` - -For the advanced image manipulations, use Spark UDFs. -The SynapseML package provides conversion function between *Spark Row* and -*ndarray* image representations. - - -```python -from pyspark.sql.functions import udf -from synapse.ml.opencv import ImageSchema, toNDArray, toImage - - -def u(row): - array = toNDArray(row) # convert Image to numpy ndarray[height, width, 3] - array[:, :, 2] = 0 - return toImage(array) # numpy array back to Spark Row structure - - -noBlueUDF = udf(u, ImageSchema) - -noblue = small.withColumn("noblue", noBlueUDF(small["transformed"])).select("noblue") - -im = noblue.take(3)[2][0] # take second image -plt.imshow(Image.fromarray(toNDArray(im), "RGB")) # display the image inside notebook -``` - -Images could be unrolled into the dense 1D vectors suitable for CNTK evaluation. - - -```python -from synapse.ml.image import UnrollImage - -unroller = UnrollImage().setInputCol("noblue").setOutputCol("unrolled") - -unrolled = unroller.transform(noblue).select("unrolled") - -vector = unrolled.take(1)[0][0] -print(type(vector)) -len(vector.toArray()) -``` - - -```python - -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/AzureSearchIndex - Met Artworks.md b/website/versioned_docs/version-0.10.0/features/other/AzureSearchIndex - Met Artworks.md deleted file mode 100644 index 61ba4ec8d2..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/AzureSearchIndex - Met Artworks.md +++ /dev/null @@ -1,108 +0,0 @@ ---- -title: AzureSearchIndex - Met Artworks -hide_title: true -status: stable ---- -

# Creating a searchable Art Database with The MET's open-access collection

- -In this example, we show how you can enrich data using Cognitive Skills and write to an Azure Search Index using SynapseML. We use a subset of The MET's open-access collection and enrich it by passing it through 'Describe Image' and a custom 'Image Similarity' skill. The results are then written to a searchable index. - - -```python -import os, sys, time, json, requests -from pyspark.ml import Transformer, Estimator, Pipeline -from pyspark.ml.feature import SQLTransformer -from pyspark.sql.functions import lit, udf, col, split -``` - - -```python -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["VISION_API_KEY"] = getSecret("mmlspark-build-keys", "cognitive-api-key") - os.environ["AZURE_SEARCH_KEY"] = getSecret( - "mmlspark-build-keys", "azure-search-key" - ) -``` - - -```python -VISION_API_KEY = os.environ["VISION_API_KEY"] -AZURE_SEARCH_KEY = os.environ["AZURE_SEARCH_KEY"] -search_service = "mmlspark-azure-search" -search_index = "test" -``` - - -```python -data = ( - spark.read.format("csv") - .option("header", True) - .load("wasbs://publicwasb@mmlspark.blob.core.windows.net/metartworks_sample.csv") - .withColumn("searchAction", lit("upload")) - .withColumn("Neighbors", split(col("Neighbors"), ",").cast("array")) - .withColumn("Tags", split(col("Tags"), ",").cast("array")) - .limit(25) -) -``` - - - - -```python -from synapse.ml.cognitive import AnalyzeImage -from synapse.ml.stages import SelectColumns - -# define pipeline -describeImage = ( - AnalyzeImage() - .setSubscriptionKey(VISION_API_KEY) - .setLocation("eastus") - .setImageUrlCol("PrimaryImageUrl") - .setOutputCol("RawImageDescription") - .setErrorCol("Errors") - .setVisualFeatures( - ["Categories", "Description", "Faces", "ImageType", "Color", "Adult"] - ) - .setConcurrency(5) -) - -df2 = ( - describeImage.transform(data) - .select("*", "RawImageDescription.*") - .drop("Errors", "RawImageDescription") -) -``` - - - -Before writing the results to a Search Index, you must define a schema which must specify the name, type, and attributes of each field in your index. Refer [Create a basic index in Azure Search](https://docs.microsoft.com/en-us/azure/search/search-what-is-an-index) for more information. - - -```python -from synapse.ml.cognitive import * - -df2.writeToAzureSearch( - subscriptionKey=AZURE_SEARCH_KEY, - actionCol="searchAction", - serviceName=search_service, - indexName=search_index, - keyCol="ObjectID", -) -``` - -The Search Index can be queried using the [Azure Search REST API](https://docs.microsoft.com/rest/api/searchservice/) by sending GET or POST requests and specifying query parameters that give the criteria for selecting matching documents. 
For more information on querying refer [Query your Azure Search index using the REST API](https://docs.microsoft.com/en-us/rest/api/searchservice/Search-Documents) - - -```python -url = "https://{}.search.windows.net/indexes/{}/docs/search?api-version=2019-05-06".format( - search_service, search_index -) -requests.post( - url, json={"search": "Glass"}, headers={"api-key": AZURE_SEARCH_KEY} -).json() -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/ConditionalKNN - Exploring Art Across Cultures.md b/website/versioned_docs/version-0.10.0/features/other/ConditionalKNN - Exploring Art Across Cultures.md deleted file mode 100644 index 8a8a766168..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/ConditionalKNN - Exploring Art Across Cultures.md +++ /dev/null @@ -1,232 +0,0 @@ ---- -title: ConditionalKNN - Exploring Art Across Cultures -hide_title: true -status: stable ---- -# Exploring Art across Culture and Medium with Fast, Conditional, k-Nearest Neighbors - - - -This notebook serves as a guideline for match-finding via k-nearest-neighbors. In the code below, we will set up code that allows queries involving cultures and mediums of art amassed from the Metropolitan Museum of Art in NYC and the Rijksmuseum in Amsterdam. - -### Overview of the BallTree -The structure functioning behind the kNN model is a BallTree, which is a recursive binary tree where each node (or "ball") contains a partition of the points of data to be queried. Building a BallTree involves assigning data points to the "ball" whose center they are closest to (with respect to a certain specified feature), resulting in a structure that allows binary-tree-like traversal and lends itself to finding k-nearest neighbors at a BallTree leaf. - -#### Setup -Import necessary Python libraries and prepare dataset. - - -```python -from pyspark.sql.types import BooleanType -from pyspark.sql.types import * -from pyspark.ml.feature import Normalizer -from pyspark.sql.functions import lit, array, array_contains, udf, col, struct -from synapse.ml.nn import ConditionalKNN, ConditionalKNNModel -from PIL import Image -from io import BytesIO - -import requests -import numpy as np -import matplotlib.pyplot as plt - -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - -Our dataset comes from a table containing artwork information from both the Met and Rijks museums. 
The schema is as follows: - -- **id**: A unique identifier for a piece of art - - Sample Met id: *388395* - - Sample Rijks id: *SK-A-2344* -- **Title**: Art piece title, as written in the museum's database -- **Artist**: Art piece artist, as written in the museum's database -- **Thumbnail_Url**: Location of a JPEG thumbnail of the art piece -- **Image_Url** Location of an image of the art piece hosted on the Met/Rijks website -- **Culture**: Category of culture that the art piece falls under - - Sample culture categories: *latin american*, *egyptian*, etc -- **Classification**: Category of medium that the art piece falls under - - Sample medium categories: *woodwork*, *paintings*, etc -- **Museum_Page**: Link to the work of art on the Met/Rijks website -- **Norm_Features**: Embedding of the art piece image -- **Museum**: Specifies which museum the piece originated from - - -```python -# loads the dataset and the two trained CKNN models for querying by medium and culture -df = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/met_and_rijks.parquet" -) -display(df.drop("Norm_Features")) -``` - -#### Define categories to be queried on -We will be using two kNN models: one for culture, and one for medium. The categories for each grouping are defined below. - - -```python -# mediums = ['prints', 'drawings', 'ceramics', 'textiles', 'paintings', "musical instruments","glass", 'accessories', 'photographs', "metalwork", -# "sculptures", "weapons", "stone", "precious", "paper", "woodwork", "leatherwork", "uncategorized"] - -mediums = ["paintings", "glass", "ceramics"] - -# cultures = ['african (general)', 'american', 'ancient american', 'ancient asian', 'ancient european', 'ancient middle-eastern', 'asian (general)', -# 'austrian', 'belgian', 'british', 'chinese', 'czech', 'dutch', 'egyptian']#, 'european (general)', 'french', 'german', 'greek', -# 'iranian', 'italian', 'japanese', 'latin american', 'middle eastern', 'roman', 'russian', 'south asian', 'southeast asian', -# 'spanish', 'swiss', 'various'] - -cultures = ["japanese", "american", "african (general)"] - -# Uncomment the above for more robust and large scale searches! - -classes = cultures + mediums - -medium_set = set(mediums) -culture_set = set(cultures) -selected_ids = {"AK-RBK-17525-2", "AK-MAK-1204", "AK-RAK-2015-2-9"} - -small_df = df.where( - udf( - lambda medium, culture, id_val: (medium in medium_set) - or (culture in culture_set) - or (id_val in selected_ids), - BooleanType(), - )("Classification", "Culture", "id") -) - -small_df.count() -``` - -### Define and fit ConditionalKNN models -Below, we create ConditionalKNN models for both the medium and culture columns; each model takes in an output column, features column (feature vector), values column (cell values under the output column), and label column (the quality that the respective KNN is conditioned on). - - -```python -medium_cknn = ( - ConditionalKNN() - .setOutputCol("Matches") - .setFeaturesCol("Norm_Features") - .setValuesCol("Thumbnail_Url") - .setLabelCol("Classification") - .fit(small_df) -) -``` - - -```python -culture_cknn = ( - ConditionalKNN() - .setOutputCol("Matches") - .setFeaturesCol("Norm_Features") - .setValuesCol("Thumbnail_Url") - .setLabelCol("Culture") - .fit(small_df) -) -``` - -#### Define matching and visualizing methods - -After the intial dataset and category setup, we prepare methods that will query and visualize the conditional kNN's results. - -`addMatches()` will create a Dataframe with a handful of matches per category. 
- - -```python -def add_matches(classes, cknn, df): - results = df - for label in classes: - results = cknn.transform( - results.withColumn("conditioner", array(lit(label))) - ).withColumnRenamed("Matches", "Matches_{}".format(label)) - return results -``` - -`plot_urls()` calls `plot_img` to visualize top matches for each category into a grid. - - -```python -def plot_img(axis, url, title): - try: - response = requests.get(url) - img = Image.open(BytesIO(response.content)).convert("RGB") - axis.imshow(img, aspect="equal") - except: - pass - if title is not None: - axis.set_title(title, fontsize=4) - axis.axis("off") - - -def plot_urls(url_arr, titles, filename): - nx, ny = url_arr.shape - - plt.figure(figsize=(nx * 5, ny * 5), dpi=1600) - fig, axes = plt.subplots(ny, nx) - - # reshape required in the case of 1 image query - if len(axes.shape) == 1: - axes = axes.reshape(1, -1) - - for i in range(nx): - for j in range(ny): - if j == 0: - plot_img(axes[j, i], url_arr[i, j], titles[i]) - else: - plot_img(axes[j, i], url_arr[i, j], None) - - plt.savefig(filename, dpi=1600) # saves the results as a PNG - - display(plt.show()) -``` - -### Putting it all together -Below, we define `test_all()` to take in the data, CKNN models, the art id values to query on, and the file path to save the output visualization to. The medium and culture models were previously trained and loaded. - - -```python -# main method to test a particular dataset with two CKNN models and a set of art IDs, saving the result to filename.png - - -def test_all(data, cknn_medium, cknn_culture, test_ids, root): - is_nice_obj = udf(lambda obj: obj in test_ids, BooleanType()) - test_df = data.where(is_nice_obj("id")) - - results_df_medium = add_matches(mediums, cknn_medium, test_df) - results_df_culture = add_matches(cultures, cknn_culture, results_df_medium) - - results = results_df_culture.collect() - - original_urls = [row["Thumbnail_Url"] for row in results] - - culture_urls = [ - [row["Matches_{}".format(label)][0]["value"] for row in results] - for label in cultures - ] - culture_url_arr = np.array([original_urls] + culture_urls)[:, :] - plot_urls(culture_url_arr, ["Original"] + cultures, root + "matches_by_culture.png") - - medium_urls = [ - [row["Matches_{}".format(label)][0]["value"] for row in results] - for label in mediums - ] - medium_url_arr = np.array([original_urls] + medium_urls)[:, :] - plot_urls(medium_url_arr, ["Original"] + mediums, root + "matches_by_medium.png") - - return results_df_culture -``` - -### Demo -The following cell performs batched queries given desired image IDs and a filename to save the visualization. - - - - - -```python -# sample query -result_df = test_all(small_df, medium_cknn, culture_cknn, selected_ids, root=".") -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/CyberML - Anomalous Access Detection.md b/website/versioned_docs/version-0.10.0/features/other/CyberML - Anomalous Access Detection.md deleted file mode 100644 index 11285230ed..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/CyberML - Anomalous Access Detection.md +++ /dev/null @@ -1,346 +0,0 @@ ---- -title: CyberML - Anomalous Access Detection -hide_title: true -status: stable ---- -# CyberML - Anomalous Access Detection - -Here we demonstrate a novel CyberML model which can learn user access patterns and then automatically detect anomalous user access based on learned behavior. 
-The model internally uses Collaborative Filtering for Implicit Feedback as published here: http://yifanhu.net/PUB/cf.pdf -and is based on Apache Spark's implementation: https://spark.apache.org/docs/2.2.0/ml-collaborative-filtering.html. - -This notebook demonstrates a usage example of the Anomalous Resource Access model. -All the model requires is a dataset in which 'users' access 'resources'. -The model is based on Collaborative Filtering and uses Machine Learning to learn the access patterns of users and resources. -When a user accesses a resource that is outside of the user's learned profile, that access receives a high anomaly score. - -In this notebook we provide a usage example and a synthetic dataset in which there are 3 departments: -(1) Finance, (2) HR, and (3) Engineering. -In the training data, users access only a subset of resources from their own departments. -To evaluate the model we use two datasets. -The first contains access patterns unseen during training in which users access resources within their own departments (resources they didn't access during training, but still within their department). -The second contains users accessing resources from outside their departments. -We then use the model to assign anomaly scores, expecting the first to receive low anomaly scores and the second to receive high anomaly scores. -This is what this example demonstrates. - -Note: the data does NOT contain information about departments; this information is implicitly learned by the model by analyzing the access patterns. - -# Create an Azure Databricks cluster and install the following libs - -1. In Cluster Libraries, install from library source Maven: -Coordinates: com.microsoft.azure:synapseml_2.12:0.10.0 -Repository: https://mmlspark.azureedge.net/maven - -2. 
In Cluster Libraries install from PyPI the library called plotly - -# Setup & Initialization - - -``` -# this is used to produce the synthetic dataset for this test -from synapse.ml.cyber.dataset import DataFactory - -# the access anomalies model generator -from synapse.ml.cyber.anomaly.collaborative_filtering import AccessAnomaly - -from pyspark.sql import functions as f, types as t -``` - - -``` -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - -# Loadup datasets - - -``` -spark.sparkContext.setCheckpointDir("dbfs:/checkpoint_path/") - -factory = DataFactory( - num_hr_users=25, - num_hr_resources=50, - num_fin_users=35, - num_fin_resources=75, - num_eng_users=15, - num_eng_resources=25, - single_component=True, -) - -training_pdf = factory.create_clustered_training_data(ratio=0.4) - -# a tenant id is used when independant datasets originate from different tenants, in this example we set all tenants-ids to the same value -training_df = spark.createDataFrame(training_pdf).withColumn("tenant_id", f.lit(0)) -ingroup_df = spark.createDataFrame( - factory.create_clustered_intra_test_data(training_pdf) -).withColumn("tenant_id", f.lit(0)) -outgroup_df = spark.createDataFrame( - factory.create_clustered_inter_test_data() -).withColumn("tenant_id", f.lit(0)) -``` - - -``` -training_df.show() -``` - - -``` -print(training_df.count()) -print(ingroup_df.count()) -print(outgroup_df.count()) -``` - -# Model setup & training - - -``` -access_anomaly = AccessAnomaly( - tenantCol="tenant_id", - userCol="user", - resCol="res", - likelihoodCol="likelihood", - maxIter=1000, -) -``` - - -``` -model = access_anomaly.fit(training_df) -``` - -# Apply model & show result stats - - -``` -ingroup_scored_df = model.transform(ingroup_df) -``` - - -``` -ingroup_scored_df.agg( - f.min("anomaly_score").alias("min_anomaly_score"), - f.max("anomaly_score").alias("max_anomaly_score"), - f.mean("anomaly_score").alias("mean_anomaly_score"), - f.stddev("anomaly_score").alias("stddev_anomaly_score"), -).show() -``` - - -``` -outgroup_scored_df = model.transform(outgroup_df) -``` - - -``` -outgroup_scored_df.agg( - f.min("anomaly_score").alias("min_anomaly_score"), - f.max("anomaly_score").alias("max_anomaly_score"), - f.mean("anomaly_score").alias("mean_anomaly_score"), - f.stddev("anomaly_score").alias("stddev_anomaly_score"), -).show() -``` - -# Examine results - - -``` -# -# Select a subset of results to send to Log Analytics -# - -full_res_df = outgroup_scored_df.orderBy(f.desc("anomaly_score")).cache() - -from pyspark.sql.window import Window - -w = Window.partitionBy("tenant_id", "user", "res").orderBy(f.desc("anomaly_score")) - -# select values above threshold -results_above_threshold = full_res_df.filter(full_res_df.anomaly_score > 1.0) - -# get distinct resource/user and corresponding timestamp and highest score -results_to_la = ( - results_above_threshold.withColumn("index", f.row_number().over(w)) - .orderBy(f.desc("anomaly_score")) - .select("tenant_id", f.col("user"), f.col("res"), "anomaly_score") - .where("index == 1") - .limit(100) - .cache() -) - -# add a fake timestamp to the results -results_to_la = results_to_la.withColumn("timestamp", f.current_timestamp()) - -display(results_to_la) -``` - -# Display all resource accesses by users with highest anomalous score - - -``` -from plotly import __version__ -from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot, 
offline - -import numpy as np -import pandas as pd - -print(__version__) # requires version >= 1.9.0 - -# run plotly in offline mode -offline.init_notebook_mode() -``` - - -``` -# Find all server accesses of users with high predicted scores -# For display, limit to top 25 results -results_to_display = results_to_la.orderBy(f.desc("anomaly_score")).limit(25).cache() -interesting_records = full_res_df.join(results_to_display, ["user"], "left_semi") -non_anomalous_records = interesting_records.join( - results_to_display, ["user", "res"], "left_anti" -) - -top_non_anomalous_records = ( - non_anomalous_records.groupBy("tenant_id", "user", "res") - .agg( - f.count("*").alias("count"), - ) - .select(f.col("tenant_id"), f.col("user"), f.col("res"), "count") -) - -# pick only a subset of non-anomalous record for UI -w = Window.partitionBy( - "tenant_id", - "user", -).orderBy(f.desc("count")) - -# pick top non-anomalous set -top_non_anomalous_accesses = ( - top_non_anomalous_records.withColumn("index", f.row_number().over(w)) - .orderBy(f.desc("count")) - .select("tenant_id", f.col("user"), f.col("res"), f.col("count")) - .where("index in (1,2,3,4,5)") - .limit(25) -) - -# add back anomalous record -fileShare_accesses = ( - top_non_anomalous_accesses.select("user", "res", "count") - .union(results_to_display.select("user", "res", f.lit(1).alias("count"))) - .cache() -) -``` - - -``` -# get unique users and file shares -high_scores_df = fileShare_accesses.toPandas() -unique_arr = np.append(high_scores_df.user.unique(), high_scores_df.res.unique()) - -unique_df = pd.DataFrame(data=unique_arr, columns=["name"]) -unique_df["index"] = range(0, len(unique_df.index)) - -# create index for source & target and color for the normal accesses -normal_line_color = "rgba(211, 211, 211, 0.8)" -anomolous_color = "red" -x = ( - pd.merge(high_scores_df, unique_df, how="left", left_on="user", right_on="name") - .drop(["name"], axis=1) - .rename(columns={"index": "userIndex"}) -) -all_access_index_df = ( - pd.merge(x, unique_df, how="left", left_on="res", right_on="name") - .drop(["name"], axis=1) - .rename(columns={"index": "resIndex"}) -) -all_access_index_df["color"] = normal_line_color - -# results_to_display index, color and -y = results_to_display.toPandas().drop( - ["tenant_id", "timestamp", "anomaly_score"], axis=1 -) -y = ( - pd.merge(y, unique_df, how="left", left_on="user", right_on="name") - .drop(["name"], axis=1) - .rename(columns={"index": "userIndex"}) -) -high_scores_index_df = ( - pd.merge(y, unique_df, how="left", left_on="res", right_on="name") - .drop(["name"], axis=1) - .rename(columns={"index": "resIndex"}) -) -high_scores_index_df["count"] = 1 -high_scores_index_df["color"] = anomolous_color - -# substract 1 for the red entries in all_access df -hsi_df = high_scores_index_df[["user", "res", "count"]].rename( - columns={"count": "hsiCount"} -) -all_access_updated_count_df = pd.merge( - all_access_index_df, - hsi_df, - how="left", - left_on=["user", "res"], - right_on=["user", "res"], -) -all_access_updated_count_df["count"] = np.where( - all_access_updated_count_df["hsiCount"] == 1, - all_access_updated_count_df["count"] - 1, - all_access_updated_count_df["count"], -) -all_access_updated_count_df = all_access_updated_count_df.loc[ - all_access_updated_count_df["count"] > 0 -] -all_access_updated_count_df = all_access_updated_count_df[ - ["user", "res", "count", "userIndex", "resIndex", "color"] -] - -# combine the two tables -frames = [all_access_updated_count_df, high_scores_index_df] 
-display_df = pd.concat(frames, sort=True) -# display_df.head() -``` - - -``` -data_trace = dict( - type="sankey", - domain=dict(x=[0, 1], y=[0, 1]), - orientation="h", - valueformat=".0f", - node=dict( - pad=10, - thickness=30, - line=dict(color="black", width=0), - label=unique_df["name"].dropna(axis=0, how="any"), - ), - link=dict( - source=display_df["userIndex"].dropna(axis=0, how="any"), - target=display_df["resIndex"].dropna(axis=0, how="any"), - value=display_df["count"].dropna(axis=0, how="any"), - color=display_df["color"].dropna(axis=0, how="any"), - ), -) - -layout = dict( - title="All resources accessed by users with highest anomalous scores", - height=772, - font=dict(size=10), -) - -fig = dict(data=[data_trace], layout=layout) - -p = plot(fig, output_type="div") - -displayHTML(p) -``` - - -``` - -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/DeepLearning - BiLSTM Medical Entity Extraction.md b/website/versioned_docs/version-0.10.0/features/other/DeepLearning - BiLSTM Medical Entity Extraction.md deleted file mode 100644 index 8b608d4d42..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/DeepLearning - BiLSTM Medical Entity Extraction.md +++ /dev/null @@ -1,261 +0,0 @@ ---- -title: DeepLearning - BiLSTM Medical Entity Extraction -hide_title: true -status: stable ---- -## DeepLearning - BiLSTM Medical Entity Extraction - -In this tutorial we use a Bidirectional LSTM entity extractor from the synapseml -model downloader to extract entities from PubMed medical abstracts - -Our goal is to identify useful entities in a block of free-form text. This is a -nontrivial task because entities might be referenced in the text using variety of -synonymns, abbreviations, or formats. Our target output for this model is a set -of tags that specify what kind of entity is referenced. The model we use was -trained on a large dataset of publically tagged pubmed abstracts. An example -annotated sequence is given below, "O" represents no tag: - -|I-Chemical | O |I-Chemical | O | O |I-Chemical | O |I-Chemical | O | O | O | O |I-Disease |I-Disease| O | O | -|:---: |:---:|:---: |:---:|:---:|:---: |:---:|:---: |:---:|:---: |:---:|:---:|:---: |:---: |:---:|:---: | -|Baricitinib| , |Methotrexate| , | or |Baricitinib|Plus |Methotrexate| in |Patients|with |Early|Rheumatoid|Arthritis| Who |Had...| - - - - -```python -from synapse.ml.cntk import CNTKModel -from synapse.ml.downloader import ModelDownloader -from pyspark.sql.functions import udf, col -from pyspark.sql.types import IntegerType, ArrayType, FloatType, StringType -from pyspark.sql import Row - -from os.path import abspath, join -import numpy as np -from nltk.tokenize import sent_tokenize, word_tokenize -import os, tarfile, pickle -import urllib.request -import nltk - -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - -Get the model and extract the data. 
- - -```python -modelName = "BiLSTM" -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - modelDir = "abfss://synapse@mmlsparkeuap.dfs.core.windows.net/models/" - dataDir = "./nltkdata" -else: - modelDir = "dbfs:/models/" - dataDir = "/dbfs/nltkdata" - -d = ModelDownloader(spark, modelDir) -modelSchema = d.downloadByName(modelName) -nltk.download("punkt", dataDir) -nltk.data.path.append(dataDir) -``` - - -```python -modelName = "BiLSTM" -modelDir = abspath("models") -if not os.path.exists(modelDir): - os.makedirs(modelDir) -d = ModelDownloader(spark, "file://" + modelDir) -modelSchema = d.downloadByName(modelName) -nltk.download("punkt") -``` - -Download the embeddings - -We use the nltk punkt sentence and word tokenizers and a set of embeddings trained on PubMed Articles - - -```python -wordEmbFileName = "WordEmbeddings_PubMed.pkl" -pickleFile = join(abspath("models"), wordEmbFileName) -if not os.path.isfile(pickleFile): - urllib.request.urlretrieve( - "https://mmlspark.blob.core.windows.net/datasets/" + wordEmbFileName, pickleFile - ) -``` - -Load the embeddings and create functions for encoding sentences - - -```python -pickleContent = pickle.load(open(pickleFile, "rb"), encoding="latin-1") -wordToIndex = pickleContent["word_to_index"] -wordvectors = pickleContent["wordvectors"] -classToEntity = pickleContent["class_to_entity"] - -nClasses = len(classToEntity) -nFeatures = wordvectors.shape[1] -maxSentenceLen = 613 -``` - - -```python -content = "Baricitinib, Methotrexate, or Baricitinib Plus Methotrexate in Patients with Early Rheumatoid\ - Arthritis Who Had Received Limited or No Treatment with Disease-Modifying-Anti-Rheumatic-Drugs (DMARDs):\ - Phase 3 Trial Results. Keywords: Janus kinase (JAK), methotrexate (MTX) and rheumatoid arthritis (RA) and\ - Clinical research. In 2 completed phase 3 studies, baricitinib (bari) improved disease activity with a\ - satisfactory safety profile in patients (pts) with moderately-to-severely active RA who were inadequate\ - responders to either conventional synthetic1 or biologic2DMARDs. This abstract reports results from a\ - phase 3 study of bari administered as monotherapy or in combination with methotrexate (MTX) to pts with\ - early active RA who had limited or no prior treatment with DMARDs. MTX monotherapy was the active comparator." 
-``` - - -```python -sentences = sent_tokenize(content) -df = spark.createDataFrame(enumerate(sentences), ["index", "sentence"]) -``` - - -```python -# Add the tokenizers to all worker nodes -def prepNLTK(partition): - nltk.data.path.append("/dbfs/nltkdata") - return partition - - -df = df.rdd.mapPartitions(prepNLTK).toDF() -``` - - -```python -def safe_tokenize(sent): - try: - return word_tokenize(sent) - except LookupError: - prepNLTK(None) - return word_tokenize(sent) - - -tokenizeUDF = udf(safe_tokenize, ArrayType(StringType())) -df = df.withColumn("tokens", tokenizeUDF("sentence")) - -countUDF = udf(len, IntegerType()) -df = df.withColumn("count", countUDF("tokens")) - - -def wordToEmb(word): - return wordvectors[wordToIndex.get(word.lower(), wordToIndex["UNK"])] - - -def featurize(tokens): - X = np.zeros((maxSentenceLen, nFeatures)) - X[-len(tokens) :, :] = np.array([wordToEmb(word) for word in tokens]) - return [float(x) for x in X.reshape(maxSentenceLen, nFeatures).flatten()] - - -def safe_show(df, retries): - try: - df.show() - except Exception as e: - if retries >= 1: - safe_show(df, retries - 1) - else: - raise e - - -featurizeUDF = udf(featurize, ArrayType(FloatType())) - -df = df.withColumn("features", featurizeUDF("tokens")).cache() -safe_show(df, 5) # Can be flaky on build server -``` - -Run the CNTKModel - - -```python -model = ( - CNTKModel() - .setModelLocation(modelSchema.uri) - .setInputCol("features") - .setOutputCol("probs") - .setOutputNodeIndex(0) - .setMiniBatchSize(1) -) - -df = model.transform(df).cache() -df.show() -``` - - -```python -def probsToEntities(probs, wordCount): - reshaped_probs = np.array(probs).reshape(maxSentenceLen, nClasses) - reshaped_probs = reshaped_probs[-wordCount:, :] - return [classToEntity[np.argmax(probs)] for probs in reshaped_probs] - - -toEntityUDF = udf(probsToEntities, ArrayType(StringType())) -df = df.withColumn("entities", toEntityUDF("probs", "count")) -df.show() -``` - -Show the annotated text - - -```python -# Color Code the Text based on the entity type -colors = { - "B-Disease": "blue", - "I-Disease": "blue", - "B-Drug": "lime", - "I-Drug": "lime", - "B-Chemical": "lime", - "I-Chemical": "lime", - "O": "black", - "NONE": "black", -} - - -def prettyPrint(words, annotations): - formattedWords = [] - for word, annotation in zip(words, annotations): - formattedWord = "{}".format( - colors[annotation], word - ) - if annotation in {"O", "NONE"}: - formattedWords.append(formattedWord) - else: - formattedWords.append("{}".format(formattedWord)) - return " ".join(formattedWords) - - -prettyPrintUDF = udf(prettyPrint, StringType()) -df = df.withColumn("formattedSentence", prettyPrintUDF("tokens", "entities")).select( - "formattedSentence" -) - -sentences = [row["formattedSentence"] for row in df.collect()] -``` - - -```python -from IPython.core.display import display, HTML - -for sentence in sentences: - display(HTML(sentence)) -``` - -Example text used in this demo has been taken from: - -Fleischmann R, Takeuchi T, Schlichting DE, Macias WL, Rooney T, Gurbuz S, Stoykov I, -Beattie SD, Kuo WL, Schiff M. Baricitinib, Methotrexate, or Baricitinib Plus Methotrexate -in Patients with Early Rheumatoid Arthritis Who Had Received Limited or No Treatment with -Disease-Modifying Anti-Rheumatic Drugs (DMARDs): Phase 3 Trial Results [abstract]. -Arthritis Rheumatol. 2015; 67 (suppl 10). 
-http://acrabstracts.org/abstract/baricitinib-methotrexate-or-baricitinib-plus-methotrexate-in-patients-with-early-rheumatoid-arthritis-who-had-received-limited-or-no-treatment-with-disease-modifying-anti-rheumatic-drugs-dmards-p/. -Accessed August 18, 2017. diff --git a/website/versioned_docs/version-0.10.0/features/other/DeepLearning - CIFAR10 Convolutional Network.md b/website/versioned_docs/version-0.10.0/features/other/DeepLearning - CIFAR10 Convolutional Network.md deleted file mode 100644 index bf2ee534ac..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/DeepLearning - CIFAR10 Convolutional Network.md +++ /dev/null @@ -1,124 +0,0 @@ ---- -title: DeepLearning - CIFAR10 Convolutional Network -hide_title: true -status: stable ---- -## DeepLearning - CIFAR10 Convolutional Network - - -```python -from synapse.ml.cntk import CNTKModel -from synapse.ml.downloader import ModelDownloader -from pyspark.sql.functions import udf -from pyspark.sql.types import IntegerType -from os.path import abspath -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - -Set some paths. - - -```python -cdnURL = "https://mmlspark.azureedge.net/datasets" - -# Please note that this is a copy of the CIFAR10 dataset originally found here: -# http://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz -imagesWithLabels = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/CIFAR10_test.parquet" -) -``` - - -```python -modelName = "ConvNet" -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - modelDir = "abfss://synapse@mmlsparkeuap.dfs.core.windows.net/models/" -else: - modelDir = "dbfs:/models/" -``` - -Get the model - - -```python -d = ModelDownloader(spark, modelDir) -model = d.downloadByName(modelName) -``` - -Evaluate CNTK model. - - -```python -import time - -start = time.time() - -# Use CNTK model to get log probabilities -cntkModel = ( - CNTKModel() - .setInputCol("images") - .setOutputCol("output") - .setModelLocation(model.uri) - .setOutputNode("z") -) -scoredImages = cntkModel.transform(imagesWithLabels) - -# Transform the log probabilities to predictions -def argmax(x): - return max(enumerate(x), key=lambda p: p[1])[0] - - -argmaxUDF = udf(argmax, IntegerType()) -imagePredictions = scoredImages.withColumn("predictions", argmaxUDF("output")).select( - "predictions", "labels" -) - -numRows = imagePredictions.count() - -end = time.time() -print("classifying {} images took {} seconds".format(numRows, end - start)) -``` - -Plot confusion matrix. 
- - -```python -imagePredictions = imagePredictions.toPandas() -y, y_hat = imagePredictions["labels"], imagePredictions["predictions"] -``` - - -```python -import matplotlib.pyplot as plt -import numpy as np -from sklearn.metrics import confusion_matrix - -cm = confusion_matrix(y, y_hat) - -labels = [ - "airplane", - "automobile", - "bird", - "cat", - "deer", - "dog", - "frog", - "horse", - "ship", - "truck", -] -plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues) -plt.colorbar() -tick_marks = np.arange(len(labels)) -plt.xticks(tick_marks, labels, rotation=90) -plt.yticks(tick_marks, labels) -plt.xlabel("Predicted label") -plt.ylabel("True Label") -display(plt.show()) -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/DeepLearning - Flower Image Classification.md b/website/versioned_docs/version-0.10.0/features/other/DeepLearning - Flower Image Classification.md deleted file mode 100644 index a0b3b615ef..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/DeepLearning - Flower Image Classification.md +++ /dev/null @@ -1,162 +0,0 @@ ---- -title: DeepLearning - Flower Image Classification -hide_title: true -status: stable ---- -## Deep Learning - Flower Image Classification - - -```python -from pyspark.ml import Transformer, Estimator, Pipeline -from pyspark.ml.classification import LogisticRegression -from synapse.ml.downloader import ModelDownloader -import os, sys, time -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - - -```python -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - modelDir = "abfss://synapse@mmlsparkeuap.dfs.core.windows.net/models/" -else: - modelDir = "dbfs:/models/" - -model = ModelDownloader(spark, modelDir).downloadByName("ResNet50") -``` - - -```python -# Load the images -# use flowers_and_labels.parquet on larger cluster in order to get better results -imagesWithLabels = ( - spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/flowers_and_labels2.parquet" - ) - .withColumnRenamed("bytes", "image") - .sample(0.1) -) - -imagesWithLabels.printSchema() -``` - -![Smiley face](https://i.imgur.com/p2KgdYL.jpg) - - -```python -from synapse.ml.opencv import ImageTransformer -from synapse.ml.image import UnrollImage -from synapse.ml.cntk import ImageFeaturizer -from synapse.ml.stages import * - -# Make some featurizers -it = ImageTransformer().setOutputCol("scaled").resize(size=(60, 60)) - -ur = UnrollImage().setInputCol("scaled").setOutputCol("features") - -dc1 = DropColumns().setCols(["scaled", "image"]) - -lr1 = ( - LogisticRegression().setMaxIter(8).setFeaturesCol("features").setLabelCol("labels") -) - -dc2 = DropColumns().setCols(["features"]) - -basicModel = Pipeline(stages=[it, ur, dc1, lr1, dc2]) -``` - - -```python -resnet = ( - ImageFeaturizer() - .setInputCol("image") - .setOutputCol("features") - .setModelLocation(model.uri) - .setLayerNames(model.layerNames) - .setCutOutputLayers(1) -) - -dc3 = DropColumns().setCols(["image"]) - -lr2 = ( - LogisticRegression().setMaxIter(8).setFeaturesCol("features").setLabelCol("labels") -) - -dc4 = DropColumns().setCols(["features"]) - -deepModel = Pipeline(stages=[resnet, dc3, lr2, dc4]) -``` - -![Resnet 18](https://i.imgur.com/Mb4Dyou.png) - -### How does it work? 
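-
-A rough intuition, as a sketch (it reuses the `it`, `ur`, `resnet`, and `imagesWithLabels` objects defined above, so treat it as illustrative rather than part of the pipeline): the basic model feeds raw, unrolled 60x60 pixels to logistic regression, while the deep model feeds activations taken from a late ResNet50 layer, which tend to be far more linearly separable. Comparing the two feature vectors makes the difference concrete:
-
-```python
-# Featurize a single image both ways and compare the resulting vector lengths.
-sample = imagesWithLabels.limit(1)
-raw_features = ur.transform(it.transform(sample)).first()["features"]
-deep_features = resnet.transform(sample).first()["features"]
-print("unrolled pixel features:", len(raw_features))
-print("ResNet50 features:      ", len(deep_features))
-```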
- -![Convolutional network weights](http://i.stack.imgur.com/Hl2H6.png) - -### Run the experiment - - -```python -def timedExperiment(model, train, test): - start = time.time() - result = model.fit(train).transform(test).toPandas() - print("Experiment took {}s".format(time.time() - start)) - return result -``` - - -```python -train, test = imagesWithLabels.randomSplit([0.8, 0.2]) -train.count(), test.count() -``` - - -```python -basicResults = timedExperiment(basicModel, train, test) -``` - - -```python -deepResults = timedExperiment(deepModel, train, test) -``` - -### Plot confusion matrix. - - -```python -import matplotlib.pyplot as plt -from sklearn.metrics import confusion_matrix -import numpy as np - - -def evaluate(results, name): - y, y_hat = results["labels"], results["prediction"] - y = [int(l) for l in y] - - accuracy = np.mean([1.0 if pred == true else 0.0 for (pred, true) in zip(y_hat, y)]) - cm = confusion_matrix(y, y_hat) - cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis] - - plt.text( - 40, 10, "$Accuracy$ $=$ ${}\%$".format(round(accuracy * 100, 1)), fontsize=14 - ) - plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues) - plt.colorbar() - plt.xlabel("$Predicted$ $label$", fontsize=18) - plt.ylabel("$True$ $Label$", fontsize=18) - plt.title("$Normalized$ $CM$ $for$ ${}$".format(name)) - - -plt.figure(figsize=(12, 5)) -plt.subplot(1, 2, 1) -evaluate(deepResults, "CNTKModel + LR") -plt.subplot(1, 2, 2) -evaluate(basicResults, "LR") -# Note that on the larger dataset the accuracy will bump up from 44% to >90% -display(plt.show()) -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/DeepLearning - Transfer Learning.md b/website/versioned_docs/version-0.10.0/features/other/DeepLearning - Transfer Learning.md deleted file mode 100644 index c5530fb66a..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/DeepLearning - Transfer Learning.md +++ /dev/null @@ -1,92 +0,0 @@ ---- -title: DeepLearning - Transfer Learning -hide_title: true -status: stable ---- -## DeepLearning - Transfer Learning - -Classify automobile vs airplane using DNN featurization and transfer learning -against a subset of images from CIFAR-10 dataset. - -Load DNN Model and pick one of the inner layers as feature output - - -```python -from synapse.ml.cntk import CNTKModel -from synapse.ml.downloader import ModelDownloader -import numpy as np, os, urllib, tarfile, pickle, array -from os.path import abspath -from pyspark.sql.functions import col, udf -from pyspark.sql.types import * -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - - -modelName = "ConvNet" -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - modelDir = "abfss://synapse@mmlsparkeuap.dfs.core.windows.net/models/" -else: - modelDir = "dbfs:/models/" - -d = ModelDownloader(spark, modelDir) -model = d.downloadByName(modelName) -print(model.layerNames) -cntkModel = ( - CNTKModel() - .setInputCol("images") - .setOutputCol("features") - .setModelLocation(model.uri) - .setOutputNode("l8") -) -``` - -Format raw CIFAR data into correct shape. 
- - -```python -imagesWithLabels = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/CIFAR10_test.parquet" -) -``` - -Select airplanes (label=0) and automobiles (label=1) - - -```python -imagesWithLabels = imagesWithLabels.filter("labels<2") -imagesWithLabels.cache() -``` - -Featurize images - - -```python -featurizedImages = cntkModel.transform(imagesWithLabels).select(["features", "labels"]) -``` - -Use featurized images to train a classifier - - -```python -from synapse.ml.train import TrainClassifier -from pyspark.ml.classification import RandomForestClassifier - -train, test = featurizedImages.randomSplit([0.75, 0.25]) - -model = TrainClassifier(model=RandomForestClassifier(), labelCol="labels").fit(train) -``` - -Evaluate the accuracy of the model - - -```python -from synapse.ml.train import ComputeModelStatistics - -predictions = model.transform(test) -metrics = ComputeModelStatistics(evaluationMetric="accuracy").transform(predictions) -metrics.show() -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/HyperParameterTuning - Fighting Breast Cancer.md b/website/versioned_docs/version-0.10.0/features/other/HyperParameterTuning - Fighting Breast Cancer.md deleted file mode 100644 index c71a6b5937..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/HyperParameterTuning - Fighting Breast Cancer.md +++ /dev/null @@ -1,107 +0,0 @@ ---- -title: HyperParameterTuning - Fighting Breast Cancer -hide_title: true -status: stable ---- -## HyperParameterTuning - Fighting Breast Cancer - -We can do distributed randomized grid search hyperparameter tuning with SynapseML. - -First, we import the packages - - -```python -import pandas as pd -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - -Now let's read the data and split it to tuning and test sets: - - -```python -data = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/BreastCancer.parquet" -).cache() -tune, test = data.randomSplit([0.80, 0.20]) -tune.limit(10).toPandas() -``` - -Next, define the models that wil be tuned: - - -```python -from synapse.ml.automl import TuneHyperparameters -from synapse.ml.train import TrainClassifier -from pyspark.ml.classification import ( - LogisticRegression, - RandomForestClassifier, - GBTClassifier, -) - -logReg = LogisticRegression() -randForest = RandomForestClassifier() -gbt = GBTClassifier() -smlmodels = [logReg, randForest, gbt] -mmlmodels = [TrainClassifier(model=model, labelCol="Label") for model in smlmodels] -``` - -We can specify the hyperparameters using the HyperparamBuilder. -We can add either DiscreteHyperParam or RangeHyperParam hyperparameters. -TuneHyperparameters will randomly choose values from a uniform distribution. - - -```python -from synapse.ml.automl import * - -paramBuilder = ( - HyperparamBuilder() - .addHyperparam(logReg, logReg.regParam, RangeHyperParam(0.1, 0.3)) - .addHyperparam(randForest, randForest.numTrees, DiscreteHyperParam([5, 10])) - .addHyperparam(randForest, randForest.maxDepth, DiscreteHyperParam([3, 5])) - .addHyperparam(gbt, gbt.maxBins, RangeHyperParam(8, 16)) - .addHyperparam(gbt, gbt.maxDepth, DiscreteHyperParam([3, 5])) -) -searchSpace = paramBuilder.build() -# The search space is a list of params to tuples of estimator and hyperparam -print(searchSpace) -randomSpace = RandomSpace(searchSpace) -``` - -Next, run TuneHyperparameters to get the best model. 
- - -```python -bestModel = TuneHyperparameters( - evaluationMetric="accuracy", - models=mmlmodels, - numFolds=2, - numRuns=len(mmlmodels) * 2, - parallelism=1, - paramSpace=randomSpace.space(), - seed=0, -).fit(tune) -``` - -We can view the best model's parameters and retrieve the underlying best model pipeline - - -```python -print(bestModel.getBestModelInfo()) -print(bestModel.getBestModel()) -``` - -We can score against the test set and view metrics. - - -```python -from synapse.ml.train import ComputeModelStatistics - -prediction = bestModel.transform(test) -metrics = ComputeModelStatistics().transform(prediction) -metrics.limit(10).toPandas() -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/TextAnalytics - Amazon Book Reviews with Word2Vec.md b/website/versioned_docs/version-0.10.0/features/other/TextAnalytics - Amazon Book Reviews with Word2Vec.md deleted file mode 100644 index 2d9a5853df..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/TextAnalytics - Amazon Book Reviews with Word2Vec.md +++ /dev/null @@ -1,145 +0,0 @@ ---- -title: TextAnalytics - Amazon Book Reviews with Word2Vec -hide_title: true -status: stable ---- -## TextAnalytics - Amazon Book Reviews with Word2Vec - -Yet again, now using the `Word2Vec` Estimator from Spark. We can use the tree-based -learners from spark in this scenario due to the lower dimensionality representation of -features. - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -import pandas as pd -``` - - -```python -data = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/BookReviewsFromAmazon10K.parquet" -) -data.limit(10).toPandas() -``` - -Modify the label column to predict a rating greater than 3. - - -```python -processedData = data.withColumn("label", data["rating"] > 3).select(["text", "label"]) -processedData.limit(5).toPandas() -``` - -Split the dataset into train, test and validation sets. - - -```python -train, test, validation = processedData.randomSplit([0.60, 0.20, 0.20]) -``` - -Use `Tokenizer` and `Word2Vec` to generate the features. - - -```python -from pyspark.ml import Pipeline -from pyspark.ml.feature import Tokenizer, Word2Vec - -tokenizer = Tokenizer(inputCol="text", outputCol="words") -partitions = train.rdd.getNumPartitions() -word2vec = Word2Vec( - maxIter=4, seed=42, inputCol="words", outputCol="features", numPartitions=partitions -) -textFeaturizer = Pipeline(stages=[tokenizer, word2vec]).fit(train) -``` - -Transform each of the train, test and validation datasets. - - -```python -ptrain = textFeaturizer.transform(train).select(["label", "features"]) -ptest = textFeaturizer.transform(test).select(["label", "features"]) -pvalidation = textFeaturizer.transform(validation).select(["label", "features"]) -ptrain.limit(5).toPandas() -``` - -Generate several models with different parameters from the training data. 
- - -```python -from pyspark.ml.classification import ( - LogisticRegression, - RandomForestClassifier, - GBTClassifier, -) -from synapse.ml.train import TrainClassifier -import itertools - -lrHyperParams = [0.05, 0.2] -logisticRegressions = [ - LogisticRegression(regParam=hyperParam) for hyperParam in lrHyperParams -] -lrmodels = [ - TrainClassifier(model=lrm, labelCol="label").fit(ptrain) - for lrm in logisticRegressions -] - -rfHyperParams = itertools.product([5, 10], [2, 3]) -randomForests = [ - RandomForestClassifier(numTrees=hyperParam[0], maxDepth=hyperParam[1]) - for hyperParam in rfHyperParams -] -rfmodels = [ - TrainClassifier(model=rfm, labelCol="label").fit(ptrain) for rfm in randomForests -] - -gbtHyperParams = itertools.product([8, 16], [2, 3]) -gbtclassifiers = [ - GBTClassifier(maxBins=hyperParam[0], maxDepth=hyperParam[1]) - for hyperParam in gbtHyperParams -] -gbtmodels = [ - TrainClassifier(model=gbt, labelCol="label").fit(ptrain) for gbt in gbtclassifiers -] - -trainedModels = lrmodels + rfmodels + gbtmodels -``` - -Find the best model for the given test dataset. - - -```python -from synapse.ml.automl import FindBestModel - -bestModel = FindBestModel(evaluationMetric="AUC", models=trainedModels).fit(ptest) -bestModel.getRocCurve().show() -bestModel.getBestModelMetrics().show() -bestModel.getAllModelMetrics().show() -``` - -Get the accuracy from the validation dataset. - - -```python -from synapse.ml.train import ComputeModelStatistics - -predictions = bestModel.transform(pvalidation) -metrics = ComputeModelStatistics().transform(predictions) -print( - "Best model's accuracy on validation set = " - + "{0:.2f}%".format(metrics.first()["accuracy"] * 100) -) -print( - "Best model's AUC on validation set = " - + "{0:.2f}%".format(metrics.first()["AUC"] * 100) -) -``` diff --git a/website/versioned_docs/version-0.10.0/features/other/TextAnalytics - Amazon Book Reviews.md b/website/versioned_docs/version-0.10.0/features/other/TextAnalytics - Amazon Book Reviews.md deleted file mode 100644 index 36fc7f2cda..0000000000 --- a/website/versioned_docs/version-0.10.0/features/other/TextAnalytics - Amazon Book Reviews.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -title: TextAnalytics - Amazon Book Reviews -hide_title: true -status: stable ---- -## TextAnalytics - Amazon Book Reviews - -Again, try to predict Amazon book ratings greater than 3 out of 5, this time using -the `TextFeaturizer` module which is a composition of several text analytics APIs that -are native to Spark. - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -import pandas as pd -``` - - -```python -data = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/BookReviewsFromAmazon10K.parquet" -) -data.limit(10).toPandas() -``` - -Use `TextFeaturizer` to generate our features column. We remove stop words, and use TF-IDF -to generate 2²⁰ sparse features. - - -```python -from synapse.ml.featurize.text import TextFeaturizer - -textFeaturizer = ( - TextFeaturizer() - .setInputCol("text") - .setOutputCol("features") - .setUseStopWordsRemover(True) - .setUseIDF(True) - .setMinDocFreq(5) - .setNumFeatures(1 << 16) - .fit(data) -) -``` - - -```python -processedData = textFeaturizer.transform(data) -processedData.limit(5).toPandas() -``` - -Change the label so that we can predict whether the rating is greater than 3 using a binary -classifier. 
- - -```python -processedData = processedData.withColumn("label", processedData["rating"] > 3).select( - ["features", "label"] -) -processedData.limit(5).toPandas() -``` - -Train several Logistic Regression models with different regularizations. - - -```python -train, test, validation = processedData.randomSplit([0.60, 0.20, 0.20]) -from pyspark.ml.classification import LogisticRegression - -lrHyperParams = [0.05, 0.1, 0.2, 0.4] -logisticRegressions = [ - LogisticRegression(regParam=hyperParam) for hyperParam in lrHyperParams -] - -from synapse.ml.train import TrainClassifier - -lrmodels = [ - TrainClassifier(model=lrm, labelCol="label").fit(train) - for lrm in logisticRegressions -] -``` - -Find the model with the best AUC on the test set. - - -```python -from synapse.ml.automl import FindBestModel, BestModel - -bestModel = FindBestModel(evaluationMetric="AUC", models=lrmodels).fit(test) -bestModel.getRocCurve().show() -bestModel.getBestModelMetrics().show() -bestModel.getAllModelMetrics().show() -``` - -Use the optimized `ComputeModelStatistics` API to find the model accuracy. - - -```python -from synapse.ml.train import ComputeModelStatistics - -predictions = bestModel.transform(validation) -metrics = ComputeModelStatistics().transform(predictions) -print( - "Best model's accuracy on validation set = " - + "{0:.2f}%".format(metrics.first()["accuracy"] * 100) -) -``` diff --git a/website/versioned_docs/version-0.10.0/features/regression/Regression - Auto Imports.md b/website/versioned_docs/version-0.10.0/features/regression/Regression - Auto Imports.md deleted file mode 100644 index d22d393ac3..0000000000 --- a/website/versioned_docs/version-0.10.0/features/regression/Regression - Auto Imports.md +++ /dev/null @@ -1,230 +0,0 @@ ---- -title: Regression - Auto Imports -hide_title: true -status: stable ---- -## Regression - Auto Imports - -This sample notebook is based on the Gallery [Sample 6: Train, Test, Evaluate -for Regression: Auto Imports -Dataset](https://gallery.cortanaintelligence.com/Experiment/670fbfc40c4f44438bfe72e47432ae7a) -for AzureML Studio. This experiment demonstrates how to build a regression -model to predict the automobile's price. The process includes training, testing, -and evaluating the model on the Automobile Imports data set. 
- -This sample demonstrates the use of several members of the synapseml library: -- [`TrainRegressor` - ](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.train.html?#module-synapse.ml.train.TrainRegressor) -- [`SummarizeData` - ](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.stages.html?#module-synapse.ml.stages.SummarizeData) -- [`CleanMissingData` - ](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.featurize.html?#module-synapse.ml.featurize.CleanMissingData) -- [`ComputeModelStatistics` - ](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.train.html?#module-synapse.ml.train.ComputeModelStatistics) -- [`FindBestModel` - ](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.automl.html?#module-synapse.ml.automl.FindBestModel) - -First, import the pandas package so that we can read and parse the datafile -using `pandas.read_csv()` - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -data = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AutomobilePriceRaw.parquet" -) -``` - -To learn more about the data that was just read into the DataFrame, -summarize the data using `SummarizeData` and print the summary. For each -column of the DataFrame, `SummarizeData` will report the summary statistics -in the following subcategories for each column: -* Feature name -* Counts - - Count - - Unique Value Count - - Missing Value Count -* Quantiles - - Min - - 1st Quartile - - Median - - 3rd Quartile - - Max -* Sample Statistics - - Sample Variance - - Sample Standard Deviation - - Sample Skewness - - Sample Kurtosis -* Percentiles - - P0.5 - - P1 - - P5 - - P95 - - P99 - - P99.5 - -Note that several columns have missing values (`normalized-losses`, `bore`, -`stroke`, `horsepower`, `peak-rpm`, `price`). This summary can be very -useful during the initial phases of data discovery and characterization. - - -```python -from synapse.ml.stages import SummarizeData - -summary = SummarizeData().transform(data) -summary.toPandas() -``` - -Split the dataset into train and test datasets. - - -```python -# split the data into training and testing datasets -train, test = data.randomSplit([0.6, 0.4], seed=123) -train.limit(10).toPandas() -``` - -Now use the `CleanMissingData` API to replace the missing values in the -dataset with something more useful or meaningful. Specify a list of columns -to be cleaned, and specify the corresponding output column names, which are -not required to be the same as the input column names. `CleanMissiongData` -offers the options of "Mean", "Median", or "Custom" for the replacement -value. In the case of "Custom" value, the user also specifies the value to -use via the "customValue" parameter. In this example, we will replace -missing values in numeric columns with the median value for the column. We -will define the model here, then use it as a Pipeline stage when we train our -regression models and make our predictions in the following steps. - - -```python -from synapse.ml.featurize import CleanMissingData - -cols = ["normalized-losses", "stroke", "bore", "horsepower", "peak-rpm", "price"] -cleanModel = ( - CleanMissingData().setCleaningMode("Median").setInputCols(cols).setOutputCols(cols) -) -``` - -Now we will create two Regressor models for comparison: Poisson Regression -and Random Forest. 
PySpark has several regressors implemented: -* `LinearRegression` -* `IsotonicRegression` -* `DecisionTreeRegressor` -* `RandomForestRegressor` -* `GBTRegressor` (Gradient-Boosted Trees) -* `AFTSurvivalRegression` (Accelerated Failure Time Model Survival) -* `GeneralizedLinearRegression` -- fit a generalized model by giving symbolic - description of the linear preditor (link function) and a description of the - error distribution (family). The following families are supported: - - `Gaussian` - - `Binomial` - - `Poisson` - - `Gamma` - - `Tweedie` -- power link function specified through `linkPower` -Refer to the -[Pyspark API Documentation](http://spark.apache.org/docs/latest/api/python/) -for more details. - -`TrainRegressor` creates a model based on the regressor and other parameters -that are supplied to it, then trains data on the model. - -In this next step, Create a Poisson Regression model using the -`GeneralizedLinearRegressor` API from Spark and create a Pipeline using the -`CleanMissingData` and `TrainRegressor` as pipeline stages to create and -train the model. Note that because `TrainRegressor` expects a `labelCol` to -be set, there is no need to set `linkPredictionCol` when setting up the -`GeneralizedLinearRegressor`. Fitting the pipe on the training dataset will -train the model. Applying the `transform()` of the pipe to the test dataset -creates the predictions. - - -```python -# train Poisson Regression Model -from pyspark.ml.regression import GeneralizedLinearRegression -from pyspark.ml import Pipeline -from synapse.ml.train import TrainRegressor - -glr = GeneralizedLinearRegression(family="poisson", link="log") -poissonModel = TrainRegressor().setModel(glr).setLabelCol("price").setNumFeatures(256) -poissonPipe = Pipeline(stages=[cleanModel, poissonModel]).fit(train) -poissonPrediction = poissonPipe.transform(test) -``` - -Next, repeat these steps to create a Random Forest Regression model using the -`RandomRorestRegressor` API from Spark. - - -```python -# train Random Forest regression on the same training data: -from pyspark.ml.regression import RandomForestRegressor - -rfr = RandomForestRegressor(maxDepth=30, maxBins=128, numTrees=8, minInstancesPerNode=1) -randomForestModel = TrainRegressor(model=rfr, labelCol="price", numFeatures=256).fit( - train -) -randomForestPipe = Pipeline(stages=[cleanModel, randomForestModel]).fit(train) -randomForestPrediction = randomForestPipe.transform(test) -``` - -After the models have been trained and scored, compute some basic statistics -to evaluate the predictions. The following statistics are calculated for -regression models to evaluate: -* Mean squared error -* Root mean squared error -* R^2 -* Mean absolute error - -Use the `ComputeModelStatistics` API to compute basic statistics for -the Poisson and the Random Forest models. 
- - -```python -from synapse.ml.train import ComputeModelStatistics - -poissonMetrics = ComputeModelStatistics().transform(poissonPrediction) -print("Poisson Metrics") -poissonMetrics.toPandas() -``` - - -```python -randomForestMetrics = ComputeModelStatistics().transform(randomForestPrediction) -print("Random Forest Metrics") -randomForestMetrics.toPandas() -``` - -We can also compute per instance statistics for `poissonPrediction`: - - -```python -from synapse.ml.train import ComputePerInstanceStatistics - - -def demonstrateEvalPerInstance(pred): - return ( - ComputePerInstanceStatistics() - .transform(pred) - .select("price", "prediction", "L1_loss", "L2_loss") - .limit(10) - .toPandas() - ) - - -demonstrateEvalPerInstance(poissonPrediction) -``` - -and with `randomForestPrediction`: - - -```python -demonstrateEvalPerInstance(randomForestPrediction) -``` diff --git a/website/versioned_docs/version-0.10.0/features/regression/Regression - Flight Delays with DataCleaning.md b/website/versioned_docs/version-0.10.0/features/regression/Regression - Flight Delays with DataCleaning.md deleted file mode 100644 index ba43626fee..0000000000 --- a/website/versioned_docs/version-0.10.0/features/regression/Regression - Flight Delays with DataCleaning.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -title: Regression - Flight Delays with DataCleaning -hide_title: true -status: stable ---- -## Regression - Flight Delays with DataCleaning - -This example notebook is similar to -[Regression - Flight Delays](https://github.com/microsoft/SynapseML/blob/master/notebooks/Regression%20-%20Flight%20Delays.ipynb). -In this example, we will demonstrate the use of `DataConversion()` in two -ways. First, to convert the data type of several columns after the dataset -has been read in to the Spark DataFrame instead of specifying the data types -as the file is read in. Second, to convert columns to categorical columns -instead of iterating over the columns and applying the `StringIndexer`. - -This sample demonstrates how to use the following APIs: -- [`TrainRegressor` - ](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.train.html?#module-synapse.ml.train.TrainRegressor) -- [`ComputePerInstanceStatistics` - ](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.train.html?#module-synapse.ml.train.ComputePerInstanceStatistics) -- [`DataConversion` - ](https://mmlspark.blob.core.windows.net/docs/0.10.0/pyspark/synapse.ml.featurize.html?#module-synapse.ml.featurize.DataConversion) - -First, import the pandas package - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -import pandas as pd -``` - -Next, import the CSV dataset: retrieve the file if needed, save it locally, -read the data into a pandas dataframe via `read_csv()`, then convert it to -a Spark dataframe. - -Print the schema of the dataframe, and note the columns that are `long`. - - -```python -flightDelay = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/On_Time_Performance_2012_9.parquet" -) -# print some basic info -print("records read: " + str(flightDelay.count())) -print("Schema: ") -flightDelay.printSchema() -flightDelay.limit(10).toPandas() -``` - -Use the `DataConversion` transform API to convert the columns listed to -double. 
- -The `DataConversion` API accepts the following types for the `convertTo` -parameter: -* `boolean` -* `byte` -* `short` -* `integer` -* `long` -* `float` -* `double` -* `string` -* `toCategorical` -* `clearCategorical` -* `date` -- converts a string or long to a date of the format - "yyyy-MM-dd HH:mm:ss" unless another format is specified by -the `dateTimeFormat` parameter. - -Again, print the schema and note that the columns are now `double` -instead of long. - - -```python -from synapse.ml.featurize import DataConversion - -flightDelay = DataConversion( - cols=[ - "Quarter", - "Month", - "DayofMonth", - "DayOfWeek", - "OriginAirportID", - "DestAirportID", - "CRSDepTime", - "CRSArrTime", - ], - convertTo="double", -).transform(flightDelay) -flightDelay.printSchema() -flightDelay.limit(10).toPandas() -``` - -Split the datasest into train and test sets. - - -```python -train, test = flightDelay.randomSplit([0.75, 0.25]) -``` - -Create a regressor model and train it on the dataset. - -First, use `DataConversion` to convert the columns `Carrier`, `DepTimeBlk`, -and `ArrTimeBlk` to categorical data. Recall that in Notebook 102, this -was accomplished by iterating over the columns and converting the strings -to index values using the `StringIndexer` API. The `DataConversion` API -simplifies the task by allowing you to specify all columns that will have -the same end type in a single command. - -Create a LinearRegression model using the Limited-memory BFGS solver -(`l-bfgs`), an `ElasticNet` mixing parameter of `0.3`, and a `Regularization` -of `0.1`. - -Train the model with the `TrainRegressor` API fit on the training dataset. - - -```python -from synapse.ml.train import TrainRegressor, TrainedRegressorModel -from pyspark.ml.regression import LinearRegression - -trainCat = DataConversion( - cols=["Carrier", "DepTimeBlk", "ArrTimeBlk"], convertTo="toCategorical" -).transform(train) -testCat = DataConversion( - cols=["Carrier", "DepTimeBlk", "ArrTimeBlk"], convertTo="toCategorical" -).transform(test) -lr = LinearRegression().setRegParam(0.1).setElasticNetParam(0.3) -model = TrainRegressor(model=lr, labelCol="ArrDelay").fit(trainCat) -``` - -Score the regressor on the test data. - - -```python -scoredData = model.transform(testCat) -scoredData.limit(10).toPandas() -``` - -Compute model metrics against the entire scored dataset - - -```python -from synapse.ml.train import ComputeModelStatistics - -metrics = ComputeModelStatistics().transform(scoredData) -metrics.toPandas() -``` - -Finally, compute and show statistics on individual predictions in the test -dataset, demonstrating the usage of `ComputePerInstanceStatistics` - - -```python -from synapse.ml.train import ComputePerInstanceStatistics - -evalPerInstance = ComputePerInstanceStatistics().transform(scoredData) -evalPerInstance.select("ArrDelay", "prediction", "L1_loss", "L2_loss").limit( - 10 -).toPandas() -``` diff --git a/website/versioned_docs/version-0.10.0/features/regression/Regression - Flight Delays.md b/website/versioned_docs/version-0.10.0/features/regression/Regression - Flight Delays.md deleted file mode 100644 index f9259fd3df..0000000000 --- a/website/versioned_docs/version-0.10.0/features/regression/Regression - Flight Delays.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: Regression - Flight Delays -hide_title: true -status: stable ---- -## Regression - Flight Delays - -In this example, we run a linear regression on the *Flight Delay* dataset to predict the delay times. 
- -We demonstrate how to use the `TrainRegressor` and the `ComputePerInstanceStatistics` APIs. - -First, import the packages. - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -import numpy as np -import pandas as pd -import synapse.ml -``` - -Next, import the CSV dataset. - - -```python -flightDelay = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/On_Time_Performance_2012_9.parquet" -) -# print some basic info -print("records read: " + str(flightDelay.count())) -print("Schema: ") -flightDelay.printSchema() -flightDelay.limit(10).toPandas() -``` - -Split the dataset into train and test sets. - - -```python -train, test = flightDelay.randomSplit([0.75, 0.25]) -``` - -Train a regressor on dataset with `l-bfgs`. - - -```python -from synapse.ml.train import TrainRegressor, TrainedRegressorModel -from pyspark.ml.regression import LinearRegression -from pyspark.ml.feature import StringIndexer - -# Convert columns to categorical -catCols = ["Carrier", "DepTimeBlk", "ArrTimeBlk"] -trainCat = train -testCat = test -for catCol in catCols: - simodel = StringIndexer(inputCol=catCol, outputCol=catCol + "Tmp").fit(train) - trainCat = ( - simodel.transform(trainCat) - .drop(catCol) - .withColumnRenamed(catCol + "Tmp", catCol) - ) - testCat = ( - simodel.transform(testCat) - .drop(catCol) - .withColumnRenamed(catCol + "Tmp", catCol) - ) -lr = LinearRegression().setRegParam(0.1).setElasticNetParam(0.3) -model = TrainRegressor(model=lr, labelCol="ArrDelay").fit(trainCat) -``` - -Save, load, or Score the regressor on the test data. - - -```python -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - model_name = "/models/flightDelayModel.mml" -else: - model_name = "dbfs:/flightDelayModel.mml" - -model.write().overwrite().save(model_name) -flightDelayModel = TrainedRegressorModel.load(model_name) - -scoredData = flightDelayModel.transform(testCat) -scoredData.limit(10).toPandas() -``` - -Compute model metrics against the entire scored dataset - - -```python -from synapse.ml.train import ComputeModelStatistics - -metrics = ComputeModelStatistics().transform(scoredData) -metrics.toPandas() -``` - -Finally, compute and show per-instance statistics, demonstrating the usage -of `ComputePerInstanceStatistics`. - - -```python -from synapse.ml.train import ComputePerInstanceStatistics - -evalPerInstance = ComputePerInstanceStatistics().transform(scoredData) -evalPerInstance.select("ArrDelay", "prediction", "L1_loss", "L2_loss").limit( - 10 -).toPandas() -``` diff --git a/website/versioned_docs/version-0.10.0/features/regression/Regression - Vowpal Wabbit vs. LightGBM vs. Linear Regressor.md b/website/versioned_docs/version-0.10.0/features/regression/Regression - Vowpal Wabbit vs. LightGBM vs. Linear Regressor.md deleted file mode 100644 index 4387aff14c..0000000000 --- a/website/versioned_docs/version-0.10.0/features/regression/Regression - Vowpal Wabbit vs. LightGBM vs. Linear Regressor.md +++ /dev/null @@ -1,253 +0,0 @@ ---- -title: Regression - Vowpal Wabbit vs. LightGBM vs. Linear Regressor -hide_title: true -status: stable ---- -# Vowpal Wabbit and LightGBM for a Regression Problem - -This notebook shows how to build simple regression models by using -[Vowpal Wabbit (VW)](https://github.com/VowpalWabbit/vowpal_wabbit) and -[LightGBM](https://github.com/microsoft/LightGBM) with SynapseML. 
- We also compare the results with - [Spark MLlib Linear Regression](https://spark.apache.org/docs/latest/ml-classification-regression.html#linear-regression). - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - - -```python -import math -from synapse.ml.train import ComputeModelStatistics -from synapse.ml.vw import VowpalWabbitRegressor, VowpalWabbitFeaturizer -from synapse.ml.lightgbm import LightGBMRegressor -import numpy as np -import pandas as pd -from pyspark.ml.feature import VectorAssembler -from pyspark.ml.regression import LinearRegression -from sklearn.datasets import load_boston -``` - -## Prepare Dataset -We use [*Boston house price* dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) -. -The data was collected in 1978 from Boston area and consists of 506 entries with 14 features including the value of homes. -We use `sklearn.datasets` module to download it easily, then split the set into training and testing by 75/25. - - -```python -boston = load_boston() - -feature_cols = ["f" + str(i) for i in range(boston.data.shape[1])] -header = ["target"] + feature_cols -df = spark.createDataFrame( - pd.DataFrame(data=np.column_stack((boston.target, boston.data)), columns=header) -).repartition(1) -print("Dataframe has {} rows".format(df.count())) -display(df.limit(10).toPandas()) -``` - - -```python -train_data, test_data = df.randomSplit([0.75, 0.25], seed=42) -``` - -Following is the summary of the training set. - - -```python -display(train_data.summary().toPandas()) -``` - -Plot feature distributions over different target values (house prices in our case). - - -```python -features = train_data.columns[1:] -values = train_data.drop("target").toPandas() -ncols = 5 -nrows = math.ceil(len(features) / ncols) -``` - -## Baseline - Spark MLlib Linear Regressor - -First, we set a baseline performance by using Linear Regressor in Spark MLlib. - - -```python -featurizer = VectorAssembler(inputCols=feature_cols, outputCol="features") -lr_train_data = featurizer.transform(train_data)["target", "features"] -lr_test_data = featurizer.transform(test_data)["target", "features"] -display(lr_train_data.limit(10).toPandas()) -``` - - -```python -# By default, `maxIter` is 100. Other params you may want to change include: `regParam`, `elasticNetParam`, etc. -lr = LinearRegression(labelCol="target") - -lr_model = lr.fit(lr_train_data) -lr_predictions = lr_model.transform(lr_test_data) - -display(lr_predictions.limit(10).toPandas()) -``` - -We evaluate the prediction result by using `synapse.ml.train.ComputeModelStatistics` which returns four metrics: -* [MSE (Mean Squared Error)](https://en.wikipedia.org/wiki/Mean_squared_error) -* [RMSE (Root Mean Squared Error)](https://en.wikipedia.org/wiki/Root-mean-square_deviation) = sqrt(MSE) -* [R quared](https://en.wikipedia.org/wiki/Coefficient_of_determination) -* [MAE (Mean Absolute Error)](https://en.wikipedia.org/wiki/Mean_absolute_error) - - -```python -metrics = ComputeModelStatistics( - evaluationMetric="regression", labelCol="target", scoresCol="prediction" -).transform(lr_predictions) - -results = metrics.toPandas() -results.insert(0, "model", ["Spark MLlib - Linear Regression"]) -display(results) -``` - -## Vowpal Wabbit - -Perform VW-style feature hashing. 
Many types (numbers, string, bool, map of string to (number, string)) are supported. - - -```python -vw_featurizer = VowpalWabbitFeaturizer(inputCols=feature_cols, outputCol="features") - -vw_train_data = vw_featurizer.transform(train_data)["target", "features"] -vw_test_data = vw_featurizer.transform(test_data)["target", "features"] -display(vw_train_data.limit(10).toPandas()) -``` - -See [VW wiki](https://github.com/vowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments) for command line arguments. - - -```python -# Use the same number of iterations as Spark MLlib's Linear Regression (=100) -args = "--holdout_off --loss_function quantile -l 7 -q :: --power_t 0.3" -vwr = VowpalWabbitRegressor(labelCol="target", passThroughArgs=args, numPasses=100) - -# To reduce number of partitions (which will effect performance), use `vw_train_data.repartition(1)` -vw_train_data_2 = vw_train_data.repartition(1).cache() -print(vw_train_data_2.count()) -vw_model = vwr.fit(vw_train_data_2.repartition(1)) -vw_predictions = vw_model.transform(vw_test_data) - -display(vw_predictions.limit(10).toPandas()) -``` - - -```python -metrics = ComputeModelStatistics( - evaluationMetric="regression", labelCol="target", scoresCol="prediction" -).transform(vw_predictions) - -vw_result = metrics.toPandas() -vw_result.insert(0, "model", ["Vowpal Wabbit"]) -results = results.append(vw_result, ignore_index=True) - -display(results) -``` - -## LightGBM - - -```python -lgr = LightGBMRegressor( - objective="quantile", - alpha=0.2, - learningRate=0.3, - numLeaves=31, - labelCol="target", - numIterations=100, -) - -# Using one partition since the training dataset is very small -repartitioned_data = lr_train_data.repartition(1).cache() -print(repartitioned_data.count()) -lg_model = lgr.fit(repartitioned_data) -lg_predictions = lg_model.transform(lr_test_data) - -display(lg_predictions.limit(10).toPandas()) -``` - - -```python -metrics = ComputeModelStatistics( - evaluationMetric="regression", labelCol="target", scoresCol="prediction" -).transform(lg_predictions) - -lg_result = metrics.toPandas() -lg_result.insert(0, "model", ["LightGBM"]) - -results = results.append(lg_result, ignore_index=True) - -display(results) -``` - -Following figure shows the actual-vs.-prediction graphs of the results: - -lr-vw-lg - - -```python -if os.environ.get("AZURE_SERVICE", None) != "Microsoft.ProjectArcadia": - from matplotlib.colors import ListedColormap, Normalize - from matplotlib.cm import get_cmap - import matplotlib.pyplot as plt - - f, axes = plt.subplots(nrows, ncols, sharey=True, figsize=(30, 10)) - f.tight_layout() - yy = [r["target"] for r in train_data.select("target").collect()] - for irow in range(nrows): - axes[irow][0].set_ylabel("target") - for icol in range(ncols): - try: - feat = features[irow * ncols + icol] - xx = values[feat] - axes[irow][icol].scatter(xx, yy, s=10, alpha=0.25) - axes[irow][icol].set_xlabel(feat) - axes[irow][icol].get_yaxis().set_ticks([]) - except IndexError: - f.delaxes(axes[irow][icol]) - - cmap = get_cmap("YlOrRd") - - target = np.array(test_data.select("target").collect()).flatten() - model_preds = [ - ("Spark MLlib Linear Regression", lr_predictions), - ("Vowpal Wabbit", vw_predictions), - ("LightGBM", lg_predictions), - ] - - f, axes = plt.subplots(1, len(model_preds), sharey=True, figsize=(18, 6)) - f.tight_layout() - - for i, (model_name, preds) in enumerate(model_preds): - preds = np.array(preds.select("prediction").collect()).flatten() - err = np.absolute(preds - target) - - norm = Normalize() - 
clrs = cmap(np.asarray(norm(err)))[:, :-1]
-        axes[i].scatter(preds, target, s=60, c=clrs, edgecolors="#888888", alpha=0.75)
-        axes[i].plot((0, 60), (0, 60), linestyle="--", color="#888888")
-        axes[i].set_xlabel("Predicted values")
-        if i == 0:
-            axes[i].set_ylabel("Actual values")
-        axes[i].set_title(model_name)
-```
-
-
-```python
-
-```
diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/Data Balance Analysis.md b/website/versioned_docs/version-0.10.0/features/responsible_ai/Data Balance Analysis.md
deleted file mode 100644
index 07f00ad07b..0000000000
--- a/website/versioned_docs/version-0.10.0/features/responsible_ai/Data Balance Analysis.md	
+++ /dev/null
@@ -1,215 +0,0 @@
----
-title: Data Balance Analysis on Spark
-hide_title: true
-sidebar_label: Data Balance Analysis
-description: Learn how to do Data Balance Analysis on Spark to determine how well features and feature values are represented in your dataset.
----
-
-# Data Balance Analysis on Spark
-
-## Context
-
-Data Balance Analysis is relevant for gaining an overall understanding of datasets, but it becomes essential when thinking about building AI systems in a responsible way, especially in terms of fairness.
-
-AI systems can sometimes exhibit unwanted, unfair behaviors. These behaviors can cause fairness-related harms that affect various groups of people. They may amplify the marginalization of particular groups whose needs and contexts are often overlooked during AI development and deployment. Fairness-related harms can have varying severities, and the cumulative impact of even seemingly non-severe harms can be burdensome.
-
-Fairness-related harms include:
-
-* **Allocation harms**: When an AI system extends or withholds opportunities or resources in ways that negatively impact people’s lives.
-* **Quality of service harms**: When an AI system does not work as well for one group of people as it does for another.
-* **Stereotyping harms**: When an AI system makes unfair generalizations about groups of people and reinforces negative stereotypes.
-* **Demeaning harms**: When an AI system is actively derogatory or offensive.
-* **Over/underrepresentation harms**: When an AI system over/underrepresents some groups of people or may even erase some groups entirely.
-
-**Note**: *Because fairness in AI is fundamentally a sociotechnical challenge, it's often impossible to fully “de-bias” an AI system. Instead, teams tasked with developing and deploying AI systems must work to identify, measure, and mitigate fairness-related harms as much as possible. Data Balance Analysis is a tool to help do so, in combination with others.*
-
-Data Balance Analysis consists of a combination of three groups of measures: Feature Balance Measures, Distribution Balance Measures, and Aggregate Balance Measures.
-
-In summary, Data Balance Analysis, when used as a step for building ML models, has the following benefits:
-
-* It reduces the costs of building ML through the early identification of data representation gaps. Before proceeding to train their models, data scientists can seek mitigation steps such as collecting more data, following a specific sampling mechanism, creating synthetic data, and so on.
-* It enables easy end-to-end debugging of ML systems in combination with the [RAI Toolbox](https://responsibleaitoolbox.ai/responsible-ai-toolbox-capabilities/) by providing a clear view of model-related issues versus data-related issues.
- -## Examples - -* [Data Balance Analysis - Adult Census Income](../../../features/responsible_ai/DataBalanceAnalysis%20-%20Adult%20Census%20Income) - -## Usage - -Data Balance Analysis currently supports three transformers in the `synapse.ml.exploratory` namespace: - -* FeatureBalanceMeasure - supervised (requires label column) -* DistributionBalanceMeasure - unsupervised (doesn't require label column) -* AggregateBalanceMeasure - unsupervised (doesn't require label column) - -1. Import all three transformers. - - For example: - - ```python - from synapse.ml.exploratory import AggregateBalanceMeasure, DistributionBalanceMeasure, FeatureBalanceMeasure - ``` - -2. Load your dataset, define features of interest, and ensure that the label column is binary. The `FeatureBalanceMeasure` transformer currently only supports binary labels, but support for numerical labels will be added soon. - - For example: - - ```python - import pyspark.sql.functions as F - - features = ["race", "sex"] - label = "income" - - df = spark.read.parquet("wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet") - - # Convert the "income" column from {<=50K, >50K} to {0, 1} to represent our binary classification label column - df = df.withColumn(label, F.when(F.col(label).contains("<=50K"), F.lit(0)).otherwise(F.lit(1))) - ``` - -3. Create a `FeatureBalanceMeasure` transformer and call `setSensitiveCols` to set the list of sensitive features and call `setLabelCol` to set the binary label column. Then, call the `transform` method with your dataset and visualize the resulting dataframe. - - For example: - - ```python - feature_balance_measures = ( - FeatureBalanceMeasure() - .setSensitiveCols(features) - .setLabelCol(label) - .transform(df) - ) - feature_balance_measures.show(truncate=False) - ``` - -4. Create a `DistributionBalanceMeasure` transformer and call `setSensitiveCols` to set the list of sensitive features. Then, call the `transform` method with your dataset and visualize the resulting dataframe. - - For example: - - ```python - distribution_balance_measures = ( - DistributionBalanceMeasure() - .setSensitiveCols(features) - .transform(df) - ) - distribution_balance_measures.show(truncate=False) - ``` - -5. Create a `AggregateBalanceMeasure` transformer and call `setSensitiveCols` to set the list of sensitive features. Then, call the `transform` method with your dataset and visualize the resulting dataframe. - - For example: - - ```python - aggregate_balance_measures = ( - AggregateBalanceMeasure() - .setSensitiveCols(features) - .transform(df) - ) - aggregate_balance_measures.show(truncate=False) - ``` - -Note: If you're running this notebook in a Spark environment such as Azure Synapse or Databricks, then you can easily visualize the imbalance measures by calling the built-in plotting features `display()`. - -## Measure Explanations - -### Feature Balance Measures - -Feature Balance Measures allow us to see whether each combination of sensitive feature is receiving the positive outcome (true prediction) at balanced probability. - -In this context, we define a feature balance measure, called the parity, for label y. It is the difference between the association metrics of two different sensitive classes $[x_A, x_B]$, with respect to the association metric $A(x_i, y)$. That is: - -$parity(y \vert x_A, x_B, A(\cdot)) \coloneqq A(x_A, y) - A(x_B, y)$ - -Using the dataset, we can see if the various sexes and races are receiving >50k income at equal or unequal rates. 
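-
-As a concrete check, the statistical parity component of this measure can be reproduced by hand from the `df` and `label` objects prepared in the Usage section. The sketch below is illustrative only and assumes the sensitive column values are exactly "Male" and "Female":
-
-```python
-import pyspark.sql.functions as F
-
-# Positive-outcome rate P(income = 1 | sex = a) for each group; the parity for a pair
-# of groups is the difference between their positive rates.
-rates = df.groupBy("sex").agg(F.avg(F.col(label)).alias("positive_rate"))
-rates.show()
-
-r = {row["sex"]: row["positive_rate"] for row in rates.collect()}
-print("Statistical parity (Male - Female): {:.4f}".format(r["Male"] - r["Female"]))
-```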
- -Note: Many of these metrics were influenced by this paper [Measuring Model Biases in the Absence of Ground Truth](https://arxiv.org/abs/2103.03417). - -| Association Metric | Family | Description | Interpretation/Formula | Reference | -|----------------------------------------------------|-----------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------| -| Statistical Parity | Fairness | Proportion of each segment of a protected class (gender, for example) that should receive the positive outcome at equal rates. | Closer to zero means better parity. $DP = P(Y \vert A = Male) - P(Y \vert A = Female)$. | [Link](https://en.wikipedia.org/wiki/Fairness_%28machine_learning%29) | -| Pointwise Mutual Information (PMI), normalized PMI | Entropy | The PMI of a pair of feature values (ex: Gender=Male and Gender=Female) quantifies the discrepancy between the probability of their coincidence given their joint distribution and their individual distributions (assuming independence). | Range (normalized) $[-1, 1]$. -1 for no co-occurrences. 0 for co-occurrences at random. 1 for complete co-occurrences. | [Link](https://en.wikipedia.org/wiki/Pointwise_mutual_information) | -| Sorensen-Dice Coefficient (SDC) | Intersection-over-Union | Used to gauge the similarity of two samples. Related to F1 score. | Equals twice the number of elements common to both sets divided by the sum of the number of elements in each set. | [Link](https://en.wikipedia.org/wiki/S%C3%B8rensen%E2%80%93Dice_coefficient) | -| Jaccard Index | Intersection-over-Union | Similar to SDC, gauges the similarity and diversity of sample sets. | Equals the size of the intersection divided by the size of the union of the sample sets. | [Link](https://en.wikipedia.org/wiki/Jaccard_index) | -| Kendall Rank Correlation | Correlation and Statistical Tests | Used to measure the ordinal association between two measured quantities. | High when observations have a similar rank and low when observations have a dissimilar rank between the two variables. | [Link](https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient) | -| Log-Likelihood Ratio | Correlation and Statistical Tests | Calculates the degree to which data supports one variable versus another. Log of the likelihood ratio, which gives the probability of correctly predicting the label in ratio to probability of incorrectly predicting label. | If likelihoods are similar, it should be close to 0. | [Link](https://en.wikipedia.org/wiki/Likelihood_function#Likelihood_ratio) | -| t-test | Correlation and Statistical Tests | Used to compare the means of two groups (pairwise). | Value looked up in t-Distribution tell if statistically significant or not. | [Link](https://en.wikipedia.org/wiki/Student's_t-test) | - -### Distribution Balance Measures - -Distribution Balance Measures allow us to compare our data with a reference distribution (currently only uniform distribution is supported as a reference distribution). They are calculated per sensitive column and don't depend on the label column. 
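-
-Before walking through the small hand-worked example below, note that the observed and (uniform) reference probabilities can also be computed directly. A minimal sketch, reusing the `df` from the Usage section and the "sex" column as an example:
-
-```python
-import pyspark.sql.functions as F
-
-# Observed probability of each value of "sex" versus the uniform reference
-# probability 1 / (number of distinct values). DistributionBalanceMeasure compares
-# these two distributions using the distance measures listed below.
-total = df.count()
-n_values = df.select("sex").distinct().count()
-
-(
-    df.groupBy("sex")
-    .agg((F.count("*") / F.lit(total)).alias("observed_probability"))
-    .withColumn("reference_probability", F.lit(1.0 / n_values))
-    .show()
-)
-```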
- -For example, let's assume we have a dataset with nine rows and a Gender column, and we observe that: - -* "Male" appears four times -* "Female" appears three times -* "Other" appears twice - -Assuming the uniform distribution: - -$$ -ReferenceCount \coloneqq \frac{numRows}{numFeatureValues} -$$ - -$$ -ReferenceProbability \coloneqq \frac{1}{numFeatureValues} -$$ - -Feature Value | Observed Count | Reference Count | Observed Probability | Reference Probabiliy -| - | - | - | - | - -Male | 4 | 9/3 = 3 | 4/9 = 0.44 | 3/9 = 0.33 -Female | 3 | 9/3 = 3 | 3/9 = 0.33 | 3/9 = 0.33 -Other | 2 | 9/3 = 3 | 2/9 = 0.22 | 3/9 = 0.33 - -We can use distance measures to find out how far our observed and reference distributions of these feature values are. Some of these distance measures include: - -| Measure | Description | Interpretation | Reference | -|--------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------| -| KL Divergence | Measure of how one probability distribution is different from a second, reference probability distribution. Measure of the information gained when one revises one's beliefs from the prior probability distribution Q to the posterior probability distribution P. In other words, it is the amount of information lost when Q is used to approximate P. | Non-negative. 0 means P = Q. | [Link](https://en.wikipedia.org/wiki/Kullback%E2%80%93Leibler_divergence) | -| JS Distance | Measuring the similarity between two probability distributions. Symmetrized and smoothed version of the Kullback–Leibler (KL) divergence. Square root of JS Divergence. | Range [0, 1]. 0 means perfectly same to balanced distribution. | [Link](https://en.wikipedia.org/wiki/Jensen%E2%80%93Shannon_divergence) | -| Wasserstein Distance | This distance is also known as the earth mover’s distance, since it can be seen as the minimum amount of “work” required to transform u into v, where “work” is measured as the amount of distribution weight that must be moved multiplied by the distance it has to be moved. | Non-negative. 0 means P = Q. | [Link](https://en.wikipedia.org/wiki/Wasserstein_metric) | -| Infinity Norm Distance | Distance between two vectors is the greatest of their differences along any coordinate dimension. Also called Chebyshev distance or chessboard distance. | Non-negative. 0 means same distribution. | [Link](https://en.wikipedia.org/wiki/Chebyshev_distance) | -| Total Variation Distance | It is equal to half the L1 (Manhattan) distance between the two distributions. Take the difference between the two proportions in each category, add up the absolute values of all the differences, and then divide the sum by 2. | Non-negative. 0 means same distribution. | [Link](https://en.wikipedia.org/wiki/Total_variation_distance_of_probability_measures) | -| Chi-Squared Test | The chi-square test tests the null hypothesis that the categorical data has the given frequencies given expected frequencies in each category. 
| p-value gives evidence against null-hypothesis that difference in observed and expected frequencies is by random chance. | [Link](https://en.wikipedia.org/wiki/Chi-squared_test) | - -### Aggregate Balance Measures - -Aggregate Balance Measures allow us to obtain a higher notion of inequality. They're calculated on the set of all sensitive columns and don't depend on the label column. - -These measures look at distribution of records across all combinations of sensitive columns. For example, if Sex and Race are specified as sensitive features, it then tries to quantify imbalance across all combinations of the two specified features - (Male, Black), (Female, White), (Male, Asian-Pac-Islander), etc. - -| Measure | Description | Interpretation | Reference | -|----------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------| -| Atkinson Index | It presents the percentage of total income that a given society would have to forego in order to have more equal shares of income between its citizens. This measure depends on the degree of societal aversion to inequality (a theoretical parameter decided by the researcher). A higher value entails greater social utility or willingness by individuals to accept smaller incomes in exchange for a more equal distribution. An important feature of the Atkinson index is that it can be decomposed into within-group and between-group inequality. | Range $[0, 1]$. 0 if perfect equality. 1 means maximum inequality. In our case, it is the proportion of records for a sensitive columns’ combination. | [Link](https://en.wikipedia.org/wiki/Atkinson_index) | -| Theil T Index | GE(1) = Theil's T and is more sensitive to differences at the top of the distribution. The Theil index is a statistic used to measure economic inequality. The Theil index measures an entropic "distance" the population is away from the "ideal" egalitarian state of everyone having the same income. | If everyone has the same income, then T_T equals 0. If one person has all the income, then T_T gives the result $ln(N)$. 0 means equal income and larger values mean higher level of disproportion. | [Link](https://en.wikipedia.org/wiki/Theil_index) | -| Theil L Index | GE(0) = Theil's L and is more sensitive to differences at the lower end of the distribution. Logarithm of (mean income)/(income i), over all the incomes included in the summation. It is also referred to as the mean log deviation measure. Because a transfer from a larger income to a smaller one will change the smaller income's ratio more than it changes the larger income's ratio, the transfer-principle is satisfied by this index. | Same interpretation as Theil T Index. 
| [Link](https://en.wikipedia.org/wiki/Theil_index) | - -## Mitigation - -Nearly every real-world dataset has caveats, biases, and imbalances, and data collection is costly. Mitigating data imbalance, or de-biasing data, is an active area of research, with techniques available at various stages of the ML lifecycle: pre-processing, in-processing, and post-processing. Here we outline a couple of pre-processing techniques. - -### Resampling - -Resampling involves under-sampling the majority class and over-sampling the minority class. A naïve way to over-sample would be to duplicate records. Similarly, to under-sample one could remove records at random. - -* Caveats: - - 1. Under-sampling may remove valuable information. - 2. Over-sampling may cause overfitting and poor generalization on the test set. - -![Bar chart undersampling and oversampling](https://mmlspark.blob.core.windows.net/graphics/responsible_ai/DataBalanceAnalysis_SamplingBar.png) - -There are smarter under-sampling and over-sampling techniques in the literature, many of which are implemented in Python's [imbalanced-learn](https://imbalanced-learn.org/stable/) package. - -For example, we can cluster the records of the majority class and under-sample by removing records from each cluster, thus seeking to preserve information. - -One under-sampling technique uses Tomek links. Tomek links are pairs of instances that are very close but of opposite classes. Removing the majority-class instance of each pair increases the space between the two classes, facilitating the classification process. A similar way to under-sample the majority class is NearMiss. It first calculates the distance between all the points in the larger class and the points in the smaller class. When two points belonging to different classes are very close to each other in the distribution, this algorithm eliminates the data point of the larger class, thereby trying to balance the distribution. - -![Tomek Links](https://mmlspark.blob.core.windows.net/graphics/responsible_ai/DataBalanceAnalysis_TomekLinks.png) - -In over-sampling, instead of creating exact copies of the minority-class records, we can introduce small variations into those copies, creating more diverse synthetic samples. This technique is called SMOTE (Synthetic Minority Oversampling Technique). It randomly picks a point from the minority class and computes the k-nearest neighbors for this point. The synthetic points are added between the chosen point and its neighbors. - -![Synthetic Samples](https://mmlspark.blob.core.windows.net/graphics/responsible_ai/DataBalanceAnalysis_SyntheticSamples.png) - -### Reweighting - -Reweighting assigns each combination of feature value and label (each cell of their contingency table) a weight equal to its expected count (assuming the feature and the label are independent) divided by its observed count, so that under-represented combinations count more during training. Reweighting is easy to extend to multiple features and to groups with more than two values. The weights are then incorporated into the loss function during model training, for example through a sample-weight column; a minimal sketch of computing such weights follows.
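To make this concrete, here is a minimal PySpark sketch of the reweighting computation. It is only an illustration: the DataFrame `df` and the column names `gender` and `label` are assumptions for the example, not part of the library's API.

```python
# Minimal reweighting sketch (illustrative only).
# Assumes a Spark DataFrame `df` with a sensitive column "gender" and a binary "label" column.
import pyspark.sql.functions as F

total = df.count()

# Observed count for each (gender, label) cell of the contingency table.
observed = df.groupBy("gender", "label").count().withColumnRenamed("count", "observed")

# Expected count under independence of gender and label: P(gender) * P(label) * total.
gender_counts = df.groupBy("gender").agg(F.count("*").alias("n_gender"))
label_counts = df.groupBy("label").agg(F.count("*").alias("n_label"))

weights = (
    observed.join(gender_counts, "gender")
    .join(label_counts, "label")
    .withColumn("expected", F.col("n_gender") * F.col("n_label") / F.lit(total))
    .withColumn("weight", F.col("expected") / F.col("observed"))
    .select("gender", "label", "weight")
)

# Attach a weight to every row; it can then be passed to any trainer that accepts a weight column.
reweighted_df = df.join(weights, on=["gender", "label"], how="left")
```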
- -![Reweighting](https://mmlspark.blob.core.windows.net/graphics/responsible_ai/DataBalanceAnalysis_Reweight.png) diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/DataBalanceAnalysis - Adult Census Income.md b/website/versioned_docs/version-0.10.0/features/responsible_ai/DataBalanceAnalysis - Adult Census Income.md deleted file mode 100644 index 008ab179c5..0000000000 --- a/website/versioned_docs/version-0.10.0/features/responsible_ai/DataBalanceAnalysis - Adult Census Income.md +++ /dev/null @@ -1,338 +0,0 @@ ---- -title: DataBalanceAnalysis - Adult Census Income -hide_title: true -status: stable ---- -## Data Balance Analysis using the Adult Census Income dataset - -In this example, we will conduct Data Balance Analysis (which consists of running three groups of measures) on the Adult Census Income dataset to determine how well features and feature values are represented in the dataset. - -This dataset can be used to predict whether annual income exceeds $50,000/year or not based on demographic data from the 1994 U.S. Census. The dataset we're reading contains 32,561 rows and 14 columns/features. - -[More info on the dataset here](https://archive.ics.uci.edu/ml/datasets/Adult) - ---- -Data Balance Analysis consists of a combination of three groups of measures: Feature Balance Measures, Distribution Balance Measures, and Aggregate Balance Measures. -In summary, Data Balance Analysis, when used as a step for building ML models, has the following benefits: - -* It reduces the costs of building ML models through the early identification of data representation gaps that prompt data scientists to seek mitigation steps (such as collecting more data, following a specific sampling mechanism, creating synthetic data, and so on) before proceeding to train their models. -* It enables easy end-to-end debugging of ML systems in combination with the [RAI Toolbox](https://responsibleaitoolbox.ai/responsible-ai-toolbox-capabilities/) by providing a clear view of model-related issues versus data-related issues. - ---- - -Note: If you are running this notebook in a Spark environment such as Azure Synapse or Databricks, then you can easily visualize the imbalance measures using the built-in plotting features.
- -Python dependencies: - -```text -matplotlib==3.2.2 -numpy==1.19.2 -``` - - - -```python -import matplotlib -import matplotlib.pyplot as plt -import numpy as np -import pyspark.sql.functions as F -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - - -```python -df = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet" -) -display(df) -``` - - -```python -# Convert the "income" column from {<=50K, >50K} to {0, 1} to represent our binary classification label column -label_col = "income" -df = df.withColumn( - label_col, F.when(F.col(label_col).contains("<=50K"), F.lit(0)).otherwise(F.lit(1)) -) -``` - -### Perform preliminary analysis on columns of interest - - -```python -display(df.groupBy("race").count()) -``` - - -```python -display(df.groupBy("sex").count()) -``` - - -```python -# Choose columns/features to do data balance analysis on -cols_of_interest = ["race", "sex"] -display(df.select(cols_of_interest + [label_col])) -``` - -### [Calculate Feature Balance Measures](/docs/features/responsible_ai/Data%20Balance%20Analysis/) - -Feature Balance Measures allow us to see whether each combination of sensitive feature is receiving the positive outcome (true prediction) at equal rates. - -In this context, we define a feature balance measure, also referred to as the parity, for label y as the absolute difference between the association metrics of two different sensitive classes $[x_A, x_B]$, with respect to the association metric $A(x_i, y)$. That is: - -$$parity(y \vert x_A, x_B, A(\cdot)) \coloneqq A(x_A, y) - A(x_B, y)$$ - -Using the dataset, we can see if the various sexes and races are receiving >50k income at equal or unequal rates. - -Note: Many of these metrics were influenced by this paper [Measuring Model Biases in the Absence of Ground Truth](https://arxiv.org/abs/2103.03417). 
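As a quick sanity check before using the built-in transformer, the simplest of these measures can be computed by hand. The sketch below is an illustrative aside (not part of the library API); it reuses `df` and `label_col` from the cells above and assumes the `sex` column contains the values "Male" and "Female" exactly.

```python
# Illustrative aside: compute statistical parity for "sex" by hand,
# using `df` and `label_col` from the cells above.
# Assumes the "sex" column contains exactly the values "Male" and "Female".
rates = {
    row["sex"]: row["positive_rate"]
    for row in df.groupBy("sex")
    .agg(F.avg(F.col(label_col).cast("double")).alias("positive_rate"))
    .collect()
}

# parity(income | Male, Female) = P(income=1 | Male) - P(income=1 | Female)
print("Statistical parity (Male vs. Female):", rates["Male"] - rates["Female"])
```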
- - - -```python -from synapse.ml.exploratory import FeatureBalanceMeasure - -feature_balance_measures = ( - FeatureBalanceMeasure() - .setSensitiveCols(cols_of_interest) - .setLabelCol(label_col) - .setVerbose(True) - .transform(df) -) - -# Sort by Statistical Parity descending for all features -display(feature_balance_measures.sort(F.abs("FeatureBalanceMeasure.dp").desc())) -``` - - -```python -# Drill down to feature == "sex" -display( - feature_balance_measures.filter(F.col("FeatureName") == "sex").sort( - F.abs("FeatureBalanceMeasure.dp").desc() - ) -) -``` - - -```python -# Drill down to feature == "race" -display( - feature_balance_measures.filter(F.col("FeatureName") == "race").sort( - F.abs("FeatureBalanceMeasure.dp").desc() - ) -) -``` - -#### Visualize Feature Balance Measures - - -```python -races = [row["race"] for row in df.groupBy("race").count().select("race").collect()] -dp_rows = ( - feature_balance_measures.filter(F.col("FeatureName") == "race") - .select("ClassA", "ClassB", "FeatureBalanceMeasure.dp") - .collect() -) -race_dp_values = [(row["ClassA"], row["ClassB"], row["dp"]) for row in dp_rows] - -race_dp_array = np.zeros((len(races), len(races))) -for class_a, class_b, dp_value in race_dp_values: - i, j = races.index(class_a), races.index(class_b) - dp_value = round(dp_value, 2) - race_dp_array[i, j] = dp_value - race_dp_array[j, i] = -1 * dp_value - -colormap = "RdBu" -dp_min, dp_max = -1.0, 1.0 - -fig, ax = plt.subplots() -im = ax.imshow(race_dp_array, vmin=dp_min, vmax=dp_max, cmap=colormap) - -cbar = ax.figure.colorbar(im, ax=ax) -cbar.ax.set_ylabel("Statistical Parity", rotation=-90, va="bottom") - -ax.set_xticks(np.arange(len(races))) -ax.set_yticks(np.arange(len(races))) -ax.set_xticklabels(races) -ax.set_yticklabels(races) - -plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor") - -for i in range(len(races)): - for j in range(len(races)): - text = ax.text(j, i, race_dp_array[i, j], ha="center", va="center", color="k") - -ax.set_title("Statistical Parity of Races in Adult Dataset") -fig.tight_layout() -plt.show() -``` - -![Statistical Parity of Races in Adult Dataset](https://mmlspark.blob.core.windows.net/graphics/responsible_ai/DataBalanceAnalysis_AdultCensusIncome_RacesDP.png) - -#### Interpret Feature Balance Measures - -Statistical Parity: -* When it is positive, it means that ClassA sees the positive outcome more than ClassB. -* When it is negative, it means that ClassB sees the positive outcome more than ClassA. - ---- - -From the results, we can tell the following: - -For Sex: -* SP(Male, Female) = 0.1963 shows "Male" observations are associated with ">50k" income label more often than "Female" observations. - -For Race: -* SP(Other, Asian-Pac-Islander) = -0.1734 shows "Other" observations are associated with ">50k" income label less than "Asian-Pac-Islander" observations. -* SP(White, Other) = 0.1636 shows "White" observations are associated with ">50k" income label more often than "Other" observations. -* SP(Asian-Pac-Islander, Amer-Indian-Eskimo) = 0.1494 shows "Asian-Pac-Islander" observations are associated with ">50k" income label more often than "Amer-Indian-Eskimo" observations. - -Again, you can take mitigation steps to upsample/downsample your data to be less biased towards certain features and feature values. - -Built-in mitigation steps are coming soon. 
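As a rough illustration of such a mitigation step (not part of the original notebook), one naive option is to down-sample the over-represented group with `sampleBy`. The sampling fractions below are placeholders chosen for the example; in practice they would be derived from the measures computed above.

```python
# Hypothetical naive mitigation: down-sample the over-represented "Male" group.
# The fractions are placeholders; strata missing from the dict are dropped entirely.
rebalanced_df = df.sampleBy("sex", fractions={"Male": 0.7, "Female": 1.0}, seed=42)

display(rebalanced_df.groupBy("sex").count())
```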
- -### Calculate [Distribution Balance Measures](/docs/features/responsible_ai/Data%20Balance%20Analysis/) - -Distribution Balance Measures allow us to compare our data with a reference distribution (i.e. uniform distribution). They are calculated per sensitive column and don't use the label column. | - - -```python -from synapse.ml.exploratory import DistributionBalanceMeasure - -distribution_balance_measures = ( - DistributionBalanceMeasure().setSensitiveCols(cols_of_interest).transform(df) -) - -# Sort by JS Distance descending -display( - distribution_balance_measures.sort( - F.abs("DistributionBalanceMeasure.js_dist").desc() - ) -) -``` - -#### Visualize Distribution Balance Measures - - -```python -distribution_rows = distribution_balance_measures.collect() -race_row = [row for row in distribution_rows if row["FeatureName"] == "race"][0][ - "DistributionBalanceMeasure" -] -sex_row = [row for row in distribution_rows if row["FeatureName"] == "sex"][0][ - "DistributionBalanceMeasure" -] - -measures_of_interest = [ - "kl_divergence", - "js_dist", - "inf_norm_dist", - "total_variation_dist", - "wasserstein_dist", -] -race_measures = [round(race_row[measure], 4) for measure in measures_of_interest] -sex_measures = [round(sex_row[measure], 4) for measure in measures_of_interest] - -x = np.arange(len(measures_of_interest)) -width = 0.35 - -fig, ax = plt.subplots() -rects1 = ax.bar(x - width / 2, race_measures, width, label="Race") -rects2 = ax.bar(x + width / 2, sex_measures, width, label="Sex") - -ax.set_xlabel("Measure") -ax.set_ylabel("Value") -ax.set_title("Distribution Balance Measures of Sex and Race in Adult Dataset") -ax.set_xticks(x) -ax.set_xticklabels(measures_of_interest) -ax.legend() - -plt.setp(ax.get_xticklabels(), rotation=20, ha="right", rotation_mode="default") - - -def autolabel(rects): - for rect in rects: - height = rect.get_height() - ax.annotate( - "{}".format(height), - xy=(rect.get_x() + rect.get_width() / 2, height), - xytext=(0, 1), # 1 point vertical offset - textcoords="offset points", - ha="center", - va="bottom", - ) - - -autolabel(rects1) -autolabel(rects2) - -fig.tight_layout() - -plt.show() -``` - -![Distribution Balance Measures of Sex and Race in Adult Dataset](https://mmlspark.blob.core.windows.net/graphics/responsible_ai/DataBalanceAnalysis_AdultCensusIncome_DistributionMeasures.png) - -#### Interpret Distribution Balance Measures - -Race has a JS Distance of 0.5104 while Sex has a JS Distance of 0.1217. - -Knowing that JS Distance is between [0, 1] where 0 means perfectly balanced distribution, we can tell that: -* There is a larger disparity between various races than various sexes in our dataset. -* Race is nowhere close to a perfectly balanced distribution (i.e. some races are seen ALOT more than others in our dataset). -* Sex is fairly close to a perfectly balanced distribution. - -### Calculate [Aggregate Balance Measures](/docs/features/responsible_ai/Data%20Balance%20Analysis/) - -Aggregate Balance Measures allow us to obtain a higher notion of inequality. They are calculated on the global set of sensitive columns and don't use the label column. - -These measures look at distribution of records across all combinations of sensitive columns. For example, if Sex and Race are sensitive columns, it shall try to quantify imbalance across all combinations - (Male, Black), (Female, White), (Male, Asian-Pac-Islander), etc. 
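To see exactly which combinations these measures summarize, it can help to count the records per combination first. This is an illustrative aside, reusing `df` and `cols_of_interest` from the cells above.

```python
# Illustrative aside: inspect the record count for every (race, sex) combination
# that the aggregate measures below summarize.
display(df.groupBy(cols_of_interest).count().orderBy(F.col("count").desc()))
```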
- - -```python -from synapse.ml.exploratory import AggregateBalanceMeasure - -aggregate_balance_measures = ( - AggregateBalanceMeasure().setSensitiveCols(cols_of_interest).transform(df) -) - -display(aggregate_balance_measures) -``` - -#### Interpret Aggregate Balance Measures - -An Atkinson Index of 0.7779 lets us know that 77.79% of data points need to be foregone to have a more equal share among our features. - -It lets us know that our dataset is leaning towards maximum inequality, and we should take actionable steps to: -* Upsample data points where the feature value is barely observed. -* Downsample data points where the feature value is observed much more than others. - -### Summary - -Throughout the course of this sample notebook, we have: -1. Chosen "Race" and "Sex" as columns of interest in the Adult Census Income dataset. -2. Done preliminary analysis on our dataset. -3. Ran the 3 groups of measures that compose our **Data Balance Analysis**: - * **Feature Balance Measures** - * Calculated Feature Balance Measures to see that the highest Statistical Parity is in "Sex": Males see >50k income much more than Females. - * Visualized Statistical Parity of Races to see that Asian-Pac-Islander sees >50k income much more than Other, in addition to other race combinations. - * **Distribution Balance Measures** - * Calculated Distribution Balance Measures to see that "Sex" is much closer to a perfectly balanced distribution than "Race". - * Visualized various distribution balance measures to compare their values for "Race" and "Sex". - * **Aggregate Balance Measures** - * Calculated Aggregate Balance Measures to see that we need to forego 77.79% of data points to have a perfectly balanced dataset. We identified that our dataset is leaning towards maximum inequality, and we should take actionable steps to: - * Upsample data points where the feature value is barely observed. - * Downsample data points where the feature value is observed much more than others. - -**In conclusion:** -* These measures provide an indicator of disparity on the data, allowing for users to explore potential mitigations before proceeding to train. -* Users can use these measures to set thresholds on their level of "tolerance" for data representation. -* Production pipelines can use these measures as baseline for models that require frequent retraining on new data. -* These measures can also be saved as key metadata for the model/service built and added as part of model cards or transparency notes helping drive overall accountability for the ML service built and its performance across different demographics or sensitive attributes. diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Explanation Dashboard.md b/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Explanation Dashboard.md deleted file mode 100644 index 87d943ff7b..0000000000 --- a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Explanation Dashboard.md +++ /dev/null @@ -1,241 +0,0 @@ ---- -title: Interpretability - Explanation Dashboard -hide_title: true -status: stable ---- -## Interpretability - Explanation Dashboard - -In this example, similar to the "Interpretability - Tabular SHAP explainer" notebook, we use Kernel SHAP to explain a tabular classification model built from the Adults Census dataset and then visualize the explanation in the ExplanationDashboard from https://github.com/microsoft/responsible-ai-widgets. 
- -First we import the packages and define some UDFs we will need later. - - -```python -import pyspark -from IPython import get_ipython -from IPython.terminal.interactiveshell import TerminalInteractiveShell -from synapse.ml.explainers import * -from pyspark.ml import Pipeline -from pyspark.ml.classification import LogisticRegression -from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler -from pyspark.sql.types import * -from pyspark.sql.functions import * -import pandas as pd -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - shell = TerminalInteractiveShell.instance() - shell.define_macro("foo", """a,b=10,20""") - from notebookutils.visualization import display - - -vec_access = udf(lambda v, i: float(v[i]), FloatType()) -vec2array = udf(lambda vec: vec.toArray().tolist(), ArrayType(FloatType())) -``` - -Now let's read the data and train a simple binary classification model. - - -```python -df = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet" -).cache() - -labelIndexer = StringIndexer( - inputCol="income", outputCol="label", stringOrderType="alphabetAsc" -).fit(df) -print("Label index assigment: " + str(set(zip(labelIndexer.labels, [0, 1])))) - -training = labelIndexer.transform(df) -display(training) -categorical_features = [ - "workclass", - "education", - "marital-status", - "occupation", - "relationship", - "race", - "sex", - "native-country", -] -categorical_features_idx = [col + "_idx" for col in categorical_features] -categorical_features_enc = [col + "_enc" for col in categorical_features] -numeric_features = [ - "age", - "education-num", - "capital-gain", - "capital-loss", - "hours-per-week", -] - -strIndexer = StringIndexer( - inputCols=categorical_features, outputCols=categorical_features_idx -) -onehotEnc = OneHotEncoder( - inputCols=categorical_features_idx, outputCols=categorical_features_enc -) -vectAssem = VectorAssembler( - inputCols=categorical_features_enc + numeric_features, outputCol="features" -) -lr = LogisticRegression(featuresCol="features", labelCol="label", weightCol="fnlwgt") -pipeline = Pipeline(stages=[strIndexer, onehotEnc, vectAssem, lr]) -model = pipeline.fit(training) -``` - -After the model is trained, we randomly select some observations to be explained. - - -```python -explain_instances = ( - model.transform(training).orderBy(rand()).limit(5).repartition(200).cache() -) -display(explain_instances) -``` - -We create a TabularSHAP explainer, set the input columns to all the features the model takes, specify the model and the target output column we are trying to explain. In this case, we are trying to explain the "probability" output which is a vector of length 2, and we are only looking at class 1 probability. Specify targetClasses to `[0, 1]` if you want to explain class 0 and 1 probability at the same time. Finally we sample 100 rows from the training data for background data, which is used for integrating out features in Kernel SHAP. 
- - -```python -shap = TabularSHAP( - inputCols=categorical_features + numeric_features, - outputCol="shapValues", - numSamples=5000, - model=model, - targetCol="probability", - targetClasses=[1], - backgroundData=broadcast(training.orderBy(rand()).limit(100).cache()), -) - -shap_df = shap.transform(explain_instances) -``` - -Once we have the resulting dataframe, we extract the class 1 probability of the model output, the SHAP values for the target class, the original features and the true label. Then we convert it to a pandas dataframe for visisualization. -For each observation, the first element in the SHAP values vector is the base value (the mean output of the background dataset), and each of the following element is the SHAP values for each feature. - - -```python -shaps = ( - shap_df.withColumn("probability", vec_access(col("probability"), lit(1))) - .withColumn("shapValues", vec2array(col("shapValues").getItem(0))) - .select( - ["shapValues", "probability", "label"] + categorical_features + numeric_features - ) -) - -shaps_local = shaps.toPandas() -shaps_local.sort_values("probability", ascending=False, inplace=True, ignore_index=True) -pd.set_option("display.max_colwidth", None) -shaps_local -``` - -We can visualize the explanation in the [interpret-community format](https://github.com/interpretml/interpret-community) in the ExplanationDashboard from https://github.com/microsoft/responsible-ai-widgets/ - - -```python -import pandas as pd -import numpy as np - -features = categorical_features + numeric_features -features_with_base = ["Base"] + features - -rows = shaps_local.shape[0] - -local_importance_values = shaps_local[["shapValues"]] -eval_data = shaps_local[features] -true_y = np.array(shaps_local[["label"]]) -``` - - -```python -list_local_importance_values = local_importance_values.values.tolist() -converted_importance_values = [] -bias = [] -for classarray in list_local_importance_values: - for rowarray in classarray: - converted_list = rowarray.tolist() - bias.append(converted_list[0]) - # remove the bias from local importance values - del converted_list[0] - converted_importance_values.append(converted_list) -``` - -When running Synapse Analytics, please follow instructions here [Package management - Azure Synapse Analytics | Microsoft Docs](https://docs.microsoft.com/en-us/azure/synapse-analytics/spark/apache-spark-azure-portal-add-libraries) to install ["raiwidgets"](https://pypi.org/project/raiwidgets/) and ["interpret-community"](https://pypi.org/project/interpret-community/) packages. 
- - -```python -!pip install --upgrade raiwidgets -!pip install itsdangerous==2.0.1 -``` - - -```python -!pip install --upgrade interpret-community -``` - - -```python -from interpret_community.adapter import ExplanationAdapter - -adapter = ExplanationAdapter(features, classification=True) -global_explanation = adapter.create_global( - converted_importance_values, eval_data, expected_values=bias -) -``` - - -```python -# view the global importance values -global_explanation.global_importance_values -``` - - -```python -# view the local importance values -global_explanation.local_importance_values -``` - - -```python -class wrapper(object): - def __init__(self, model): - self.model = model - - def predict(self, data): - sparkdata = spark.createDataFrame(data) - return ( - model.transform(sparkdata) - .select("prediction") - .toPandas() - .values.flatten() - .tolist() - ) - - def predict_proba(self, data): - sparkdata = spark.createDataFrame(data) - prediction = ( - model.transform(sparkdata) - .select("probability") - .toPandas() - .values.flatten() - .tolist() - ) - proba_list = [vector.values.tolist() for vector in prediction] - return proba_list -``` - - -```python -# view the explanation in the ExplanationDashboard -from raiwidgets import ExplanationDashboard - -ExplanationDashboard( - global_explanation, wrapper(model), dataset=eval_data, true_y=true_y -) -``` - -Your results will look like: - - diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Image Explainers.md b/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Image Explainers.md deleted file mode 100644 index d006ef0d9e..0000000000 --- a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Image Explainers.md +++ /dev/null @@ -1,239 +0,0 @@ ---- -title: Interpretability - Image Explainers -hide_title: true -status: stable ---- -## Interpretability - Image Explainers - -In this example, we use LIME and Kernel SHAP explainers to explain the ResNet50 model's multi-class output of an image. - -First we import the packages and define some UDFs and a plotting function we will need later. 
- - -```python -from synapse.ml.explainers import * -from synapse.ml.onnx import ONNXModel -from synapse.ml.opencv import ImageTransformer -from synapse.ml.io import * -from pyspark.ml import Pipeline -from pyspark.ml.classification import LogisticRegression -from pyspark.ml.feature import StringIndexer -from pyspark.sql.functions import * -from pyspark.sql.types import * -import numpy as np -import pyspark -import urllib.request -import matplotlib.pyplot as plt -import PIL, io -from PIL import Image -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display - -vec_slice = udf( - lambda vec, indices: (vec.toArray())[indices].tolist(), ArrayType(FloatType()) -) -arg_top_k = udf( - lambda vec, k: (-vec.toArray()).argsort()[:k].tolist(), ArrayType(IntegerType()) -) - - -def downloadBytes(url: str): - with urllib.request.urlopen(url) as url: - barr = url.read() - return barr - - -def rotate_color_channel(bgr_image_array, height, width, nChannels): - B, G, R, *_ = np.asarray(bgr_image_array).reshape(height, width, nChannels).T - rgb_image_array = np.array((R, G, B)).T - return rgb_image_array - - -def plot_superpixels(image_rgb_array, sp_clusters, weights, green_threshold=99): - superpixels = sp_clusters - green_value = np.percentile(weights, green_threshold) - img = Image.fromarray(image_rgb_array, mode="RGB").convert("RGBA") - image_array = np.asarray(img).copy() - for (sp, v) in zip(superpixels, weights): - if v > green_value: - for (x, y) in sp: - image_array[y, x, 1] = 255 - image_array[y, x, 3] = 200 - plt.clf() - plt.imshow(image_array) - if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - plt.show() - else: - display() -``` - -Create a dataframe for a testing image, and use the ResNet50 ONNX model to infer the image. - -The result shows 39.6% probability of "violin" (889), and 38.4% probability of "upright piano" (881). - - -```python -from synapse.ml.io import * - -image_df = spark.read.image().load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/explainers/images/david-lusvardi-dWcUncxocQY-unsplash.jpg" -) -display(image_df) - -# Rotate the image array from BGR into RGB channels for visualization later. 
-row = image_df.select( - "image.height", "image.width", "image.nChannels", "image.data" -).head() -locals().update(row.asDict()) -rgb_image_array = rotate_color_channel(data, height, width, nChannels) - -# Download the ONNX model -modelPayload = downloadBytes( - "https://mmlspark.blob.core.windows.net/publicwasb/ONNXModels/resnet50-v2-7.onnx" -) - -featurizer = ( - ImageTransformer(inputCol="image", outputCol="features") - .resize(224, True) - .centerCrop(224, 224) - .normalize( - mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225], - color_scale_factor=1 / 255, - ) - .setTensorElementType(FloatType()) -) - -onnx = ( - ONNXModel() - .setModelPayload(modelPayload) - .setFeedDict({"data": "features"}) - .setFetchDict({"rawPrediction": "resnetv24_dense0_fwd"}) - .setSoftMaxDict({"rawPrediction": "probability"}) - .setMiniBatchSize(1) -) - -model = Pipeline(stages=[featurizer, onnx]).fit(image_df) -``` - - -```python -predicted = ( - model.transform(image_df) - .withColumn("top2pred", arg_top_k(col("probability"), lit(2))) - .withColumn("top2prob", vec_slice(col("probability"), col("top2pred"))) -) - -display(predicted.select("top2pred", "top2prob")) -``` - -First we use the LIME image explainer to explain the model's top 2 classes' probabilities. - - -```python -lime = ( - ImageLIME() - .setModel(model) - .setOutputCol("weights") - .setInputCol("image") - .setCellSize(150.0) - .setModifier(50.0) - .setNumSamples(500) - .setTargetCol("probability") - .setTargetClassesCol("top2pred") - .setSamplingFraction(0.7) -) - -lime_result = ( - lime.transform(predicted) - .withColumn("weights_violin", col("weights").getItem(0)) - .withColumn("weights_piano", col("weights").getItem(1)) - .cache() -) - -display(lime_result.select(col("weights_violin"), col("weights_piano"))) -lime_row = lime_result.head() -``` - -We plot the LIME weights for "violin" output and "upright piano" output. - -Green area are superpixels with LIME weights above 95 percentile. - - -```python -plot_superpixels( - rgb_image_array, - lime_row["superpixels"]["clusters"], - list(lime_row["weights_violin"]), - 95, -) -plot_superpixels( - rgb_image_array, - lime_row["superpixels"]["clusters"], - list(lime_row["weights_piano"]), - 95, -) -``` - -Your results will look like: - - - -Then we use the Kernel SHAP image explainer to explain the model's top 2 classes' probabilities. - - -```python -shap = ( - ImageSHAP() - .setModel(model) - .setOutputCol("shaps") - .setSuperpixelCol("superpixels") - .setInputCol("image") - .setCellSize(150.0) - .setModifier(50.0) - .setNumSamples(500) - .setTargetCol("probability") - .setTargetClassesCol("top2pred") -) - -shap_result = ( - shap.transform(predicted) - .withColumn("shaps_violin", col("shaps").getItem(0)) - .withColumn("shaps_piano", col("shaps").getItem(1)) - .cache() -) - -display(shap_result.select(col("shaps_violin"), col("shaps_piano"))) -shap_row = shap_result.head() -``` - -We plot the SHAP values for "piano" output and "cell" output. - -Green area are superpixels with SHAP values above 95 percentile. - -> Notice that we drop the base value from the SHAP output before rendering the superpixels. The base value is the model output for the background (all black) image. 
- - -```python -plot_superpixels( - rgb_image_array, - shap_row["superpixels"]["clusters"], - list(shap_row["shaps_violin"][1:]), - 95, -) -plot_superpixels( - rgb_image_array, - shap_row["superpixels"]["clusters"], - list(shap_row["shaps_piano"][1:]), - 95, -) -``` - -Your results will look like: - - diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - PDP and ICE explainer.md b/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - PDP and ICE explainer.md deleted file mode 100644 index afb5f4b175..0000000000 --- a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - PDP and ICE explainer.md +++ /dev/null @@ -1,528 +0,0 @@ ---- -title: Interpretability - PDP and ICE explainer -hide_title: true -status: stable ---- -## Partial Dependence (PDP) and Individual Conditional Expectation (ICE) plots - -Partial Dependence Plot (PDP) and Individual Conditional Expectation (ICE) are interpretation methods which describe the average behavior of a classification or regression model. They are particularly useful when the model developer wants to understand, in general terms, how the model depends on individual feature values and how it behaves overall, and when debugging the model. - -To practice responsible AI, it is crucial to understand which features drive your model's predictions. This knowledge can facilitate the creation of Transparency Notes, facilitate auditing and compliance, help satisfy regulatory requirements, and improve both transparency and accountability. - -The goal of this notebook is to show how these methods work for a pretrained model. - -In this example, we train a classification model with the Adult Census Income dataset. Then we treat the model as an opaque-box model and calculate the PDP and ICE plots for some selected categorical and numeric features. - -This dataset can be used to predict whether annual income exceeds $50,000/year or not based on demographic data from the 1994 U.S. Census. The dataset we're reading contains 32,561 rows and 14 columns/features. - -[More info on the dataset here](https://archive.ics.uci.edu/ml/datasets/Adult) - -We will train a classification model to predict >= 50K or < 50K based on our features.
- ---- -Python dependencies: - -matplotlib==3.2.2 - - -```python -from pyspark.ml import Pipeline -from pyspark.ml.classification import GBTClassifier -from pyspark.ml.feature import VectorAssembler, StringIndexer, OneHotEncoder -import pyspark.sql.functions as F -from pyspark.ml.evaluation import BinaryClassificationEvaluator - -from synapse.ml.explainers import ICETransformer - -import matplotlib.pyplot as plt - -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - -### Read and prepare the dataset - - -```python -df = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet" -) -display(df) -``` - -### Fit the model and view the predictions - - -```python -categorical_features = [ - "race", - "workclass", - "marital-status", - "education", - "occupation", - "relationship", - "native-country", - "sex", -] -numeric_features = [ - "age", - "education-num", - "capital-gain", - "capital-loss", - "hours-per-week", -] -``` - - -```python -string_indexer_outputs = [feature + "_idx" for feature in categorical_features] -one_hot_encoder_outputs = [feature + "_enc" for feature in categorical_features] - -pipeline = Pipeline( - stages=[ - StringIndexer() - .setInputCol("income") - .setOutputCol("label") - .setStringOrderType("alphabetAsc"), - StringIndexer() - .setInputCols(categorical_features) - .setOutputCols(string_indexer_outputs), - OneHotEncoder() - .setInputCols(string_indexer_outputs) - .setOutputCols(one_hot_encoder_outputs), - VectorAssembler( - inputCols=one_hot_encoder_outputs + numeric_features, outputCol="features" - ), - GBTClassifier(weightCol="fnlwgt", maxDepth=7, maxIter=100), - ] -) - -model = pipeline.fit(df) -``` - -Check that model makes sense and has reasonable output. For this, we will check the model performance by calculating the ROC-AUC score. - - -```python -data = model.transform(df) -display(data.select("income", "probability", "prediction")) -``` - - -```python -eval_auc = BinaryClassificationEvaluator( - labelCol="label", rawPredictionCol="prediction" -) -eval_auc.evaluate(data) -``` - -## Partial Dependence Plots - -Partial dependence plots (PDP) show the dependence between the target response and a set of input features of interest, marginalizing over the values of all other input features. It can show whether the relationship between the target response and the input feature is linear, smooth, monotonic, or more complex. This is relevant when you want to have an overall understanding of model behavior. E.g. Identifying specific age group have a favorable predictions vs other age groups. - -If you want to learn more please check out the [scikit-learn page on partial dependence plots](https://scikit-learn.org/stable/modules/partial_dependence.html#partial-dependence-plots). - -### Setup the transformer for PDP - -To plot PDP we need to set up the instance of `ICETransformer` first and set the `kind` parameter to `average` and then call the `transform` function. - -For the setup we need to pass the pretrained model, specify the target column ("probability" in our case), and pass categorical and numeric feature names. - -Categorical and numeric features can be passed as a list of names. But we can specify parameters for the features by passing a list of dicts where each dict represents one feature. 
- -For the numeric features a dictionary can look like this: - -{"name": "capital-gain", "numSplits": 20, "rangeMin": 0.0, "rangeMax": 10000.0, "outputColName": "capital-gain_dependance"} - -Where the required key-value pair is `name` - the name of the numeric feature. Next key-values pairs are optional: `numSplits` - the number of splits for the value range for the numeric feature, `rangeMin` - specifies the min value of the range for the numeric feature, `rangeMax` - specifies the max value of the range for the numeric feature, `outputColName` - the name for output column with explanations for the feature. - - -For the categorical features a dictionary can look like this: - -{"name": "marital-status", "numTopValues": 10, "outputColName": "marital-status_dependance"} - -Where the required key-value pair is `name` - the name of the numeric feature. Next key-values pairs are optional: `numTopValues` - the max number of top-occurring values to be included in the categorical feature, `outputColName` - the name for output column with explanations for the feature. - - -```python -pdp = ICETransformer( - model=model, - targetCol="probability", - kind="average", - targetClasses=[1], - categoricalFeatures=categorical_features, - numericFeatures=numeric_features, -) -``` - -PDP transformer returns a dataframe of 1 row * {number features to explain} columns. Each column contains a map between the feature's values and the model's average dependence for the that feature value. - - -```python -output_pdp = pdp.transform(df) -display(output_pdp) -``` - -### Visualization - - -```python -# Helper functions for visualization - - -def get_pandas_df_from_column(df, col_name): - keys_df = df.select(F.explode(F.map_keys(F.col(col_name)))).distinct() - keys = list(map(lambda row: row[0], keys_df.collect())) - key_cols = list(map(lambda f: F.col(col_name).getItem(f).alias(str(f)), keys)) - final_cols = key_cols - pandas_df = df.select(final_cols).toPandas() - return pandas_df - - -def plot_dependence_for_categorical(df, col, col_int=True, figsize=(20, 5)): - dict_values = {} - col_names = list(df.columns) - - for col_name in col_names: - dict_values[col_name] = df[col_name][0].toArray()[0] - marklist = sorted( - dict_values.items(), key=lambda x: int(x[0]) if col_int else x[0] - ) - sortdict = dict(marklist) - - fig = plt.figure(figsize=figsize) - plt.bar(sortdict.keys(), sortdict.values()) - - plt.xlabel(col, size=13) - plt.ylabel("Dependence") - plt.show() - - -def plot_dependence_for_numeric(df, col, col_int=True, figsize=(20, 5)): - dict_values = {} - col_names = list(df.columns) - - for col_name in col_names: - dict_values[col_name] = df[col_name][0].toArray()[0] - marklist = sorted( - dict_values.items(), key=lambda x: int(x[0]) if col_int else x[0] - ) - sortdict = dict(marklist) - - fig = plt.figure(figsize=figsize) - - plt.plot(list(sortdict.keys()), list(sortdict.values())) - - plt.xlabel(col, size=13) - plt.ylabel("Dependence") - plt.ylim(0.0) - plt.show() -``` - -#### Example 1: "age" - -We can observe non-linear dependency. The model predicts that income rapidly grows from 24-46 y.o. age, after 46 y.o. model predictions slightly drops and from 68 y.o. remains stable. 
- - -```python -df_education_num = get_pandas_df_from_column(output_pdp, "age_dependence") -plot_dependence_for_numeric(df_education_num, "age") -``` - -Your results will look like: - -![pdp_age](https://mmlspark.blob.core.windows.net/graphics/explainers/pdp_age.png) - -#### Example 2: "marital-status" - -The model seems to treat "married-cv-spouse" as one category and tend to give a higher average prediction, and all others as a second category with the lower average prediction. - - -```python -df_occupation = get_pandas_df_from_column(output_pdp, "marital-status_dependence") -plot_dependence_for_categorical(df_occupation, "marital-status", False, figsize=(30, 5)) -``` - -Your results will look like: -![pdp_marital-status](https://mmlspark.blob.core.windows.net/graphics/explainers/pdp_marital-status.png) - -#### Example 3: "capital-gain" - -In the first graph, we run PDP with default parameters. We can see that this representation is not super useful because it is not granular enough. By default the range of numeric features are calculated dynamically from the data. - -In the second graph, we set rangeMin = 0 and rangeMax = 10000 to visualize more granular interpretations for the feature of interest. Now we can see more clearly how the model made decisions in a smaller region. - - -```python -df_education_num = get_pandas_df_from_column(output_pdp, "capital-gain_dependence") -plot_dependence_for_numeric(df_education_num, "capital-gain_dependence") -``` - -Your results will look like: - -![pdp_capital-gain-first](https://mmlspark.blob.core.windows.net/graphics/explainers/pdp_capital-gain-first.png) - - -```python -pdp_cap_gain = ICETransformer( - model=model, - targetCol="probability", - kind="average", - targetClasses=[1], - numericFeatures=[ - {"name": "capital-gain", "numSplits": 20, "rangeMin": 0.0, "rangeMax": 10000.0} - ], - numSamples=50, -) -output_pdp_cap_gain = pdp_cap_gain.transform(df) -df_education_num_gain = get_pandas_df_from_column( - output_pdp_cap_gain, "capital-gain_dependence" -) -plot_dependence_for_numeric(df_education_num_gain, "capital-gain_dependence") -``` - -Your results will look like: - -![pdp_capital-gain-second](https://mmlspark.blob.core.windows.net/graphics/explainers/pdp_capital-gain-second.png) - -### Conclusions - -PDP can be used to show how features influences model predictions on average and help modeler catch unexpected behavior from the model. - -## Individual Conditional Expectation - -ICE plots display one line per instance that shows how the instance’s prediction changes when a feature values changes. Each line represents the predictions for one instance if we vary the feature of interest. This is relevant when you want to observe model prediction for instances individually in more details. - - -If you want to learn more please check out the [scikit-learn page on ICE plots](https://scikit-learn.org/stable/modules/partial_dependence.html#individual-conditional-expectation-ice-plot). - -### Setup the transformer for ICE - -To plot ICE we need to set up the instance of `ICETransformer` first and set the `kind` parameter to `individual` and then call the `transform` function. For the setup we need to pass the pretrained model, specify the target column ("probability" in our case), and pass categorical and numeric feature names. For better visualization we set the number of samples to 50. 
- - -```python -ice = ICETransformer( - model=model, - targetCol="probability", - targetClasses=[1], - categoricalFeatures=categorical_features, - numericFeatures=numeric_features, - numSamples=50, -) - -output = ice.transform(df) -``` - -### Visualization - - -```python -# Helper functions for visualization -from math import pi - -from collections import defaultdict - - -def plot_ice_numeric(df, col, col_int=True, figsize=(20, 10)): - dict_values = defaultdict(list) - col_names = list(df.columns) - num_instances = df.shape[0] - - instances_y = {} - i = 0 - - for col_name in col_names: - for i in range(num_instances): - dict_values[i].append(df[col_name][i].toArray()[0]) - - fig = plt.figure(figsize=figsize) - for i in range(num_instances): - plt.plot(col_names, dict_values[i], "k") - - plt.xlabel(col, size=13) - plt.ylabel("Dependence") - plt.ylim(0.0) - - -def plot_ice_categorical(df, col, col_int=True, figsize=(20, 10)): - dict_values = defaultdict(list) - col_names = list(df.columns) - num_instances = df.shape[0] - - angles = [n / float(df.shape[1]) * 2 * pi for n in range(df.shape[1])] - angles += angles[:1] - - instances_y = {} - i = 0 - - for col_name in col_names: - for i in range(num_instances): - dict_values[i].append(df[col_name][i].toArray()[0]) - - fig = plt.figure(figsize=figsize) - ax = plt.subplot(111, polar=True) - plt.xticks(angles[:-1], col_names) - - for i in range(num_instances): - values = dict_values[i] - values += values[:1] - ax.plot(angles, values, "k") - ax.fill(angles, values, "teal", alpha=0.1) - - plt.xlabel(col, size=13) - plt.show() - - -def overlay_ice_with_pdp(df_ice, df_pdp, col, col_int=True, figsize=(20, 5)): - dict_values = defaultdict(list) - col_names_ice = list(df_ice.columns) - num_instances = df_ice.shape[0] - - instances_y = {} - i = 0 - - for col_name in col_names_ice: - for i in range(num_instances): - dict_values[i].append(df_ice[col_name][i].toArray()[0]) - - fig = plt.figure(figsize=figsize) - for i in range(num_instances): - plt.plot(col_names_ice, dict_values[i], "k") - - dict_values_pdp = {} - col_names = list(df_pdp.columns) - - for col_name in col_names: - dict_values_pdp[col_name] = df_pdp[col_name][0].toArray()[0] - marklist = sorted( - dict_values_pdp.items(), key=lambda x: int(x[0]) if col_int else x[0] - ) - sortdict = dict(marklist) - - plt.plot(col_names_ice, list(sortdict.values()), "r", linewidth=5) - - plt.xlabel(col, size=13) - plt.ylabel("Dependence") - plt.ylim(0.0) - plt.show() -``` - -#### Example 1: Numeric feature: "age" - -We can overlay the PDP on top of ICE plots. In the graph, the red line shows the PDP plot for the "age" feature, and the black lines show ICE plots for 50 randomly selected observations. - -The visualization shows that all curves in the ICE plot follow a similar course. This means that the PDP (red line) is already a good summary of the relationships between the displayed feature "age" and the model's average predictions of "income". - - -```python -age_df_ice = get_pandas_df_from_column(output, "age_dependence") -age_df_pdp = get_pandas_df_from_column(output_pdp, "age_dependence") - -overlay_ice_with_pdp(age_df_ice, age_df_pdp, col="age_dependence", figsize=(30, 10)) -``` - -Your results will look like: -![pdp_age_overlayed](https://mmlspark.blob.core.windows.net/graphics/explainers/pdp_age_overlayed.png) - -#### Example 2: Categorical feature: "occupation" - -For visualization of categorical features, we are using a star plot. 
- -- The X-axis here is a circle which is splitted into equal parts, each representing a feature value. -- The Y-coordinate shows the dependence values. Each line represents a sample observation. - -Here we can see that "Farming-fishing" drives the least predictions - because values accumulated near the lowest probabilities, but, for example, "Exec-managerial" seems to have one of the highest impacts for model predictions. - - -```python -occupation_dep = get_pandas_df_from_column(output, "occupation_dependence") - -plot_ice_categorical(occupation_dep, "occupation_dependence", figsize=(30, 10)) -``` - -Your results will look like: - -![pdp_occupation-star-plot](https://mmlspark.blob.core.windows.net/graphics/explainers/pdp_occupation-star-plot.png) - -### Conclusions - -ICE plots show model behavior on individual observations. Each line represents the prediction from the model if we vary the feature of interest. - -## PDP-based Feature Importance - -Using PDP we can calculate a simple partial dependence-based feature importance measure. We note that a flat PDP indicates that varying the feature does not affect the prediction. The more the PDP varies, the more "important" the feature is. - - - -If you want to learn more please check out [Christoph M's Interpretable ML Book](https://christophm.github.io/interpretable-ml-book/pdp.html#pdp-based-feature-importance). - -### Setup the transformer for PDP-based Feature Importance - -To plot PDP-based feature importance, we first need to set up the instance of `ICETransformer` by setting the `kind` parameter to `feature`. We can then call the `transform` function. - -`transform` returns a two-column table where the first columns are feature importance values and the second are corresponding features names. The rows are sorted in descending order by feature importance values. - - -```python -pdp_based_imp = ICETransformer( - model=model, - targetCol="probability", - kind="feature", - targetClasses=[1], - categoricalFeatures=categorical_features, - numericFeatures=numeric_features, -) - -output_pdp_based_imp = pdp_based_imp.transform(df) -display(output_pdp_based_imp) -``` - -### Visualization - - -```python -# Helper functions for visualization - - -def plot_pdp_based_imp(df, figsize=(35, 5)): - values_list = list(df.select("pdpBasedDependence").toPandas()["pdpBasedDependence"]) - names = list(df.select("featureNames").toPandas()["featureNames"]) - dependence_values = [] - for vec in values_list: - dependence_values.append(vec.toArray()[0]) - - fig = plt.figure(figsize=figsize) - plt.bar(names, dependence_values) - - plt.xlabel("Feature names", size=13) - plt.ylabel("PDP-based-feature-imporance") - plt.show() -``` - -This shows that the features `capital-gain` and `education-num` were the most important for the model, and `sex` and `education` were the least important. - - -```python -plot_pdp_based_imp(output_pdp_based_imp) -``` - -Your results will look like: - -![pdp_based-importance](https://mmlspark.blob.core.windows.net/graphics/explainers/pdp-based-importance.png) - -## Overall conclusions - - -Interpretation methods are very important responsible AI tools. - -Partial dependence plots (PDP) and Individual Conditional Expectation (ICE) plots can be used to visualize and analyze interaction between the target response and a set of input features of interest. - -PDPs show the dependence of the average prediction when varying each feature. In contrast, ICE shows the dependence for individual samples. 
The approaches can help give rough estimates of a function's deviation from a baseline. This is important not only to help debug and understand how a model behaves but is a useful step in building responsible AI systems. These methodologies can improve transparency and provide model consumers with an extra level of accountability by model creators. - -Using examples above we showed how to calculate and visualize such plots at a scalable manner to understand how a classification or regression model makes predictions, which features heavily impact the model, and how model prediction changes when feature value changes. diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Snow Leopard Detection.md b/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Snow Leopard Detection.md deleted file mode 100644 index 50f68e757c..0000000000 --- a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Snow Leopard Detection.md +++ /dev/null @@ -1,270 +0,0 @@ ---- -title: Interpretability - Snow Leopard Detection -hide_title: true -status: stable ---- -## Automated Snow Leopard Detection with Synapse Machine Learning - - - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display - from notebookutils.mssparkutils.credentials import getSecret - - os.environ["BING_IMAGE_SEARCH_KEY"] = getSecret( - "mmlspark-build-keys", "bing-search-key" - ) - -# WARNING this notebook requires alot of memory. -# If you get a heap space error, try dropping the number of images bing returns -# or by writing out the images to parquet first - -# Replace the following with a line like: BING_IMAGE_SEARCH_KEY = "hdwo2oyd3o928s....." -BING_IMAGE_SEARCH_KEY = os.environ["BING_IMAGE_SEARCH_KEY"] -``` - - -```python -from synapse.ml.cognitive import * -from synapse.ml.core.spark import FluentAPI -from pyspark.sql.functions import lit - - -def bingPhotoSearch(name, queries, pages): - offsets = [offset * 10 for offset in range(0, pages)] - parameters = [(query, offset) for offset in offsets for query in queries] - - return ( - spark.createDataFrame(parameters, ("queries", "offsets")) - .mlTransform( - BingImageSearch() # Apply Bing Image Search - .setSubscriptionKey(BING_IMAGE_SEARCH_KEY) # Set the API Key - .setOffsetCol("offsets") # Specify a column containing the offsets - .setQueryCol("queries") # Specify a column containing the query words - .setCount(10) # Specify the number of images to return per offset - .setImageType("photo") # Specify a filter to ensure we get photos - .setOutputCol("images") - ) - .mlTransform(BingImageSearch.getUrlTransformer("images", "urls")) - .withColumn("labels", lit(name)) - .limit(400) - ) -``` - - - - -```python -def displayDF(df, n=5, image_cols=set(["urls"])): - rows = df.take(n) - cols = df.columns - header = "".join(["" + c + "" for c in cols]) - - style = """ - - - - -""" - - table = [] - for row in rows: - table.append("") - for col in cols: - if col in image_cols: - rep = ''.format(row[col]) - else: - rep = row[col] - table.append("{}".format(rep)) - table.append("") - tableHTML = "".join(table) - - body = """ - - - - {} - - {} -
- - - """.format( - header, tableHTML - ) - try: - displayHTML(style + body) - except: - pass -``` - - -```python -snowLeopardQueries = ["snow leopard"] -snowLeopardUrls = bingPhotoSearch("snow leopard", snowLeopardQueries, pages=100) -displayDF(snowLeopardUrls) -``` - - -```python -randomWords = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/random_words.parquet" -).cache() -randomWords.show() -``` - - -```python -randomLinks = ( - randomWords.mlTransform( - BingImageSearch() - .setSubscriptionKey(BING_IMAGE_SEARCH_KEY) - .setCount(10) - .setQueryCol("words") - .setOutputCol("images") - ) - .mlTransform(BingImageSearch.getUrlTransformer("images", "urls")) - .withColumn("label", lit("other")) - .limit(400) -) - -displayDF(randomLinks) -``` - - -```python -images = ( - snowLeopardUrls.union(randomLinks) - .distinct() - .repartition(100) - .mlTransform( - BingImageSearch.downloadFromUrls("urls", "image", concurrency=5, timeout=5000) - ) - .dropna() -) - -train, test = images.randomSplit([0.7, 0.3], seed=1) -``` - - -```python -from pyspark.ml import Pipeline -from pyspark.ml.feature import StringIndexer -from pyspark.ml.classification import LogisticRegression -from pyspark.sql.functions import udf -from synapse.ml.downloader import ModelDownloader -from synapse.ml.cntk import ImageFeaturizer -from synapse.ml.stages import UDFTransformer -from pyspark.sql.types import * - - -def getIndex(row): - return float(row[1]) - - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - network = ModelDownloader( - spark, "abfss://synapse@mmlsparkeuap.dfs.core.windows.net/models/" - ).downloadByName("ResNet50") -else: - network = ModelDownloader(spark, "dbfs:/Models/").downloadByName("ResNet50") - -model = Pipeline( - stages=[ - StringIndexer(inputCol="labels", outputCol="index"), - ImageFeaturizer( - inputCol="image", outputCol="features", cutOutputLayers=1 - ).setModel(network), - LogisticRegression(maxIter=5, labelCol="index", regParam=10.0), - UDFTransformer() - .setUDF(udf(getIndex, DoubleType())) - .setInputCol("probability") - .setOutputCol("leopard_prob"), - ] -) - -fitModel = model.fit(train) -``` - - - - -```python -def plotConfusionMatrix(df, label, prediction, classLabels): - from synapse.ml.plot import confusionMatrix - import matplotlib.pyplot as plt - - fig = plt.figure(figsize=(4.5, 4.5)) - confusionMatrix(df, label, prediction, classLabels) - display(fig) - - -if os.environ.get("AZURE_SERVICE", None) != "Microsoft.ProjectArcadia": - plotConfusionMatrix( - fitModel.transform(test), "index", "prediction", fitModel.stages[0].labels - ) -``` - - -```python -import urllib.request -from synapse.ml.lime import ImageLIME - -test_image_url = ( - "https://mmlspark.blob.core.windows.net/graphics/SnowLeopardAD/snow_leopard1.jpg" -) -with urllib.request.urlopen(test_image_url) as url: - barr = url.read() -test_subsample = spark.createDataFrame([(bytearray(barr),)], ["image"]) - -lime = ( - ImageLIME() - .setModel(fitModel) - .setPredictionCol("leopard_prob") - .setOutputCol("weights") - .setInputCol("image") - .setCellSize(100.0) - .setModifier(50.0) - .setNSamples(300) -) - -result = lime.transform(test_subsample) -``` - - -```python -import matplotlib.pyplot as plt -import PIL, io, numpy as np - - -def plot_superpixels(row): - image_bytes = row["image"] - superpixels = row["superpixels"]["clusters"] - weights = list(row["weights"]) - mean_weight = np.percentile(weights, 90) - img = (PIL.Image.open(io.BytesIO(image_bytes))).convert("RGBA") - image_array 
= np.asarray(img).copy() - for (sp, w) in zip(superpixels, weights): - if w > mean_weight: - for (x, y) in sp: - image_array[y, x, 1] = 255 - image_array[y, x, 3] = 200 - plt.clf() - plt.imshow(image_array) - display() - - -# Gets first row from the LIME-transformed data frame -if os.environ.get("AZURE_SERVICE", None) != "Microsoft.ProjectArcadia": - plot_superpixels(result.take(1)[0]) -``` - -### Your results will look like: - diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Tabular SHAP explainer.md b/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Tabular SHAP explainer.md deleted file mode 100644 index b012cc6e1d..0000000000 --- a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Tabular SHAP explainer.md +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Interpretability - Tabular SHAP explainer -hide_title: true -status: stable ---- -## Interpretability - Tabular SHAP explainer - -In this example, we use Kernel SHAP to explain a tabular classification model built from the Adults Census dataset. - -First we import the packages and define some UDFs we will need later. - - -```python -import pyspark -from synapse.ml.explainers import * -from pyspark.ml import Pipeline -from pyspark.ml.classification import LogisticRegression -from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler -from pyspark.sql.types import * -from pyspark.sql.functions import * -import pandas as pd -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display - - -vec_access = udf(lambda v, i: float(v[i]), FloatType()) -vec2array = udf(lambda vec: vec.toArray().tolist(), ArrayType(FloatType())) -``` - -Now let's read the data and train a simple binary classification model. - - -```python -df = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet" -) - -labelIndexer = StringIndexer( - inputCol="income", outputCol="label", stringOrderType="alphabetAsc" -).fit(df) -print("Label index assigment: " + str(set(zip(labelIndexer.labels, [0, 1])))) - -training = labelIndexer.transform(df).cache() -display(training) -categorical_features = [ - "workclass", - "education", - "marital-status", - "occupation", - "relationship", - "race", - "sex", - "native-country", -] -categorical_features_idx = [col + "_idx" for col in categorical_features] -categorical_features_enc = [col + "_enc" for col in categorical_features] -numeric_features = [ - "age", - "education-num", - "capital-gain", - "capital-loss", - "hours-per-week", -] - -strIndexer = StringIndexer( - inputCols=categorical_features, outputCols=categorical_features_idx -) -onehotEnc = OneHotEncoder( - inputCols=categorical_features_idx, outputCols=categorical_features_enc -) -vectAssem = VectorAssembler( - inputCols=categorical_features_enc + numeric_features, outputCol="features" -) -lr = LogisticRegression(featuresCol="features", labelCol="label", weightCol="fnlwgt") -pipeline = Pipeline(stages=[strIndexer, onehotEnc, vectAssem, lr]) -model = pipeline.fit(training) -``` - -After the model is trained, we randomly select some observations to be explained. 
-
-
-```python
-explain_instances = (
-    model.transform(training).orderBy(rand()).limit(5).repartition(200).cache()
-)
-display(explain_instances)
-```
-
-We create a TabularSHAP explainer, set the input columns to all the features the model takes, and specify the model and the target output column we are trying to explain. In this case, we are trying to explain the "probability" output, which is a vector of length 2, and we are only looking at the class 1 probability. Specify targetClasses as `[0, 1]` if you want to explain the class 0 and class 1 probabilities at the same time. Finally, we sample 100 rows from the training data as background data, which is used for integrating out features in Kernel SHAP.
-
-
-```python
-shap = TabularSHAP(
-    inputCols=categorical_features + numeric_features,
-    outputCol="shapValues",
-    numSamples=5000,
-    model=model,
-    targetCol="probability",
-    targetClasses=[1],
-    backgroundData=broadcast(training.orderBy(rand()).limit(100).cache()),
-)
-
-shap_df = shap.transform(explain_instances)
-```
-
-Once we have the resulting dataframe, we extract the class 1 probability of the model output, the SHAP values for the target class, the original features, and the true label. Then we convert it to a pandas dataframe for visualization.
-For each observation, the first element in the SHAP values vector is the base value (the mean output of the background dataset), and each of the following elements is the SHAP value for the corresponding feature.
-
-
-```python
-shaps = (
-    shap_df.withColumn("probability", vec_access(col("probability"), lit(1)))
-    .withColumn("shapValues", vec2array(col("shapValues").getItem(0)))
-    .select(
-        ["shapValues", "probability", "label"] + categorical_features + numeric_features
-    )
-)
-
-shaps_local = shaps.toPandas()
-shaps_local.sort_values("probability", ascending=False, inplace=True, ignore_index=True)
-pd.set_option("display.max_colwidth", None)
-shaps_local
-```
-
-We use plotly subplots to visualize the SHAP values.
- - -```python -from plotly.subplots import make_subplots -import plotly.graph_objects as go -import pandas as pd - -features = categorical_features + numeric_features -features_with_base = ["Base"] + features - -rows = shaps_local.shape[0] - -fig = make_subplots( - rows=rows, - cols=1, - subplot_titles="Probability: " - + shaps_local["probability"].apply("{:.2%}".format) - + "; Label: " - + shaps_local["label"].astype(str), -) - -for index, row in shaps_local.iterrows(): - feature_values = [0] + [row[feature] for feature in features] - shap_values = row["shapValues"] - list_of_tuples = list(zip(features_with_base, feature_values, shap_values)) - shap_pdf = pd.DataFrame(list_of_tuples, columns=["name", "value", "shap"]) - fig.add_trace( - go.Bar( - x=shap_pdf["name"], - y=shap_pdf["shap"], - hovertext="value: " + shap_pdf["value"].astype(str), - ), - row=index + 1, - col=1, - ) - -fig.update_yaxes(range=[-1, 1], fixedrange=True, zerolinecolor="black") -fig.update_xaxes(type="category", tickangle=45, fixedrange=True) -fig.update_layout(height=400 * rows, title_text="SHAP explanations") -fig.show() -``` - -Your results will look like: - - diff --git a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Text Explainers.md b/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Text Explainers.md deleted file mode 100644 index 43c0e40b6a..0000000000 --- a/website/versioned_docs/version-0.10.0/features/responsible_ai/Interpretability - Text Explainers.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -title: Interpretability - Text Explainers -hide_title: true -status: stable ---- -## Interpretability - Text Explainers - -In this example, we use LIME and Kernel SHAP explainers to explain a text classification model. - -First we import the packages and define some UDFs and a plotting function we will need later. - - -``` -from pyspark.sql.functions import * -from pyspark.sql.types import * -from pyspark.ml.feature import StopWordsRemover, HashingTF, IDF, Tokenizer -from pyspark.ml import Pipeline -from pyspark.ml.classification import LogisticRegression -from synapse.ml.explainers import * -from synapse.ml.featurize.text import TextFeaturizer -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display - -vec2array = udf(lambda vec: vec.toArray().tolist(), ArrayType(FloatType())) -vec_access = udf(lambda v, i: float(v[i]), FloatType()) -``` - -Load training data, and convert rating to binary label. - - -``` -data = ( - spark.read.parquet("wasbs://publicwasb@mmlspark.blob.core.windows.net/BookReviewsFromAmazon10K.parquet") - .withColumn("label", (col("rating") > 3).cast(LongType())) - .select("label", "text") - .cache() -) - -display(data) -``` - -We train a text classification model, and randomly sample 10 rows to explain. 
- - -``` -train, test = data.randomSplit([0.60, 0.40]) - -pipeline = Pipeline( - stages=[ - TextFeaturizer( - inputCol="text", - outputCol="features", - useStopWordsRemover=True, - useIDF=True, - minDocFreq=20, - numFeatures=1 << 16, - ), - LogisticRegression(maxIter=100, regParam=0.005, labelCol="label", featuresCol="features"), - ] -) - -model = pipeline.fit(train) - -prediction = model.transform(test) - -explain_instances = prediction.orderBy(rand()).limit(10) -``` - - -``` -def plotConfusionMatrix(df, label, prediction, classLabels): - from synapse.ml.plot import confusionMatrix - import matplotlib.pyplot as plt - - fig = plt.figure(figsize=(4.5, 4.5)) - confusionMatrix(df, label, prediction, classLabels) - if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - plt.show() - else: - display(fig) - - -plotConfusionMatrix(model.transform(test), "label", "prediction", [0, 1]) -``` - -First we use the LIME text explainer to explain the model's predicted probability for a given observation. - - -``` -lime = TextLIME( - model=model, - outputCol="weights", - inputCol="text", - targetCol="probability", - targetClasses=[1], - tokensCol="tokens", - samplingFraction=0.7, - numSamples=2000, -) - -lime_results = ( - lime.transform(explain_instances) - .select("tokens", "weights", "r2", "probability", "text") - .withColumn("probability", vec_access("probability", lit(1))) - .withColumn("weights", vec2array(col("weights").getItem(0))) - .withColumn("r2", vec_access("r2", lit(0))) - .withColumn("tokens_weights", arrays_zip("tokens", "weights")) -) - -display(lime_results.select("probability", "r2", "tokens_weights", "text").orderBy(col("probability").desc())) -``` - -Then we use the Kernel SHAP text explainer to explain the model's predicted probability for a given observation. - -> Notice that we drop the base value from the SHAP output before displaying the SHAP values. The base value is the model output for an empty string. - - -``` -shap = TextSHAP( - model=model, - outputCol="shaps", - inputCol="text", - targetCol="probability", - targetClasses=[1], - tokensCol="tokens", - numSamples=5000, -) - -shap_results = ( - shap.transform(explain_instances) - .select("tokens", "shaps", "r2", "probability", "text") - .withColumn("probability", vec_access("probability", lit(1))) - .withColumn("shaps", vec2array(col("shaps").getItem(0))) - .withColumn("shaps", slice(col("shaps"), lit(2), size(col("shaps")))) - .withColumn("r2", vec_access("r2", lit(0))) - .withColumn("tokens_shaps", arrays_zip("tokens", "shaps")) -) - -display(shap_results.select("probability", "r2", "tokens_shaps", "text").orderBy(col("probability").desc())) -``` diff --git a/website/versioned_docs/version-0.10.0/features/spark_serving/SparkServing - Deploying a Classifier.md b/website/versioned_docs/version-0.10.0/features/spark_serving/SparkServing - Deploying a Classifier.md deleted file mode 100644 index acddcb3365..0000000000 --- a/website/versioned_docs/version-0.10.0/features/spark_serving/SparkServing - Deploying a Classifier.md +++ /dev/null @@ -1,131 +0,0 @@ ---- -title: SparkServing - Deploying a Classifier -hide_title: true -status: stable ---- -## Model Deployment with Spark Serving -In this example, we try to predict incomes from the *Adult Census* dataset. Then we will use Spark serving to deploy it as a realtime web service. 
-First, we import needed packages: - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() -``` - - -```python -import sys -import numpy as np -import pandas as pd -``` - -Now let's read the data and split it to train and test sets: - - -```python -data = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet" -) -data = data.select(["education", "marital-status", "hours-per-week", "income"]) -train, test = data.randomSplit([0.75, 0.25], seed=123) -train.limit(10).toPandas() -``` - -`TrainClassifier` can be used to initialize and fit a model, it wraps SparkML classifiers. -You can use `help(synapse.ml.TrainClassifier)` to view the different parameters. - -Note that it implicitly converts the data into the format expected by the algorithm. More specifically it: - tokenizes, hashes strings, one-hot encodes categorical variables, assembles the features into a vector -etc. The parameter `numFeatures` controls the number of hashed features. - - -```python -from synapse.ml.train import TrainClassifier -from pyspark.ml.classification import LogisticRegression - -model = TrainClassifier( - model=LogisticRegression(), labelCol="income", numFeatures=256 -).fit(train) -``` - -After the model is trained, we score it against the test dataset and view metrics. - - -```python -from synapse.ml.train import ComputeModelStatistics, TrainedClassifierModel - -prediction = model.transform(test) -prediction.printSchema() -``` - - -```python -metrics = ComputeModelStatistics().transform(prediction) -metrics.limit(10).toPandas() -``` - -First, we will define the webservice input/output. -For more information, you can visit the [documentation for Spark Serving](https://github.com/Microsoft/SynapseML/blob/master/docs/mmlspark-serving.md) - - -```python -from pyspark.sql.types import * -from synapse.ml.io import * -import uuid - -serving_inputs = ( - spark.readStream.server() - .address("localhost", 8898, "my_api") - .option("name", "my_api") - .load() - .parseRequest("my_api", test.schema) -) - -serving_outputs = model.transform(serving_inputs).makeReply("prediction") - -server = ( - serving_outputs.writeStream.server() - .replyTo("my_api") - .queryName("my_query") - .option("checkpointLocation", "file:///tmp/checkpoints-{}".format(uuid.uuid1())) - .start() -) -``` - -Test the webservice - - -```python -import requests - -data = '{"education":" 10th","marital-status":"Divorced","hours-per-week":40.0}' -r = requests.post(data=data, url="http://localhost:8898/my_api") -print("Response {}".format(r.text)) -``` - - -```python -import requests - -data = '{"education":" Masters","marital-status":"Married-civ-spouse","hours-per-week":40.0}' -r = requests.post(data=data, url="http://localhost:8898/my_api") -print("Response {}".format(r.text)) -``` - - -```python -import time - -time.sleep(20) # wait for server to finish setting up (just to be safe) -server.stop() -``` - - -```python - -``` diff --git a/website/versioned_docs/version-0.10.0/features/spark_serving/about.md b/website/versioned_docs/version-0.10.0/features/spark_serving/about.md deleted file mode 100644 index 1aaeadde49..0000000000 --- a/website/versioned_docs/version-0.10.0/features/spark_serving/about.md +++ /dev/null @@ -1,228 +0,0 @@ ---- -title: Spark Serving -hide_title: true -sidebar_label: About ---- - - - -# Spark Serving - -### An Engine for Deploying Spark Jobs as Distributed 
Web Services - -- **Distributed**: Takes full advantage of Node, JVM, and thread level - parallelism that Spark is famous for. -- **Fast**: No single node bottlenecks, no round trips to Python. - Requests can be routed directly to and from worker JVMs through - network switches. Spin up a web service in a matter of seconds. -- **Low Latency**: When using continuous serving, - you can achieve latencies as low as 1 millisecond. -- **Deployable Anywhere**: Works anywhere that runs Spark such as - Databricks, HDInsight, AZTK, DSVMs, local, or on your own - cluster. Usable from Spark, PySpark, and SparklyR. -- **Lightweight**: No dependence on costly Kafka or - Kubernetes clusters. -- **Idiomatic**: Uses the same API as batch and structured streaming. -- **Flexible**: Spin up and manage several services on a single Spark - cluster. Synchronous and Asynchronous service management and - extensibility. Deploy any spark job that is expressible as a - structured streaming query. Use serving sources/sinks with other - Spark data sources/sinks for more complex deployments. - -## Usage - -### Jupyter Notebook Examples - -- [Deploy a classifier trained on the Adult Census Dataset](../SparkServing%20-%20Deploying%20a%20Classifier) -- More coming soon! - -### Spark Serving Hello World - -```python -import synapse.ml -import pyspark -from pyspark.sql.functions import udf, col, length -from pyspark.sql.types import * - -df = spark.readStream.server() \ - .address("localhost", 8888, "my_api") \ - .load() \ - .parseRequest(StructType().add("foo", StringType()).add("bar", IntegerType())) - -replies = df.withColumn("fooLength", length(col("foo")))\ - .makeReply("fooLength") - -server = replies\ - .writeStream \ - .server() \ - .replyTo("my_api") \ - .queryName("my_query") \ - .option("checkpointLocation", "file:///path/to/checkpoints") \ - .start() -``` - -### Deploying a Deep Network with the CNTKModel - -```python -import synapse.ml -from synapse.ml.cntk import CNTKModel -import pyspark -from pyspark.sql.functions import udf, col - -df = spark.readStream.server() \ - .address("localhost", 8888, "my_api") - .load() - .parseRequest() - -# See notebook examples for how to create and save several -# examples of CNTK models -network = CNTKModel.load("file:///path/to/my_cntkmodel.mml") - -transformed_df = network.transform(df).makeReply() - -server = transformed_df \ - .writeStream \ - .server() \ - .replyTo("my_api") \ - .queryName("my_query") \ - .option("checkpointLocation", "file:///path/to/checkpoints") \ - .start() -``` - -## Architecture - -Spark Serving adds special streaming sources and sinks to turn any -structured streaming job into a web service. Spark Serving comes -with two deployment options that vary based on what form of load balancing -is being used. - -In brief you can use: -`spark.readStream.server()`: For head node load balanced services -`spark.readStream.distributedServer()`: For custom load balanced services -`spark.readStream.continuousServer()`: For a custom load balanced, submillisecond-latency continuous server - -to create the various different serving dataframes and use the equivalent statements after `df.writeStream` -for replying to the web requests. - -### Head Node Load Balanced - -You can deploy head node load balancing with the `HTTPSource` and -`HTTPSink` classes. This mode spins up a queue on the head node, -distributes work across partitions, then collects response data back to -the head node. All HTTP requests are kept and replied to on the head -node. 
In both Python and Scala these classes can be accessed by using
-`spark.readStream.server()` after importing SynapseML.
-This mode allows for more complex windowing, repartitioning, and
-SQL operations. This option is also ideal for rapid setup and testing,
-as it doesn't require any further load balancing or network
-switches. A diagram of this configuration can be seen in this image:
-
-*[head node load-balanced serving architecture diagram]*
-
-
-
-### Fully Distributed (Custom Load Balancer)
-
-You can configure Spark Serving for a custom load balancer using the
-`DistributedHTTPSource` and `DistributedHTTPSink` classes. This mode
-spins up servers on each executor JVM.
-In both Python and Scala these classes can be accessed by using
-`spark.readStream.distributedServer()` after importing SynapseML.
-Each server will feed its
-executor's partitions in parallel. This mode is key for high throughput
-and low latency as data doesn't need to be transferred to and from the
-head node. This deployment results in several web services that all
-route into the same Spark computation. You can deploy an external load
-balancer to unify the executors' services under a single IP address.
-Support for automatic load balancer management and deployment is
-targeted for the next release of SynapseML. A diagram of this
-configuration can be seen here, followed by a minimal usage sketch:
-
-*[fully distributed serving architecture diagram]*
-
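For concreteness, here is a minimal PySpark sketch of the fully distributed mode. It is an illustration rather than a verified recipe: it mirrors the Hello World example above and simply swaps `server()` for `distributedServer()` on the read and write sides, as described; the host, port, schema, column names, and checkpoint path are placeholders, and the remaining options are assumed to behave the same as in the head-node mode.

```python
import synapse.ml
import pyspark
from pyspark.sql.functions import col, length
from pyspark.sql.types import StructType, StringType, IntegerType

# Spin up a server on each executor JVM instead of only on the head node
df = spark.readStream.distributedServer() \
    .address("localhost", 8888, "my_api") \
    .load() \
    .parseRequest(StructType().add("foo", StringType()).add("bar", IntegerType()))

# Compute the reply column; responses are returned from the same partitions
replies = df.withColumn("fooLength", length(col("foo"))) \
    .makeReply("fooLength")

server = replies \
    .writeStream \
    .distributedServer() \
    .replyTo("my_api") \
    .queryName("my_distributed_query") \
    .option("checkpointLocation", "file:///path/to/checkpoints") \
    .start()
```

An external load balancer can then be pointed at the executors' endpoints to expose them under a single address, as noted above.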
-
-
-Queries that involve data movement across workers, such as a nontrivial
-SQL join, need special consideration. The user must ensure that the
-right machine replies to each request. One can route data back to the
-originating partition with a broadcast join. In the future, request
-routing will be automatically handled by the sink.
-
-### Sub-Millisecond Latency with Continuous Processing
-
-*[continuous processing serving diagram]*
-
-
-
-Continuous processing can be enabled by hooking into the `HTTPSourceV2` class using:
-
-    spark.readStream.continuousServer()
-      ...
-
-In continuous serving, much like continuous streaming, you need to add a trigger to your write statement:
-
-    df.writeStream
-      .continuousServer()
-      .trigger(continuous="1 second")
-      ...
-
-The architecture is similar to the custom load balancer setup described earlier.
-More specifically, Spark will manage a web service on each partition.
-These web services can be unified using an Azure Load Balancer,
-Kubernetes Service Endpoint, Azure Application Gateway, or any other way to load balance a distributed service.
-It's currently the user's responsibility to optionally unify these services as they see fit.
-In the future, we'll include options to dynamically spin up and manage a load balancer.
-
-#### Databricks Setup
-
-Databricks is a managed architecture and restricts
-all incoming traffic to the nodes of the cluster.
-If you create a web service in your Databricks cluster (head or worker nodes),
-your cluster can communicate with the service, but the outside world can't.
-However, in the future, Databricks will support Virtual Network Injection, so this problem will not arise.
-In the meantime, you must use SSH tunneling to forward the services to one or more other machines
-that act as a networking gateway. This machine can be any machine that accepts SSH traffic and requests.
-We have included settings to automatically configure this SSH tunneling for convenience.
-
-##### Linux Gateway Setup - Azure
-
-1. [Create a Linux VM using SSH](https://docs.microsoft.com/en-us/azure/virtual-machines/linux/quick-create-portal)
-2. [Open ports 8000-9999 from the Azure portal](https://docs.microsoft.com/en-us/azure/virtual-machines/windows/nsg-quickstart-portal)
-3. Open the port on the firewall on the VM
-   ```bash
-   firewall-cmd --zone=public --add-port=8000-10000/tcp --permanent
-   firewall-cmd --reload
-   echo "GatewayPorts yes" >> /etc/ssh/sshd_config
-   service ssh --full-restart
-   ```
-4. Add your private key to a private container in [Azure Storage Blob](https://docs.microsoft.com/en-us/azure/storage/common/storage-quickstart-create-account?toc=%2Fazure%2Fstorage%2Fblobs%2Ftoc.json&tabs=portal).
-5. Generate a SAS link for your key and save it.
-6. Include the following parameters on your reader to configure the SSH tunneling:
-
-       serving_inputs = (spark.readStream.continuousServer()
-         .option("numPartitions", 1)
-         .option("forwarding.enabled", True)  # enable ssh forwarding to a gateway machine
-         .option("forwarding.username", "username")
-         .option("forwarding.sshHost", "ip or dns")
-         .option("forwarding.keySas", "SAS url from the previous step")
-         .address("localhost", 8904, "my_api")
-         .load())
-
-This setup will make your service require an extra network hop and will affect latency.
-It's important to pick a gateway that has good connectivity to your Spark cluster.
-For best performance and ease of configuration, we suggest using Spark Serving
-on an open cluster environment such as Kubernetes, Mesos, or Azure Batch.
- - -## Parameters - -| Parameter Name | Description | Necessary | Default Value | Applicable When | -| ------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | --------- | ------------- | ----------------------------------------------------------------------------------------------------- | -| host | The host to spin up a server on | Yes | | | -| port | The starting port when creating the web services. Web services will increment this port several times to find an open port. In the future, the flexibility of this param will be expanded | yes | | | -| name | The Path of the api a user would call. The format is `hostname:port/name` | yes | | | -| forwarding.enabled | Whether to forward the services to a gateway machine | no | false | When you need to forward services out of a protected network. Only Supported for Continuous Serving. | -| forwarding.username | the username to connect to on the remote host | no | | | -| forwarding.sshport | the port to ssh connect to | no | 22 | | -| forwarding.sshHost | the host of the gateway machine | no | | | -| forwarding.keySas | A Secure access link that can be used to automatically download the required ssh private key | no | | Sometimes more convenient than a directory | -| forwarding.keyDir | A directory on the machines holding the private key | no | "~/.ssh" | Useful if you can't send keys over the wire securely | diff --git a/website/versioned_docs/version-0.10.0/features/vw/Vowpal Wabbit - Overview.md b/website/versioned_docs/version-0.10.0/features/vw/Vowpal Wabbit - Overview.md deleted file mode 100644 index f6289da084..0000000000 --- a/website/versioned_docs/version-0.10.0/features/vw/Vowpal Wabbit - Overview.md +++ /dev/null @@ -1,559 +0,0 @@ ---- -title: Vowpal Wabbit - Overview -hide_title: true -status: stable ---- - - -# VowalWabbit - -[VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) (VW) is a machine learning system which -pushes the frontier of machine learning with techniques such as online, hashing, allreduce, -reductions, learning2search, active, and interactive learning. -VowpalWabbit is a popular choice in ad-tech due to it's speed and cost efficacy. -Furthermore it includes many advances in the area of reinforcement learning (e.g. contextual bandits). - -### Advantages of VowpalWabbit - -- **Composability**: VowpalWabbit models can be incorporated into existing - SparkML Pipelines, and used for batch, streaming, and serving workloads. -- **Small footprint**: VowpalWabbit memory consumption is rather small and can be controlled through '-b 18' or setNumBits method. - This determines the size of the model (e.g. 2^18 * some_constant). -- **Feature Interactions**: Feature interactions (e.g. quadratic, cubic,... terms) are created on-the-fly within the most inner - learning loop in VW. - Interactions can be specified by using the -q parameter and passing the first character of the namespaces that should be _interacted_. - The VW namespace concept is mapped to Spark using columns. The column name is used as namespace name, thus one sparse or dense Spark ML vector corresponds to the features of a single namespace. - To allow passing of multiple namespaces the VW estimator (classifier or regression) expose an additional property called _additionalFeatures_. Users can pass an array of column names. 
-- **Simple deployment**: all native dependencies are packaged into a single jar (including boost and zlib).
-- **VowpalWabbit command line arguments**: users can pass VW command line arguments to control the learning process.
-- **VowpalWabbit binary models** Users can supply an initial VowpalWabbit model to start the training, which can be produced outside of
-  VW on Spark, by invoking _setInitialModel_ and passing the model as a byte array. Similarly, users can access the binary model by invoking
-  _getModel_ on the trained model object (see the sketch after the usage list below).
-- **Java-based hashing** VW's version of murmur-hash was re-implemented in Java (praise to [JackDoe](https://github.com/jackdoe)),
-  providing a major performance improvement compared to passing input strings through JNI and hashing in C++.
-- **Cross language** VowpalWabbit on Spark is available on Spark, PySpark, and SparklyR.
-
-### Limitations of VowpalWabbit on Spark
-
-- **Linux and CentOS only** The native binaries included with the published jar are built for Linux and CentOS only.
-  We're working on creating a more portable version by statically linking Boost and lib C++.
-- **Limited Parsing** Features implemented in the native VW parser (e.g. ngrams, skips, ...) are not yet implemented in
-  VowpalWabbitFeaturizer.
-
-### VowpalWabbit Usage
-
-- VowpalWabbitClassifier: used to build classification models.
-- VowpalWabbitRegressor: used to build regression models.
-- VowpalWabbitFeaturizer: used for feature hashing and extraction. For details please visit [here](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Feature-Hashing-and-Extraction).
-- VowpalWabbitContextualBandit: used to solve contextual bandit problems. For algorithm details please visit [here](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Contextual-Bandit-algorithms).
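To make the binary-model round trip described above concrete, here is a minimal PySpark sketch: it trains a classifier, pulls out the binary VW model, and warm-starts a second run from it. Treat it as an illustration based on the `setInitialModel`/`getModel` description above rather than a verified recipe; `train_data` and `more_train_data` are hypothetical DataFrames with `label` and `features` columns (for example, produced by `VowpalWabbitFeaturizer`).

```python
from synapse.ml.vw import VowpalWabbitClassifier

# Train an initial model on a featurized DataFrame (hypothetical: train_data)
first_model = VowpalWabbitClassifier(
    labelCol="label", featuresCol="features", numPasses=5
).fit(train_data)

# Access the binary VW model as a byte array (as described in the bullets above)
vw_bytes = first_model.getModel()

# Warm-start a new training run from the saved binary model (hypothetical: more_train_data)
warm_started = (
    VowpalWabbitClassifier(labelCol="label", featuresCol="features", numPasses=5)
    .setInitialModel(vw_bytes)
    .fit(more_train_data)
)
```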
- -## Heart Disease Detection with VowalWabbit Classifier - - - -#### Read dataset - - -```python -import os - -if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia": - from pyspark.sql import SparkSession - - spark = SparkSession.builder.getOrCreate() - from notebookutils.visualization import display -``` - - -```python -df = ( - spark.read.format("csv") - .option("header", True) - .option("inferSchema", True) - .load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/heart_disease_prediction_data.csv" - ) -) -# print dataset basic info -print("records read: " + str(df.count())) -print("Schema: ") -df.printSchema() -``` - - -```python -display(df) -``` - -#### Split the dataset into train and test - - -```python -train, test = df.randomSplit([0.85, 0.15], seed=1) -``` - -#### Use VowalWabbitFeaturizer to convert data features into vector - - -```python -from synapse.ml.vw import VowpalWabbitFeaturizer - -featurizer = VowpalWabbitFeaturizer(inputCols=df.columns[:-1], outputCol="features") -train_data = featurizer.transform(train)["target", "features"] -test_data = featurizer.transform(test)["target", "features"] -``` - - -```python -display(train_data.groupBy("target").count()) -``` - -#### Model Training - - -```python -from synapse.ml.vw import VowpalWabbitClassifier - -model = VowpalWabbitClassifier( - numPasses=20, labelCol="target", featuresCol="features" -).fit(train_data) -``` - -#### Model Prediction - - -```python -predictions = model.transform(test_data) -display(predictions) -``` - - -```python -from synapse.ml.train import ComputeModelStatistics - -metrics = ComputeModelStatistics( - evaluationMetric="classification", labelCol="target", scoredLabelsCol="prediction" -).transform(predictions) -display(metrics) -``` - -## Adult Census with VowpalWabbitClassifier - -In this example, we predict incomes from the Adult Census dataset using Vowpal Wabbit (VW) Classifier in SynapseML. - -#### Read dataset and split them into train & test - - -```python -data = spark.read.parquet( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet" -) -data = data.select(["education", "marital-status", "hours-per-week", "income"]) -train, test = data.randomSplit([0.75, 0.25], seed=123) -display(train) -``` - -#### Model Training - -We define a pipeline that includes feature engineering and training of a VW classifier. We use a featurizer provided by VW that hashes the feature names. Note that VW expects classification labels being -1 or 1. Thus, the income category is mapped to this space before feeding training data into the pipeline. - -Note: VW supports distributed learning, and it's controlled by number of partitions of dataset. - - -```python -from pyspark.sql.functions import when, col -from pyspark.ml import Pipeline -from synapse.ml.vw import VowpalWabbitFeaturizer, VowpalWabbitClassifier - -# Define classification label -train = train.withColumn( - "label", when(col("income").contains("<"), 0.0).otherwise(1.0) -).repartition(1) -print(train.count()) - -# Specify featurizer -vw_featurizer = VowpalWabbitFeaturizer( - inputCols=["education", "marital-status", "hours-per-week"], outputCol="features" -) -``` - -Note: "passThroughArgs" parameter lets you pass in any params not exposed through our API. Full command line argument docs can be found [here](https://github.com/VowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments). 
- - -```python -# Define VW classification model -args = "--loss_function=logistic --quiet --holdout_off" -vw_model = VowpalWabbitClassifier( - featuresCol="features", labelCol="label", passThroughArgs=args, numPasses=10 -) - -# Create a pipeline -vw_pipeline = Pipeline(stages=[vw_featurizer, vw_model]) -``` - - -```python -vw_trained = vw_pipeline.fit(train) -``` - -#### Model Prediction - -After the model is trained, we apply it to predict the income of each sample in the test set. - - -```python -# Making predictions -test = test.withColumn("label", when(col("income").contains("<"), 0.0).otherwise(1.0)) -prediction = vw_trained.transform(test) -display(prediction) -``` - -Finally, we evaluate the model performance using ComputeModelStatistics function which will compute confusion matrix, accuracy, precision, recall, and AUC by default for classificaiton models. - - -```python -from synapse.ml.train import ComputeModelStatistics - -metrics = ComputeModelStatistics( - evaluationMetric="classification", labelCol="label", scoredLabelsCol="prediction" -).transform(prediction) -display(metrics) -``` - -## Boston house price prediction with VowpalWabbitRegressor - Quantile Regression - -In this example, we show how to build regression model with VW using Boston's house price. - -#### Read dataset - -We use [*Boston house price* dataset](https://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html) -. -The data was collected in 1978 from Boston area and consists of 506 entries with 14 features including the value of homes. -We use `sklearn.datasets` module to download it easily, then split the set into training and testing by 75/25. - - -```python -import math -from matplotlib.colors import ListedColormap, Normalize -from matplotlib.cm import get_cmap -import matplotlib.pyplot as plt -from synapse.ml.train import ComputeModelStatistics -from synapse.ml.vw import VowpalWabbitRegressor, VowpalWabbitFeaturizer -import numpy as np -import pandas as pd -from sklearn.datasets import load_boston -``` - - -```python -boston = load_boston() - -feature_cols = ["f" + str(i) for i in range(boston.data.shape[1])] -header = ["target"] + feature_cols -df = spark.createDataFrame( - pd.DataFrame(data=np.column_stack((boston.target, boston.data)), columns=header) -).repartition(1) -print("Dataframe has {} rows".format(df.count())) -display(df.limit(10)) -``` - - -```python -train_data, test_data = df.randomSplit([0.75, 0.25], seed=42) -``` - - -```python -display(train_data.summary().toPandas()) -``` - - -```python -train_data.show(10) -``` - -Exploratory analysis: plot feature distributions over different target values. 
- - -```python -features = train_data.columns[1:] -values = train_data.drop("target").toPandas() -ncols = 5 -nrows = math.ceil(len(features) / ncols) - -yy = [r["target"] for r in train_data.select("target").collect()] - -f, axes = plt.subplots(nrows, ncols, sharey=True, figsize=(30, 10)) -f.tight_layout() - -for irow in range(nrows): - axes[irow][0].set_ylabel("target") - for icol in range(ncols): - try: - feat = features[irow * ncols + icol] - xx = values[feat] - - axes[irow][icol].scatter(xx, yy, s=10, alpha=0.25) - axes[irow][icol].set_xlabel(feat) - axes[irow][icol].get_yaxis().set_ticks([]) - except IndexError: - f.delaxes(axes[irow][icol]) -``` - -#### VW-style feature hashing - - -```python -vw_featurizer = VowpalWabbitFeaturizer( - inputCols=feature_cols, - outputCol="features", -) -vw_train_data = vw_featurizer.transform(train_data)["target", "features"] -vw_test_data = vw_featurizer.transform(test_data)["target", "features"] -display(vw_train_data) -``` - -#### Model training & Prediction - -See [VW wiki](https://github.com/vowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments) for command line arguments. - - -```python -args = "--holdout_off --loss_function quantile -l 7 -q :: --power_t 0.7" -vwr = VowpalWabbitRegressor( - labelCol="target", - featuresCol="features", - passThroughArgs=args, - numPasses=200, -) - -# To reduce number of partitions (which will effect performance), use `vw_train_data.repartition(1)` -vw_model = vwr.fit(vw_train_data.repartition(1)) -vw_predictions = vw_model.transform(vw_test_data) - -display(vw_predictions.limit(20).toPandas()) -``` - -#### Compute Statistics & Visualization - - -```python -metrics = ComputeModelStatistics( - evaluationMetric="regression", labelCol="target", scoresCol="prediction" -).transform(vw_predictions) - -vw_result = metrics.toPandas() -vw_result.insert(0, "model", ["Vowpal Wabbit"]) -display(vw_result) -``` - - -```python -cmap = get_cmap("YlOrRd") -target = np.array(test_data.select("target").collect()).flatten() -model_preds = [("Vowpal Wabbit", vw_predictions)] - -f, axe = plt.subplots(figsize=(6, 6)) -f.tight_layout() - -preds = np.array(vw_predictions.select("prediction").collect()).flatten() -err = np.absolute(preds - target) -norm = Normalize() -clrs = cmap(np.asarray(norm(err)))[:, :-1] -plt.scatter(preds, target, s=60, c=clrs, edgecolors="#888888", alpha=0.75) -plt.plot((0, 60), (0, 60), line, color="#888888") -axe.set_xlabel("Predicted values") -axe.set_ylabel("Actual values") -axe.set_title("Vowpal Wabbit") -``` - -## Quantile Regression for Drug Discovery with VowpalWabbitRegressor - - - -#### Read dataset - - -```python -triazines = spark.read.format("libsvm").load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/triazines.scale.svmlight" -) -``` - - -```python -# print some basic info -print("records read: " + str(triazines.count())) -print("Schema: ") -triazines.printSchema() -display(triazines.limit(10)) -``` - -#### Split dataset into train and test - - -```python -train, test = triazines.randomSplit([0.85, 0.15], seed=1) -``` - -#### Model Training - - -```python -from synapse.ml.vw import VowpalWabbitRegressor - -model = VowpalWabbitRegressor( - numPasses=20, passThroughArgs="--holdout_off --loss_function quantile -q :: -l 0.1" -).fit(train) -``` - -#### Model Prediction - - -```python -scoredData = model.transform(test) -display(scoredData.limit(10)) -``` - - -```python -from synapse.ml.train import ComputeModelStatistics - -metrics = ComputeModelStatistics( - evaluationMetric="regression", 
labelCol="label", scoresCol="prediction" -).transform(scoredData) -display(metrics) -``` - -## VW Contextual Bandit - -#### Read dataset - - -```python -data = spark.read.format("json").load( - "wasbs://publicwasb@mmlspark.blob.core.windows.net/vwcb_input.dsjson" -) -``` - -Note: Actions are all five TAction_x_topic columns. - - -```python -from pyspark.sql.functions import col -from pyspark.sql.types import IntegerType, DoubleType - -data = ( - data.withColumn("GUser_id", col("c.GUser.id")) - .withColumn("GUser_major", col("c.GUser.major")) - .withColumn("GUser_hobby", col("c.GUser.hobby")) - .withColumn("GUser_favorite_character", col("c.GUser.favorite_character")) - .withColumn("TAction_0_topic", col("c._multi.TAction.topic")[0]) - .withColumn("TAction_1_topic", col("c._multi.TAction.topic")[1]) - .withColumn("TAction_2_topic", col("c._multi.TAction.topic")[2]) - .withColumn("TAction_3_topic", col("c._multi.TAction.topic")[3]) - .withColumn("TAction_4_topic", col("c._multi.TAction.topic")[4]) - .withColumn("chosenAction", col("_label_Action").cast(IntegerType())) - .withColumn("label", col("_labelIndex").cast(DoubleType())) - .withColumn("probability", col("_label_probability")) - .select( - "GUser_id", - "GUser_major", - "GUser_hobby", - "GUser_favorite_character", - "TAction_0_topic", - "TAction_1_topic", - "TAction_2_topic", - "TAction_3_topic", - "TAction_4_topic", - "chosenAction", - "label", - "probability", - ) -) - -print("Schema: ") -data.printSchema() -``` - -Add pipeline to add featurizer, convert all feature columns into vector. - - -```python -from synapse.ml.vw import ( - VowpalWabbitFeaturizer, - VowpalWabbitContextualBandit, - VectorZipper, -) -from pyspark.ml import Pipeline - -pipeline = Pipeline( - stages=[ - VowpalWabbitFeaturizer(inputCols=["GUser_id"], outputCol="GUser_id_feature"), - VowpalWabbitFeaturizer( - inputCols=["GUser_major"], outputCol="GUser_major_feature" - ), - VowpalWabbitFeaturizer( - inputCols=["GUser_hobby"], outputCol="GUser_hobby_feature" - ), - VowpalWabbitFeaturizer( - inputCols=["GUser_favorite_character"], - outputCol="GUser_favorite_character_feature", - ), - VowpalWabbitFeaturizer( - inputCols=["TAction_0_topic"], outputCol="TAction_0_topic_feature" - ), - VowpalWabbitFeaturizer( - inputCols=["TAction_1_topic"], outputCol="TAction_1_topic_feature" - ), - VowpalWabbitFeaturizer( - inputCols=["TAction_2_topic"], outputCol="TAction_2_topic_feature" - ), - VowpalWabbitFeaturizer( - inputCols=["TAction_3_topic"], outputCol="TAction_3_topic_feature" - ), - VowpalWabbitFeaturizer( - inputCols=["TAction_4_topic"], outputCol="TAction_4_topic_feature" - ), - VectorZipper( - inputCols=[ - "TAction_0_topic_feature", - "TAction_1_topic_feature", - "TAction_2_topic_feature", - "TAction_3_topic_feature", - "TAction_4_topic_feature", - ], - outputCol="features", - ), - ] -) -tranformation_pipeline = pipeline.fit(data) -transformed_data = tranformation_pipeline.transform(data) - -display(transformed_data) -``` - -Build VowpalWabbit Contextual Bandit model and compute performance statistics. 
- - -```python -estimator = ( - VowpalWabbitContextualBandit() - .setPassThroughArgs("--cb_explore_adf --epsilon 0.2 --quiet") - .setSharedCol("GUser_id_feature") - .setAdditionalSharedFeatures( - [ - "GUser_major_feature", - "GUser_hobby_feature", - "GUser_favorite_character_feature", - ] - ) - .setFeaturesCol("features") - .setUseBarrierExecutionMode(False) - .setChosenActionCol("chosenAction") - .setLabelCol("label") - .setProbabilityCol("probability") -) -model = estimator.fit(transformed_data) -display(model.getPerformanceStatistics()) -``` diff --git a/website/versioned_docs/version-0.10.0/features/vw/about.md b/website/versioned_docs/version-0.10.0/features/vw/about.md deleted file mode 100644 index ac0f56ff2f..0000000000 --- a/website/versioned_docs/version-0.10.0/features/vw/about.md +++ /dev/null @@ -1,112 +0,0 @@ ---- -title: VW -hide_title: true -sidebar_label: About ---- - - - -# VowpalWabbit on Apache Spark - -### Overview - -[VowpalWabbit](https://github.com/VowpalWabbit/vowpal_wabbit) (VW) is a machine learning system that -pushes the frontier of machine learning with techniques such as online, hashing, allreduce, -reductions, learning2search, active, and interactive learning. -VowpalWabbit is a popular choice in ad-tech due to its speed and cost efficacy. -Furthermore it includes many advances in the area of reinforcement learning (for instance, contextual bandits). - -### Advantages of VowpalWabbit - -- **Composability**: VowpalWabbit models can be incorporated into existing - SparkML Pipelines, and used for batch, streaming, and serving workloads. -- **Small footprint**: VowpalWabbit memory consumption is rather small and can be controlled through '-b 18' or the setNumBits method. - This option determines the size of the model (2^18 * some_constant, in this example). -- **Feature Interactions**: Feature interactions (quadratic, cubic,... terms, for instance) are created on-the-fly within the most inner - learning loop in VW. - Interactions can be specified by using the -q parameter and passing the first character of the namespaces that should be _interacted_. - The VW namespace concept is mapped to Spark using columns. The column name is used as namespace name, thus one sparse or dense Spark ML vector corresponds to the features of a single namespace. - To allow passing of multiple namespaces, the VW estimator (classifier or regression) exposes a property called _additionalFeatures_. Users can pass an array of column names. -- **Simple deployment**: all native dependencies are packaged into a single jars (including boost and zlib). -- **VowpalWabbit command line arguments**: users can pass VW command line arguments to control the learning process. -- **VowpalWabbit binary models** To start the training, users can supply an initial VowpalWabbit model, which can be produced outside of - VW on Spark, by invoking _setInitialModel_ and passing the model as a byte array. Similarly, users can access the binary model by invoking - _getModel_ on the trained model object. -- **Java-based hashing** VW's version of murmur-hash was reimplemented in Java (praise to [JackDoe](https://github.com/jackdoe)) - providing a major performance improvement compared to passing input strings through JNI and hashing in C++. -- **Cross language** VowpalWabbit on Spark is available on Spark, PySpark, and SparklyR. - -### Limitations of VowpalWabbit on Spark - -- **Linux and CentOS only** The native binaries included with the published jar are built Linux and CentOS only. 
- We're working on creating a more portable version by statically linking Boost and lib C++. -- **Limited Parsing** Features implemented in the native VW parser (ngrams, skips, ...) are not yet implemented in - VowpalWabbitFeaturizer. - -### Usage - -In PySpark, you can run the `VowpalWabbitClassifier` via: - -```python -from synapse.ml.vw import VowpalWabbitClassifier -model = (VowpalWabbitClassifier(numPasses=5, args="--holdout_off --loss_function logistic") - .fit(train)) -``` - -Similarly, you can run the `VowpalWabbitRegressor`: - -```python -from synapse.ml.vw import VowpalWabbitRegressor -model = (VowpalWabbitRegressor(args="--holdout_off --loss_function quantile -q :: -l 0.1") - .fit(train)) -``` - -You can pass command line parameters to VW via the args parameter, as documented in the [VW Wiki](https://github.com/vowpalWabbit/vowpal_wabbit/wiki/Command-Line-Arguments). - -For an end to end application, check out the VowpalWabbit [notebook -example](../Vowpal%20Wabbit%20-%20Overview). - -### Hyper-parameter tuning - -- Common parameters can also be set through methods enabling the use of SparkMLs ParamGridBuilder and CrossValidator ([example](https://github.com/Azure/mmlspark/blob/master/src/test/scala/com/microsoft/azure/synapse/ml/vw/VerifyVowpalWabbitClassifier.scala#L29)). If - the same parameters are passed through the _args_ property (for instance, args="-l 0.2" and setLearningRate(0.5)) the _args_ value will - take precedence. - parameter -* learningRate -* numPasses -* numBits -* l1 -* l2 -* powerT -* interactions -* ignoreNamespaces - -### Architecture - -VowpalWabbit on Spark uses an optimized JNI layer to efficiently support Spark. -Java bindings can be found in the [VW GitHub repo](https://github.com/VowpalWabbit/vowpal_wabbit/blob/master/java/src/main/c%2B%2B/jni_spark_vw_generated.h). - -VW's command line tool uses a two-thread architecture (1x parsing/hashing, 1x learning) for learning and inference. -To fluently embed VW into the Spark ML eco system, the following adaptions were made: - -- VW classifier/regressor operates on Spark's dense/sparse vectors - - Pro: best composability with existing Spark ML components. - - Cons: due to type restrictions (for example, feature indices are Java integers), the maximum model size is limited to 30 bits. One could overcome this restriction by adding type support to the classifier/regressor to directly operate on input features (strings, int, double, ...). - -- VW hashing is separated out into the [VowpalWabbitFeaturizer](https://github.com/Azure/mmlspark/blob/master/src/test/scala/com/microsoft/azure/synapse/ml/vw/VerifyVowpalWabbitFeaturizer.scala#L34) transformer. It supports mapping Spark Dataframe schema into VW's namespaces and sparse -features. - - Pro: featurization can be scaled to many nodes, scale independent of distributed learning. - - Pro: hashed features can be cached and efficiently reused when performing hyper-parameter sweeps. - - Pro: featurization can be used for other Spark ML learning algorithms. - - Cons: due to type restrictions (for instance, sparse indices are Java integers) the hash space is limited to 30 bits. - -- VW multi-pass training can be enabled using '--passes 4' argument or setNumPasses method. Cache file is automatically named. - - Pro: simplified usage. - - Pro: certain algorithms (for example, l-bfgs) require a cache file when running in multi-pass node. 
- - Cons: Since the cache file resides in the Java temp directory, a bottleneck may arise, depending on your node's I/O performance and the location of the temp directory. -- VW distributed training is transparently set up and can be controlled through the input dataframes number of partitions. - Similar to LightGBM all training instances must be running at the same time, thus the maximum parallelism is restricted by the - number of executors available in the cluster. Under the hood, VW's built-in spanning tree functionality is used to coordinate _allreduce_. - Required parameters are automatically determined and supplied to VW. The spanning tree coordination process is run on the driver node. - - Pro: seamless parallelization. - - Cons: currently barrier execution mode isn't implemented and thus if one node crashes the complete job needs to be manually restarted. diff --git a/website/versioned_docs/version-0.10.0/getting_started/dotnet_example.md b/website/versioned_docs/version-0.10.0/getting_started/dotnet_example.md deleted file mode 100644 index ad7003b530..0000000000 --- a/website/versioned_docs/version-0.10.0/getting_started/dotnet_example.md +++ /dev/null @@ -1,126 +0,0 @@ ---- -title: .NET Example with LightGBMClassifier -sidebar_label: .NET example -description: A simple example about classification with LightGBMClassifier using .NET ---- - -:::note -Make sure you have followed the guidance in [.NET installation](../reference/dotnet-setup.md) before jumping into this example. -::: - -## Classification with LightGBMClassifier - -Install NuGet packages by running following command: -```powershell -dotnet add package Microsoft.Spark --version 2.1.1 -dotnet add package SynapseML.Lightgbm --version 0.10.0 -dotnet add package SynapseML.Core --version 0.10.0 -``` - -Use the following code in your main program file: -```csharp -using System; -using System.Collections.Generic; -using Synapse.ML.Lightgbm; -using Synapse.ML.Featurize; -using Microsoft.Spark.Sql; -using Microsoft.Spark.Sql.Types; - -namespace SynapseMLApp -{ - class Program - { - static void Main(string[] args) - { - // Create Spark session - SparkSession spark = - SparkSession - .Builder() - .AppName("LightGBMExample") - .GetOrCreate(); - - // Load Data - DataFrame df = spark.Read() - .Option("inferSchema", true) - .Parquet("wasbs://publicwasb@mmlspark.blob.core.windows.net/AdultCensusIncome.parquet") - .Limit(2000); - - var featureColumns = new string[] {"age", "workclass", "fnlwgt", "education", "education-num", - "marital-status", "occupation", "relationship", "race", "sex", "capital-gain", - "capital-loss", "hours-per-week", "native-country"}; - - // Transform features - var featurize = new Featurize() - .SetOutputCol("features") - .SetInputCols(featureColumns) - .SetOneHotEncodeCategoricals(true) - .SetNumFeatures(14); - - var dfTrans = featurize - .Fit(df) - .Transform(df) - .WithColumn("label", Functions.When(Functions.Col("income").Contains("<"), 0.0).Otherwise(1.0)); - - DataFrame[] dfs = dfTrans.RandomSplit(new double[] {0.75, 0.25}, 123); - var trainDf = dfs[0]; - var testDf = dfs[1]; - - // Create LightGBMClassifier - var lightGBMClassifier = new LightGBMClassifier() - .SetFeaturesCol("features") - .SetRawPredictionCol("rawPrediction") - .SetObjective("binary") - .SetNumLeaves(30) - .SetNumIterations(200) - .SetLabelCol("label") - .SetLeafPredictionCol("leafPrediction") - .SetFeaturesShapCol("featuresShap"); - - // Fit the model - var lightGBMClassificationModel = lightGBMClassifier.Fit(trainDf); - - // Apply 
transformation and displayresults - lightGBMClassificationModel.Transform(testDf).Show(50); - - // Stop Spark session - spark.Stop(); - } - } -} -``` - -Run `dotnet build` to build the project. Then navigate to build output directory, and run following command: -```powershell -spark-submit --class org.apache.spark.deploy.dotnet.DotnetRunner --packages com.microsoft.azure:synapseml_2.12:0.10.0,org.apache.hadoop:hadoop-azure:3.3.1 --master local microsoft-spark-3-2_2.12-2.1.1.jar dotnet SynapseMLApp.dll -``` -:::note -Here we added two packages: synapseml_2.12 for SynapseML's scala source, and hadoop-azure to support reading files from ADLS. -::: - -Expected output: -``` -+---+---------+------+-------------+-------------+--------------+------------------+---------------+-------------------+-------+------------+------------+--------------+--------------+------+--------------------+-----+--------------------+--------------------+----------+--------------------+--------------------+ -|age|workclass|fnlwgt| education|education-num|marital-status| occupation| relationship| race| sex|capital-gain|capital-loss|hours-per-week|native-country|income| features|label| rawPrediction| probability|prediction| leafPrediction| featuresShap| -+---+---------+------+-------------+-------------+--------------+------------------+---------------+-------------------+-------+------------+------------+--------------+--------------+------+--------------------+-----+--------------------+--------------------+----------+--------------------+--------------------+ -| 17| ?|634226| 10th| 6| Never-married| ?| Own-child| White| Female| 0| 0| 17.0| United-States| <=50K|(61,[7,9,11,15,20...| 0.0|[9.37122343731523...|[0.99991486808581...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.0560742274706...| -| 17| Private| 73145| 9th| 5| Never-married| Craft-repair| Own-child| White| Female| 0| 0| 16.0| United-States| <=50K|(61,[7,9,11,15,17...| 0.0|[12.7512760001880...|[0.99999710138899...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1657810433238...| -| 17| Private|150106| 10th| 6| Never-married| Sales| Own-child| White| Female| 0| 0| 20.0| United-States| <=50K|(61,[5,9,11,15,17...| 0.0|[12.7676985938038...|[0.99999714860282...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1276877355292...| -| 17| Private|151141| 11th| 7| Never-married| Handlers-cleaners| Own-child| White| Male| 0| 0| 15.0| United-States| <=50K|(61,[8,9,11,15,17...| 0.0|[12.1656242513070...|[0.99999479363924...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1279828578119...| -| 17| Private|327127| 11th| 7| Never-married| Transport-moving| Own-child| White| Male| 0| 0| 20.0| United-States| <=50K|(61,[1,9,11,15,17...| 0.0|[12.9962776686392...|[0.99999773124636...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1164691543415...| -| 18| ?|171088| Some-college| 10| Never-married| ?| Own-child| White| Female| 0| 0| 40.0| United-States| <=50K|(61,[7,9,11,15,20...| 0.0|[12.9400428266629...|[0.99999760000817...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1554829578661...| -| 18| Private|115839| 12th| 8| Never-married| Adm-clerical| Not-in-family| White| Female| 0| 0| 30.0| United-States| <=50K|(61,[0,9,11,15,17...| 0.0|[11.8393032168619...|[0.99999278472630...| 0.0|[0.0,0.0,0.0,0.0,...|[0.44080835709189...| -| 18| Private|133055| HS-grad| 9| Never-married| Other-service| Own-child| White| Female| 0| 0| 30.0| United-States| <=50K|(61,[3,9,11,15,17...| 0.0|[11.5747235180479...|[0.99999059936124...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1415862541824...| -| 18| Private|169745| 7th-8th| 4| Never-married| Other-service| Own-child| White| Female| 0| 0| 40.0| United-States| 
<=50K|(61,[3,9,11,15,17...| 0.0|[11.8316427733613...|[0.99999272924226...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1527378526573...| -| 18| Private|177648| HS-grad| 9| Never-married| Sales| Own-child| White| Female| 0| 0| 25.0| United-States| <=50K|(61,[5,9,11,15,17...| 0.0|[10.0820248199174...|[0.99995817710510...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1151843103241...| -| 18| Private|188241| 11th| 7| Never-married| Other-service| Own-child| White| Male| 0| 0| 16.0| United-States| <=50K|(61,[3,9,11,15,17...| 0.0|[10.4049945509280...|[0.99996972005153...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1356854966291...| -| 18| Private|200603| HS-grad| 9| Never-married| Adm-clerical| Other-relative| White| Female| 0| 0| 30.0| United-States| <=50K|(61,[0,9,11,15,17...| 0.0|[12.1354343020828...|[0.99999463406365...| 0.0|[0.0,0.0,0.0,0.0,...|[0.53241098695335...| -| 18| Private|210026| 10th| 6| Never-married| Other-service| Other-relative| White| Female| 0| 0| 40.0| United-States| <=50K|(61,[3,9,11,15,17...| 0.0|[12.3692360082180...|[0.99999575275599...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1275208795564...| -| 18| Private|447882| Some-college| 10| Never-married| Adm-clerical| Not-in-family| White| Female| 0| 0| 20.0| United-States| <=50K|(61,[0,9,11,15,17...| 0.0|[10.2514945786032...|[0.99996469655062...| 0.0|[0.0,0.0,0.0,0.0,...|[0.36497782752201...| -| 19| ?|242001| Some-college| 10| Never-married| ?| Own-child| White| Female| 0| 0| 40.0| United-States| <=50K|(61,[7,9,11,15,20...| 0.0|[13.9439986622060...|[0.99999912057674...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1265631737386...| -| 19| Private| 63814| Some-college| 10| Never-married| Adm-clerical| Not-in-family| White| Female| 0| 0| 18.0| United-States| <=50K|(61,[0,9,11,15,17...| 0.0|[10.2057742895673...|[0.99996304506073...| 0.0|[0.0,0.0,0.0,0.0,...|[0.77645146059597...| -| 19| Private| 83930| HS-grad| 9| Never-married| Other-service| Own-child| White| Female| 0| 0| 20.0| United-States| <=50K|(61,[3,9,11,15,17...| 0.0|[10.4771335467356...|[0.99997182742919...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1625827100973...| -| 19| Private| 86150| 11th| 7| Never-married| Sales| Own-child| Asian-Pac-Islander| Female| 0| 0| 19.0| Philippines| <=50K|(61,[5,9,14,15,17...| 0.0|[12.0241839747799...|[0.99999400263272...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1532111483051...| -| 19| Private|189574| HS-grad| 9| Never-married| Other-service| Not-in-family| White| Female| 0| 0| 30.0| United-States| <=50K|(61,[3,9,11,15,17...| 0.0|[9.53742673004733...|[0.99992790305091...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.0988907054317...| -| 19| Private|219742| Some-college| 10| Never-married| Other-service| Own-child| White| Female| 0| 0| 15.0| United-States| <=50K|(61,[3,9,11,15,17...| 0.0|[12.8625329757574...|[0.99999740658642...| 0.0|[0.0,0.0,0.0,0.0,...|[-0.1922327651359...| -+---+---------+------+-------------+-------------+--------------+------------------+---------------+-------------------+-------+------------+------------+--------------+--------------+------+--------------------+-----+--------------------+--------------------+----------+--------------------+--------------------+ -``` diff --git a/website/versioned_docs/version-0.10.0/getting_started/first_example.md b/website/versioned_docs/version-0.10.0/getting_started/first_example.md deleted file mode 100644 index 8d73dda6bf..0000000000 --- a/website/versioned_docs/version-0.10.0/getting_started/first_example.md +++ /dev/null @@ -1,66 +0,0 @@ ---- -title: First Example -description: Build machine learning applications using Microsoft Machine Learning for Apache Spark ---- - -## 
Prerequisites - -- If you don't have an Azure subscription, [create a free account before you begin](https://azure.microsoft.com/free/). -- [Azure Synapse Analytics workspace](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-create-workspace) with an Azure Data Lake Storage Gen2 storage account configured as the default storage. You need to be the _Storage Blob Data Contributor_ of the Data Lake Storage Gen2 file system that you work with. -- Spark pool in your Azure Synapse Analytics workspace. For details, see [Create a Spark pool in Azure Synapse](https://docs.microsoft.com/en-us/azure/synapse-analytics/get-started-analyze-spark). -- Pre-configuration steps described in the tutorial [Configure Cognitive Services in Azure Synapse](https://docs.microsoft.com/en-us/azure/synapse-analytics/machine-learning/tutorial-configure-cognitive-services-synapse). - -## Get started - -To get started, import synapse.ml and configurate service keys. - -```python -import synapse.ml -from synapse.ml.cognitive import * -from notebookutils import mssparkutils - -# A general Cognitive Services key for Text Analytics and Computer Vision (or use separate keys that belong to each service) -cognitive_service_key = mssparkutils.credentials.getSecret("ADD_YOUR_KEY_VAULT_NAME", "ADD_YOUR_SERVICE_KEY","ADD_YOUR_KEY_VAULT_LINKED_SERVICE_NAME") -# A Bing Search v7 subscription key -bingsearch_service_key = mssparkutils.credentials.getSecret("ADD_YOUR_KEY_VAULT_NAME", "ADD_YOUR_BING_SEARCH_KEY","ADD_YOUR_KEY_VAULT_LINKED_SERVICE_NAME") -# An Anomaly Dectector subscription key -anomalydetector_key = mssparkutils.credentials.getSecret("ADD_YOUR_KEY_VAULT_NAME", "ADD_YOUR_ANOMALY_KEY","ADD_YOUR_KEY_VAULT_LINKED_SERVICE_NAME") - - -``` - -## Text analytics sample - -The [Text Analytics](https://azure.microsoft.com/en-us/services/cognitive-services/text-analytics/) service provides several algorithms for extracting intelligent insights from text. For example, we can find the sentiment of given input text. The service will return a score between 0.0 and 1.0 where low scores indicate negative sentiment and high score indicates positive sentiment. This sample uses three simple sentences and returns the sentiment for each. - -```python -from pyspark.sql.functions import col - -# Create a dataframe that's tied to it's column names -df_sentences = spark.createDataFrame([ - ("I'm so happy today, it's sunny!", "en-US"), - ("this is a dog", "en-US"),s - ("I'm frustrated by this rush hour traffic!", "en-US") -], ["text", "language"]) - -# Run the Text Analytics service with options -sentiment = (TextSentiment() - .setTextCol("text") - .setLocation("eastasia") # Set the location of your cognitive service - .setSubscriptionKey(cognitive_service_key) - .setOutputCol("sentiment") - .setErrorCol("error") - .setLanguageCol("language")) - -# Show the results of your text query in a table format - -display(sentiment.transform(df_sentences).select("text", col("sentiment")[0].getItem("sentiment").alias("sentiment"))) -``` - -### Expected results - -| text | sentiment | -| ------------------------------------------ | --------- | -| I'm frustrated by this rush hour traffic! | negative | -| this is a dog | neutral | -| I'm so happy today, it's sunny! 
| positive | diff --git a/website/versioned_docs/version-0.10.0/getting_started/first_model.md b/website/versioned_docs/version-0.10.0/getting_started/first_model.md deleted file mode 100644 index b11797600f..0000000000 --- a/website/versioned_docs/version-0.10.0/getting_started/first_model.md +++ /dev/null @@ -1,117 +0,0 @@ ---- -title: First Model -hide_title: true -description: First Model ---- - -# Your First Model - -In this example, we construct a basic classification model to predict a person's -income level given demographics data such as education level or marital status. -We also learn how to use Jupyter notebooks for developing and running the model. - -### Prerequisites - -- You've installed the SynapseML package, either as a Docker image or on a - Spark cluster, -- You have basic knowledge of Python language, -- You have basic understanding of machine learning concepts: training, testing, - classification. - -### Working with Jupyter Notebooks - -Once you have the SynapseML package installed, open Jupyter notebooks folder in -your web browser - -- Local Docker: `http://localhost:8888` -- Spark cluster: `https:///jupyter` - -Create a new notebook by selecting "New" -> "PySpark3". Let's also give the -notebook a friendlier name, _Adult Census Income Prediction_, by clicking the -title. - -### Importing Packages and Starting the Spark Application - -At this point, the notebook isn't running a Spark application yet. In the -first cell, let's import some needed packages - -```python -import numpy as np -import pandas as pd -``` - -Click the "run cell" button on the toolbar to start the application. After a -few moments, you should see the message "SparkSession available as 'spark'". -Now you're ready to start coding and running your application. - -### Reading in Data - -In a typical Spark application, you'd likely work with huge datasets stored on -distributed file system, such as HDFS. However, to keep this tutorial simple -and quick, we'll copy over a small dataset from a URL. We then read this data -into memory using Pandas CSV reader, and distribute the data as a Spark -DataFrame. Finally, we show the first 5 rows of the dataset. Copy the following -code to the next cell in your notebook, and run the cell. - -```python -dataFile = "AdultCensusIncome.csv" -import os, urllib -if not os.path.isfile(dataFile): - urllib.request.urlretrieve("https://mmlspark.azureedge.net/datasets/" + dataFile, dataFile) -data = spark.createDataFrame(pd.read_csv(dataFile, dtype={" hours-per-week": np.float64})) -data.show(5) -``` - -### Selecting Features and Splitting Data to Train and Test Sets - -Next, select some features to use in our model. You can try out different -features, but you should include `" income"` as it is the label column the model -is trying to predict. We then split the data into a `train` and `test` sets. - -```python -data = data.select([" education", " marital-status", " hours-per-week", " income"]) -train, test = data.randomSplit([0.75, 0.25], seed=123) -``` - -### Training a Model - -To train the classifier model, we use the `synapse.ml.TrainClassifier` class. It -takes in training data and a base SparkML classifier, maps the data into the -format expected by the base classifier algorithm, and fits a model. 
- -```python -from synapse.ml.train import TrainClassifier -from pyspark.ml.classification import LogisticRegression -model = TrainClassifier(model=LogisticRegression(), labelCol=" income").fit(train) -``` - -`TrainClassifier` implicitly handles string-valued columns and -binarizes the label column. - -### Scoring and Evaluating the Model - -Finally, let's score the model against the test set, and use -`synapse.ml.ComputeModelStatistics` class to compute metrics—accuracy, AUC, -precision, recall—from the scored data. - -```python -from synapse.ml.train import ComputeModelStatistics -prediction = model.transform(test) -metrics = ComputeModelStatistics().transform(prediction) -metrics.select('accuracy').show() -``` - -And that's it: you've build your first machine learning model using the SynapseML -package. For help on SynapseML classes and methods, you can use Python's help() -function, for example - -```python -help(synapse.ml.train.TrainClassifier) -``` - -Next, view our other tutorials to learn how to - -- Tune model parameters to find the best model -- Use SparkML pipelines to build a more complex model -- Use deep neural networks for image classification -- Use text analytics for document classification diff --git a/website/versioned_docs/version-0.10.0/getting_started/installation.md b/website/versioned_docs/version-0.10.0/getting_started/installation.md deleted file mode 100644 index dbec07e6ce..0000000000 --- a/website/versioned_docs/version-0.10.0/getting_started/installation.md +++ /dev/null @@ -1,172 +0,0 @@ ---- -title: Installation -description: Getting started with SynapseML ---- - -## Synapse - -SynapseML can be conveniently installed on Synapse: - -For Spark3.1 pool: -```python -%%configure -f -{ - "name": "synapseml", - "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.9.5-13-d1b51517-SNAPSHOT", - "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", - "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12", - "spark.yarn.user.classpath.first": "true" - } -} -``` - -For Spark3.2 pool: -```python -%%configure -f -{ - "name": "synapseml", - "conf": { - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.10.0", - "spark.jars.repositories": "https://mmlspark.azureedge.net/maven", - "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", - "spark.yarn.user.classpath.first": "true" - } -} -``` - -## Python - -To try out SynapseML on a Python (or Conda) installation, you can get Spark -installed via pip with `pip install pyspark`. 
You can then use `pyspark` as in -the above example, or from python: - -```python -import pyspark -spark = pyspark.sql.SparkSession.builder.appName("MyApp") \ - # Use 0.10.0 version for Spark3.2 and 0.9.5-13-d1b51517-SNAPSHOT version for Spark3.1 - .config("spark.jars.packages", "com.microsoft.azure:synapseml_2.12:0.10.0") \ - .config("spark.jars.repositories", "https://mmlspark.azureedge.net/maven") \ - .getOrCreate() -import synapse.ml -``` - -## SBT - -If you're building a Spark application in Scala, add the following lines to -your `build.sbt`: - -```scala -resolvers += "SynapseML" at "https://mmlspark.azureedge.net/maven" -// Use 0.10.0 version for Spark3.2 and 0.9.5-13-d1b51517-SNAPSHOT version for Spark3.1 -libraryDependencies += "com.microsoft.azure" %% "synapseml_2.12" % "0.10.0" - -``` - -## Spark package - -SynapseML can be conveniently installed on existing Spark clusters via the -`--packages` option, examples: - -```bash -# Please use 0.10.0 version for Spark3.2 and 0.9.5-13-d1b51517-SNAPSHOT version for Spark3.1 -spark-shell --packages com.microsoft.azure:synapseml_2.12:0.10.0 -pyspark --packages com.microsoft.azure:synapseml_2.12:0.10.0 -spark-submit --packages com.microsoft.azure:synapseml_2.12:0.10.0 MyApp.jar -``` - -A similar technique can be used in other Spark contexts too. For example, you can use SynapseML -in [AZTK](https://github.com/Azure/aztk/) by [adding it to the -`.aztk/spark-defaults.conf` -file](https://github.com/Azure/aztk/wiki/PySpark-on-Azure-with-AZTK#optional-set-up-mmlspark). - -## Databricks - -To install SynapseML on the [Databricks -cloud](http://community.cloud.databricks.com), create a new [library from Maven -coordinates](https://docs.databricks.com/user-guide/libraries.html#libraries-from-maven-pypi-or-spark-packages) -in your workspace. - -For the coordinates use: `com.microsoft.azure:synapseml_2.12:0.10.0` for Spark3.2 Cluster and - `com.microsoft.azure:synapseml_2.12:0.9.5-13-d1b51517-SNAPSHOT` for Spark3.1 Cluster; -Add the resolver: `https://mmlspark.azureedge.net/maven`. Ensure this library is -attached to your target cluster(s). - -Finally, ensure that your Spark cluster has at least Spark 3.12 and Scala 2.12. - -You can use SynapseML in both your Scala and PySpark notebooks. To get started with our example notebooks, import the following databricks archive: - -`https://mmlspark.blob.core.windows.net/dbcs/SynapseMLExamplesv0.10.0.dbc` - -## Apache Livy and HDInsight - -To install SynapseML from within a Jupyter notebook served by Apache Livy, the following configure magic can be used. You'll need to start a new session after this configure cell is executed. 
- -Excluding certain packages from the library may be necessary due to current issues with Livy 0.5 - -``` -%%configure -f -{ - "name": "synapseml", - "conf": { - # Please use 0.10.0 version for Spark3.2 and 0.9.5-13-d1b51517-SNAPSHOT version for Spark3.1 - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.10.0", - "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind" - } -} -``` - -In Azure Synapse, "spark.yarn.user.classpath.first" should be set to "true" to override the existing SynapseML packages - -``` -%%configure -f -{ - "name": "synapseml", - "conf": { - # Please use 0.10.0 version for Spark3.2 and 0.9.5-13-d1b51517-SNAPSHOT version for Spark3.1 - "spark.jars.packages": "com.microsoft.azure:synapseml_2.12:0.10.0", - "spark.jars.excludes": "org.scala-lang:scala-reflect,org.apache.spark:spark-tags_2.12,org.scalactic:scalactic_2.12,org.scalatest:scalatest_2.12,com.fasterxml.jackson.core:jackson-databind", - "spark.yarn.user.classpath.first": "true" - } -} -``` - -## Docker - -The easiest way to evaluate SynapseML is via our pre-built Docker container. To -do so, run the following command: - -```bash -docker run -it -p 8888:8888 -e ACCEPT_EULA=yes mcr.microsoft.com/mmlspark/release -``` - -Navigate to in your web browser to run the sample -notebooks. See the [documentation](reference/docker.md) for more on Docker use. - -> To read the EULA for using the docker image, run -``` bash -docker run -it -p 8888:8888 mcr.microsoft.com/mmlspark/release eula -``` - - -## Building from source - -SynapseML has recently transitioned to a new build infrastructure. -For detailed developer docs, see the [Developer Readme](reference/developer-readme.md) - -If you're an existing SynapseML developer, you'll need to reconfigure your -development setup. We now support platform independent development and -better integrate with intellij and SBT. - If you encounter issues, reach out to our support email! - -## R (Beta) - -To try out SynapseML using the R autogenerated wrappers, [see our -instructions](reference/R-setup.md). Note: This feature is still under development -and some necessary custom wrappers may be missing. - -## C# (.NET) - -To try out SynapseML with .NET, follow the [.NET Installation Guide](reference/dotnet-setup.md). -Note: Some stages including AzureSearchWriter, DiagnosticInfo, UDPyF Param, ParamSpaceParam, BallTreeParam, -ConditionalBallTreeParam, LightGBMBooster Param are still under development and not exposed in .NET. diff --git a/website/versioned_docs/version-0.10.0/mlflow/autologging.md b/website/versioned_docs/version-0.10.0/mlflow/autologging.md deleted file mode 100644 index 797b8fca4f..0000000000 --- a/website/versioned_docs/version-0.10.0/mlflow/autologging.md +++ /dev/null @@ -1,82 +0,0 @@ ---- -title: SynapseML Autologging -description: SynapseML autologging ---- - -## Automatic Logging - -[MLflow automatic logging](https://www.mlflow.org/docs/latest/tracking.html#automatic-logging) allows you to log metrics, parameters, and models without the need for explicit log statements. -SynapseML supports autologging for every model in the library. - -To enable autologging for SynapseML: -1. Download this customized [log_model_allowlist file](https://mmlspark.blob.core.windows.net/publicwasb/log_model_allowlist.txt) and put it at a place that your code has access to. 
-For example: -* In Synapse `wasb://@.blob.core.windows.net/PATH_TO_YOUR/log_model_allowlist.txt` -* In Databricks `/dbfs/FileStore/PATH_TO_YOUR/log_model_allowlist.txt`. -2. Set spark configuration `spark.mlflow.pysparkml.autolog.logModelAllowlistFile` to the path of your `log_model_allowlist.txt` file. -3. Call `mlflow.pyspark.ml.autolog()` before your training code to enable autologging for all supported models. - -Note: -1. If you want to support autologging of PySpark models not present in the log_model_allowlist file, you can add such models to the file. -2. If you've enabled autologging, then don't write explicit `with mlflow.start_run()` as it might cause multiple runs for one single model or one run for multiple models. - - -## Configuration process in Databricks as an example - -1. Install latest MLflow via `%pip install mlflow -u` -2. Upload your customized `log_model_allowlist.txt` file to dbfs by clicking File/Upload Data button on Databricks UI. -3. Set Cluster Spark configuration following [this documentation](https://docs.microsoft.com/en-us/azure/databricks/clusters/configure#spark-configuration) -``` -spark.mlflow.pysparkml.autolog.logModelAllowlistFile /dbfs/FileStore/PATH_TO_YOUR/log_model_allowlist.txt -``` -4. Run the following call before your training code executes. You can also customize corresponding [parameters](https://www.mlflow.org/docs/latest/python_api/mlflow.pyspark.ml.html#mlflow.pyspark.ml.autolog) by passing arguments to `autolog`. -``` -mlflow.pyspark.ml.autolog() -``` -5. To find your experiment's results via the `Experiments` tab of the MLFlow UI. - - -## Example for ConditionalKNNModel -```python -from pyspark.ml.linalg import Vectors -from synapse.ml.nn import * - -df = spark.createDataFrame([ - (Vectors.dense(2.0,2.0,2.0), "foo", 1), - (Vectors.dense(2.0,2.0,4.0), "foo", 3), - (Vectors.dense(2.0,2.0,6.0), "foo", 4), - (Vectors.dense(2.0,2.0,8.0), "foo", 3), - (Vectors.dense(2.0,2.0,10.0), "foo", 1), - (Vectors.dense(2.0,2.0,12.0), "foo", 2), - (Vectors.dense(2.0,2.0,14.0), "foo", 0), - (Vectors.dense(2.0,2.0,16.0), "foo", 1), - (Vectors.dense(2.0,2.0,18.0), "foo", 3), - (Vectors.dense(2.0,2.0,20.0), "foo", 0), - (Vectors.dense(2.0,4.0,2.0), "foo", 2), - (Vectors.dense(2.0,4.0,4.0), "foo", 4), - (Vectors.dense(2.0,4.0,6.0), "foo", 2), - (Vectors.dense(2.0,4.0,8.0), "foo", 2), - (Vectors.dense(2.0,4.0,10.0), "foo", 4), - (Vectors.dense(2.0,4.0,12.0), "foo", 3), - (Vectors.dense(2.0,4.0,14.0), "foo", 2), - (Vectors.dense(2.0,4.0,16.0), "foo", 1), - (Vectors.dense(2.0,4.0,18.0), "foo", 4), - (Vectors.dense(2.0,4.0,20.0), "foo", 4) -], ["features","values","labels"]) - -cnn = (ConditionalKNN().setOutputCol("prediction")) -cnnm = cnn.fit(df) - -test_df = spark.createDataFrame([ - (Vectors.dense(2.0,2.0,2.0), "foo", 1, [0, 1]), - (Vectors.dense(2.0,2.0,4.0), "foo", 4, [0, 1]), - (Vectors.dense(2.0,2.0,6.0), "foo", 2, [0, 1]), - (Vectors.dense(2.0,2.0,8.0), "foo", 4, [0, 1]), - (Vectors.dense(2.0,2.0,10.0), "foo", 4, [0, 1]) -], ["features","values","labels","conditioner"]) - -display(cnnm.transform(test_df)) -``` - -This code should log one run with a ConditionalKNNModel artifact and its parameters. 
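For completeness, here's a minimal sketch of the setup that has to run *before* training code like the ConditionalKNN example above. The allowlist path and app name are placeholders, and on Synapse or Databricks the configuration is normally set at the pool or cluster level as described in the steps above rather than in the session builder:

```python
import mlflow
from pyspark.sql import SparkSession

# Placeholder path: point this at wherever you stored log_model_allowlist.txt.
allowlist_path = "/dbfs/FileStore/PATH_TO_YOUR/log_model_allowlist.txt"

# Tell the MLflow-PySpark integration which model classes it may log.
spark = (
    SparkSession.builder
    .appName("SynapseMLAutologgingExample")  # placeholder app name
    .config("spark.mlflow.pysparkml.autolog.logModelAllowlistFile", allowlist_path)
    .getOrCreate()
)

# Enable autologging before any estimator is fit; parameters, metrics, and the
# fitted model are then recorded without explicit log statements.
mlflow.pyspark.ml.autolog()

# ...fit SynapseML / Spark ML estimators here, as in the example above...
```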
- diff --git a/website/versioned_docs/version-0.10.0/mlflow/examples.md b/website/versioned_docs/version-0.10.0/mlflow/examples.md deleted file mode 100644 index aee9e35882..0000000000 --- a/website/versioned_docs/version-0.10.0/mlflow/examples.md +++ /dev/null @@ -1,141 +0,0 @@ ---- -title: Examples -description: Examples using SynapseML with MLflow ---- - -## Prerequisites - -If you're using Databricks, please install mlflow with below command: -``` -# run this so that mlflow is installed on workers besides driver -%pip install mlflow -``` - -Install SynapseML based on the [installation guidance](../getting_started/installation.md). - -## API Reference - -* [mlflow.spark.save_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.save_model) -* [mlflow.spark.log_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.log_model) -* [mlflow.spark.load_model](https://www.mlflow.org/docs/latest/python_api/mlflow.spark.html#mlflow.spark.load_model) -* [mlflow.log_metric](https://mlflow.org/docs/latest/python_api/mlflow.html#mlflow.log_metric) - -## LightGBMClassificationModel - -```python -import mlflow -from synapse.ml.featurize import Featurize -from synapse.ml.lightgbm import * -from synapse.ml.train import ComputeModelStatistics - -with mlflow.start_run(): - - feature_columns = ["Number of times pregnant","Plasma glucose concentration a 2 hours in an oral glucose tolerance test", - "Diastolic blood pressure (mm Hg)","Triceps skin fold thickness (mm)","2-Hour serum insulin (mu U/ml)", - "Body mass index (weight in kg/(height in m)^2)","Diabetes pedigree function","Age (years)"] - df = spark.createDataFrame([ - (0,131,66,40,0,34.3,0.196,22,1), - (7,194,68,28,0,35.9,0.745,41,1), - (3,139,54,0,0,25.6,0.402,22,1), - (6,134,70,23,130,35.4,0.542,29,1), - (9,124,70,33,402,35.4,0.282,34,0), - (0,93,100,39,72,43.4,1.021,35,0), - (4,110,76,20,100,28.4,0.118,27,0), - (2,127,58,24,275,27.7,1.6,25,0), - (0,104,64,37,64,33.6,0.51,22,1), - (2,120,54,0,0,26.8,0.455,27,0), - (7,178,84,0,0,39.9,0.331,41,1), - (2,88,58,26,16,28.4,0.766,22,0), - (1,91,64,24,0,29.2,0.192,21,0), - (10,101,76,48,180,32.9,0.171,63,0), - (5,73,60,0,0,26.8,0.268,27,0), - (3,158,70,30,328,35.5,0.344,35,1), - (2,105,75,0,0,23.3,0.56,53,0), - (12,84,72,31,0,29.7,0.297,46,1), - (9,119,80,35,0,29.0,0.263,29,1), - (6,93,50,30,64,28.7,0.356,23,0), - (1,126,60,0,0,30.1,0.349,47,1) - ], feature_columns+["labels"]).repartition(2) - - - featurize = (Featurize() - .setOutputCol("features") - .setInputCols(featureColumns) - .setOneHotEncodeCategoricals(True) - .setNumFeatures(4096)) - - df_trans = featurize.fit(df).transform(df) - - lightgbm_classifier = (LightGBMClassifier() - .setFeaturesCol("features") - .setRawPredictionCol("rawPrediction") - .setDefaultListenPort(12402) - .setNumLeaves(5) - .setNumIterations(10) - .setObjective("binary") - .setLabelCol("labels") - .setLeafPredictionCol("leafPrediction") - .setFeaturesShapCol("featuresShap")) - - lightgbm_model = lightgbm_classifier.fit(df_trans) - - # Use mlflow.spark.save_model to save the model to your path - mlflow.spark.save_model(lightgbm_model, "lightgbm_model") - # Use mlflow.spark.log_model to log the model if you have a connected mlflow service - mlflow.spark.log_model(lightgbm_model, "lightgbm_model") - - # Use mlflow.pyfunc.load_model to load model back as PyFuncModel and apply predict - prediction = mlflow.pyfunc.load_model("lightgbm_model").predict(df_trans.toPandas()) - prediction = list(map(str, prediction)) - 
mlflow.log_param("prediction", ",".join(prediction)) - - # Use mlflow.spark.load_model to load model back as PipelineModel and apply transform - predictions = mlflow.spark.load_model("lightgbm_model").transform(df_trans) - metrics = ComputeModelStatistics(evaluationMetric="classification", labelCol='labels', scoredLabelsCol='prediction').transform(predictions).collect() - mlflow.log_metric("accuracy", metrics[0]['accuracy']) -``` - -## Cognitive Services - -Note: Cognitive Services are not supported direct save/load by mlflow for now, so we need to wrap it as a PipelineModel manually. - -The [feature request](https://github.com/mlflow/mlflow/issues/5216) to support them (Transformers in general) has been under progress, please vote for the issue if you'd like. - -```python -import mlflow -from synapse.ml.cognitive import * -from pyspark.ml import PipelineModel - -with mlflow.start_run(): - - text_key = "YOUR_COG_SERVICE_SUBSCRIPTION_KEY" - df = spark.createDataFrame([ - ("I am so happy today, its sunny!", "en-US"), - ("I am frustrated by this rush hour traffic", "en-US"), - ("The cognitive services on spark aint bad", "en-US"), - ], ["text", "language"]) - - sentiment = (TextSentiment() - .setSubscriptionKey(text_key) - .setLocation("eastus") - .setTextCol("text") - .setOutputCol("prediction") - .setErrorCol("error") - .setLanguageCol("language")) - - display(sentiment.transform(df)) - - # Wrap it as a stage in the PipelineModel - sentiment_model = PipelineModel(stages=[sentiment]) - mlflow.spark.save_model(sentiment_model, "sentiment_model") - mlflow.spark.log_model(sentiment_model, "sentiment_model") - - output_df = mlflow.spark.load_model("sentiment_model").transform(df) - display(output_df) - - # In order to call the predict function successfully you need to specify the - # outputCol name as `prediction` - prediction = mlflow.pyfunc.load_model("sentiment_model").predict(df.toPandas()) - prediction = list(map(str, prediction)) - mlflow.log_param("prediction", ",".join(prediction)) -``` diff --git a/website/versioned_docs/version-0.10.0/mlflow/introduction.md b/website/versioned_docs/version-0.10.0/mlflow/introduction.md deleted file mode 100644 index f86bb1a1da..0000000000 --- a/website/versioned_docs/version-0.10.0/mlflow/introduction.md +++ /dev/null @@ -1,21 +0,0 @@ ---- -title: Introduction -description: MLflow support of SynapseML ---- - -## What is MLflow - -[MLflow](https://github.com/mlflow/mlflow) is a platform to streamline machine learning development, including tracking experiments, packaging code into reproducible runs, and sharing and deploying models. MLflow offers a set of lightweight APIs that can be used with any existing machine learning application or library (TensorFlow, PyTorch, XGBoost, etc), wherever you currently run ML code (e.g. in notebooks, standalone applications or the cloud). MLflow's current components are: - -* [MLflow Tracking](https://mlflow.org/docs/latest/tracking.html): An API to log parameters, code, and results in machine learning experiments and compare them using an interactive UI. -* [MLflow Projects](https://mlflow.org/docs/latest/projects.html): A code packaging format for reproducible runs using Conda and Docker, so you can share your ML code with others. -* [MLflow Models](https://mlflow.org/docs/latest/models.html): A model packaging format and tools that let you easily deploy the same model (from any ML library) to batch and real-time scoring on platforms such as Docker, Apache Spark, Azure ML and AWS SageMaker. 
-* [MLflow Model Registry](https://mlflow.org/docs/latest/model-registry.html): A centralized model store, set of APIs, and UI, to collaboratively manage the full lifecycle of MLflow Models. - -## Installation - -Install MLflow from PyPI via `pip install mlflow` - -MLflow requires `conda` to be on the `PATH` for the projects feature. - -Learn more about MLflow on their [GitHub page](https://github.com/mlflow/mlflow). diff --git a/website/versioned_docs/version-0.10.0/reference/R-setup.md b/website/versioned_docs/version-0.10.0/reference/R-setup.md deleted file mode 100644 index 9c4c76acdd..0000000000 --- a/website/versioned_docs/version-0.10.0/reference/R-setup.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -title: R setup -hide_title: true -sidebar_label: R setup -description: R setup and example for SynapseML ---- - - -# R setup and example for SynapseML - -## Installation - -**Requirements**: You'll need to have R and -[devtools](https://github.com/hadley/devtools) installed on your -machine. - -To install the current SynapseML package for R use: - -```R -... -devtools::install_url("https://mmlspark.azureedge.net/rrr/synapseml-0.10.0.zip") -... -``` - -### Importing libraries and setting up spark context - -It will take some time to install all dependencies. Then, run: - -```R -... -library(sparklyr) -library(dplyr) -config <- spark_config() -config$sparklyr.defaultPackages <- "com.microsoft.azure:synapseml_2.12:0.10.0" -sc <- spark_connect(master = "local", config = config) -... -``` - -This will create a spark context on local machine. - -We'll then need to import the R wrappers: - -```R -... -library(synapseml) -... -``` - -## Example - -We can use the faithful dataset in R: - -```R -... -faithful_df <- copy_to(sc, faithful) -cmd_model = ml_clean_missing_data( - x=faithful_df, - inputCols = c("eruptions", "waiting"), - outputCols = c("eruptions_output", "waiting_output"), - only.model=TRUE) -sdf_transform(cmd_model, faithful_df) -... -``` - -You should see the output: - -```R -... -# Source: table [?? x 4] -# Database: spark_connection - eruptions waiting eruptions_output waiting_output - - 1 3.600 79 3.600 79 - 2 1.800 54 1.800 54 - 3 3.333 74 3.333 74 - 4 2.283 62 2.283 62 - 5 4.533 85 4.533 85 - 6 2.883 55 2.883 55 - 7 4.700 88 4.700 88 - 8 3.600 85 3.600 85 - 9 1.950 51 1.950 51 - 10 4.350 85 4.350 85 - # ... with more rows -... -``` - -## Azure Databricks - -In Azure Databricks, you can install devtools and the spark package from URL -and then use spark_connect with method = "databricks": - -```R -install.packages("devtools") -devtools::install_url("https://mmlspark.azureedge.net/rrr/synapseml-0.10.0.zip") -library(sparklyr) -library(dplyr) -sc <- spark_connect(method = "databricks") -faithful_df <- copy_to(sc, faithful) -unfit_model = ml_light_gbmregressor(sc, maxDepth=20, featuresCol="waiting", labelCol="eruptions", numIterations=10, unfit.model=TRUE) -ml_train_regressor(faithful_df, labelCol="eruptions", unfit_model) -``` - -## Building from Source - -Our R bindings are built as part of the [normal build -process](developer-readme.md). To get a quick build, start at the root -of the synapsemldirectory, and: - -```bash -./runme TESTS=NONE -unzip ./BuildArtifacts/packages/R/synapseml-0.0.zip -``` - -You can then run R in a terminal and install the above files directly: - -```R -... -devtools::install_local("./BuildArtifacts/packages/R/synapseml") -... 
-``` diff --git a/website/versioned_docs/version-0.10.0/reference/SAR.md b/website/versioned_docs/version-0.10.0/reference/SAR.md deleted file mode 100644 index ac67359dda..0000000000 --- a/website/versioned_docs/version-0.10.0/reference/SAR.md +++ /dev/null @@ -1,183 +0,0 @@ ---- -title: Smart Adaptive Recommendations (SAR) Algorithm -hide_title: true -sidebar_label: SAR Algorithm ---- - - -# Smart Adaptive Recommendations (SAR) Algorithm - -The following document is a subset of the implemented logic. The original can be found [here](https://github.com/Microsoft/Product-Recommendations/blob/master/doc/sar.md) - -**SAR** is a fast scalable adaptive algorithm for personalized recommendations based on user transactions history and items description. It produces easily explainable / interpretable recommendations. - -The overall architecture of SAR is shown in the following diagram: - -![SAR Diagram](https://i.imgur.com/AMPShWl.jpg) - -## Input - -The input to SAR consists of: - -- transaction (usage) data -- catalog data - -**Transaction data**, also called **usage data**, contains information on interactions between users and items and has the following schema: - -`,,