diff --git a/.github/workflows/ci.yaml b/.github/workflows/checkin_ci.yaml similarity index 97% rename from .github/workflows/ci.yaml rename to .github/workflows/checkin_ci.yaml index 792705d..e1d33a3 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/checkin_ci.yaml @@ -1,4 +1,4 @@ -name: CI +name: CHECKIN_CI # Run this workflow every time a commit is pushed to main or a pull request is opened against main on: @@ -7,10 +7,11 @@ on: - main - dev pull_request: - branches: + branches: - main - dev - + workflow_dispatch: + jobs: build-and-test: runs-on: ubuntu-latest @@ -20,7 +21,7 @@ jobs: username: ${{ secrets.ACR_PRINCIPAL_ID }} password: ${{ secrets.ACR_PRINCIPAL_PWD }} timeout-minutes: 15 - + steps: - uses: actions/checkout@master - name: Build diff --git a/.github/workflows/issues.yaml b/.github/workflows/issues.yaml new file mode 100644 index 0000000..3dcf4c9 --- /dev/null +++ b/.github/workflows/issues.yaml @@ -0,0 +1,24 @@ +name: Sync issue to Azure DevOps work item + +on: + issues: + types: + [opened, edited, deleted, closed, renamed, reopened, labeled, unlabeled, assigned] + +jobs: + alert: + runs-on: ubuntu-latest + steps: + - uses: danhellem/github-actions-issue-to-work-item@master + env: + ado_token: "${{ secrets.ADO_PERSONAL_ACCESS_TOKEN }}" + github_token: "${{ secrets.GH_PERSONAL_ACCESS_TOKEN }}" + ado_organization: "${{ secrets.ADO_ORGANIZATION }}" + ado_project: "${{ secrets.ADO_PROJECT }}" + ado_wit: "Bug" + ado_new_state: "New" + ado_active_state: "Active" + ado_resolved_state: "Resolved" + ado_close_state: "Closed" + ado_bypassrules: true + log_level: 100 diff --git a/.github/workflows/nightly_ci.yaml b/.github/workflows/nightly_ci.yaml new file mode 100644 index 0000000..d4c27b7 --- /dev/null +++ b/.github/workflows/nightly_ci.yaml @@ -0,0 +1,66 @@ +name: NIGHTLY_CI +on: + push: + branches: + - main + - dev + schedule: + - cron: '30 8 * * *' + +jobs: + build-and-test: + runs-on: ubuntu-latest + container: + image: rfsxbuild.azurecr.io/gnuradiopipeline:3.9.2.0-3 + credentials: + username: ${{ secrets.ACR_PRINCIPAL_ID }} + password: ${{ secrets.ACR_PRINCIPAL_PWD }} + timeout-minutes: 20 + + steps: + - uses: actions/checkout@master + - name: Build + run: | + cd gr-azure-software-radio; pip install -r python/requirements.txt + mkdir build; cd build; cmake ..; cmake --build . 
-j --config Release + - name: Install + run: | + cd gr-azure-software-radio/build + sudo make install -j + - name: Update GNURadio preferences + run: | + mkdir -p "${HOME}/.gnuradio/prefs/" + echo "gr::vmcircbuf_sysv_shm_factory" > "${HOME}/.gnuradio/prefs/vmcircbuf_default_factory" + - name: Run Tests + env: + AZURE_STORAGE_CONNECTION_STRING: ${{ secrets.AZURE_STORAGE_CONNECTION_STRING }} + AZURE_STORAGE_URL: ${{ secrets.AZURE_STORAGE_URL }} + AZURE_STORAGE_SAS: ${{ secrets.AZURE_STORAGE_SAS }} + AZURE_SUBSCRIPTION_ID: ${{ secrets.AZURE_SUBSCRIPTION_ID }} + AZURE_TENANT_ID: ${{ secrets.AZURE_TENANT_ID }} + AZURE_CLIENT_ID: ${{ secrets.AZURE_CLIENT_ID }} + AZURE_CLIENT_SECRET: ${{ secrets.AZURE_CLIENT_SECRET }} + AZURE_EVENTHUB_CONNECTION_STRING: ${{ secrets.AZURE_EVENTHUB_CONNECTION_STRING }} + AZURE_EVENTHUB_NAME: ${{ secrets.AZURE_EVENTHUB_NAME }} + AZURE_EVENTHUB_CONSUMER_GROUP: ${{ secrets.AZURE_EVENTHUB_CONSUMER_GROUP }} + AZURE_EVENTHUB_HOST_NAME: ${{ secrets.AZURE_EVENTHUB_HOST_NAME }} + AZURE_KEYVAULT_NAME: ${{ secrets.AZURE_KEYVAULT_NAME }} + AZURE_KEYVAULT_TEST_KEY: ${{ secrets.AZURE_KEYVAULT_TEST_KEY }} + AZURE_STORAGE_READONLY_SAS: ${{ secrets.AZURE_STORAGE_READONLY_SAS }} + run: | + cd gr-azure-software-radio/build + export PYTHONPATH=${{ secrets.PYTHONPATH }}; export LD_LIBRARY_PATH=${{ secrets.LD_LIBRARY_PATH }};ctest -V -T test; + echo " " + echo "Now running integration..." + cd ..; cd python; python3 -m unittest integration_blob_source.py; python3 -m unittest integration_blob_sink.py; python3 -m unittest integration_default_credentials.py; python3 -m unittest integration_eventhub_sink.py; python3 -m unittest integration_eventhub_source.py; + + shell: bash + - name: pylint + continue-on-error: false + run: | + export PYTHONPATH=${{ secrets.PYTHONPATH }}; export LD_LIBRARY_PATH=${{ secrets.LD_LIBRARY_PATH }}; + cd .. + pylint azure-software-radio --rcfile azure-software-radio/.pylintrc + - name: check step + if: success() + run: echo pylint has passed all checks diff --git a/README.md b/README.md index 348a05f..9f166c6 100644 --- a/README.md +++ b/README.md @@ -3,9 +3,10 @@ Here we are sharing two assets we have developed to-date. Click on either link to dive in further! -1. [GNURadio - Azure Out of Tree Module](./gr-azure-software-radio/README.md) +1. [GNU Radio - Azure Out of Tree Module](./gr-azure-software-radio/README.md) 2. [Azure software radio Developer Virtual Machine](./pages/devvm.md) +We have also prepared a series of [tutorials](./tutorials/README.md) that walk you through setting up and using the Developer VM with the Azure Out of Tree Modules. ## Support diff --git a/gr-azure-software-radio/README.md b/gr-azure-software-radio/README.md index 07f7795..a0dbf6b 100644 --- a/gr-azure-software-radio/README.md +++ b/gr-azure-software-radio/README.md @@ -1,214 +1,167 @@ # Azure software radio Out of Tree Module -The Azure software radio Out of Tree (OOT) Module allows users to leverage and easily use cloud resources in GNU Radio directly within a flowgraph. This OOT module can be used in a VM in the cloud or a local machine. +The gr-azure-software-radio Out of Tree (OOT) Module allows users to easily leverage Azure cloud resources from within a GNU Radio flowgraph. You can use this OOT module with your existing GNU Radio development environment, or within a VM in the cloud. 
Example use-cases involve storing and retrieving RF recordings from Blob (file) storage, communicating with [DIFI](https://dificonsortium.org/about/) devices from within GNU Radio, or creating complex cloud applications using Azure Event Hubs as a bridge between your flowgraph and [Azure services](https://azure.microsoft.com/en-us/services/). We are excited to see what can be created by combining GNU Radio with the power and scalability of the cloud! + +
## Table of Contents -- [Azure software radio Out of Tree Module](#azure-software-radio-out-of-tree-module) - - [Table of Contents](#table-of-contents) - - [Getting Started](#getting-started) - - [Prerequisites](#prerequisites) - - [Installing Azure software radio OOT](#installing-azure-software-radio-oot) - - [Running the Unit Tests](#running-the-unit-tests) - - [Running the Integration Tests](#running-the-integration-tests) - - [Blob Integration Tests](#blob-integration-tests) - - [Event Hub Integration Tests](#event-hub-integration-tests) - - [Frequently Asked Questions](#frequently-asked-questions) -- [Azure software radio Out of Tree Module Blocks](#azure-software-radio-out-of-tree-module-blocks) +- [Getting Started](#getting-started) + - [Prerequisites](#prerequisites) + - [Installing Azure software radio OOT](#installing-azure-software-radio-oot) + - [Running the Unit Tests](#running-the-unit-tests) + - [Resolutions to Common Problems During Installation and Tests](#resolutions-to-common-problems-during-installation-and-tests) +- [Examples](#examples) +- [Blocks Documentation](#azure-software-radio-out-of-tree-module-blocks) - [Key Vault Block](#key-vault-block) - [Blob Blocks](#blob-blocks) - - [Blob Block Descriptions](#blob-block-descriptions) - [Event Hub Blocks](#event-hub-blocks) - - [Event Hub Block Descriptions](#event-hub-block-descriptions) - - [IEEE-ISTO Std 4900-2021: Digital IF Interoperability Standard (DIFI)](#ieee-isto-std-4900-2021-digital-if-interoperability-standard-difi) - - [DIFI Block Descriptions](#difi-block-descriptions) - + - [DIFI Blocks using the IEEE-ISTO Std 4900-2021: Digital IF Interoperability Standard](#difi-blocks-using-the-ieee-isto-std-4900-2021-digital-if-interoperability-standard) +- [Frequently Asked Questions](#frequently-asked-questions) ## Getting Started -To get started, first please follow the install guides below. - -These instructions will get you a copy of the project up and running on your local machine for development and testing purposes. +The following installation instructions will get you up and running with the Azure OOT Module on your local machine. ### Prerequisites +This project depends on the GNU Radio 3.9.x runtime and development dependencies. This project does not +support GNU Radio 3.10 at this time. See the [GNU Radio installation instructions](https://wiki.gnuradio.org/index.php/InstallingGR#From_Binaries) for steps on +installing GNU Radio from binaries (note that GNU Radio packaged with Ubuntu 20 is only 3.8). Some package managers do not automatically install all of the development dependencies, +so you may need to separately install and configure some of them. The Azure software radio OOT module requires the following: + +- GNU Radio 3.9.x (not 3.8.x or 3.10.x) +- Python 3.8 or greater +- python3-pip +- cmake +- liborc-dev +- doxygen +- pytest +- pybind11 + +See the installation steps below for how to install these dependencies. + +**NOTE:** If using the Azure CLI, you will need version 2.17.1 or newer. This module is not compatible with +the Azure CLI available in the default apt repository on Ubuntu 20. If this older version of the Azure CLI is present +on your system, the installation of this OOT module may fail or the module may crash at runtime. Please install the +Azure CLI according to the recommendations found in [AZ CLI Installation in Linux](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-linux?pivots=apt). 
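+
+If you are not sure which version of the Azure CLI is installed, you can check from a terminal before continuing (standard Azure CLI usage, nothing project-specific assumed):
+
+```bash
+# Prints the Azure CLI version and its bundled components;
+# verify that azure-cli reports 2.17.1 or newer.
+az --version
+```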
-You will need to install and configure the following before installing the Azure software radio OOT module: - -``` -GnuRadio 3.9.0 or greater -python 3.8 or greater -``` -NOTE: If you have installed the Azure CLI with the default apt package on Ubuntu 20, the install may fail or the module may crash at runtime. See [Ubuntu 20 CLI](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-linux?pivots=apt) ### Installing Azure software radio OOT -``` -pip install -r python/requirements.txt +The following steps show how to install this OOT module on a Debian-based OS with GNU Radio already installed. They have been tested to work under Ubuntu 20. If you see error messages after running any of the following steps, stop and check our [FAQ](./docs/FAQ.md) for how to +resolve the problem. + +```bash +sudo apt-get install python3-pip cmake liborc-dev doxygen +sudo pip install pytest pybind11 + +git clone https://github.com/microsoft/azure-software-radio.git +cd azure-software-radio +cd gr-azure-software-radio + +sudo pip install -r python/requirements.txt mkdir build cd build cmake .. -make +make -j4 sudo make install +sudo ldconfig ``` -### Running the Unit Tests -Run the QA tests with any of the following methods: - - From the terminal you'll use to run the tests, run: +At this point the OOT module should have been installed, and you should see additional blocks within GNU Radio Companion. - From the build directory: +### Running the Unit Tests +If you would like to run the QA tests, there are two methods: + 1. From within the build directory, run: ``` make test ``` - - Or from the python directory: - ``` - python -m pytest qa_* - ``` - -### Running the Integration Tests - -#### Blob Integration Tests -To run the integration tests for the blob blocks, you must first create a storage account on Azure -and obtain the connection string. - -Also generate a SAS token for the storage account with at least read and list permissions so we can -test out all the auth options for the blob blobs. - -The blob integration test code require the following environment variables: -- AZURE_STORAGE_CONNECTION_STRING: Connection string for the storage account you created for testing. -- AZURE_STORAGE_URL: must contain the the URL to the storage account with no trailing '/' -- AZURE_STORAGE_SAS: must contain a SAS token string for the blob storage account specified in AZURE_STORAGE_URL. The - SAS token must have full permissions to the storage account. -- AZURE_STORAGE_READONLY_SAS: must contain a SAS token string for the blob storage account specified in - AZURE_STORAGE_URL, but with read-only permissions to the storage account. - - -Finally, you must have at least one set of credentials supported by DefaultAzureCredential in your -environment that has permissions to the blob account to test against. Running `az login` should be -sufficient to provide this. - -The integration test code will create a randomly generated container to store -unit test data requiring interactions with actual Azure infrastructure. - -#### Key Vault Integration Tests -To run the Key Vault integration tests, you'll need to first create a Key Vault and store a secret with a value of -"3.14". Make note of the key you used to store the secret. Next, you'll need to export the following environment -variables: - -- AZURE_KEYVAULT_NAME: Environment variable containing the name of the Key Vault which contains the test data. If the - full URL to your Key Vault is "https://my-key-vault-name.vault.azure.net", you would use "my-key-vault-name". 
-- AZURE_KEYVAULT_TEST_KEY: Environment variable containing the key of the secret required for the test. Again, the value - of the secret must be "3.14" for the test to pass. -#### Event Hub Integration Tests -In order to run the integration tests for the event hub blocks, you must first create an event hub resource on Azure, create a consumer group in the event hub, obtain the connection string and event hub entity name. + You can review detailed test output (including any failures) in Testing/Temporary/LastTest.log. -The event hub integration test code require the following environment variables: -- AZURE_EVENTHUB_CONNECTION_STRING: Connection string for the event hub namespace you created for testing. -- AZURE_EVENTHUB_NAME: The event hub entity name in the namespace. -- AZURE_EVENTHUB_CONSUMER_GROUP: The consumer group entity name in the event hub. - -Tests can be run with any of the following methods: - From the terminal you'll use to run the tests, run: - ``` - az login - ``` - - Then, from the python directory: + 2. From within the python directory, run: ``` - python -m pytest integration_* + python -m pytest qa_* ``` - or + Pytest will show detailed test results directly in the output of this command. - ``` - python3 -m unittest integration_* - ``` +### Resolutions to Common Problems During Installation and Tests +For a list of common problems and resolutions, please check our [FAQ](./docs/FAQ.md) to see if your issue has been addressed. -## Frequently Asked Questions -For a list of common questions, including problems and resolutions, please check our [FAQ](./docs/FAQ.md) +## Examples +The [examples](./examples) folder has a collection of flowgraphs and supporting files that illustrate common ways of +using the blocks provided in this module. See the [README in the examples folder](./examples/README.md) to get started. -# Azure software radio Out of Tree Module Blocks +## Azure software radio Out of Tree Module Blocks -## Key Vault Block +### Key Vault Block The Key Vault block allows users to pull down keys and secrets from an [Azure Key Vault](https://docs.microsoft.com/en-us/azure/key-vault/general/overview) in GNU Radio. - It is expected that the user will setup and store secrets in an Azure Key Vault prior to pulling down keys using this block. To create a Key Vault, see [Create Key Vault](https://docs.microsoft.com/en-us/azure/key-vault/secrets/quick-create-cli). -See the [Key Vault Example](./examples/README.md#key-vault-example). +For a brief tutorial on using this block, see the [Key Vault Example](./examples/README.md#key-vault). -## Blob Blocks +### Blob Blocks The two Blob blocks (source and sink) provide an interface to read and write samples to [Azure Blob storage](https://docs.microsoft.com/en-us/azure/storage/blobs/storage-blobs-introduction) in GNU Radio. - It is expected that the user will setup a storage account and a container prior to accessing Blob storage with the Blob source and sink blocks. To create a storage account, see [Create Storage Account](https://docs.microsoft.com/en-us/azure/storage/common/storage-account-create?tabs=azure-portal). -### Blob Block Descriptions - * Blob Source Block\ - The Blob source block reads samples from Azure Blob storage. This block currently supports complex64 inputs and block blobs (Page blobs and append blobs are not supported at this time). + * __Blob Source Block__\ + The Blob source block reads samples from Azure Blob storage. 
This block currently supports block blobs and the following outputs: Complex float32, Complex int16, Complex int8, float, int, short and byte (Page blobs and append blobs are not supported at this time). - * Blob Sink Block\ - The Blob sink block writes samples to Azure Blob storage. This block currently supports complex64 inputs and block blobs (Page blobs and append blobs are not supported at this time). + * __Blob Sink Block__\ + The Blob sink block writes samples to Azure Blob storage. This block currently supports block blobs and the following inputs: Complex float32, Complex int16, Complex int8, float, int, short and byte (Page blobs and append blobs are not supported at this time). - There are several ways to authenticate to the Azue blob backend, these blocks support authentication using a connection string, a URL with an embedded SAS token, or use credentials supported by the DefaultAzureCredential class. +There are several ways to authenticate to the Azure blob backend; these blocks support authentication using a connection string, a URL with an embedded SAS token, or credentials supported by the DefaultAzureCredential class. - See the [Blob Examples](./examples/README.md). +For a brief tutorial on using these blocks, see the [Blob Examples](./examples/README.md#blob-storage). -## Event Hub Blocks +### Event Hub Blocks The Event Hub blocks (source and sink) provide an interface to send and receive events to [Azure Event Hubs](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-about) using the message passing interface in GNU Radio. - It is expected that the user will create an Event Hubs namespace, Event Hub entity and consumer group prior to using the Event Hub source and sink blocks. To create an Event Hub, see [Create an Event Hub](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-create). -### Event Hub Block Descriptions - * EventHub Source Block\ + * __EventHub Source Block__\ The EventHub source block receives a JSON formatted event message from Azure Event Hub and converts it to GNU Radio PMT format. - * EventHub Sink Block\ + * __EventHub Sink Block__\ The EventHub sink block converts a PMT message to JSON and sends it to Azure Event Hub. - These blocks support multiple ways to authenticate to the Azue Event Hub backend, such as using a connection string, a SAS token, or use credentials supported by the DefaultAzureCredential class. +These blocks support multiple ways to authenticate to the Azure Event Hub backend, such as a connection string, a SAS token, or credentials supported by the DefaultAzureCredential class. - See the [Event Hub Examples](./examples/README.md). +For a brief tutorial on using these blocks, see the [Event Hub Examples](./examples/README.md#event-hub). -## IEEE-ISTO Std 4900-2021: Digital IF Interoperability Standard (DIFI) +### DIFI Blocks Using the IEEE-ISTO Std 4900-2021: Digital IF Interoperability Standard This is a set of GNU Radio blocks based on IEEE-ISTO Std 4900-2021: Digital IF Interoperability Standard version 1.0. -There are two DIFI blocks (source and sink) as part of this out of tree module. The Bit Depths currently supported are 8 and 16 with support for the full range of bit depths specified in the DIFI standard coming later. +There are two DIFI blocks (source and sink) as part of this OOT module. The bit depths currently supported are 8 and 16, with upcoming support for the full range of bit depths specified in the DIFI standard. 
-### DIFI Block Descriptions - * DIFI Source Block\ + * __DIFI Source Block__\ The DIFI source block receives UDP DIFI packets from a given IP address and port. It then forwards them to GNU Radio as a complex64 (gr_complex) or signed complex 8 (std::complex). This block emits the following tags in the following situations: - pck_n tag: Emitted when a missed packet occurs, will update the upstream blocks with the current packet number to expect and the current time stamps - context tag: Emitted when a new DIFI context packet is received with the context packet dynamic information - static_change: Emitted when the static parts of the DIFI context packet changes - DIFI Advanced: - This tab contains more advanced settings for the DIFI block and should be used by users who know the devices and network in use. + - pck_n tag: Emitted when a missed packet occurs; updates the upstream blocks with the current packet number to expect and the current time stamps + - context tag: Emitted when a new DIFI context packet is received with the context packet dynamic information + - static_change: Emitted when the static parts of the DIFI context packet change - Context Packet Mismatch Behavior + The DIFI Advanced tab contains more advanced settings for the DIFI block and should be used by users who know the devices and network in use. + + Context Packet Mismatch Behavior: - Default: Throws exceptions if context packet is incorrect or non-compliant - Ignore Mismatches - Forward data, no warnings: Entirely ignore the context packet, only forwards data - - Throw Warnings - Forward: Displays Warnings about context packet mismatch or non-compliant context packets, but still forward DIFI data. + - Throw Warnings - Forward: Displays warnings about context packet mismatch or non-compliant context packets, but still forwards DIFI data - Throw Warnings - No Forward: Displays Warnings about context packet mismatch or non-compliant context packets, but won't forward data until a correct context packet is received or one that matches the given settings - * DIFI Sink Block\ - The DIFI sink block forwards packets to a given IP address and port number and packets the data with the given bit depth. This block operates in two modes, standalone and paired. + * __DIFI Sink Block__\ + The DIFI sink block forwards packets to a given IP address and port number and packs the data with the given bit depth. This block operates in two modes, standalone and paired: - Pair Mode: The block expects to be paired with a DIFI source block that sends context packets, timing information, and packet count information. The sink block forwards context packets received via tags. For data packets, it adds the correct timestamps and packet number. The data format is packeted as complex64 (gr_complex) or complex signed 8 (std::complex)) samples. + - Pair Mode: The block expects to be paired with a DIFI source block that sends context packets, timing information, and packet count information. The sink block forwards context packets received via tags. For data packets, it adds the correct timestamps and packet number. The data format is packed as complex64 (gr_complex) or complex signed 8 (std::complex) samples. - Standalone Mode: In standalone mode, it is expected the user will supply the context packet information via GRC or the constructor of the class. For now, the context packet payload data are static once specified by the user. 
Like paired mode, the data format to pack is, complex64 (gr_complex) or complex signed 8 (std::complex)) samples. + - Standalone Mode: In standalone mode, it is expected the user will supply the context packet information via GRC or the constructor of the class. For now, the context packet payload data are static once specified by the user. Similar to paired mode, the data format to pack is complex64 (gr_complex) or complex signed 8 (std::complex) samples. Scaling Mode: To help mitigate quantization error, the DIFI Sink has an optional helper feature to apply a gain & offset to the input signal. The first mode "Manual" allows a user to manually set gain & offset. In Min-Max mode the user supplies the max and min expected I & Q values and the block solves for a gain & offset based on these and the specified bit depth. Note: this block converts from float 32 I and Q down to the specified bit depth for I and Q, which can cause significant quantization error for small signals. - See [DIFI Examples](./examples/README.md), [DIFI Paired](./examples/difi_paired_example.grc) and [DIFI Standalone](./examples/difi_standalone.grc) for block examples. - - - - - - - - - +For a brief tutorial on using these blocks, see the [DIFI Examples](./examples/README.md#difi-source-and-sink-examples). +## Frequently Asked Questions +For a list of common questions, including problems and resolutions, please check our [FAQ](./docs/FAQ.md). diff --git a/gr-azure-software-radio/docs/FAQ.md b/gr-azure-software-radio/docs/FAQ.md index b59b4a8..70e6dc2 100644 --- a/gr-azure-software-radio/docs/FAQ.md +++ b/gr-azure-software-radio/docs/FAQ.md @@ -1,42 +1,115 @@ # Frequently asked questions -## How can I file an issue? - See our guidelines on [How to file issues and get help](../../SUPPORT.md) or [Reporting Security Issues](../../SECURITY.md#reporting-security-issues). +## How can I file an issue? +See our guidelines on [How to file issues and get help](../../SUPPORT.md) or [Reporting Security Issues](../../SECURITY.md#reporting-security-issues). -## How can I contribute to this project? - See our [Contributing](../../README.md#contributing) guide for more details. +## How can I contribute to this project? +See our [Contributing](../../README.md#contributing) guide for more details. -## Why is cmake failing to find azure components? - If you are having issues installing gr-azure-software-radio, the first troubleshooting step is to make sure the python requirements have been successfully installed. To install the requirements use ``` pip install -r python/requirements.txt ``` (or follow the [Installation Instructions](../../README.md/#installing-azure-software-radio-oot)). - Then, verify the installed package versions using ``` pip freeze -r python/requirements.txt ```. +## I'm getting a CMAKE error that says it cannot find Gnuradio. What do I do? +If you see an error like the following: - ### Installing requirements errors with version conflicts, what should I do? - If there are version conflicts with the Azure packages, consider using a [virtual environment](https://docs.python.org/3/tutorial/venv.html) to manage package dependencies. +``` +CMake Error at CMakeLists.txt:77 (find_package): + Could not find a configuration file for package "Gnuradio" that is compatible with requested version "3.9". +``` - If the version conflicts are due to the Azure CLI installation using pip, refer to the [Azure CLI Installation](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) for the recommended installation options. 
+It's likely that either GNU Radio isn't installed, or your environment isn't set up properly for CMake to find where it was installed. Please check the [GNU Radio Installation Instructions](https://wiki.gnuradio.org/index.php/InstallingGR) for suggestions on how to properly install and configure GNU Radio for your environment. + +## Why is cmake failing to find Azure components? +If you are having issues installing gr-azure-software-radio, the first troubleshooting step is to make sure the python requirements have been successfully installed. To install the requirements use ``` pip install -r python/requirements.txt ``` (or follow the [Installation Instructions](../../README.md/#installing-azure-software-radio-oot)). +Then, verify the installed package versions using ``` pip freeze -r python/requirements.txt ```. + +## I'm seeing version conflict errors when I try to install the Python dependencies, what should I do? +If there are version conflicts with the Azure packages, consider using a [virtual environment](https://docs.python.org/3/tutorial/venv.html) to manage package dependencies. + +If the version conflicts are due to the Azure CLI installation using pip, refer to the [Azure CLI Installation][azure-cli-installation] for the recommended installation options. ## Why is my build complaining about liborc and failing? - If you get a failure related to liborc when trying to compile the OOT module that looks like this: - ``` - make[2]: *** No rule to make target '/usr/lib/x86_64-linux-gnu/liborc-0.4.so', needed by 'lib/libgnuradio-azure_software_radio.so.v1.0-compat-xxx-xunknown'. Stop. - make[1]: *** [CMakeFiles/Makefile2:251: lib/CMakeFiles/gnuradio-azure_software_radio.dir/all] Error 2 - make: *** [Makefile:141: all] Error 2 - ``` - - You'll need to install the liborc package. On Ubuntu 20.04, you can install the missing package by running: - ``` - sudo apt install liborc-0.4-dev - ``` - - You should now be able to compile gr-azure-software-radio. - -## Failures importing azure_software_radio in the flowgraph - By default Azure software radio will be installed in the ``` /usr/local/ ``` directory. Use the ``` CMAKE_INSTALL_PREFIX ``` to install elsewhere. - - Add the install prefix to your environment - ``` - export PYTHONPATH=/lib/python3/dist-packages:/lib/python3/site-packages:$PYTHONPATH - export LD_LIBRARY_PATH=/lib:$LD_LIBRARY_PATH - ``` - - \ No newline at end of file +If you get a failure related to liborc when trying to compile the OOT module that looks like this: +``` +make[2]: *** No rule to make target '/usr/lib/x86_64-linux-gnu/liborc-0.4.so', needed by 'lib/libgnuradio-azure_software_radio.so.v1.0-compat-xxx-xunknown'. Stop. +make[1]: *** [CMakeFiles/Makefile2:251: lib/CMakeFiles/gnuradio-azure_software_radio.dir/all] Error 2 +make: *** [Makefile:141: all] Error 2 +``` + +You'll need to install the liborc package. On Ubuntu 20.04, you can install the missing package by running: +``` +sudo apt install liborc-0.4-dev +``` + +You should now be able to compile gr-azure-software-radio. + +## I just compiled and installed the OOT Module, but all of the unit tests are failing. What now? +Check the test output for the specific errors that were thrown. If you ran the tests with `make test`, the error logs should be in Testing/Temporary/LastTest.log, starting from the build directory. + +If you ran the tests using python and pytest, the errors should be in the output of the test command. 
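+
+For example, from the build directory you can re-run a single failing test with verbose output to see the full error; the test name below is a placeholder, so substitute the name reported by `make test`:
+
+```bash
+# -R selects tests whose names match the given regex; -V prints their full output
+ctest -V -R qa_blob_source
+```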
+ + +## I'm getting an ImportError for libgnuradio-azure_software_radio when running the unit tests +If you see an error that looks like the following: + +``` +ImportError: libgnuradio-azure_software_radio.so.1.0.0git: cannot open shared object file: No such file or directory +``` + +If you were able to run `make` and `make install` with no errors, try running the following: + +``` +sudo ldconfig +``` + +This should enable the Linux dynamic library loader to find your newly installed shared object file. + +## I'm getting a ModuleNotFoundError +If you see the following error, + +``` +ModuleNotFoundError: No module named 'azure_software_radio' +``` +then Python can't find the gr-azure-software-radio module. Try running + +``` +export PYTHONPATH=/usr/local/lib/python3/dist-packages/ +``` + +If that resolves the issue, you may want to update your environment to include `/usr/local/lib/python3/dist-packages/` in your PYTHONPATH, possibly by adding the following line in your ~/.bashrc file: + +``` +export PYTHONPATH=/usr/local/lib/python3/dist-packages/:$PYTHONPATH +``` + +## I'm getting an ImportError for the BlobServiceClient +If you see an error like: +``` +ImportError: cannot import name 'BlobServiceClient' from 'azure.storage.blob' (/usr/lib/python3/dist-packages/azure/storage/blob/__init__.py +``` +It is very likely you have an Azure component conflict in your environment. This may occur if your system has an older version of the Azure CLI installed. Please see the [recommendations on how to install the Azure CLI][azure-cli-installation]. + +## I'm seeing gr::log :ERROR: statements in Unit and Integration test output +If you see the following lines in your test output: + +``` +gr::log :ERROR: vmcircbuf_prefs::get - /home/youruser/.gnuradio/prefs/vmcircbuf_default_factory: No such file or directory +gr::log :ERROR: vmcircbuf_createfilemapping - vmcircbuf_createfilemapping: createfilemapping is not available +``` + +This means that GNU Radio is unable to find a certain file in your GNU Radio configuration directory. This file is generally created the +first time you run a flowgraph in GNU Radio Companion, but you can also create one yourself by running: + +``` +mkdir -p "${HOME}/.gnuradio/prefs/" +echo "gr::vmcircbuf_sysv_shm_factory" > "${HOME}/.gnuradio/prefs/vmcircbuf_default_factory" +``` + +## Failures importing azure_software_radio in the flowgraph +By default Azure software radio will be installed in the ``` /usr/local/ ``` directory. Use the ``` CMAKE_INSTALL_PREFIX ``` to install elsewhere. + +Add the install prefix to your environment +``` +export PYTHONPATH=/lib/python3/dist-packages:/lib/python3/site-packages:$PYTHONPATH +export LD_LIBRARY_PATH=/lib:$LD_LIBRARY_PATH +``` + + +[azure-cli-installation]: https://docs.microsoft.com/en-us/cli/azure/install-azure-cli-linux?pivots=apt "Azure CLI Installation" \ No newline at end of file diff --git a/gr-azure-software-radio/examples/README.md b/gr-azure-software-radio/examples/README.md index 9678702..435d37f 100644 --- a/gr-azure-software-radio/examples/README.md +++ b/gr-azure-software-radio/examples/README.md @@ -1,119 +1,52 @@ # Azure software radio examples -## Table of Contents -- [DIFI Examples](#difi-examples) -- [Key Vault Example](#key-vault-example) -- [Blob Examples](#blob-source-and-sink-examples) -- [Event Hub Examples](#event-hub-examples) - -# DIFI Examples - -The DIFI Source block is based on IEEE-ISTO Std 4900-2021: Digital IF Interoperability 1.0 Standard. 
The example shows the use of the block in both paired and standalone mode. In paired mode, the DIFI sink is expected to be paired with a DIFI source block, else it will have unexpected behavior. If no DIFI source block is used, the DIFI sink block should be used in standalone mode. In standalone mode one must specify the fields that would have been in a context packet in paired mode. The examples show both of these situations. - -- difi_paried_example: This will need an external DIFI source, either hardware or software that sends DIFI packets -- difi_standalone: This is expected to be run with samples coming from GNURadio and not an external DIFI source - -# Key Vault Example - -The Key Vault block pull the given key from an Azure Key Vault given the vault name. - -In the example, you can see the correct way to input a value into the Azure Key Vault block. - -To run the flowgraph correctly, you must setup a Key Vault resource in Azure and replace the KeyVault Name with your Key Vault resource name. - -Also, the example assume you have the secretscramble key in your Key Vault. See https://docs.microsoft.com/en-us/azure/key-vault/secrets/quick-create-cli to get started with Key Vault - - -When your resources are ready in Azure, the flowgraph should pull the value from Azure Key Vault, and scramble the sequence with that pulled value. - -If you want to enable the Azure Blob sink block, you will need to also setup a storage account and container in that account to store the data. The point of showing this is so that one can see how to use Azure Key Vault to get connection strings to use with Azure services, like Blob. +Below you will find examples for each block within this OOT module. -# Blob Source and Sink Examples -## Blob Example Prerequisites -To run [blob-sink-example.grc](../examples/blob-sink-example.grc) or [blob-source-example.grc](../examples/blob-source-example.grc), you must first: -1. Set up a storage account in your Azure subscription - - See: https://docs.microsoft.com/en-us/azure/storage/common/storage-account-create -2. Add a container in that storage account. - - See: https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-portal#create-a-container -3. Choose how to authenticate to the blob storage account. This example uses the "default" authentication option, which uses the [DefaultAzureCredential class](https://docs.microsoft.com/en-us/dotnet/api/azure.identity.defaultazurecredential) to attempt to authenticate to the blob storage backend with a list of credential types in priority order. - - If running on a VM in Azure, you may use the VM's Managed Identity for authentication. See https://docs.microsoft.com/en-us/azure/storage/blobs/authorize-managed-identity for instructions on how to work with blobs and managed identities. - - If the VM in Azure has managed identity disabled, or not running on an Azure VM, you may use the Azure CLI to log in to Azure and authenticate to blob storage. - - See https://docs.microsoft.com/en-us/cli/azure/get-started-with-azure-cli to get started - with the Azure CLI. - - Ensure that you have assigned yourself the "Storage Blob Data Contributor" permission for - your storage account. See https://docs.microsoft.com/en-us/azure/storage/blobs/assign-azure-role-data-access - -## Blob Sink Example -If you plan to use the Azure CLI to authenticate to the blob back end, please run - -```bash -az login -``` - -and then launch GNU Radio Companion from the same terminal. 
This will ensure your authentication tokens are available when running the flowgraph. Open [blob-sink-example.grc](../examples/blob-sink-example.grc). - -To run the flowgraph, you must: -- change the blob_url variable to use your actual storage account URL -- change the blob_container_name variable to use the container name you created as part of the [Blob Prerequisites section above](#blob-example-prerequisites) - -Run the flowgraph until at least `blob_block_length` (defaults to 10M) samples have been generated, then close the flowgraph. Navigate to your blob container in the Azure portal and you should see a new blob object named "test-signal.dat". - -## Blob Source Example -If you plan to use the Azure CLI to authenticate to the blob back end, please run - -```bash -az login -``` - -and then launch GNU Radio Companion from the same terminal. This will ensure your authentication tokens are available when running the flowgraph. Open [blob-source-example.grc](../examples/blob-source-example.grc). - -To run the flowgraph, you must: -- change the blob_url variable to use your actual storage account URL -- change the blob_container_name variable to use the container name you created as part of the [Blob Prerequisites section above](#blob-example-prerequisites) -- change the blob_name to point to an existing blob object. The simplest way to create a blob object is to run the (Blob Sink Example)(#blob-sink-example) first. - -Run the flowgraph and you should see the QT GUI Sink block showing the contents of your blob object. +## Table of Contents +- [DIFI Examples](#difi-source-and-sink-examples) +- [Azure Authentication](#azure-authentication) +- [Key Vault](#key-vault) +- [Blob Storage](#blob-storage) +- [Event Hub](#event-hub) -Once you are done with running the examples, delete your blob object to stop being charged for storage. -# Event Hub Examples -## Event Hub Example Prerequisites -To run [eventhub_sink_example.grc](../examples/eventhub_sink_example.grc) or [eventhub_source_example.grc](../examples/eventhub-source-example.grc), you must first do the following: -1. Create an Event Hub in your Azure subscription - - See: [Create an Event Hub](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-create) for instructions. -2. Create a Consumer Group in the Event Hub - - See: [Create an Event Hub Consumer Group](https://docs.microsoft.com/en-us/cli/azure/eventhubs/eventhub/consumer-group?view=azure-cli-latest) -3. Choose how to authenticate to the Azure Event Hub. This example uses the "connection string" authentication option. - - See [Get an Event Hub Connection String](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string) for instructions on how to obtain a connection string. +## DIFI Source and Sink Examples +The intent of the Digital Intermediate Frequency Interoperability (DIFI) standard is to enable the digital transformation +of space, satellite, and related industries by providing a simple, open, interoperable Digital IF/RF standard that +replaces the natural interoperability of analog IF signals and helps prevent vendor lock-in. The articles linked below +describe how to run the examples which show how to use the DIFI source and sink blocks that implement the DIFI standard +for use with GNU Radio. 
- [Quickstart: Running the DIFI source and sink block examples](difi_quickstart.md) ## Azure Authentication The remaining examples below require the use of Azure resources, most of which require applications to authenticate to them in some way before they can be used. Most of the GNU Radio blocks in the Azure software radio Out-of-Tree module support the use of the [DefaultAzureCredential class](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python), which supports a wide variety of credential types, as one of their authentication methods. In general, the examples below try to show how to use the blocks in the Azure software radio Out-of-Tree module in applications running on resources in Azure as well as in on-premises hardware, such as developer systems or edge-deployed servers. [Azure managed identities](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) can be convenient credentials for use in applications running in Azure, while credentials retrieved by [signing in using the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) can be used interactively from any system with access to Azure. The article below walks through how to enable a managed identity on a virtual machine in Azure so that applications running on that VM can authenticate to other Azure resources. - [Azure Managed Identity Configuration with the Azure CLI](managed_identity_cli_quickstart.md) ## Key Vault The Key Vault block is used to retrieve secrets stored in Azure Key Vault, and these two quickstarts show how it can be used within GNU Radio. - [Quickstart: Key Vault with Role Based Access Controls and Azure CLI Credentials](key_vault_rbac_az_login_quickstart.md) - [Quickstart: Key Vault with Role Based Access Controls and Managed Identities](key_vault_rbac_managed_id_quickstart.md) ## Blob Storage Many GNU Radio applications involve working with files, and the Blob Source and Sink blocks allow files to be stored and retrieved from Azure with ease. The following quickstarts show how to use these blocks, depending on whether you are on a VM with Managed ID enabled, or are using `az login`. 
+- [Quickstart: Running the Blob Source and Sink blocks with Managed ID](blob_managed_id_quickstart.md) +- [Quickstart: Running the Blob Source and Sink blocks with `az login`](blob_az_login_quickstart.md) ## Event Hub The Event Hub blocks provide an interface to send and receive events to Azure Event Hubs using the message passing interface in GNU Radio. The article below walks through examples of both the Source and Sink blocks. - [Quickstart: Using Azure Event Hubs in GNU Radio](event_hubs_quickstart_cli.md) diff --git a/gr-azure-software-radio/examples/blob_az_login_quickstart.md b/gr-azure-software-radio/examples/blob_az_login_quickstart.md new file mode 100644 index 0000000..b97c91b --- /dev/null +++ b/gr-azure-software-radio/examples/blob_az_login_quickstart.md @@ -0,0 +1,116 @@ +# Quickstart: Running the Blob Source and Sink blocks with az login + +In this article you learn how to: +- Create a blob storage account and container +- Authenticate to the blob storage account from within GNU Radio +- Simulate a signal in GNU Radio which then gets stored as a blob +- Pull down the previously stored signal from blob storage and feed it through a GNU Radio flowgraph + +The steps in this article will work for interactive sessions on systems with access to Azure. + +## Prerequisites +- Use the Bash environment in [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart). +For more information, see [Azure Cloud Shell Quickstart - Bash](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) + + + +- If you prefer to run CLI reference commands locally, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI. If you are running on Windows or macOS, consider running Azure CLI in a Docker container. For more information, see [How to run the Azure CLI in a Docker container](https://docs.microsoft.com/en-us/cli/azure/run-azure-cli-docker). + + - If you're using a local installation, sign in to the Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. For additional sign-in options, see [Sign in with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). + + - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). + + - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). + +- This article requires version 2.29.0 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. 
+- You must [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI on the machine that will run the example flowgraph. +- If running on a virtual machine in Azure, you must **not** enable a managed identity. The Blob blocks use the DefaultAzureCredential class for authentication, which will use the managed identity for authentication if it is enabled, ignoring credentials provided by [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login). Follow [these instructions](managed_identity_cli_quickstart.md) to confirm whether or not your virtual machine has a managed identity enabled. + +## Set Up Storage in Azure + +To run the examples in this quickstart you must first create the necessary resources in Azure. You can either click the button below to deploy a new storage account and blob container for testing, or you can follow Azure tutorials on how to deploy the blob resources manually. + +(Choose One) + +1. Deploy Resources Automatically + + 1. Click + 2. You will have to pick a new or existing resource group to assign the new resources to + 3. The default value for Storage Account Name might look confusing but if you leave it as is, it will create a new one with a globally unique name to avoid conflicts, such as storageaccountgeh5jwaddf7tc. You are welcome to replace the entire string with your own unique name; it may contain numbers and lowercase letters only because it will be part of a URL. + 4. The container name does not have to be unique, but make note of what you called it. + +2. Deploy Resources Manually + + 1. Set up a storage account in your Azure subscription, see: https://docs.microsoft.com/en-us/azure/storage/common/storage-account-create + 2. Add a container in that storage account, see: https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-portal#create-a-container + +### Determine Blob Authentication +You will need to choose how to authenticate to the blob storage account. This example is set up to use the "default" authentication option, which uses the [DefaultAzureCredential class](https://docs.microsoft.com/en-us/dotnet/api/azure.identity.defaultazurecredential) to attempt to authenticate to the blob storage backend with a list of credential types in priority order. + +The instructions below use the Azure CLI to configure access to Azure storage. See https://docs.microsoft.com/en-us/cli/azure/get-started-with-azure-cli to get started with the Azure CLI, or use the browser-accessible Azure Cloud Shell to start using the Azure CLI without installing any dependencies: https://docs.microsoft.com/en-us/azure/cloud-shell/overview + +This quickstart assumes your VM in Azure has managed identity disabled, or you are not running on an Azure VM. We will use the Azure CLI to log in to Azure and authenticate to blob storage. This only needs to be done once per user and storage account pairing. Afterwards, the user will have permissions to access any current or new blob containers in that storage account. For the quickstart that uses managed identity instead, [use this](blob_managed_id_quickstart.md). + +In a terminal on your VM: + +1. To log in to Azure, run: + ``` + az login + ``` +2. Ensure that you have assigned yourself the "Storage Blob Data Contributor" or "Storage Blob Data Owner" role for your storage account. Get the currently logged in user ID by running: + ``` + userName=$(az ad signed-in-user show --query userPrincipalName --out tsv); echo $userName + ``` +3. 
Get the Azure ID of your storage account by running the following command _after_ replacing MyStorageAccountName with your storage account's name: + ``` + storageID=$(az resource list --name MyStorageAccountName --query [*].id --out tsv); echo $storageID + ``` + +4. List what roles you have assigned for your storage account by running: + ``` + az role assignment list --assignee $userName --scope $storageID --query [*].roleDefinitionName --out tsv + ``` +5. If you do not see either the "Storage Blob Data Contributor" or "Storage Blob Data Owner" role for your storage account, add the "Storage Blob Data Owner" role by running: + ``` + az role assignment create --assignee $userName --role 'Storage Blob Data Owner' --scope $storageID + ``` +See https://docs.microsoft.com/en-us/azure/storage/blobs/assign-azure-role-data-access for more information on assigning roles to enable access to Azure storage resources. + +Next, launch GNU Radio Companion from the same terminal using: + +```bash +gnuradio-companion +``` + +This will ensure your authentication tokens are available when running the flowgraph. + +## Run the Blob Sink Example + +Open [blob-sink-example.grc](../examples/blob-sink-example.grc). + +To run the flowgraph, you must: +- Change the blob_url variable to use your actual storage account URL +- Change the blob_container_name variable to use the container name you created + +Run the flowgraph for a few seconds, then stop it (e.g., by closing the window that popped up). Navigate to your blob container in the Azure portal and you should see a new blob object named "test-signal.dat". The number of samples simulated and stored depends on how long you ran the flowgraph, but the `blob_block_length` defines the maximum (defaults to 10M samples). + +## Run the Blob Source Example + +Open [blob-source-example.grc](../examples/blob-source-example.grc). + +To run the flowgraph, you must: +- Change the blob_url variable to use your actual storage account URL +- Change the blob_container_name variable to use the container name you created +- Change the blob_name to point to an existing blob object, which is test-signal.dat assuming you did the previous section to completion. + +Run the flowgraph and you should see the QT GUI Sink block showing the contents of your blob object, which is the signal we simulated in the previous section. + +Once you are done with running the examples, delete the resources you created to ensure you do not incur ongoing charges for storage. 
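+
+For example, if you created the storage account only for this quickstart, it (and every blob in it) can be removed with the Azure CLI; the names below are the same placeholders used earlier:
+
+```bash
+# Deletes the storage account and all containers/blobs inside it.
+# The command asks for confirmation; add --yes to skip the prompt.
+az storage account delete --name MyStorageAccountName --resource-group MyResourceGroup
+```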
+ +---- + + +## Recommended content + +### [Azure Blob storage overview](https://azure.microsoft.com/en-us/services/storage/blobs/) \ No newline at end of file diff --git a/gr-azure-software-radio/examples/blob_example_resources.json b/gr-azure-software-radio/examples/blob_example_resources.json new file mode 100644 index 0000000..c437cbc --- /dev/null +++ b/gr-azure-software-radio/examples/blob_example_resources.json @@ -0,0 +1,128 @@ +{ + "$schema": "http://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "storageAccountName": { + "type": "string", + "defaultValue": "[format('storageaccount{0}', uniqueString(resourceGroup().id))]", + "metadata": { + "description": "The name of the Storage Account" + } + }, + "accountType": { + "type": "string", + "defaultValue": "Standard_LRS", + "allowedValues": [ + "Premium_LRS", + "Premium_ZRS", + "Standard_GRS", + "Standard_GZRS", + "Standard_LRS", + "Standard_RAGRS", + "Standard_RAGZRS", + "Standard_ZRS" + ], + "metadata": { + "description": "Storage Account type (pricing tier and redundancy options)" + } + }, + "blobContainerName": { + "type": "string", + "defaultValue": "mycontainer", + "metadata": { + "description": "Name of the blob container to create in the storage account" + } + }, + "allowBlobPublicAccess": { + "type": "bool", + "defaultValue": false, + "metadata": { + "description": "When blob public access is enabled, one is permitted to configure container ACLs to allow anonymous access to blobs within the storage account. When disabled, no anonymous access to blobs within the storage account is permitted, regardless of underlying ACL configurations" + } + }, + "allowSharedKeyAccess": { + "type": "bool", + "defaultValue": true, + "metadata": { + "description": "When storage account key access is disabled, any requests to the account that are authorized with Shared Key, including shared access signatures (SAS), will be denied. 
Client applications that currently access the storage account using shared key will no longer work" + } + } + + }, + "variables": {}, + "resources": [ + { + "name": "[parameters('storageAccountName')]", + "type": "Microsoft.Storage/storageAccounts", + "apiVersion": "2021-08-01", + "location": "[resourceGroup().location]", + "properties": { + "allowBlobPublicAccess": "[parameters('allowBlobPublicAccess')]", + "allowSharedKeyAccess": "[parameters('allowSharedKeyAccess')]", + "allowCrossTenantReplication": true, + "defaultToOAuthAuthentication": false, + "networkAcls": { + "bypass": "AzureServices", + "defaultAction": "Allow", + "ipRules": [] + }, + "encryption": { + "keySource": "Microsoft.Storage", + "services": { + "blob": { + "enabled": true + } + }, + "requireInfrastructureEncryption": false + } + }, + "dependsOn": [], + "sku": { + "name": "[parameters('accountType')]" + }, + "kind": "StorageV2", + "tags": {} + }, + { + "name": "[concat(parameters('storageAccountName'), '/default')]", + "type": "Microsoft.Storage/storageAccounts/blobServices", + "apiVersion": "2021-08-01", + "properties": { + "restorePolicy": { + "enabled": false + }, + "deleteRetentionPolicy": { + "enabled": false + }, + "containerDeleteRetentionPolicy": { + "enabled": false + }, + "changeFeed": { + "enabled": false + }, + "isVersioningEnabled": false + }, + "dependsOn": [ + "[concat('Microsoft.Storage/storageAccounts/', parameters('storageAccountName'))]" + ] + }, + { + "type": "Microsoft.Storage/storageAccounts/blobServices/containers", + "apiVersion": "2021-06-01", + "name": "[concat(parameters('storageAccountName'), '/default/', parameters('blobContainerName'))]", + "dependsOn": [ + "[resourceId('Microsoft.Storage/storageAccounts/blobServices', parameters('storageAccountName'), 'default')]", + "[resourceId('Microsoft.Storage/storageAccounts', parameters('storageAccountName'))]" + ], + "properties": { + "immutableStorageWithVersioning": { + "enabled": false + }, + "defaultEncryptionScope": "$account-encryption-key", + "denyEncryptionScopeOverride": false, + "publicAccess": "None" + } + } + ], + "outputs": {} +} \ No newline at end of file diff --git a/gr-azure-software-radio/examples/blob_managed_id_quickstart.md b/gr-azure-software-radio/examples/blob_managed_id_quickstart.md new file mode 100644 index 0000000..0c09332 --- /dev/null +++ b/gr-azure-software-radio/examples/blob_managed_id_quickstart.md @@ -0,0 +1,108 @@ +# Quickstart: Running the Blob Source and Sink blocks with Managed ID + +In this article you learn how to: +- Create a blob storage account and container +- Authenticate to the blob storage account from within GNU Radio +- Simulate a signal in GNU Radio which then gets stored as a blob +- Pull down the previously stored signal from blob storage and feed it through a GNU Radio flowgraph + +The steps in this article will work for applications running on Azure resources that support managed identities. + +## Prerequisites + +- You must run this example on a virtual machine in Azure to take advantage of managed identities. Follow [these instructions](managed_identity_cli_quickstart.md) to confirm your virtual machine is configured properly to use a managed identity. + +- Use the Bash environment in [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart). 
+For more information, see [Azure Cloud Shell Quickstart - Bash](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) + + + +- If you prefer to run CLI reference commands locally, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI. If you are running on Windows or macOS, consider running Azure CLI in a Docker container. For more information, see [How to run the Azure CLI in a Docker container](https://docs.microsoft.com/en-us/cli/azure/run-azure-cli-docker). + + - If you're using a local installation, sign in to the Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. For additional sign-in options, see [Sign in with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). + + - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). + + - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). + +- This article requires version 2.29.0 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +- You must [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI on the machine that will run the example flowgraph. +- If running on a virtual machine in Azure, you must **not** enable a managed identity. The Key Vault block uses the DefaultAzureCredential class for authentication, which will use the managed identity for authentication if it is enabled, ignoring credentials provided by [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login). Follow [these instructions](managed_identity_cli_quickstart.md) to confirm whether or not your virtual machine has a managed identity enabled. + +## Set Up Storage in Azure + +To run the examples in this quickstart you must first create the necessary resources in Azure. You can either click the button below to deploy a new storage account and blob container for testing, or you can follow Azure tutorials on how to deploy the blob resources manually. + +(Choose One) + +1. Deploy Resources Automatically + + 1. Click + 2. You will have to pick a new or existing resource group to assign the new resources to + 3. The default value for Storage Account Name might look confusing but if you leave it as is, it will create a new one with a globally unique name to avoid conflicts, such as storageaccountgeh5jwaddf7tc. You are welcome to replace the entire string with your own unique name, it may contain numbers and lowercase letters only because it will be part of a URL. + 4. The container name does not have to be unique, but make note of what you called it. + +2. Deploy Resources Manually + + 1. Set up a storage account in your Azure subscription, see: https://docs.microsoft.com/en-us/azure/storage/common/storage-account-create + 2. Add a container in that storage account, see: https://docs.microsoft.com/en-us/azure/storage/blobs/storage-quickstart-blobs-portal#create-a-container + +Next, remote desktop into your VM. 
+ +### Determine Blob Authentication +You will need to choose how to authenticate to the blob storage account. This example is set up to use the "default" authentication option, which uses the [DefaultAzureCredential class](https://docs.microsoft.com/en-us/dotnet/api/azure.identity.defaultazurecredential) to attempt to authenticate to the blob storage backend with a list of credential types in priority order. + +The instructions below use the Azure CLI to configure access to Azure storage. See https://docs.microsoft.com/en-us/cli/azure/get-started-with-azure-cli to get started with the Azure CLI, or use the browser accessible Azure Cloud Shell to start using the Azure CLI without installing any dependencies: https://docs.microsoft.com/en-us/azure/cloud-shell/overview + +In this quickstart it is assuming you are on a VM with a managed identity enabled, you can give the VM permissions to use the storage account using the following steps. For a different version of this quickstart where managed identity disabled, or you are not running on an Azure VM, see [this quickstart](blob_az_login_quickstart.md). + +In a terminal on your VM: + +1. Get the service principal ID for the VM's managed identity by running: + ``` + spID=$(az vm identity show --name MyVirtualMachine --resource-group myResourceGroup --query principalId --out tsv) + ``` +2. Get the Azure ID of your storage account by running: + ``` + storageID=$(az resource list --name MyStorageAccountName --query [*].id --out tsv) + ``` +3. Give the VM full permissions to read and write to the storage account by assigning it the "Storage Blob Data Owner" role: + ``` + az role assignment create --assignee $spID --role 'Storage Blob Data Owner' --scope $storageID + ``` + +See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/howto-assign-access-cli for more details on using the Azure CLI to manage access to resources. + +Next, open GNU Radio using: + +```bash +gnuradio-companion +``` + +## Run the Blob Sink Example + +Open [blob-sink-example.grc](../examples/blob-sink-example.grc). + +To run the flowgraph, you must: +- Change the blob_url variable to use your actual storage account URL +- Change the blob_container_name variable to use the container name you created + +Run the flowgraph for a few seconds, then stop it (e.g., by closing the window that popped up). Navigate to your blob container in the Azure portal and you should see a new blob object named "test-signal.dat". The number of samples simulated and stored depends on how long you ran the flowgraph, but the `blob_block_length` defines the maximum (defaults to 10M samples). + +## Run the Blob Source Example + +Open [blob-source-example.grc](../examples/blob-source-example.grc). + +To run the flowgraph, you must: +- Change the blob_url variable to use your actual storage account URL +- Change the blob_container_name variable to use the container name you created +- Change the blob_name to point to an existing blob object, which is test-signal.dat assuming you did the previous section to completion. + +Run the flowgraph and you should see the QT GUI Sink block showing the contents of your blob object, which is the signal we simulated in the previous section. + +Once you are done with running the examples, delete the resources you created to ensure you do not incur ongoing charges for storage. 
+ +---- +## Recommended content + +### [Azure Blob storage overview](https://azure.microsoft.com/en-us/services/storage/blobs/) \ No newline at end of file diff --git a/gr-azure-software-radio/examples/difi_quickstart.md b/gr-azure-software-radio/examples/difi_quickstart.md new file mode 100644 index 0000000..8861d20 --- /dev/null +++ b/gr-azure-software-radio/examples/difi_quickstart.md @@ -0,0 +1,38 @@ +# Quickstart: Running the DIFI source and sink block examples + +The DIFI source and sink blocks are based on the IEEE-ISTO Std 4900-2021: Digital IF Interoperability 1.0 Standard. They +enable users to transmit and receive digitized IF data and corresponding metadata over standard IP networks using GNU +Radio. + +## Prerequisites + +- Install the GNU Radio runtime, GNU Radio Companion, and the Azure Software Radio Out-of-Tree module. See the +[Azure Software Radio: Getting Started](../README.md#getting-started) section for more details. + +## Run the DIFI standalone example +DIFI sink blocks have a `mode` parameter that can be set to either "Paired" or "Standalone". In standalone mode, the +user must specify values for fields that will be included in the DIFI packet stream metadata. Since the metadata values +are provided by the user, the DIFI sink can be used with any normal sample source in GNU Radio, not just sources that +generate DIFI-compliant metadata.
+ +Open the [DIFI sink standalone mode example](difi_standalone.grc) in GNU Radio Companion and run the flowgraph to see +the DIFI sink block running in standalone mode. + +---- +## Run the paired DIFI source and sink example +When using the DIFI sink in paired mode, the DIFI sink expects the input sample stream to include DIFI compliant +metadata in stream tags, such as those produced by the DIFI source block. If the DIFI sink does not receive the metadata +it requires, it will have unexpected behavior. + +Note: To run this example, you will need to send DIFI packets to the DIFI source block in the example using an external +source, such as a DIFI compliant packet source or another flowgraph that includes a DIFI sink block. + +Open the [DIFI sink paired mode example](difi_paired_example.grc) in GNU Radio Companion and run the flowgraph to see +the DIFI sink block running with a paired DIFI source block. + +--- +## Recommended content +### [Digital Intermediate Frequency Interoperability (DIFI) Consortium](https://dificonsortium.org/) + +The organization which oversees development of the IEEE-ISTO Std 4900-2021: Digital IF Interoperability 1.0 Standard + diff --git a/gr-azure-software-radio/examples/event_hub_example_resources.json b/gr-azure-software-radio/examples/event_hub_example_resources.json new file mode 100644 index 0000000..fd4c58d --- /dev/null +++ b/gr-azure-software-radio/examples/event_hub_example_resources.json @@ -0,0 +1,130 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "eventHubNamespaceName": { + "type": "String", + "defaultValue": "mynamespace", + "metadata": { + "description": "The name of the Event Hub namespace" + } + }, + "eventHubName": { + "type": "String", + "defaultValue": "myeventhub", + "metadata": { + "description": "The name of the Event Hub inside the Event Hub namespace" + } + }, + "eventHubPricingTier": { + "type": "string", + "allowedValues": [ "Basic", "Standard", "Premium" ], + "defaultValue": "Standard", + "metadata": { + "description": "The billing tier for the Event Hub namespace" + } + }, + "eventHubCapacity": { + "type": "int", + "defaultValue": 1, + "metadata": { + "description": "For Basic and Standard tiers, this sets the number of throughput units (1-20). 
For Premium tiers, this sets the number of processing units (1-10)" + } + }, + "consumerGroupName": { + "type": "String", + "defaultValue": "myconsumergroup", + "metadata": { + "description": "Name of the consumer group to create in the Event Hub" + } + } + }, + "variables": {}, + "resources": [ + { + "type": "Microsoft.EventHub/namespaces", + "apiVersion": "2021-11-01", + "name": "[parameters('eventHubNamespaceName')]", + "location": "East US", + "sku": { + "name": "[parameters('eventHubPricingTier')]", + "tier": "[parameters('eventHubPricingTier')]", + "capacity": "[parameters('eventHubCapacity')]" + }, + "properties": { + "disableLocalAuth": false, + "zoneRedundant": true, + "isAutoInflateEnabled": false, + "maximumThroughputUnits": 0, + "kafkaEnabled": true + } + }, + { + "type": "Microsoft.EventHub/namespaces/AuthorizationRules", + "apiVersion": "2021-11-01", + "name": "[concat(parameters('eventHubNamespaceName'), '/RootManageSharedAccessKey')]", + "location": "East US", + "dependsOn": [ + "[resourceId('Microsoft.EventHub/namespaces', parameters('eventHubNamespaceName'))]" + ], + "properties": { + "rights": [ + "Listen", + "Manage", + "Send" + ] + } + }, + { + "type": "Microsoft.EventHub/namespaces/eventhubs", + "apiVersion": "2021-11-01", + "name": "[concat(parameters('eventHubNamespaceName'), '/', parameters('eventHubName'))]", + "location": "East US", + "dependsOn": [ + "[resourceId('Microsoft.EventHub/namespaces', parameters('eventHubNamespaceName'))]" + ], + "properties": { + "messageRetentionInDays": 1, + "partitionCount": 1, + "status": "Active" + } + }, + { + "type": "Microsoft.EventHub/namespaces/networkRuleSets", + "apiVersion": "2021-11-01", + "name": "[concat(parameters('eventHubNamespaceName'), '/default')]", + "location": "East US", + "dependsOn": [ + "[resourceId('Microsoft.EventHub/namespaces', parameters('eventHubNamespaceName'))]" + ], + "properties": { + "publicNetworkAccess": "Enabled", + "defaultAction": "Allow", + "virtualNetworkRules": [], + "ipRules": [] + } + }, + { + "type": "Microsoft.EventHub/namespaces/eventhubs/consumergroups", + "apiVersion": "2021-11-01", + "name": "[concat(parameters('eventHubNamespaceName'), '/', parameters('eventHubName'), '/$Default')]", + "location": "East US", + "dependsOn": [ + "[resourceId('Microsoft.EventHub/namespaces/eventhubs', parameters('eventHubNamespaceName'), parameters('eventHubName'))]", + "[resourceId('Microsoft.EventHub/namespaces', parameters('eventHubNamespaceName'))]" + ], + "properties": {} + }, + { + "type": "Microsoft.EventHub/namespaces/eventhubs/consumergroups", + "apiVersion": "2021-11-01", + "name": "[concat(parameters('eventHubNamespaceName'), '/', parameters('eventHubName'), '/', parameters('consumerGroupName'))]", + "location": "East US", + "dependsOn": [ + "[resourceId('Microsoft.EventHub/namespaces/eventhubs', parameters('eventHubNamespaceName'), parameters('eventHubName'))]", + "[resourceId('Microsoft.EventHub/namespaces', parameters('eventHubNamespaceName'))]" + ], + "properties": {} + } + ] +} diff --git a/gr-azure-software-radio/examples/event_hubs_quickstart_cli.md b/gr-azure-software-radio/examples/event_hubs_quickstart_cli.md new file mode 100644 index 0000000..a3a0a55 --- /dev/null +++ b/gr-azure-software-radio/examples/event_hubs_quickstart_cli.md @@ -0,0 +1,98 @@ +# Quickstart: Using Azure Event Hubs in GNU Radio + +The Event Hub blocks provide an interface to send and receive events to [Azure Event Hubs](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-about) using the message 
passing interface in GNU Radio. In this quickstart we walk through examamples of both of these blocks. + +## Prerequisites + +- Install GNU Radio runtime, GNU Radio Companion, and the Azure Software Radio Out-of-Tree module. See the +[Azure Software Radio: Getting Started](../README.md#getting-started) section for more details. + +## Setting Up an Event Hub in Azure + +To run the examples in this tutorial you must first create an Event Hub namespace, an Event Hub, and a consumer group. To deploy these resources using our premade ARM templates, click the 'Deploy to Azure' button. Alternatively, you can create the resources manually using the instructions below. + +(Choose One) + +1. Deploy Resources Automatically + + 1. Click + 2. You will have to pick a new or existing resource group to assign the new resources to + 3. Change namespace to be unique (e.g., add your favorite 5 digit number to the end) + 4. For the rest of the parameters you can leave the defaults or fill in your own names + +2. Deploy Resources Manually + + 1. Create an Event Hub in your Azure subscription, see [Create an Event Hub](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-create) for instructions. + 2. Create a Consumer Group within the newly created Event Hub, see [Create an Event Hub Consumer Group](https://docs.microsoft.com/en-us/cli/azure/eventhubs/eventhub/consumer-group?view=azure-cli-latest). + 3. Choose how to authenticate to the Azure Event Hub. This tutorial uses the "connection string" authentication option. See [Get an Event Hub Connection String](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string) for instructions on how to obtain a connection string associated with your newly created Event Hub. + +Next, remote desktop into your VM. If you plan to use the Azure CLI to authenticate to the back end, please run + +```bash +az login +``` + +Then launch GNU Radio Companion from the same terminal using: + +```bash +gnuradio-companion +``` + +This will ensure your authentication tokens are available when running the flowgraph. + +---- +## Run the Event Hub Sink Example + +Open the [eventhub_sink_example.grc](../examples/eventhub_sink_example.grc) flowgraph within GNU Radio. Before running the flowgraph you must: +1. Change the __connection_str__ variable to use your connection string (see the instructions on how to [Get an Event Hubs connection string](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string) for more details). Replace the entire Value with your string, then add single quotes to each side. +2. Change the __eventhub_name__ variable to use the event hub entity you created as part of the [Event Hub Prerequisites section above](#event-hub-example-prerequisites) (e.g., myeventhub) with single quotes around it. + +Run the flowgraph for a few seconds and then close it, either by closing the window or using the stop button. You will know the flowgraph works if you see a similar window pop up: + +
+ +Navigate to your event hub in the Azure portal and you should see the events in the __Overview__ blade. Depending on how long you left the flowgraph running, you should see event activity in the chart: + +
+ +What this flowgraph is doing is simulating a sine wave, displaying it with the QT GUI Waterfall Sink, and the Probe Rate block is outputting messages containing the rate samples are flowing into it (the messages are created at the specified update rate and the contents of the message contain the rate samples are flowing). Each message coming out of this Probe Rate block is then sent to your Azure event hub using the Event Hub Sink block. In the next part we will pull these messages from Azure back into GNU Radio and display them. + +---- +## Run the Event Hub Source Example + +Note, this example requires having already run the Sink example above, so that your event hub has content in it. + +Open the [eventhub_source_example.grc](../examples/eventhub_source_example.grc) flowgraph within GNU Radio. Before running the flowgraph you must: + +1. Change the __connection_str__ variable to use your connection string (see the instructions on how to [Get an Event Hubs connection string](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string) for more details). Replace the entire Value with your string, then add single quotes to each side. +2. Change the __eventhub_name__ variable to use the event hub entity you created as part of the [Event Hub Prerequisites section above](#event-hub-example-prerequisites) (e.g., myeventhub) with single quotes around it. +3. Change the consumer_grp variable to use consumer group name created as part of the [Event Hub Prerequisites section above](#event-hub-example-prerequisites) (e.g., myconsumergroup) with single quotes around it. + +Run the flowgraph and you should see the Message Debug block showing the contents of the received events: + +
+ +The flowgraph grabs the messages that were sent to your event hub in the previous example, and then displays them in a terminal. You will notice they all show up immediately, and then nothing else happens, because it reached the end of the list of messages in event hub. + +Once you are done with running the examples, delete the resources you created to ensure you do not incur ongoing charges for storage. + +---- +## Recommended content + +### [Azure Event Hubs](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-about) + + + + + + + + + + + + + + + + diff --git a/gr-azure-software-radio/examples/eventhub_screenshot_1.png b/gr-azure-software-radio/examples/eventhub_screenshot_1.png new file mode 100644 index 0000000..3f8b00f Binary files /dev/null and b/gr-azure-software-radio/examples/eventhub_screenshot_1.png differ diff --git a/gr-azure-software-radio/examples/eventhub_screenshot_2.png b/gr-azure-software-radio/examples/eventhub_screenshot_2.png new file mode 100644 index 0000000..e0239cc Binary files /dev/null and b/gr-azure-software-radio/examples/eventhub_screenshot_2.png differ diff --git a/gr-azure-software-radio/examples/eventhub_screenshot_3.png b/gr-azure-software-radio/examples/eventhub_screenshot_3.png new file mode 100644 index 0000000..cea791b Binary files /dev/null and b/gr-azure-software-radio/examples/eventhub_screenshot_3.png differ diff --git a/gr-azure-software-radio/examples/hdi-launch-cloud-shell.png b/gr-azure-software-radio/examples/hdi-launch-cloud-shell.png new file mode 100644 index 0000000..c5b9358 Binary files /dev/null and b/gr-azure-software-radio/examples/hdi-launch-cloud-shell.png differ diff --git a/gr-azure-software-radio/examples/key_vault_example_resources.json b/gr-azure-software-radio/examples/key_vault_example_resources.json new file mode 100644 index 0000000..8def0ee --- /dev/null +++ b/gr-azure-software-radio/examples/key_vault_example_resources.json @@ -0,0 +1,100 @@ + +{ + "$schema": "https://schema.management.azure.com/schemas/2019-04-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "parameters": { + "keyVaultName": { + "type": "string", + "metadata": { + "description": "Specifies the name of the key vault." + } + }, + "skuName": { + "type": "string", + "defaultValue": "standard", + "allowedValues": [ + "standard", + "premium" + ], + "metadata": { + "description": "Specifies whether the key vault is a standard vault or a premium vault." + } + }, + "seedSecretValue": { + "type": "secureString", + "metadata": { + "description": "Specifies the integer value of the seed secret that you want to create." + } + }, + "scrambleSecretValue": { + "type": "secureString", + "metadata": { + "description": "Specifies the integer value of the scramble secret that you want to create." + } + }, + "mySecretStringSecretValue": { + "type": "secureString", + "metadata": { + "description": "Specifies the value of the connection string secret that you want to create." 
+ } + } + }, + "functions": [], + "resources": [ + { + "type": "Microsoft.KeyVault/vaults", + "apiVersion": "2021-04-01-preview", + "name": "[parameters('keyVaultName')]", + "location": "[resourceGroup().location]", + "properties": { + "enabledForDeployment": false, + "enabledForDiskEncryption": false, + "enabledForTemplateDeployment": false, + "enableRbacAuthorization": true, + "accessPolicies": [], + "tenantId": "[subscription().tenantId]", + "sku": { + "name": "[parameters('skuName')]", + "family": "A" + }, + "networkAcls": { + "defaultAction": "Allow", + "bypass": "AzureServices" + } + } + }, + { + "type": "Microsoft.KeyVault/vaults/secrets", + "apiVersion": "2021-04-01-preview", + "name": "[format('{0}/{1}', parameters('keyVaultName'), 'seed')]", + "properties": { + "value": "[parameters('seedSecretValue')]" + }, + "dependsOn": [ + "[resourceId('Microsoft.KeyVault/vaults', parameters('keyVaultName'))]" + ] + }, + { + "type": "Microsoft.KeyVault/vaults/secrets", + "apiVersion": "2021-04-01-preview", + "name": "[format('{0}/{1}', parameters('keyVaultName'), 'scramble')]", + "properties": { + "value": "[parameters('scrambleSecretValue')]" + }, + "dependsOn": [ + "[resourceId('Microsoft.KeyVault/vaults', parameters('keyVaultName'))]" + ] + }, + { + "type": "Microsoft.KeyVault/vaults/secrets", + "apiVersion": "2021-04-01-preview", + "name": "[format('{0}/{1}', parameters('keyVaultName'), 'mysecretstring')]", + "properties": { + "value": "[parameters('mySecretStringSecretValue')]" + }, + "dependsOn": [ + "[resourceId('Microsoft.KeyVault/vaults', parameters('keyVaultName'))]" + ] + } + ] +} diff --git a/gr-azure-software-radio/examples/key_vault_rbac_az_login_quickstart.md b/gr-azure-software-radio/examples/key_vault_rbac_az_login_quickstart.md new file mode 100644 index 0000000..c9ccc85 --- /dev/null +++ b/gr-azure-software-radio/examples/key_vault_rbac_az_login_quickstart.md @@ -0,0 +1,106 @@ +# Quickstart: Key Vault with Role Based Access Controls and Azure CLI Credentials + +The Key Vault block is used to retrieve secrets stored in Azure Key Vault. These secrets are stored as strings and are looked up by name, making credential management much more secure. The steps in this article will work for interactive sessions on +systems with access to Azure. + + +In this article you learn how to: +- Deploy an Azure Key Vault with a [role-based access control (Azure RBAC)](https://docs.microsoft.com/en-us/azure/role-based-access-control/overview) permissions model. +- Add secrets to a Key Vault using Azure CLI. +- Assign a role to a specific user in [Azure Active Directory](https://docs.microsoft.com/en-us/azure/active-directory/fundamentals/active-directory-whatis). +- Provide credentials to an application at runtime using the [DefaultAzureCredential class](https://docs.microsoft.com/en-us/dotnet/api/azure.identity.defaultazurecredential) and the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. + +## Prerequisites +- Use the Bash environment in [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart). +For more information, see [Azure Cloud Shell Quickstart - Bash](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) + + + +- If you prefer to run CLI reference commands locally, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI. If you are running on Windows or macOS, consider running Azure CLI in a Docker container. 
For more information, see [How to run the Azure CLI in a Docker container](https://docs.microsoft.com/en-us/cli/azure/run-azure-cli-docker). + + - If you're using a local installation, sign in to the Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. For additional sign-in options, see [Sign in with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). + + - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). + + - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). + +- This article requires version 2.29.0 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. +- You must [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI on the machine that will run the example flowgraph. +- If running on a virtual machine in Azure, you must **not** enable a managed identity. The Key Vault block uses the DefaultAzureCredential class for authentication, which will use the managed identity for authentication if it is enabled, ignoring credentials provided by [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login). Follow [these instructions](managed_identity_cli_quickstart.md) to confirm whether or not your virtual machine has a managed identity enabled. + +## Deploy a Key Vault and configure secrets +To run the flowgraph correctly, you must setup a Key Vault resource in Azure and use it to store several secret values. You'll need to update the Key Vault name in the example flowgraph with the Key Vault name you choose when you deploy your Azure resources. + +### Deploy Resources Automatically +Click the following button to deploy a Key Vault with Role Based Access Controls (RBAC) and populate secrets for the example: + + + +### Deploy Resources Manually +If you'd prefer to get started with Key Vault by manually configuring your resources, you can follow this guide to [set and retrieve a secret from Azure Key Vault using Azure CLI](https://docs.microsoft.com/en-us/azure/key-vault/secrets/quick-create-cli). When creating the Key Vault, you must make sure to enable RBAC by using the `--enable-rbac-authorization` flag, like: + +``` +az keyvault create --name "" --resource-group "myResourceGroup" --location "EastUS" --enable-rbac-authorization true +``` + +## Add secrets to Key Vault + You'll need to add at least two secrets to your new Key Vault before you can successfully run the flowgraph: + +- seed: Integer random seed to use as an initial shift register contents for a scrambling algorithm. +- scramble: Integer valued polynomial mask for the scrambling algorithm LFSR +- mysecretstring: Optional - If you want to enable the blob sink block, you'll need to configure a storage account and blob container, then store the blob container's connection string in this secret. 
+ +Create these secrets by following the instructions on how to [set and retrieve a secret from Azure Key Vault using Azure CLI](https://docs.microsoft.com/en-us/azure/key-vault/secrets/quick-create-cli) + +## Assign a Key Vault Role to your Azure identity +You need to assign the appropriate role to your Azure identity for your application to be able to access the Key Vault using credentials credentials provided by [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login). You'll need to know the role name you want to assign, the assignee to assign this role to, and the scope over which to assign this role. It's considered a best practice to grant access with the least privilege that is needed, so avoid assigning a role at a broader scope than necessary. See how to [Provide access to Key Vault keys, certificates, and secrets with an Azure role-based access control](https://docs.microsoft.com/en-us/azure/key-vault/general/rbac-guide?tabs=azure-cli) and how to [Assign Azure roles using Azure CLI](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-cli) for more details. + +If running on a virtual machine in Azure, you must **not** enable a managed identity. The Key Vault block uses the DefaultAzureCredential class for authentication, which will use the managed identity for authentication if it is enabled, ignoring credentials provided by `az login`. + +Confirm that your virtual machine does not have managed identity enabled by following [these instructions](managed_identity_cli_quickstart.md). + +To assign a role to your Azure identity, first get your current user ID in Azure: +``` +userName=$(az ad signed-in-user show --query userPrincipalName --out tsv) +``` + +Next get the Azure ID of your Key Vault by running: +``` +kvID=$(az resource list --name MyKeyVaultName --query [*].id --out tsv) +``` + +Finally, give the VM permissions to read and write to the Key Vault's secrets by assigning it the "Key Vault Secrets Officer" role: +``` +az role assignment create --assignee $userName --role 'Key Vault Secrets Officer' --scope $kvID +``` + +## Flowgraph Update +When your resources are ready in Azure, update the `key_vault_name` variable in the Key Vault example flowgraph to the name you chose for your +deployed Key Vault. Once properly configured, the flowgraph should pull the value from Azure Key Vault, and scramble the sequence with that pulled value. + +If you want to enable the Azure Blob sink block, you will need to also setup a storage account and container in that account to store the data. See https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string for details on connection strings. The point of showing this is so that one can see how to use Azure Key Vault to get connection strings to use with Azure services, like Blob. + +If you are not running the flowgraph on a VM with managed identity enabled, run `az login` and then start GNU Radio Companion from the same terminal to ensure GNU Radio has access to your credentials. + +## Run the flowgraph +You will need to [sign in with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli) on the +machine where you intend to run the example flowgraph. After signing in, you will need to launch GNU Radio Companion +from the same terminal session to ensure the example flowgraph has access to the authentication token generated by the +`az login` command. + +Run the flowgraph in GNU Radio Companion. 
The Key Vault block will use the [DefaultAzureCredential class](https://docs.microsoft.com/en-us/dotnet/api/azure.identity) ti access the authentication token generated by the `az login` command and use it to authenticate to Key Vault. + + +---- +## Recommended content + +### [Key Vault](https://docs.microsoft.com/en-us/azure/key-vault/general/overview) +### [Assign a Key Vault access policy](https://docs.microsoft.com/en-us/azure/key-vault/general/assign-access-policy?tabs=azure-cli) +### [DefaultAzureCredential class](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity.defaultazurecredential?view=azure-python) + + + + + + + diff --git a/gr-azure-software-radio/examples/key_vault_rbac_managed_id_quickstart.md b/gr-azure-software-radio/examples/key_vault_rbac_managed_id_quickstart.md new file mode 100644 index 0000000..ba868ab --- /dev/null +++ b/gr-azure-software-radio/examples/key_vault_rbac_managed_id_quickstart.md @@ -0,0 +1,90 @@ +# Quickstart: Key Vault with Role Based Access Controls and Managed Identities + +The Key Vault block is used to retrieve secrets stored in Azure Key Vault. These secrets are stored as strings and are looked up by name, making credential management much more secure. The steps in this article will work for applications running on +Azure resources that support managed identities. + +In this article you learn how to: +- Deploy an Azure Key Vault with a [role-based access control (Azure RBAC)](https://docs.microsoft.com/en-us/azure/role-based-access-control/overview) permissions model. +- Add secrets to a Key Vault using Azure CLI. +- Assign a role to a virtual machine managed identity to permit access to Key Vault secrets. +- Provide credentials to an application at runtime using the [DefaultAzureCredential class](https://docs.microsoft.com/en-us/dotnet/api/azure.identity.defaultazurecredential) and a virtual machine [managed identity](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview). + +## Prerequisites + +- You must run the Key Vault example on a virtual machine in Azure to take advantage of managed identities. Follow [these instructions](managed_identity_cli_quickstart.md) to confirm your virtual machine is configured properly to use a managed identity. + +- Use the Bash environment in [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart). +For more information, see [Azure Cloud Shell Quickstart - Bash](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) + + + +- If you prefer to run CLI reference commands locally, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI. If you are running on Windows or macOS, consider running Azure CLI in a Docker container. For more information, see [How to run the Azure CLI in a Docker container](https://docs.microsoft.com/en-us/cli/azure/run-azure-cli-docker). + + - If you're using a local installation, sign in to the Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. For additional sign-in options, see [Sign in with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). + + - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). 
+ + - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). + +- This article requires version 2.29.0 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. + +## Deploy a Key Vault and configure secrets +To run the flowgraph correctly, you must setup a Key Vault resource in Azure and use it to store several secret values. You'll need to update the Key Vault name in the example flowgraph with the Key Vault name you choose when you deploy your Azure resources. + +### Deploy Resources Automatically +Click the following button to deploy a Key Vault with Role Based Access Controls (RBAC) and populate secrets for the example: + + + +### Deploy Resources Manually +If you'd prefer to get started with Key Vault by manually configuring your resources, you can follow this guide to [set and retrieve a secret from Azure Key Vault using Azure CLI](https://docs.microsoft.com/en-us/azure/key-vault/secrets/quick-create-cli). When creating the Key Vault, you must make sure to enable RBAC by using the `--enable-rbac-authorization` flag, like: + +``` +az keyvault create --name "" --resource-group "myResourceGroup" --location "EastUS" --enable-rbac-authorization true +``` + +## Add secrets to Key Vault + You'll need to add at least two secrets to your new Key Vault before you can successfully run the flowgraph: + +- seed: Integer random seed to use as an initial shift register contents for a scrambling algorithm. +- scramble: Integer valued polynomial mask for the scrambling algorithm LFSR +- mysecretstring: Optional - If you want to enable the blob sink block, you'll need to configure a storage account and blob container, then store the blob container's connection string in this secret. + +Create these secrets by following the instructions on how to [set and retrieve a secret from Azure Key Vault using Azure CLI](https://docs.microsoft.com/en-us/azure/key-vault/secrets/quick-create-cli) + +## Assign a Key Vault Role to your managed identity +You need to assign the appropriate role to your virtual machine's managed identity to be able to access the Key Vault from that virtual machine. You'll need to know the role name you want to assign, the assignee to assign this role to, and the scope over which to assign this role. It's considered a best practice to grant access with the least privilege that is needed, so avoid assigning a role at a broader scope than necessary. See how to [Provide access to Key Vault keys, certificates, and secrets with an Azure role-based access control](https://docs.microsoft.com/en-us/azure/key-vault/general/rbac-guide?tabs=azure-cli) and how to [Assign Azure roles using Azure CLI](https://docs.microsoft.com/en-us/azure/role-based-access-control/role-assignments-cli) for more details. + +Confirm that your virtual machine has managed identity enabled by following [these instructions](managed_identity_cli_quickstart.md). 
+ +To assign a role to your VM's identity, first get the service principal ID for the VM's managed identity by running: +``` +spID=$(az vm identity show --name MyVirtualMachine --resource-group myResourceGroup --query principalId --out tsv) +``` + +Next get the Azure ID of your Key Vault by running: +``` +kvID=$(az resource list --name MyKeyVaultName --query [*].id --out tsv) +``` + +Finally, give the VM permissions to read and write to the Key Vault's secrets by assigning it the "Key Vault Secrets Officer" role: +``` +az role assignment create --assignee $spID --role 'Key Vault Secrets Officer' --scope $kvID +``` + +## Update the example flowgraph +When your resources are ready in Azure, update the `key_vault_name` variable in the [Key Vault example flowgraph](keyvault.grc) to the name you chose for your +deployed Key Vault. Once properly configured, the flowgraph should pull the secret values from Azure Key Vault and scramble the sequence using the secret seed and secret polynomial. + +If you want to enable the Azure Blob sink block, you will need to also setup a storage account and container in that account to store the data. Follow these instructions to [configure Azure Storage connection strings](https://docs.microsoft.com/en-us/azure/storage/common/storage-configure-connection-string) for more details. + +## Run the flowgraph +Run the flowgraph in GNU Radio Companion. The Key Vault block will use the [DefaultAzureCredential class](https://docs.microsoft.com/en-us/dotnet/api/azure.identity) to automatically detect the virtual machine's managed identity and use it to authenticate to Key Vault. + +---- +## Recommended content + +### [Key Vault](https://docs.microsoft.com/en-us/azure/key-vault/general/overview) +### [Assign a Key Vault access policy](https://docs.microsoft.com/en-us/azure/key-vault/general/assign-access-policy?tabs=azure-cli) + + diff --git a/gr-azure-software-radio/examples/managed_identity_cli_quickstart.md b/gr-azure-software-radio/examples/managed_identity_cli_quickstart.md new file mode 100644 index 0000000..3cffbc8 --- /dev/null +++ b/gr-azure-software-radio/examples/managed_identity_cli_quickstart.md @@ -0,0 +1,82 @@ +# Azure Authentication with DefaultAzureCredentials and Managed Identities + +Many of the examples for this Out-of-Tree module require the configuration of Azure services to work properly. This +quickstart will walk through how to use the [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) to configure [managed identities](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview) +to enable applications running in Azure VMs to authenticate to Azure resources. + +## Prerequisites +- You must run the examples on a Virtual Machine in Azure. + +- Use the Bash environment in [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart). +For more information, see [Azure Cloud Shell Quickstart - Bash](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) + + + +- If you prefer to run CLI reference commands locally, [install](https://docs.microsoft.com/en-us/cli/azure/install-azure-cli) the Azure CLI. If you are running on Windows or macOS, consider running Azure CLI in a Docker container. For more information, see [How to run the Azure CLI in a Docker container](https://docs.microsoft.com/en-us/cli/azure/run-azure-cli-docker). 
+ + - If you're using a local installation, sign in to the Azure CLI by using the [az login](https://docs.microsoft.com/en-us/cli/azure/reference-index#az_login) command. To finish the authentication process, follow the steps displayed in your terminal. For additional sign-in options, see [Sign in with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli). + + - When you're prompted, install Azure CLI extensions on first use. For more information about extensions, see [Use extensions with the Azure CLI](https://docs.microsoft.com/en-us/cli/azure/azure-cli-extensions-overview). + + - Run [az version](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_version) to find the version and dependent libraries that are installed. To upgrade to the latest version, run [az upgrade](https://docs.microsoft.com/en-us/cli/azure/reference-index?#az_upgrade). + +- This article requires version 2.29.0 or later of the Azure CLI. If using Azure Cloud Shell, the latest version is already installed. + +## Set the subscription context +The following steps are not required if you're running commands in Cloud Shell. If you're running the CLI locally, perform the following steps to sign in to Azure and set your current subscription: + +Set the current subscription context. Replace `MyAzureSub` with the name of the Azure subscription you want to use: + +``` +az account set --subscription MyAzureSub +``` + +## Confirm that your virtual machine has a system-assigned managed identity +You can choose to have a system-assigned managed identity associated with your virtual machine when it is first created. +Check if the virtual machine that you'll use to run the examples has a managed identity by running: + +``` +az vm identity show --name MyVirtualMachine --resource-group MyResourceGroup +``` + +If there is a managed identity already associated with your virtual machine, you should see output resembling: + +``` +{ +"principalId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", +"tenantId": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", +"type": "SystemAssigned", +"userAssignedIdentities": null +} +``` + +If there is no managed identity associated with your virtual machine, the command will not generate any output. + +## Assign a managed identity to a virtual machine +If your virtual machine does not have a managed identity associate with it, you can [Configure managed identities for Azure resources on a VM using the Azure portal](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-portal-windows-vm) +or you can configure one with the commands below: + +``` +az vm identity assign --name MyVirtualMachine --resource-group myResourceGroup +``` + +If the command was successful, you should see results like: + +``` +{ + "systemAssignedIdentity": "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX", + "userAssignedIdentities": {} +} +``` + +See [Configure managed identities for Azure resources on an Azure VM using Azure CLI](https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/qs-configure-cli-windows-vm) for more details on working with managed identities +using the Azure CLI. 
+ +---- +## Recommended content + +### [az vm identity](https://docs.microsoft.com/en-us/cli/azure/vm/identity) + +### [Azure CLI](https://docs.microsoft.com/en-us/cli/azure/) + +### [Azure Cloud Shell](https://docs.microsoft.com/en-us/azure/cloud-shell/quickstart) diff --git a/gr-azure-software-radio/grc/azure_software_radio_eventhub_sink.block.yml b/gr-azure-software-radio/grc/azure_software_radio_eventhub_sink.block.yml index cabfdf5..9dbde8d 100644 --- a/gr-azure-software-radio/grc/azure_software_radio_eventhub_sink.block.yml +++ b/gr-azure-software-radio/grc/azure_software_radio_eventhub_sink.block.yml @@ -10,7 +10,7 @@ category: '[Azure software radio]' templates: imports: import azure_software_radio - make: azure_software_radio.EventHubSink(${authentication_method}, ${connection_str}, ${sas}, ${eventhub_host_name}, ${eventhub_name}, ${partition_id}) + make: azure_software_radio.EventHubSink(${authentication_method}, ${connection_str}, ${sas}, ${eventhub_host_name}, ${eventhub_name}, ${partition_id}, ${default_cred}) parameters: - id: authentication_method @@ -41,6 +41,10 @@ parameters: label: Partition ID dtype: string default: '' +- id: default_cred + label: DefaultAzureCredential + dtype: raw + default: '' inputs: - domain: message diff --git a/gr-azure-software-radio/grc/azure_software_radio_eventhub_source.block.yml b/gr-azure-software-radio/grc/azure_software_radio_eventhub_source.block.yml index e78cada..b7fa2ac 100644 --- a/gr-azure-software-radio/grc/azure_software_radio_eventhub_source.block.yml +++ b/gr-azure-software-radio/grc/azure_software_radio_eventhub_source.block.yml @@ -10,7 +10,7 @@ category: '[Azure software radio]' templates: imports: import azure_software_radio - make: azure_software_radio.EventHubSource(${authentication_method}, ${connection_str}, ${sas}, ${eventhub_host_name}, ${eventhub_name}, ${consumer_grp}, ${partition_id}, ${starting_position}) + make: azure_software_radio.EventHubSource(${authentication_method}, ${connection_str}, ${sas}, ${eventhub_host_name}, ${eventhub_name}, ${consumer_grp}, ${partition_id}, ${starting_position}, ${default_cred}) parameters: - id: authentication_method @@ -49,6 +49,10 @@ parameters: label: Starting Position dtype: raw default: '@latest' +- id: default_cred + label: DefaultAzureCredential + dtype: raw + default: '' outputs: - domain: message diff --git a/gr-azure-software-radio/python/blob_source.py b/gr-azure-software-radio/python/blob_source.py index 5a88b8e..8547c1f 100644 --- a/gr-azure-software-radio/python/blob_source.py +++ b/gr-azure-software-radio/python/blob_source.py @@ -39,7 +39,8 @@ class BlobSource(gr.sync_block): authentication if Auth Method is "connection_string". URL: Storage account URL string. This is required if using "default" or "url_with_sas" authentication. If using "url_with_sas", the URL must include a SAS - token. + token to access private blobs. If using a blob in a container with public permissions, the + SAS token is not necessary. Container Name: Name of the container where the blob of interest is stored. Blob Name: The name of the block blob to read from. Retry Total: Total number of Azure API retries to allow before throwing an exception. 
Higher diff --git a/gr-azure-software-radio/python/default_credentials/default_credentials.py b/gr-azure-software-radio/python/default_credentials/default_credentials.py index 721cc69..1bcca06 100644 --- a/gr-azure-software-radio/python/default_credentials/default_credentials.py +++ b/gr-azure-software-radio/python/default_credentials/default_credentials.py @@ -3,7 +3,7 @@ # Licensed under the GNU General Public License v3.0 or later. # See License.txt in the project root for license information. # -# pylint: disable=too-many-arguments +# pylint: disable=too-many-arguments, duplicate-code from azure.identity import DefaultAzureCredential diff --git a/gr-azure-software-radio/python/eventhub_sink.py b/gr-azure-software-radio/python/eventhub_sink.py index 4856485..edc1db0 100644 --- a/gr-azure-software-radio/python/eventhub_sink.py +++ b/gr-azure-software-radio/python/eventhub_sink.py @@ -9,10 +9,10 @@ import json import pmt -from gnuradio import gr from azure.eventhub import EventHubProducerClient, EventData from azure.core.credentials import AzureSasCredential from azure.identity import DefaultAzureCredential +from gnuradio import gr # pylint: disable=abstract-method @@ -44,7 +44,8 @@ def __init__( sas_token: str = None, eventhub_host_name: str = None, eventhub_name: str = None, - partition_id: str = None): + partition_id: str = None, + default_credential=None): gr.sync_block.__init__(self, name="eventhub_sink", @@ -61,7 +62,8 @@ def __init__( eventhub_name=eventhub_name, connection_str=connection_str, sas_token=sas_token, - eventhub_host_name=eventhub_host_name + eventhub_host_name=eventhub_host_name, + default_credential=default_credential ) self.message_port_register_in(pmt.intern('in')) @@ -89,7 +91,8 @@ def get_eventhub_producer_client( connection_str: str = None, sas_token: str = None, eventhub_host_name: str = None, - eventhub_name: str = None): + eventhub_name: str = None, + default_credential=None): """ Initialize the Event Hub Producer client Args: @@ -102,6 +105,7 @@ def get_eventhub_producer_client( eventhub_host_name (optional, str): The fully qualified host name for the Event Hub namespace. This is required if using "sas" and "default" authentication. eventhub_name (str): The path to the specified Event Hub to connect to. + DefaultAzureCredential: The credential to use. Ignored if Auth Method is not "default" or not specified. 
Raises: ValueError: Raised if an unsupported authentication method is used @@ -112,7 +116,7 @@ def get_eventhub_producer_client( eventhub_producer_client = EventHubProducerClient.from_connection_string( connection_str, eventhub_name=eventhub_name) - elif authentication_method == "sas_token": + elif authentication_method == "sas": credential = AzureSasCredential(sas_token) eventhub_producer_client = EventHubProducerClient( fully_qualified_namespace=eventhub_host_name, @@ -120,7 +124,8 @@ def get_eventhub_producer_client( credential=credential) elif authentication_method == "default": - default_credential = DefaultAzureCredential() + if not default_credential: + default_credential = DefaultAzureCredential() eventhub_producer_client = EventHubProducerClient( fully_qualified_namespace=eventhub_host_name, eventhub_name=eventhub_name, diff --git a/gr-azure-software-radio/python/eventhub_source.py b/gr-azure-software-radio/python/eventhub_source.py index b188d9d..a71cbf3 100644 --- a/gr-azure-software-radio/python/eventhub_source.py +++ b/gr-azure-software-radio/python/eventhub_source.py @@ -10,12 +10,12 @@ import threading import pmt -from gnuradio import gr from azure.eventhub import EventHubConsumerClient from azure.identity import DefaultAzureCredential from azure.core.credentials import AzureSasCredential +from gnuradio import gr -# pylint: disable=abstract-method +# pylint: disable=abstract-method,too-many-arguments class EventHubSource(gr.sync_block): """ Receives and converts JSON events from Azure Event Hub to GNU Radio PMT format. @@ -37,6 +37,7 @@ class EventHubSource(gr.sync_block): Consumer Group: The consumer group to receive events from Event Hub. Partition ID: The partition ID to receive events from. Starting Position: The position of an event in the Event Hub partition. + DefaultAzureCredential: The credential to use. Ignored if Auth Method is not "default" or not specified. """ # pylint: disable=too-many-arguments, no-member def __init__( @@ -48,7 +49,8 @@ def __init__( eventhub_name: str = None, consumer_group: str = None, partition_id: str = None, - starting_position=None): + starting_position=None, + default_credential=None): gr.sync_block.__init__(self, name="eventhub_source", @@ -68,8 +70,8 @@ def __init__( eventhub_host_name=eventhub_host_name, eventhub_name=eventhub_name, consumer_group=consumer_group, + default_credential=default_credential ) - self.message_port_register_out(pmt.intern('out')) self.rec_thread = threading.Thread(target=self.receive) @@ -106,7 +108,8 @@ def get_eventhub_consumer_client( sas_token: str = None, eventhub_host_name: str = None, eventhub_name: str = None, - consumer_group: str = None): + consumer_group: str = None, + default_credential=None): """ Initialize the Event Hub Consumer client Args: @@ -121,17 +124,19 @@ def get_eventhub_consumer_client( eventhub_name (str): The path to the specified Event Hub to connect to. consumer_group (str): The consumer group to receive events from Event Hub. + DefaultAzureCredential: The credential to use. Ignored if Auth Method is not "default" or not specified. 
Raises: ValueError: Raised if an unsupported authentication method is used Returns: EventHubConsumerClient: An Event Hub consumer client ready to be used """ + if authentication_method == "connection_string": eventhub_consumer_client = EventHubConsumerClient.from_connection_string( connection_str, eventhub_name=eventhub_name, consumer_group=consumer_group) - elif authentication_method == "sas_token": + elif authentication_method == "sas": credential = AzureSasCredential(sas_token) eventhub_consumer_client = EventHubConsumerClient( fully_qualified_namespace=eventhub_host_name, @@ -140,7 +145,8 @@ def get_eventhub_consumer_client( credential=credential) elif authentication_method == "default": - default_credential = DefaultAzureCredential() + if not default_credential: + default_credential = DefaultAzureCredential() eventhub_consumer_client = EventHubConsumerClient( fully_qualified_namespace=eventhub_host_name, eventhub_name=eventhub_name, diff --git a/gr-azure-software-radio/python/integration_eventhub_sink.py b/gr-azure-software-radio/python/integration_eventhub_sink.py index 52e1be0..57d37dd 100644 --- a/gr-azure-software-radio/python/integration_eventhub_sink.py +++ b/gr-azure-software-radio/python/integration_eventhub_sink.py @@ -1,4 +1,4 @@ -# pylint: disable=missing-function-docstring, no-self-use, missing-class-docstring, no-member +# pylint: disable=missing-function-docstring, no-self-use, missing-class-docstring, no-member, duplicate-code, abstract-method #!/usr/bin/env python # -*- coding: utf-8 -*- # @@ -17,14 +17,14 @@ import numpy as np import pmt -from azure_software_radio import EventHubSink +from azure_software_radio import EventHubSink, default_credentials from azure.eventhub import EventHubConsumerClient from gnuradio import gr, gr_unittest from gnuradio import blocks NUM_MSGS = 10 -#pylint: disable=abstract-method + class PmtMessageGenerator(gr.sync_block): """ This is a PMT Message Generating class for testing purposes @@ -83,6 +83,9 @@ def setUp(self): self.eventhub_connection_string = os.getenv( 'AZURE_EVENTHUB_CONNECTION_STRING') + + self.eventhub_host_name = os.getenv( + 'AZURE_EVENTHUB_HOST_NAME') self.eventhub_consumer_group = os.getenv( 'AZURE_EVENTHUB_CONSUMER_GROUP') self.eventhub_name = os.getenv('AZURE_EVENTHUB_NAME') @@ -96,8 +99,7 @@ def tearDown(self): self.tb = None def on_event(self, _partition_context, event): - msg = json.loads(list(event.body)[0]) - print('Received the event: %s' % msg) + _ = json.loads(list(event.body)[0]) self.num_rx_msgs += 1 if self.num_rx_msgs == NUM_MSGS: self.eventhub_consumer.close() @@ -113,12 +115,10 @@ def test_round_trip_data_through_eventhub(self): src_data.append(float(i)) src = blocks.vector_source_f(src_data, False) pmt_msg_gen = PmtMessageGenerator(msg_list, msg_interval) - sink_block = EventHubSink( authentication_method="connection_string", connection_str=self.eventhub_connection_string, eventhub_name=self.eventhub_name) - # Connect vector source to message gen self.tb.connect(src, pmt_msg_gen) @@ -141,6 +141,55 @@ def test_round_trip_data_through_eventhub(self): starting_position=test_start_time) self.assertEqual(NUM_MSGS, self.num_rx_msgs) + def test_round_trip_data_through_eventhub_default_creds(self): + + creds = default_credentials.get_DefaultAzureCredential(enable_cli_credential=True, + enable_environment=True, + enable_managed_identity=True, + enable_powershell=True, + enable_visual_studio_code=True, + enable_shared_token_cache=True, + enable_interactive_browser=False) + test_start_time = 
+        msg_interval = 1000
+        msg_list = [pmt.from_long(i) for i in range(NUM_MSGS)]
+
+        # Create dummy data to trigger messages
+        src_data = []
+        for i in range(NUM_MSGS * msg_interval):
+            src_data.append(float(i))
+        src = blocks.vector_source_f(src_data, False)
+        pmt_msg_gen = PmtMessageGenerator(msg_list, msg_interval)
+
+        sink_block = EventHubSink(
+            authentication_method="default",
+            eventhub_host_name=self.eventhub_host_name,
+            eventhub_name=self.eventhub_name,
+            default_credential=creds)
+
+        # Connect vector source to message gen
+        self.tb.connect(src, pmt_msg_gen)
+
+        # Connect message generator to message consumer
+        self.tb.msg_connect(pmt_msg_gen, 'out_port', sink_block, 'in')
+
+        # Verify that the message port query functions work
+        self.assertEqual(
+            pmt.to_python(
+                pmt_msg_gen.message_ports_out())[0],
+            'out_port')
+        self.assertEqual(
+            'in' in pmt.to_python(
+                sink_block.message_ports_in()), True)
+
+        self.tb.start()
+        with self.eventhub_consumer:
+            self.eventhub_consumer.receive(
+                on_event=self.on_event,
+                starting_position=test_start_time)
+        self.assertEqual(NUM_MSGS, self.num_rx_msgs)
+        self.tb.stop()
+

 if __name__ == '__main__':
     gr_unittest.run(IntegrationEventhubSink)
diff --git a/gr-azure-software-radio/python/integration_eventhub_source.py b/gr-azure-software-radio/python/integration_eventhub_source.py
index 5d43faf..193ada7 100644
--- a/gr-azure-software-radio/python/integration_eventhub_source.py
+++ b/gr-azure-software-radio/python/integration_eventhub_source.py
@@ -1,4 +1,4 @@
-# pylint: disable=missing-function-docstring, no-self-use, missing-class-docstring, no-member
+# pylint: disable=missing-function-docstring, no-self-use, missing-class-docstring, no-member, duplicate-code
 #!/usr/bin/env python
 # -*- coding: utf-8 -*-
 #
@@ -14,36 +14,22 @@
 import datetime
 import json
 import os
+import time

 import pmt
-from azure_software_radio import EventHubSource
+from azure_software_radio import EventHubSource, default_credentials
 from azure.eventhub import EventHubProducerClient, EventData
+from gnuradio import blocks
 from gnuradio import gr, gr_unittest

-# pylint: disable=abstract-method
-class PmtMessageConsumer(gr.sync_block):
-    def __init__(self):
-        gr.sync_block.__init__(
-            self,
-            name="pmt message consumer",
-            in_sig=[],
-            out_sig=[]
-        )
-        self.msg_list = []
-        self.message_port_register_in(pmt.intern('in_port'))
-        self.set_msg_handler(pmt.intern('in_port'),
-                             self.handle_msg)
-
-    def handle_msg(self, msg):
-        self.msg_list.append(msg)
-        print('Received PMT message %s' % msg)
-

 class IntegrationEventhubSource(gr_unittest.TestCase):

     def setUp(self):
         self.tb = gr.top_block()
+        self.eventhub_host_name = os.getenv(
+            'AZURE_EVENTHUB_HOST_NAME')
         self.eventhub_connection_string = os.getenv(
             'AZURE_EVENTHUB_CONNECTION_STRING')
         self.eventhub_consumer_group = os.getenv(
@@ -59,7 +45,6 @@ def tearDown(self):

     def test_round_trip_data_through_eventhub(self):
         test_start_time = datetime.datetime.utcnow()
-
         pmsg = pmt.make_dict()
         pmsg = pmt.dict_add(
             pmsg,
@@ -80,7 +65,7 @@ def test_round_trip_data_through_eventhub(self):
             pmt.from_double(4))
         pmsg = pmt.dict_add(
             pmsg,
-            pmt.string_to_symbol("eventhub"),
+            pmt.string_to_symbol("eventhub source"),
             pmt.from_long(5))

         msg = json.dumps(pmt.to_python(pmsg))
@@ -88,7 +73,7 @@ def test_round_trip_data_through_eventhub(self):
         event_batch.add(EventData(msg))
         self.eventhub_producer.send_batch(event_batch)

-        pmt_msg_rec = PmtMessageConsumer()
+        msg_debug_block = blocks.message_debug()
         source_block = EventHubSource(
authentication_method="connection_string", @@ -97,20 +82,84 @@ def test_round_trip_data_through_eventhub(self): consumer_group=self.eventhub_consumer_group, starting_position=test_start_time) - self.tb.msg_connect(source_block, 'out', pmt_msg_rec, 'in_port') + self.tb.msg_connect(source_block, 'out', msg_debug_block, 'print') + self.tb.msg_connect(source_block, 'out', msg_debug_block, 'store') self.assertEqual( pmt.to_python( source_block.message_ports_out())[0], 'out') self.assertEqual( - 'in_port' in pmt.to_python( - pmt_msg_rec.message_ports_in()), True) + msg_debug_block.num_messages(), 0) + + self.tb.start() + time.sleep(10) + self.assertEqual( + msg_debug_block.num_messages(), 1) + source_block.stop() + self.tb.stop() + self.tb.wait() + + + def test_round_trip_data_through_eventhub_default_cred(self): + test_start_time = datetime.datetime.utcnow() + creds = default_credentials.get_DefaultAzureCredential(enable_cli_credential=True, + enable_environment=True, + enable_managed_identity=True, + enable_powershell=True, + enable_visual_studio_code=True, + enable_shared_token_cache=True, + enable_interactive_browser=False) + pmsg = pmt.make_dict() + pmsg = pmt.dict_add( + pmsg, + pmt.string_to_symbol("this"), + pmt.from_long(0)) + pmsg = pmt.dict_add(pmsg, pmt.string_to_symbol("is"), pmt.from_long(1)) + pmsg = pmt.dict_add( + pmsg, + pmt.string_to_symbol("a"), + pmt.from_double(2)) + pmsg = pmt.dict_add( + pmsg, + pmt.string_to_symbol("test"), + pmt.from_long(3)) + pmsg = pmt.dict_add( + pmsg, + pmt.string_to_symbol("for"), + pmt.from_double(4)) + pmsg = pmt.dict_add( + pmsg, + pmt.string_to_symbol("eventhub source"), + pmt.from_long(5)) - self.tb.run() - print('after run') - self.assertEqual(len(pmt_msg_rec.msg_list), 1) + msg = json.dumps(pmt.to_python(pmsg)) + event_batch = self.eventhub_producer.create_batch() + event_batch.add(EventData(msg)) + self.eventhub_producer.send_batch(event_batch) + msg_debug_block = blocks.message_debug() + source_block = EventHubSource( + authentication_method="default", + eventhub_host_name=self.eventhub_host_name, + eventhub_name=self.eventhub_name, + consumer_group=self.eventhub_consumer_group, + starting_position=test_start_time, + default_credential=creds) + self.tb.msg_connect(source_block, 'out', msg_debug_block, 'print') + self.tb.msg_connect(source_block, 'out', msg_debug_block, 'store') + self.assertEqual( + pmt.to_python( + source_block.message_ports_out())[0], + 'out') + self.assertEqual( + msg_debug_block.num_messages(), 0) + self.tb.start() + time.sleep(10) + source_block.stop() + self.tb.stop() + self.tb.wait() + self.assertEqual(msg_debug_block.num_messages(), 1) if __name__ == '__main__': gr_unittest.run(IntegrationEventhubSource) diff --git a/gr-azure-software-radio/python/qa_eventhub_sink.py b/gr-azure-software-radio/python/qa_eventhub_sink.py index c421e96..41732af 100644 --- a/gr-azure-software-radio/python/qa_eventhub_sink.py +++ b/gr-azure-software-radio/python/qa_eventhub_sink.py @@ -8,8 +8,9 @@ # import uuid -from gnuradio import gr, gr_unittest from azure_software_radio import EventHubSink +from gnuradio import gr, gr_unittest + class qa_EventHubSink(gr_unittest.TestCase): diff --git a/gr-azure-software-radio/python/qa_eventhub_source.py b/gr-azure-software-radio/python/qa_eventhub_source.py index 5049559..d0943c7 100644 --- a/gr-azure-software-radio/python/qa_eventhub_source.py +++ b/gr-azure-software-radio/python/qa_eventhub_source.py @@ -8,8 +8,8 @@ # import uuid -from gnuradio import gr, gr_unittest from azure_software_radio 
+from gnuradio import gr, gr_unittest


 class qa_EventHubSource(gr_unittest.TestCase):
diff --git a/images/example_flowgraph.png b/images/example_flowgraph.png
new file mode 100644
index 0000000..3e150b6
Binary files /dev/null and b/images/example_flowgraph.png differ
diff --git a/pages/devvm.md b/pages/devvm.md
index c3a6be2..8692262 100644
--- a/pages/devvm.md
+++ b/pages/devvm.md
@@ -1,8 +1,8 @@
 # Welcome To Azure software radio developer VM

-The Azure software radio developer VM is the start of major investments by the Azure Spectrum Team to facilitate software defined radio development on Azure. This will accelerate development of SDR applications and harness the power of Azure to expand potential oppurtunities in this industry.
+The Azure software radio developer VM is the start of major investments by the Azure Spectrum Team to facilitate software defined radio development on Azure. This will accelerate development of SDR applications and harness the power of Azure to expand potential opportunities in this industry.

-To launch our first set of offerings we have built a developer vm on Ubuntu 20.04 which is ready to go with the most common tools for developing SDR. These include
+To launch our first set of offerings we have built a developer VM on Ubuntu 20.04 which is ready to go with the most common tools for developing SDR. These include

 1. GNU Radio
 2. Fosphor
@@ -14,26 +14,26 @@ We have also included Azure Native Tools including
 Once you deploy the virtual machine simply RDP/VNC to the machine and get developing! The VM is deployed into a self-contained resource group, virtual network and public ip address.
-You should take additional steps to secure the public ip address to only allow connections from trusted IP's.
+You should take additional steps to secure the public IP address to only allow connections from trusted IPs.

-If you have any feedback simply log an issue on this github repo or get in touch with the team via email at azuresoftwareradio@microsoft.com
+If you have any feedback, simply log an issue on this GitHub repo or get in touch with the team via email at azuresoftwareradio@microsoft.com

 ## Signing up for the Private Preview of the Azure software radio developer VM

-Currently the Azure software radio developer VM is in Private Preview and it requires for our team to authorize you to deploy the service.
+Currently the Azure software radio developer VM is in Private Preview and our team must first authorize you before you can deploy the service.

 To sign up for the Developer VM click [Here](https://forms.office.com/r/sbZqBUVUE0)

 Fill in the required details and our team will ensure you get authorized within 24 hours for the service.

-Follow the deployment instructions to validate access to the developer vm and begin deployment
+Follow the deployment instructions to validate access to the developer VM and begin deployment

 ## Validating Access to the Developer VM

 1. Open your browser and navigate to [AzurePortal](https://portal.azure.com) and sign-in

-2. In the search bar type "MarketPlace" and Click MarketPlace
+2. In the search bar type "Marketplace" and Click Marketplace

 ![Marketplace](./../images/marketplace.jpg)
@@ -48,7 +48,7 @@ Follow the deployment instructions to validate access to the developer vm and be
 **if you dont see the offering listed and it is more than 48 hours since you filled in the form contact the Azure software radio developer VM team via email azuresoftwareradio@microsoft.com**

-## Validate Qouta Requirements
+## Validate Quota Requirements

 1. From the Azure Portal Click Subscriptions
@@ -58,15 +58,15 @@ Follow the deployment instructions to validate access to the developer vm and be
 ![SelectSubscription](./../images/selectsubscription.jpg)

-3. Click Usage & Qoutas
+3. Click Usage & Quotas

 ![Usage&Quotas](./../images/usageqouta.jpg)

-4. Type NV in the search bar and verify as shown that you have sufficient qouta (at least 12 free cores) for the region you want to deploy into.
+4. Type NV in the search bar and verify as shown that you have sufficient quota (at least 12 free cores) for the region you want to deploy into. We recommend using the region with the lowest latency, which you can easily determine using [this web app](https://azurespeedtest.azurewebsites.net/).

 ![VerifyNVQouta](./../images/verifyqouta.jpg)

-5. if you do not have enough qouta, click the pencil (edit) icon and request more cores and ensure it is successful before attempting to deploy the development vm.
+5. If you do not have enough quota, click the pencil (edit) icon and request more cores, and ensure the request is successful before attempting to deploy the development VM.

 ## Deployment of the Azure software radio developer VM
@@ -75,7 +75,7 @@ Follow the deployment instructions to validate access to the developer vm and be
 ![PrivateProductVMCreation](./../images/vmcreation.jpg)

-3. On the Create VM Page 1, select the subscription which has been authorized for developer vm and allow for a dynamic resource group to be created or select an existing one. Enter a Name for the virtual machine, select the same region to which you have applied and have available qouta, leave the remaining settings as is except for use Password or SSH Key. Select the appropraite one for your deployment and Click Next: Disks
+3. On the Create VM Page 1, select the subscription which has been authorized for the developer VM and either allow a new resource group to be created or select an existing one. Enter a Name for the virtual machine and select the same region to which you have applied and have available quota. Change Availability options to "No infrastructure redundancy required" (this is needed to be able to use the NV series VMs). Under Size you should now be able to choose an NV series. For Authentication type we recommend using a password, so that it will be easier to RDP into the VM. Click Next: Disks

 ![VMCreateP1](./../images/vmcreate1.jpg)
@@ -85,7 +85,7 @@ Follow the deployment instructions to validate access to the developer vm and be
 ![VMCreateP2](./../images/vmcreate2.jpg)

- **The System Managed Identity can be assigned permissions to Azure Resources Post Deployment to allow the Azure Client and AzCopy to login to directly to Azure and access resources it has been authorized to**
+ **The System Managed Identity can be assigned permissions to Azure Resources post deployment to allow the Azure Client and AzCopy to log in directly to Azure and access the resources it has been authorized to use**

 7. Click Review+Create and then Click Create

 8. Confirm the deployment is successful as shown and click Go To Resource
@@ -98,4 +98,4 @@ Follow the deployment instructions to validate access to the developer vm and be
 ![ObtainPublicIP](./../images/rdptovm.jpg)

-2. Start your favourite RDP client enter the IP Address and logon with the credentials set during deployment
+2. Start your favorite RDP client, enter the IP address, and log on with the credentials set during deployment
diff --git a/tutorials/README.md b/tutorials/README.md
new file mode 100644
index 0000000..27ac6bf
--- /dev/null
+++ b/tutorials/README.md
@@ -0,0 +1,7 @@
+# Azure SDR Tutorials
+
+In these tutorials we demonstrate the power behind GNU Radio workflows in Azure. The tutorials were written so that no prior experience with Azure or GNU Radio is required to follow along.
+
+In **[Exploring GNU Radio Companion](exploring_grc/README.md)** we show how to install GNU Radio and the Azure blocks manually, starting with a fresh Ubuntu 20 VM in Azure. We then switch over to using our prebuilt GNU Radio development VM that lets you skip having to install and configure everything yourself. As an example GNU Radio application we view the spectrum of the FM radio band, using an RF recording stored in Azure blob storage.
+
+**[Mapping Airplane Locations Using ADS-B and Power BI](adsb_powerbi/README.md)** involves demodulating and decoding [ADS-B](https://en.wikipedia.org/wiki/Automatic_Dependent_Surveillance%E2%80%93Broadcast) signals transmitted by commercial aircraft, and sending the resulting information to Azure Event Hubs. We have an RF recording of aircraft in the DC area, but with ~$50 in hardware you can receive and decode signals yourself!
\ No newline at end of file
diff --git a/tutorials/adsb_powerbi/README.md b/tutorials/adsb_powerbi/README.md
new file mode 100644
index 0000000..f074bd6
--- /dev/null
+++ b/tutorials/adsb_powerbi/README.md
@@ -0,0 +1,229 @@
+# Mapping Airplane Locations Using ADS-B and Power BI
+
+This tutorial is split into two parts. First, we will show how ADS-B signals can be decoded and sent to Azure Event Hubs. Next, we incorporate Power BI to plot the locations of aircraft over time on a map-based interface.
+
+## Prerequisites
+
+- As part of the second half of this stage you will need a Power BI account and access to at least one [Power BI Workspace](https://docs.microsoft.com/en-us/power-bi/collaborate-share/service-create-the-new-workspaces). Depending on your account you may be able to simply go to https://msit.powerbi.com and sign in.
+
+## Introduction
+
+Automatic Dependent Surveillance-Broadcast (ADS-B) is a wireless technology used by aircraft to broadcast their position and other onboard sensor data. The information can be received by air traffic control ground stations, as well as other aircraft, to provide situational awareness. ADS-B is automatic, i.e., it requires no pilot input. The data sent over ADS-B originates from the aircraft's navigation system and other sensors. The signal is transmitted at 1090 MHz and uses pulse position modulation (PPM) at a data rate of 1 Mbit/s, so each message lasts only on the order of 100 microseconds.
+
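+Because PPM carries the data in the timing of pulses rather than in their phase, an ADS-B receiver can work purely from signal power. As a taste of what the flowgraph later in this tutorial does with GNU Radio's Complex To Mag^2 block, here is a minimal NumPy sketch of that idea; the sample values and threshold are made up for illustration, and this is not code from the module:
+
+```python
+import numpy as np
+
+# Hypothetical complex baseband samples of a PPM burst
+iq = np.array([0.01 + 0.02j, 0.9 + 0.4j, 0.02 - 0.01j, 0.8 - 0.5j])
+
+power = np.abs(iq) ** 2   # magnitude squared, i.e. instantaneous power
+pulses = power > 0.5      # crude threshold; the pulse positions carry the bits
+print(pulses)             # [False  True False  True]
+```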
+
+For those who skipped the manual GNU Radio installation steps, this will be your first time installing a GNU Radio out-of-tree (OOT) module from source. OOTs are an important part of GNU Radio: GNU Radio itself ships with only a basic set of signal processing blocks, and most application-specific blocks are found in third-party OOTs. In addition, if you build your own GNU Radio application, there is a good chance you will want to create your own OOT to contain the custom blocks you create. Most OOTs are installed using the same set of steps, although some have unique dependencies.
+
+If you haven't cloned a copy of these Tutorials to the VM yet, open a terminal in the VM and run the following:
+
+```console
+git clone https://github.com/microsoft/azure-software-radio-hello-world.git
+```
+
+## Installing the gr-adsb Out-of-Tree Module
+
+The process of installing the **gr-adsb** OOT module onto a system with GNU Radio already installed is as follows. Open a terminal and type:
+```console
+git clone https://github.com/mhostetter/gr-adsb.git
+cd gr-adsb
+mkdir build
+cd build
+cmake ..
+make
+sudo make install
+sudo ldconfig
+```
+
+This process is the same for most GNU Radio OOTs: you simply replace the GitHub URL with that of whichever OOT you are trying to install. If you are curious what other OOTs exist publicly, check out [CGRAN](https://www.cgran.org/), an online index of GNU Radio OOTs. Note that some OOTs have dependencies beyond what GNU Radio itself needs, and may require an extra `apt install` step. You will know the installation of gr-adsb was successful because additional ADS-B blocks will be available in GNU Radio, as shown in the red box highlighted below:
+
+*(screenshot: the new ADS-B blocks highlighted in GRC)*
+
+## ADS-B and Event Hub
+
+Open GRC, but this time launch it by opening Ubuntu's Terminal application and typing `gnuradio-companion`. The **gr-adsb** module will print additional information in this Terminal window while it is running. Now open the flowgraph [adsb_event_hub.grc](flowgraphs/adsb_event_hub.grc), which is in the adsb_powerbi/flowgraphs directory of this repo. You should see the following flowgraph (ignore the grayed-out Event Hub Sink block):
+
+*(screenshot of the adsb_event_hub.grc flowgraph)*
+
+The first block, where the samples originate, is the same Blob Source we used in Stage 1, but this time it pulls down an RF recording of ADS-B signals captured in the DC area. The signal is converted from complex samples to magnitude squared (power); due to the nature of PPM modulation we only need the power over time. The ADS-B Framer/Demod/Decoder blocks work together to demodulate and decode the signal. We will not dive into the details of how they work in this stage, but you can refer to [this tutorial](https://wiki.analog.com/resources/eval/user-guides/picozed_sdr/tutorials/adsb) for more information.
+
+The grey input/output ports represent messages instead of a stream of samples, and their connections are drawn with dashed lines to show that they are asynchronous with respect to the samples flowing through the flowgraph. In other words, the output of the Demod block goes to a Time Sink for visualization purposes, but the actual demodulated data is sent out over a message and is no longer aligned to sample time. The Demod block sends the raw bits along with the metadata, so the Pass Only ADS-B Meta block strips the raw bits out; we only want the metadata sent to Event Hub. The metadata, stored within the message, arrives at the Event Hub Sink block, which converts it from a GNU Radio message to JSON and sends it to an Event Hub endpoint. In the second part of this tutorial we will process this data sent over Event Hub.
+
+When you run the flowgraph you should see the following output:
+
+*(screenshot of the Time Sink display)*
+
+This shows the signal over time, and the display is triggering in such a way that the beginning of each packet is aligned to the left side of the plot. We can see that this particular packet was roughly 65 microseconds in duration, but you will notice that there are packets of various lengths.
+
+If you switch windows and bring up the Terminal window you used to launch GRC, you might see a list of decoded messages (in some installations this will *not* show up in your terminal, but instead you will see less structured output in the bottom-left GRC console):
+
+```
+  Time    ICAO    Callsign  Alt    Climb  Speed  Hdng  Latitude    Longitude    Msgs
+                            ft     ft/m   kt     deg   deg         deg
+76:06:02  a46ab3  FFT2380   20775  -1728  425    15    38.7591705  -76.4883688  24
+86:06:02  aa7e74            30000  -1024  507    56    38.6728060  -76.6093750  17
+16:06:02  ab6394            5875   -128   237    179   38.9737663  -76.9388428  8
+16:06:02  a609d4            36025  -64    514    25    38.4288025  -76.7635644  4
+16:06:02  a95a1c            19000  0      393    -108  38.5884247  -76.6375799  8
+16:06:02  a9b088  AAL966    6175   -1920  252    123   38.7975660  -77.0895386  6
+66:06:02  a9f4c6            35000  64     476    48    38.5527191  -76.9266875  5
+```
+
+This is the actual data from the aircraft, which provides the aircraft location and heading, along with some identifying information. While this is useful for navigation systems, humans prefer to have it visualized, and that is where Event Hub and Power BI come in.
+
+## Power BI and Maps Interface
+
+
+[Azure Event Hubs](https://azure.microsoft.com/en-us/services/event-hubs/) is a real-time data ingestion service. We will be using it to receive and visualize the ADS-B data that our GNU Radio flowgraph is generating. [Power BI](https://powerbi.microsoft.com/en-us/) makes it easy to build visualizations of your data, and there is a [version](https://powerbi.com/) you can use in your browser. To connect Event Hubs to Power BI, we will be using the [Azure Stream Analytics](https://azure.microsoft.com/en-us/services/stream-analytics/) service, which makes it easy to build data pipelines.
+
+### Event Hubs
+
+The first step is to create a new Event Hub. There is documentation for doing so using the [Azure Portal](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-create) or the [Azure CLI](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-quickstart-cli). Make sure to create both an Event Hubs namespace and an Event Hub under that namespace.
+
+**Note:** When creating the Event Hubs Namespace, set the **Pricing Tier** to **Standard** and set the **Throughput Units** to **1**.
+
+After you have completed this, your new Event Hub should be listed in the Resource Group you are using. You now need the Connection String of the Event Hub instance (not the namespace) so that the flowgraph can send messages to the Event Hub. Follow [these steps](https://docs.microsoft.com/en-us/azure/event-hubs/event-hubs-get-connection-string) to get the Connection String and copy it to your clipboard.
+
+Now return to the GRC window and find the Event Hub Sink block on the far right side of the flowgraph.
+
+*(screenshot of the Event Hub Sink block)*
+
+Double-click it to bring up the properties for the block. Select **connection_string** for the Auth Method and paste the Connection String into the appropriate field. Enter the name of the Event Hub instance you created (not the namespace) in the Event Hub Name field. Now enable the Event Hub Sink block and run the flowgraph again; you can continue to the next step while it runs.
+
+*(screenshot of the Event Hub Sink block properties)*
+
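+With those settings filled in, the sink behaves roughly like the sketch below: each GNU Radio message is converted to JSON and pushed to the Event Hub using the azure-eventhub Python package. This is a simplified sketch for intuition only, with a placeholder connection string and Event Hub name; it is not the module's actual implementation:
+
+```python
+import json
+import pmt
+from azure.eventhub import EventHubProducerClient, EventData
+
+# Placeholders; substitute your own connection string and Event Hub name
+producer = EventHubProducerClient.from_connection_string(
+    "Endpoint=sb://<namespace>.servicebus.windows.net/;...",
+    eventhub_name="<your event hub>")
+
+# A PMT dictionary standing in for one ADS-B metadata message
+meta = pmt.dict_add(pmt.make_dict(),
+                    pmt.string_to_symbol("snr"), pmt.from_double(25.5))
+
+batch = producer.create_batch()
+batch.add(EventData(json.dumps(pmt.to_python(meta))))  # message -> JSON -> event
+producer.send_batch(batch)
+```
+
+The block handles this conversion and batching for you; the point is simply that each message arrives at the Event Hub as one JSON object.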
+If you go to the Azure Portal and navigate to your Event Hub instance, you should start to see the graphs show that messages are being received from the flowgraph; note that it may take a minute to update. If you don't see messages arrive, re-run the flowgraph and refresh the page. Once message activity is seen, you can stop the flowgraph by closing the window that opened when it started.
+
+*(screenshot of the Event Hub metrics graphs in the Azure Portal)*
+
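+If you would rather verify from code than from the portal graphs, a small stand-alone consumer script can print each JSON event as it arrives. As before, this is a hedged sketch with placeholder connection details, separate from the module itself:
+
+```python
+from azure.eventhub import EventHubConsumerClient
+
+# Placeholders; substitute your own connection string and Event Hub name
+client = EventHubConsumerClient.from_connection_string(
+    "Endpoint=sb://<namespace>.servicebus.windows.net/;...",
+    consumer_group="$Default",
+    eventhub_name="<your event hub>")
+
+def on_event(partition_context, event):
+    # Each event body is the JSON-encoded ADS-B metadata from the flowgraph
+    print(b"".join(event.body).decode("utf-8"))
+
+with client:
+    # "-1" starts reading from the beginning of the partition
+    client.receive(on_event=on_event, starting_position="-1")
+```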
+## Stream Analytics
+
+Now that we have the ADS-B output in Azure, it is time to make it more accessible. Azure Stream Analytics makes it easy to connect services and applications to the streaming data coming from Event Hub.
+
+### Create a Job
+
+Follow these steps to create a Stream Analytics job:
+1. Sign in to the Azure portal
+2. Select **Create a resource** in the upper left-hand corner of the Azure portal
+3. Select **Analytics > Stream Analytics job** from the results list
+4. Fill out the Stream Analytics job page with the following information:
+
+|Setting |Suggested value |Description|
+|-----------|-------------------|-----------|
+|Job name |ADSB-Job |Enter a name to identify your Stream Analytics job. The job name can contain only alphanumeric characters, hyphens, and underscores, and must be between 3 and 63 characters long.|
+|Subscription| \<Your subscription\> |Select the Azure subscription that you want to use for this job.|
+|Resource group| \<Your resource group\> |Select the same resource group as your Event Hub.|
+|Location |\