diff --git a/scripts/notebooks/EJFAT/LBCP-tester.ipynb b/scripts/notebooks/EJFAT/E2SAR-development-tester.ipynb
similarity index 84%
rename from scripts/notebooks/EJFAT/LBCP-tester.ipynb
rename to scripts/notebooks/EJFAT/E2SAR-development-tester.ipynb
index ddd69c9..bf99eba 100644
--- a/scripts/notebooks/EJFAT/LBCP-tester.ipynb
+++ b/scripts/notebooks/EJFAT/E2SAR-development-tester.ipynb
@@ -18,7 +18,7 @@
"## Preparation and overview\n",
"\n",
"- Be sure to [generate a keypair for Jupyter Hub](GitHubSSH.ipynb) and register it with GitHub - the keys will be used to check out the code from private repositories, like [UDPLBd](https://github.com/esnet/udplbd) and [E2SAR](https://github.com/JeffersonLab/E2SAR).\n",
- "- Note that for E2SAR development and testing sender and receiver node compile/build environments will be setup via post-boot scripts ([sender](post-boot/sender.sh) and [receiver](post-boot/recver.sh)) and grpc++ is installed as a tar.gz with static and dynamic libraries compiled for ubuntu22\n",
+ "- Note that for E2SAR development and testing sender and receiver node compile/build environments will be setup via post-boot scripts ([sender](post-boot/sender.sh) and [receiver](post-boot/recver.sh)) and grpc++/boost is installed as a debian package from [Github releases](https://github.com/JeffersonLab/E2SAR/releases) with static and dynamic libraries compiled for ubuntu22\n",
"- This does not setup the control plane node for anything, but testing a specific version - you can set which branch of UDPLBd to check out and a containerized version is built and stood up."
]
},
@@ -59,13 +59,16 @@
"# which of the available config files to use with UDPLBd\n",
"udplbd_config = 'lb_mock-tls.yml'\n",
"\n",
+ "#base distro type - either default or docker\n",
+ "distro_types = ['default','docker']\n",
+ "distro_type = distro_types[0]\n",
+ "\n",
"# base distro 'ubuntu' or 'rocky'\n",
"distro_name = 'ubuntu'\n",
"\n",
- "binary_extension = {\n",
- " 'ubuntu': 'ubuntu22',\n",
- " 'rocky': 'rocky8'\n",
- "}[distro_name]\n",
+ "#base distro version, currently only for ubuntu 20,22,24. E2SAR dependencies will be \n",
+ "#downloaded for the appropriate versions.\n",
+ "distro_version = '22'\n",
"\n",
"# note that the below is distribution specific ('ubuntu' for ubuntu and so on)\n",
"home_location = {\n",
@@ -75,10 +78,6 @@
"\n",
"vm_key_location = f'{home_location}/.ssh/github_ecdsa'\n",
"\n",
- "# grpc tar ball stored in E2SAR repo (via LFS)\n",
- "grpc_tar = f\"grpc-v1.54.1-{binary_extension}.tar.gz\"\n",
- "boost_tar = f\"boost-v1.85.0-{binary_extension}.tar.gz\"\n",
- "\n",
"# which test suites in E2SAR to run (leave empty to run all)\n",
"# you can set 'unit' or 'live' to run unit or live tests only\n",
"e2sar_test_suite = ''\n",
@@ -86,6 +85,11 @@
"# name of the network connecting the nodes\n",
"net_name = 'site_bridge_net'\n",
"\n",
+ "# url of e2sar deps. Find the appropriate version for the OS at https://github.com/JeffersonLab/E2SAR/releases\n",
+ "static_release_url = 'https://github.com/JeffersonLab/E2SAR/releases/download/' # don't need to change this\n",
+ "e2sar_dep_artifcat = 'e2sar-deps_0.1.0_amd64.deb'\n",
+ "e2sar_release_ver = 'E2SAR-0.1.0'\n",
+ "e2sar_dep_url = static_release_url + e2sar_release_ver + \"-\" + distro_name + \"-\" + distro_version + \".04/\" + e2sar_dep_artifcat\n",
"\n",
"#\n",
"# SHOULDN'T NEED TO EDIT BELOW\n",
@@ -103,10 +107,9 @@
"fablib = fablib_manager() \n",
"fablib.show_config();\n",
"\n",
- "images = {\n",
- " 'ubuntu': ('default_ubuntu_22', 'docker_ubuntu_22'),\n",
- " 'rocky': ('default_rocky_8', 'docker_rocky_8')\n",
- "}\n",
+ "# Using docker image for cpnode by default\n",
+ "distro_image = distro_type + '_' + distro_name + '_' + distro_version\n",
+ "cp_distro_image = distro_types[1] + '_' + distro_name + '_' + distro_version\n",
"\n",
"# variable settings\n",
"slice_name = f'UDP LB Control Plane Testing with udplbd[{udplbd_branch}], e2sar[{e2sar_branch}] on {distro_name}'\n",
@@ -117,19 +120,19 @@
"node_config = {\n",
" 'sender': {\n",
" 'ip':'192.168.0.1', \n",
- " 'image': images[distro_name][0],\n",
+ " 'image': distro_image,\n",
" 'cores': 8,\n",
" 'ram': 24,\n",
" 'disk': 100 },\n",
" 'recver': {\n",
" 'ip':'192.168.0.2', \n",
- " 'image':images[distro_name][0],\n",
+ " 'image':distro_image,\n",
" 'cores':8,\n",
" 'ram': 24,\n",
" 'disk': 100 },\n",
" 'cpnode': {\n",
" 'ip':'192.168.0.3', \n",
- " 'image':images[distro_name][1],\n",
+ " 'image':distro_image,\n",
" 'cores':8,\n",
" 'ram': 8,\n",
" 'disk': 100 },\n",
@@ -414,8 +417,8 @@
" f\"chmod go-rwx {vm_key_location}\",\n",
" # Meson won't detect boost by merely setting cmake_prefix_path, instead set BOOST_ROOT env variable \n",
" # for gRPC it is enough to set -Dpkg_config_path option to meson\n",
- " f\"echo 'export BOOST_ROOT=$HOME/boost-install PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib' >> ~/.profile\",\n",
- " f\"echo 'export BOOST_ROOT=$HOME/boost-install PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib' >> ~/.bashrc\",\n",
+ " f\"echo 'export BOOST_ROOT=/usr/local/ LD_LIBRARY_PATH=/usr/local/lib' >> ~/.profile\",\n",
+ " f\"echo 'export BOOST_ROOT=/usr/local/ LD_LIBRARY_PATH=/usr/local/lib' >> ~/.bashrc\",\n",
"]\n",
"\n",
"for node in [sender, recver]: \n",
@@ -427,15 +430,29 @@
{
"cell_type": "code",
"execution_count": null,
- "id": "813f3afc-f8cf-40f9-81c5-8eceffb1df74",
+ "id": "24e529e2-ec01-451f-91a3-52b1462a496d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#download boost and grpc dependencies from releases\n",
+ "commands = [\n",
+ " f\"wget -q -O boost_grpc.deb {e2sar_dep_url}\",\n",
+ " f\"sudo apt -yq install ./boost_grpc.deb\",\n",
+ "]\n",
+ " \n",
+ "execute_commands([sender, recver], commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "e8cf2d4e-7602-4cdd-ba55-b7abfe0b5538",
"metadata": {},
"outputs": [],
"source": [
"# checkout E2SAR (including the right branch) using that key, install grpc and boost binary that is stored in the repo\n",
"commands = [\n",
" f\"GIT_SSH_COMMAND='ssh -i {vm_key_location} -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' git clone --recurse-submodules --depth 1 -b {e2sar_branch} git@github.com:JeffersonLab/E2SAR.git\",\n",
- " f\"tar -zxf E2SAR/binary_artifacts/{grpc_tar}\",\n",
- " f\"tar -zxf E2SAR/binary_artifacts/{boost_tar}\",\n",
" #f\"cd E2SAR; GIT_SSH_COMMAND='ssh -i {vm_key_location} -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' git submodule init\",\n",
"]\n",
" \n",
@@ -476,13 +493,13 @@
"# (other tests simply ignore the parts of the URI they don't need.)\n",
"\n",
"commands = [\n",
- " f\"cd E2SAR; BOOST_ROOT=$HOME/boost-install PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson setup -Dpkg_config_path=$HOME/grpc-install/lib/pkgconfig/:$HOME/grpc-install/lib64/pkgconfig/ --prefix {home_location}/e2sar-install build && sed -i 's/-std=c++11//g' build/build.ninja\",\n",
- " f\"cd E2SAR/build; PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson compile -j 8\",\n",
- " f\"cd E2SAR/build; PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson test {e2sar_test_suite} --suite unit --timeout 0\",\n",
- "# f\"cd E2SAR/build; EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347/lb/1?data=127.0.0.1&sync=192.168.88.199:1234' PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson test {e2sar_test_suite} --suite live --timeout 0\"\n",
+ " f\"cd E2SAR; PATH=$HOME/.local/bin:/usr/local/bin:$PATH BOOST_ROOT=/usr/local/ LD_LIBRARY_PATH=/usr/local/lib/ meson setup -Dpkg_config_path=/usr/local/lib/pkgconfig/:/usr/lib/lib64/pkgconfig/ --prefix {home_location}/e2sar-install build && sed -i 's/-std=c++11//g' build/build.ninja\",\n",
+ " f\"cd E2SAR/build; PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson compile -j 8\",\n",
+ " f\"cd E2SAR/build; PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson test {e2sar_test_suite} --suite unit --timeout 0\",\n",
+ "# f\"cd E2SAR/build; EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347/lb/1?data=127.0.0.1&sync=192.168.88.199:1234' PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson test {e2sar_test_suite} --suite live --timeout 0\"\n",
"]\n",
" \n",
- "execute_commands([sender, recver], commands)"
+ "execute_commands([sender], commands)"
]
},
{
@@ -508,10 +525,10 @@
"\n",
"commands = [\n",
" f\"cd E2SAR; GIT_SSH_COMMAND='ssh -i {vm_key_location} -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' git pull origin {e2sar_branch}\",\n",
- " f\"cd E2SAR; BOOST_ROOT=$HOME/boost-install PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson setup -Dpkg_config_path=$HOME/grpc-install/lib/pkgconfig/:$HOME/grpc-install/lib64/pkgconfig/ --prefix {home_location}/e2sar-install build --wipe && sed -i 's/-std=c++11//g' build/build.ninja\",\n",
- " f\"cd E2SAR/build; PATH=$HOME/.local/bin:/home/ubuntu/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson compile -j 8\",\n",
- " f\"cd E2SAR/build; PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson test {e2sar_test_suite} --suite unit --timeout 0\",\n",
- "# f\"cd E2SAR/build; EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347/lb/1?data=127.0.0.1&sync=192.168.88.199:1234' PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson test {e2sar_test_suite} --suite live --timeout 0\"\n",
+ " f\"cd E2SAR; BOOST_ROOT=/usr/local/ PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson setup -Dpkg_config_path=/usr/local/lib/pkgconfig/:/usr/lib/lib64/pkgconfig/ --prefix {home_location}/e2sar-install build --wipe && sed -i 's/-std=c++11//g' build/build.ninja\",\n",
+ " f\"cd E2SAR/build; PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson compile -j 8\",\n",
+ " f\"cd E2SAR/build; PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson test {e2sar_test_suite} --suite unit --timeout 0\",\n",
+ "# f\"cd E2SAR/build; EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347/lb/1?data=127.0.0.1&sync=192.168.88.199:1234' PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson test {e2sar_test_suite} --suite live --timeout 0\"\n",
"]\n",
" \n",
"execute_commands([sender, recver], commands)"
@@ -536,9 +553,9 @@
"outputs": [],
"source": [
"commands = [\n",
- " f\"cd E2SAR; BOOST_ROOT=$HOME/boost-install PATH=$HOME/.local/bin:$HOME/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson setup -Dpkg_config_path=$HOME/grpc-install/lib/pkgconfig/:$HOME/grpc-install/lib64/pkgconfig/ --prefix {home_location}/e2sar-install build && sed -i 's/-std=c++11//g' build/build.ninja\",\n",
- " f\"cd E2SAR/build; PATH=$HOME/.local/bin:/home/ubuntu/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib meson install\",\n",
- " f\"EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347?sync=192.168.100.10:19020&data=192.168.101.10:18020' PATH=$HOME/.local/bin:/home/ubuntu/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib ~/e2sar-install/bin/lbadm --reserve -e -v -6 -l myLib -d 01 -a 192.168.0.3\",\n",
+ " f\"cd E2SAR; BOOST_ROOT=/usr/local/ PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson setup -Dpkg_config_path=$HOME/grpc-install/lib/pkgconfig/:$HOME/grpc-install/lib64/pkgconfig/ --prefix {home_location}/e2sar-install build && sed -i 's/-std=c++11//g' build/build.ninja\",\n",
+ " f\"cd E2SAR/build; PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ meson install\",\n",
+ " f\"EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347?sync=192.168.100.10:19020&data=192.168.101.10:18020' PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ ~/e2sar-install/bin/lbadm --reserve -e -v -6 -l myLib -d 01 -a 192.168.0.3\",\n",
"]\n",
"execute_commands(sender,commands)"
]
@@ -552,7 +569,7 @@
"source": [
"#using option e to suppress messages\n",
"commands = [\n",
- " f\"EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347?sync=192.168.100.10:19020&data=192.168.101.10:18020' PATH=$HOME/.local/bin:/home/ubuntu/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib ~/e2sar-install/bin/lbadm --reserve -e -v -6 -l myLib -d 01 -a 192.168.0.3\",\n",
+ " f\"EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347?sync=192.168.100.10:19020&data=192.168.101.10:18020' PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ ~/e2sar-install/bin/lbadm --reserve -e -v -6 -l myLib -d 01 -a 192.168.0.3\",\n",
"]\n",
"execute_commands(sender,commands)"
]
@@ -567,7 +584,7 @@
"#need to replace the EJFAT_URI created in the last step\n",
"ejfat_uri = \"ejfats://999e421bf36382c8cb07f1ac3a355afccf9ed0e9e0e0d1e91947bc21d57170e6@192.168.0.3:18347/lb/1?sync=192.168.0.3:19531&data=192.0.2.1&data=[2001:db8::1]\"\n",
"commands = [\n",
- " f\"EJFAT_URI='{ejfat_uri}' PATH=$HOME/.local/bin:/home/ubuntu/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib ~/e2sar-install/bin/lbmon -v -6\"\n",
+ " f\"EJFAT_URI='{ejfat_uri}' PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ ~/e2sar-install/bin/lbmon -v -6\"\n",
"]\n",
"execute_commands(sender,commands)"
]
@@ -581,7 +598,7 @@
"source": [
"#No need to replace uri, because we are using admin token and getting the overview of the LB\n",
"commands = [\n",
- " f\"EJFAT_URI='ejfats://udplbd@192.168.0.3:18347' PATH=$HOME/.local/bin:/home/ubuntu/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib ~/e2sar-install/bin/lbmon -v -6\"\n",
+ " f\"EJFAT_URI='ejfats://udplbd@192.168.0.3:18347' PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ ~/e2sar-install/bin/lbmon -v -6\"\n",
"]\n",
"execute_commands(sender,commands)"
]
@@ -661,8 +678,8 @@
"numEvents = 10000 # number of events to send\n",
"bufSize = 300 * 1024 * 1024 # 100MB send and receive buffers\n",
"\n",
- "recv_command = f\"cd E2SAR; PATH=$HOME/.local/bin:/home/ubuntu/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib ./build/bin/e2sar_perf -r -u '{e2sarPerfURI}' -d {recverDuration} -b {bufSize}\"\n",
- "send_command = f\"cd E2SAR; PATH=$HOME/.local/bin:/home/ubuntu/grpc-install/bin:$PATH LD_LIBRARY_PATH=$HOME/grpc-install/lib/:$HOME/boost-install/lib ./build/bin/e2sar_perf -s -u '{e2sarPerfURI}' --mtu {mtu} --rate {rate} --length {length} -n {numEvents} -b {bufSize}\"\n",
+ "recv_command = f\"cd E2SAR; PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ ./build/bin/e2sar_perf -r -u '{e2sarPerfURI}' -d {recverDuration} -b {bufSize}\"\n",
+ "send_command = f\"cd E2SAR; PATH=$HOME/.local/bin:/usr/local/bin:$PATH LD_LIBRARY_PATH=/usr/local/lib/ ./build/bin/e2sar_perf -s -u '{e2sarPerfURI}' --mtu {mtu} --rate {rate} --length {length} -n {numEvents} -b {bufSize}\"\n",
"\n",
"# start the receiver for 10 seconds and log its output\n",
"print(f'Executing command {recv_command} on receiver')\n",
diff --git a/scripts/notebooks/EJFAT/E2SAR-release-tester.ipynb b/scripts/notebooks/EJFAT/E2SAR-release-tester.ipynb
new file mode 100644
index 0000000..130cc07
--- /dev/null
+++ b/scripts/notebooks/EJFAT/E2SAR-release-tester.ipynb
@@ -0,0 +1,691 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "id": "56b8f888-5788-4dfe-ad0a-cb5b0e567188",
+ "metadata": {},
+ "source": [
+ "# EJFAT LB Control Plane Tester\n",
+ "\n",
+ "This notebook stands up a slice of 3 nodes - sender, receiver and cpnode. The control plane daemon is deployed on the `cpnode` node in a 'mock' configuration (with no FPGA). DAQ and worker node code can be deployed on `sender` and `receiver` nodes for testing. The slice uses 'shared' and is created within a single FABRIC site for simplicity. It uses a single L2 bridge connection with RFC1918 IPv4 addressing, allowing all nodes to talk to each other. It is possible to run the dataplane assuming a single worker can keep up with a single sender since no actual load balancer is present in this configuration.\n",
+ "\n",
+ "This notebook uses the E2SAR release debian pacakges found at [Github releases](https://github.com/JeffersonLab/E2SAR/releases). If you want to build E2SAR from scratch, you can use the [E2SAR Dev tester](E2SAR-development-tester.ipynb) notebook.\n",
+ "\n",
+ "Slice example:\n",
+ "\n",
+ "
\n",
+ "
\n",
+ "
\n",
+ "\n",
+ "## Preparation and overview\n",
+ "\n",
+ "- Be sure to [generate a keypair for Jupyter Hub](GitHubSSH.ipynb) and register it with GitHub - the keys will be used to check out the code from private repositories, like [UDPLBd](https://github.com/esnet/udplbd) and [E2SAR](https://github.com/JeffersonLab/E2SAR).\n",
+ "- Note that for E2SAR development and testing sender and receiver node compile/build environments will be setup via post-boot scripts ([sender](post-boot/sender.sh) and [receiver](post-boot/recver.sh))\n",
+ "- The E2SAR [debian package]((https://github.com/JeffersonLab/E2SAR/releases)) comes with all necessary dependencies.\n",
+ "- This does not setup the control plane node for anything, but testing a specific version - you can set which branch of UDPLBd to check out and a containerized version is built and stood up."
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "58b0bb3e-52ed-48c7-98ae-b51e62af39e4",
+ "metadata": {},
+ "source": [
+ "## Preamble\n",
+ "\n",
+ "This cell must be executed whether you are creating a new slice or continuing work on the old one. If you are continuing work, you then skip the slice create section and proceed to wherever you left off."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "2d4a3378-dd68-4685-b0d2-4cae8ad462b8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#\n",
+ "# EDIT THIS\n",
+ "#\n",
+ "# if you want to force a site instead of using random\n",
+ "# Pick 'UCSD', 'SRI', 'FIU' or 'TOKY' - these sites have\n",
+ "# IPv4. Other sites use IPv6 management and have trouble\n",
+ "# retrieving git-lfs artifacts.\n",
+ "site_override = 'SRI'\n",
+ "#site_override = None\n",
+ "\n",
+ "# GitHub SSH key file (private) registered using the GitHubSSH.ipynb notebook referenced above\n",
+ "github_key = '/home/fabric/work/fabric_config/github_ecdsa'\n",
+ "\n",
+ "# branches for UDPLBd and E2SAR that we want checked out on the VMs\n",
+ "udplbd_branch = 'develop'\n",
+ "e2sar_branch = 'release' #there is no branch called release, this just keeps this notebooks slice separate.\n",
+ "\n",
+ "# which of the available config files to use with UDPLBd\n",
+ "udplbd_config = 'lb_mock-tls.yml'\n",
+ "\n",
+ "#base distro type - either default or docker\n",
+ "distro_types = ['default','docker']\n",
+ "distro_type = distro_types[0]\n",
+ "\n",
+ "# base distro 'ubuntu' or 'rocky'\n",
+ "distro_name = 'ubuntu'\n",
+ "\n",
+ "#base distro version, currently only for ubuntu 20,22,24. E2SAR dependencies will be \n",
+ "#downloaded for the appropriate versions.\n",
+ "distro_version = '22'\n",
+ "\n",
+ "# note that the below is distribution specific ('ubuntu' for ubuntu and so on)\n",
+ "home_location = {\n",
+ " 'ubuntu': '/home/ubuntu',\n",
+ " 'rocky' : '/home/rocky'\n",
+ "}[distro_name]\n",
+ "\n",
+ "vm_key_location = f'{home_location}/.ssh/github_ecdsa'\n",
+ "\n",
+ "# which test suites in E2SAR to run (leave empty to run all)\n",
+ "# you can set 'unit' or 'live' to run unit or live tests only\n",
+ "e2sar_test_suite = ''\n",
+ "\n",
+ "# name of the network connecting the nodes\n",
+ "net_name = 'site_bridge_net'\n",
+ "\n",
+ "# url of e2sar deb. Find the appropriate version for the OS at https://github.com/JeffersonLab/E2SAR/releases\n",
+ "static_release_url = 'https://github.com/JeffersonLab/E2SAR/releases/download/' # don't need to change this\n",
+ "e2sar_release_ver = 'E2SAR-0.1.0'\n",
+ "e2sar_release_artifact = \"e2sar_0.1.0_amd64.deb\"\n",
+ "e2sar_release_url = static_release_url + e2sar_release_ver + \"-\" + distro_name + \"-\" + distro_version + \".04/\" + e2sar_release_artifact\n",
+ "\n",
+ "#\n",
+ "# SHOULDN'T NEED TO EDIT BELOW\n",
+ "#\n",
+ "# Preamble\n",
+ "from datetime import datetime\n",
+ "from datetime import timezone\n",
+ "from datetime import timedelta\n",
+ "\n",
+ "from fabrictestbed_extensions.fablib.fablib import FablibManager as fablib_manager\n",
+ "\n",
+ "from ipaddress import ip_address, IPv4Address, IPv6Address, IPv4Network, IPv6Network\n",
+ "import ipaddress\n",
+ "\n",
+ "fablib = fablib_manager() \n",
+ "fablib.show_config();\n",
+ "\n",
+ "# Using docker image for cpnode by default\n",
+ "distro_image = distro_type + '_' + distro_name + '_' + distro_version\n",
+ "cp_distro_image = distro_types[1] + '_' + distro_name + '_' + distro_version\n",
+ "\n",
+ "# variable settings\n",
+ "slice_name = f'UDP LB Control Plane Testing with udplbd[{udplbd_branch}], e2sar[{e2sar_branch}] on {distro_name}'\n",
+ "\n",
+ "# for each node specify IP address (assuming /24), OS image\n",
+ "# note that most of the keys in these dictionaries map directly\n",
+ "# onto parameters to add_node()\n",
+ "node_config = {\n",
+ " 'sender': {\n",
+ " 'ip':'192.168.0.1', \n",
+ " 'image': distro_image,\n",
+ " 'cores': 8,\n",
+ " 'ram': 24,\n",
+ " 'disk': 100 },\n",
+ " 'recver': {\n",
+ " 'ip':'192.168.0.2', \n",
+ " 'image':distro_image,\n",
+ " 'cores':8,\n",
+ " 'ram': 24,\n",
+ " 'disk': 100 },\n",
+ " 'cpnode': {\n",
+ " 'ip':'192.168.0.3', \n",
+ " 'image':distro_image,\n",
+ " 'cores':8,\n",
+ " 'ram': 8,\n",
+ " 'disk': 100 },\n",
+ "}\n",
+ "# skip these keys as they are not part of add_node params\n",
+ "skip_keys = ['ip']\n",
+ "# this is the NIC to use\n",
+ "nic_model = 'NIC_Basic'\n",
+ "# the subnet should match IPs\n",
+ "subnet = IPv4Network(\"192.168.1.0/24\")\n",
+ "\n",
+ "def execute_single_node(node, commands):\n",
+ " for command in commands:\n",
+ " print(f'\\tExecuting \"{command}\" on node {node.get_name()}')\n",
+ " #stdout, stderr = node.execute(command, quiet=True, output_file=node.get_name() + '_install.log')\n",
+ " stdout, stderr = node.execute(command)\n",
+ " if not stderr and len(stderr) > 0:\n",
+ " print(f'Error encountered with \"{command}\": {stderr}')\n",
+ " \n",
+ "def execute_commands(node, commands):\n",
+ " if isinstance(node, list):\n",
+ " for n in node:\n",
+ " execute_single_node(n, commands)\n",
+ " else:\n",
+ " execute_single_node(node, commands)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "d41b813d-21d5-4b18-a3f7-fd0f9f4d42c8",
+ "metadata": {},
+ "source": [
+ "## Create the slice"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "43dbf90d-5990-4453-9ae9-3def21b5910d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# list all slices I have running\n",
+ "output_dataframe = fablib.list_slices(output='pandas')\n",
+ "if output_dataframe:\n",
+ " print(output_dataframe)\n",
+ "else:\n",
+ " print('No active slices under this project')"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "bd4a6afb-78de-4251-81ba-d65cac091028",
+ "metadata": {},
+ "source": [
+ "If your slice is already active you can skip to the 'Get Slice Details' section."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "f956da26-1ebe-403b-a0aa-eedca260807f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# List available images (this step is optional)\n",
+ "available_images = fablib.get_image_names()\n",
+ "\n",
+ "print(f'Available images are: {available_images}')"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "415878c8-815b-4b3d-a1df-65ec5bc5f637",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# find an available site in continental US\n",
+ "lon_west=-124.3993243\n",
+ "lon_east=-69.9721573\n",
+ "\n",
+ "# getting a random site make take a bit of time\n",
+ "if not site_override:\n",
+ " selected_site = fablib.get_random_site(filter_function=lambda x: x['location'][1] < lon_east\n",
+ " and x['location'][1] > lon_west) \n",
+ "else:\n",
+ " selected_site = site_override\n",
+ "\n",
+ "if selected_site:\n",
+ " print(f'Selected site is {selected_site}')\n",
+ "else:\n",
+ " print('Unable to find a site matching the requirements')\n",
+ "\n",
+ "# write selected site into node attributes\n",
+ "for n in node_config:\n",
+ " node_config[n]['site'] = selected_site\n",
+ " "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "bd1b7969-df0d-4c8e-b909-acc30dc8c16e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# build a slice\n",
+ "slice = fablib.new_slice(name=slice_name)\n",
+ "\n",
+ "# create a network\n",
+ "net1 = slice.add_l2network(name=net_name, subnet=subnet)\n",
+ "\n",
+ "nodes = dict()\n",
+ "# create nodes for sending and receiving with a selected network card\n",
+ "# use subnet address assignment\n",
+ "for node_name, node_attribs in node_config.items():\n",
+ " print(f\"{node_name=} {node_attribs['ip']}\")\n",
+ " nodes[node_name] = slice.add_node(name=node_name, **{x: node_attribs[x] for x in node_attribs if x not in skip_keys})\n",
+ " nic_interface = nodes[node_name].add_component(model=nic_model, name='_'.join([node_name, nic_model, 'nic'])).get_interfaces()[0]\n",
+ " net1.add_interface(nic_interface)\n",
+ " nic_interface.set_mode('config')\n",
+ " nic_interface.set_ip_addr(node_attribs['ip'])\n",
+ " # postboot configuration is under 'post-boot' directory\n",
+ " nodes[node_name].add_post_boot_upload_directory('post-boot','.')\n",
+ " nodes[node_name].add_post_boot_execute(f'chmod +x post-boot/{node_name}.sh && ./post-boot/{node_name}.sh')\n",
+ "\n",
+ "print(f'Creating a {distro_name} based slice named \"{slice_name}\" with nodes in {selected_site}')\n",
+ "\n",
+ "# Submit the slice\n",
+ "slice.submit();"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "4f0ef1ea-11b0-4eeb-9ca7-a4a6fba7fb4c",
+ "metadata": {},
+ "source": [
+ "## Get Slice Details\n",
+ "\n",
+ "If not creating a new slice, and just continuing work on an existing one, execute this cell (in addition to the preamble) and then any of the cells below will work."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5e6c4bf4-f5e6-42d8-9994-b513e9762d53",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# get slice details (if not creating new)\n",
+ "slice = fablib.get_slice(name=slice_name)\n",
+ "a = slice.show()\n",
+ "nets = slice.list_networks()\n",
+ "nodes = slice.list_nodes()\n",
+ "\n",
+ "cpnode = slice.get_node(name=\"cpnode\") \n",
+ "sender = slice.get_node(name=\"sender\")\n",
+ "recver = slice.get_node(name=\"recver\")\n",
+ "\n",
+ "\n",
+ "# get node dataplane addresses\n",
+ "cpnode_addr = cpnode.get_interface(network_name=net_name).get_ip_addr()\n",
+ "sender_addr = sender.get_interface(network_name=net_name).get_ip_addr()\n",
+ "recver_addr = recver.get_interface(network_name=net_name).get_ip_addr()\n",
+ "\n",
+ "sender_iface = sender.get_interface(network_name=net_name)\n",
+ "recver_iface = recver.get_interface(network_name=net_name)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "9fa3cdec-49f9-4c3c-89f8-b441e37d6302",
+ "metadata": {},
+ "source": [
+ "## Start the UDPLBd container"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "41a24024-c688-4100-b453-e34d0fe6ed4f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# check if any dockers are running already and that we have compose and buildx installed by post-boot script\n",
+ "commands = [\n",
+ " 'docker container ls',\n",
+ " 'docker compose version',\n",
+ " 'docker buildx version'\n",
+ "]\n",
+ "execute_commands(cpnode, commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "6f156f84-6f00-4127-b563-58094206f3c0",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# upload the mock config file for UDPLBd \n",
+ "result = cpnode.upload_file(f'config/{udplbd_config}','lb_mock.yml')\n",
+ "\n",
+ "# upload the GitHub SSH key onto the VM\n",
+ "result = cpnode.upload_file(github_key, vm_key_location)\n",
+ "\n",
+ "# checkout UDPLBd (including the right branch) using that key\n",
+ "commands = [\n",
+ " f\"chmod go-rwx {vm_key_location}\",\n",
+ " f\"GIT_SSH_COMMAND='ssh -i {vm_key_location} -o IdentitiesOnly=yes -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' git clone -b {udplbd_branch} git@github.com:esnet/udplbd.git\",\n",
+ "]\n",
+ "\n",
+ "execute_commands(cpnode, commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c4cd079c-97cd-41cb-8358-509a81c6992b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# copy configuration file into place, generate self-signed cert and start the UDPLBd container\n",
+ "commands = [\n",
+ " f'cp lb_mock.yml ./udplbd/etc/config.yml',\n",
+ " f'openssl req -x509 -newkey rsa:4096 -keyout udplbd/etc/server_key.pem -out udplbd/etc/server_cert.pem -sha256 -days 365 -nodes -subj \"/CN=cpnode/subjectAltName=IP:{cpnode_addr}\" -nodes',\n",
+ " f'cd udplbd; docker compose up -d'\n",
+ "]\n",
+ "\n",
+ "execute_commands(cpnode, commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5f8d2487-e135-440b-9c43-4e6a65b06c11",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# check the logs\n",
+ "commands = [\n",
+ " 'docker compose ls',\n",
+ " 'cd udplbd; docker compose logs'\n",
+ "]\n",
+ "\n",
+ "execute_commands(cpnode, commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "26a97579-1655-4970-b4bb-ea04a98aca5d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# if you need to restart it, this is the stop part\n",
+ "commands = [\n",
+ " 'cd udplbd; docker compose stop; docker compose rm -f; docker image rm udplbd'\n",
+ "]\n",
+ "\n",
+ "execute_commands(cpnode, commands)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "478c5e64-2a8c-443c-9f80-884b053ad6bf",
+ "metadata": {},
+ "source": [
+ "## Download and install E2SAR deb"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "15dd9b8f-87b6-4150-b03a-81902b8fd9ae",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# install github ssh key and set up build environment variables for interactive logins\n",
+ "commands = [\n",
+ " f\"chmod go-rwx {vm_key_location}\",\n",
+ " f\"echo 'export LD_LIBRARY_PATH=/usr/local/lib' >> ~/.profile\",\n",
+ " f\"echo 'export LD_LIBRARY_PATH=/usr/local/lib' >> ~/.bashrc\",\n",
+ "]\n",
+ "\n",
+ "for node in [sender, recver]: \n",
+ " # upload the GitHub SSH key onto the VM\n",
+ " result = node.upload_file(github_key, vm_key_location)\n",
+ " execute_commands(node, commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "24e529e2-ec01-451f-91a3-52b1462a496d",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#download boost and grpc dependencies from releases\n",
+ "commands = [\n",
+ " f\"wget -q -O e2sar-release.deb {e2sar_release_url}\",\n",
+ " f\"sudo apt -yq install ./e2sar-release.deb\",\n",
+ "]\n",
+ " \n",
+ "execute_commands([sender, recver], commands)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "930d979b-6adb-4157-b0bb-ca1e5f298da1",
+ "metadata": {},
+ "source": [
+ "## Testing LBmon\n",
+ "The following blocks are to install lbadm and lbmon in the default location ($HOME/e2sar-install/) and a simple test to reserve a LB from the sender and verify that the Lbmon works. (If this block is run later the LBid might be different, you would have to change the parameter in LBmon)\n",
+ "\n",
+ "If the Lbid is specified, the status of that reserved LB will be obtained, otherwise the Lb overview will be obtained. The appropriate instance/admin token must be given. For the status you need to copy the EJFAT_URI given by the Lbadm reserve operation to Lbmon. "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "decfe1cb-6041-4c32-8f73-7fff31729482",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "commands = [\n",
+ " f\"EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347?sync=192.168.100.10:19020&data=192.168.101.10:18020' LD_LIBRARY_PATH=/usr/local/lib lbadm --reserve -e -v -6 -l myLib -d 01 -a 192.168.0.3\",\n",
+ "]\n",
+ "execute_commands(sender,commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dc20fd53-ecbf-49e8-9dd5-dd9183601b91",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#using option e to suppress messages\n",
+ "commands = [\n",
+ " f\"EJFAT_URI='ejfats://udplbd@{cpnode_addr}:18347?sync=192.168.100.10:19020&data=192.168.101.10:18020' BOOST_ROOT=/usr/local/ LD_LIBRARY_PATH=/usr/local/lib lbadm --reserve -e -v -6 -l myLib -d 01 -a 192.168.0.3\",\n",
+ "]\n",
+ "execute_commands(sender,commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "3cc3727f-b1f9-478c-99f4-c7537e936bbb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#need to replace the EJFAT_URI created in the last step\n",
+ "ejfat_uri = \"ejfats://999e421bf36382c8cb07f1ac3a355afccf9ed0e9e0e0d1e91947bc21d57170e6@192.168.0.3:18347/lb/1?sync=192.168.0.3:19531&data=192.0.2.1&data=[2001:db8::1]\"\n",
+ "commands = [\n",
+ " f\"EJFAT_URI='{ejfat_uri}' BOOST_ROOT=/usr/local/ LD_LIBRARY_PATH=/usr/local/lib lbmon -v -6\"\n",
+ "]\n",
+ "execute_commands(sender,commands)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "17147f31-8977-471b-8837-a724d6d10008",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "#No need to replace uri, because we are using admin token and getting the overview of the LB\n",
+ "commands = [\n",
+ " f\"EJFAT_URI='ejfats://udplbd@192.168.0.3:18347' BOOST_ROOT=/usr/local/ LD_LIBRARY_PATH=/usr/local/lib lbmon -v -6\"\n",
+ "]\n",
+ "execute_commands(sender,commands)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "933b9f66-61de-40dd-a14e-efd726562811",
+ "metadata": {},
+ "source": [
+ "# Performance Testing"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "859bd430-87d7-48f9-871f-4c92dffd2dbc",
+ "metadata": {},
+ "source": [
+ "We use `e2sar_perf` program located under bin/ to test performance of segmenting and reassembly code. To reach higher rates we must update some system parameters first.\n",
+ "\n",
+ "Set up large socket buffers for receive and send (512M), 9k MTU on both sender and receiver and test that it worked. For rates over 1Gbps this is a must."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "4d549025-93b5-4148-8da9-5251d7566945",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# set system-wide send and receive socket buffer limits to 512MB. e2sar_perf then will set SO_RCVBUF and SO_SNDBUF options on sending and receiving sockets\n",
+ "# this is system specific, so we don't do it through a file, but on command line. Normally this goes into /etc/sysctl.conf or /etc/sysctl.d/90-local.conf \n",
+ "# or similar\n",
+ "commands = [\n",
+ " f\"sudo sysctl net.core.rmem_max=536870912\",\n",
+ " f\"sudo sysctl net.core.wmem_max=536870912\",\n",
+ " f\"sysctl net.core.wmem_max net.core.rmem_max\"\n",
+ "]\n",
+ "execute_commands([sender, recver], commands);"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "a61de736-44c5-4adf-9c24-c6875ed7a4e9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# note that in this slice we are guaranteed to have path MTU to be at least 9k, because FABRIC\n",
+ "# switches are configured for jumbo frames. In real life you need to consult your network administrator\n",
+ "# as simply setting MTU on sender and receiver may be insufficient.\n",
+ "mtu = '9000'\n",
+ "sender.execute(f\"sudo ip link set dev {sender_iface.get_os_interface()} mtu {mtu}\")\n",
+ "recver.execute(f\"sudo ip link set dev {recver_iface.get_os_interface()} mtu {mtu}\")\n",
+ "\n",
+ "# test with no-defragment (DF=1) ping packets that path indeed supports MTU of 9000 \n",
+ "# (ping packet of 8972 payload length)\n",
+ "# send 10 packets and expect all of them to make it\n",
+ "stdout, stderr = sender.execute(f\"sudo ping -f -s 8972 -c 10 -M do {recver_addr}\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "29c29be0-24b5-4250-8826-ed681f99081a",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import time\n",
+ "\n",
+ "# for e2sar_perf only the data= part of the query is meaningful. sync= must be present but is ignored\n",
+ "# same for gRPC token, address and port (and lb id)\n",
+ "e2sarPerfURI = f\"ejfat://useless@10.10.10.10:1234/lb/1?data={recver_addr}&sync=192.168.77.7:1234\"\n",
+ "recverDuration = 20\n",
+ "mtu = 9000\n",
+ "rate = 15 # Gbps\n",
+ "length = 1000000 # event length in bytes\n",
+ "numEvents = 10000 # number of events to send\n",
+ "bufSize = 300 * 1024 * 1024 # 100MB send and receive buffers\n",
+ "\n",
+ "recv_command = f\"LD_LIBRARY_PATH=/usr/local/lib e2sar_perf -r -u '{e2sarPerfURI}' -d {recverDuration} -b {bufSize}\"\n",
+ "send_command = f\"LD_LIBRARY_PATH=/usr/local/lib e2sar_perf -s -u '{e2sarPerfURI}' --mtu {mtu} --rate {rate} --length {length} -n {numEvents} -b {bufSize}\"\n",
+ "\n",
+ "# start the receiver for 10 seconds and log its output\n",
+ "print(f'Executing command {recv_command} on receiver')\n",
+ "recver.execute_thread(recv_command, output_file=f\"{recver.get_name()}.perf.log\")\n",
+ "\n",
+ "# sleep 2 seconds to let receiver get going\n",
+ "time.sleep(2)\n",
+ "\n",
+ "# start the sender in the foreground\n",
+ "print(f'Executing command {send_command} on sender')\n",
+ "stdout_send, stderr_send = sender.execute(send_command, output_file=f\"{sender.get_name()}.perf.log\")\n",
+ "\n",
+ "print(f\"Inspect {recver.get_name()}.perf.log file in your Jupyter container to see the results\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "904ea7bf-744d-42fa-92c5-2af95b905ce3",
+ "metadata": {},
+ "source": [
+ "## You can SSH into the sender/receiver nodes and run lbadm, lbmon, or compile, link and test your application against E2SAR"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "c2a91bdf-ad59-45ab-b29c-1e1d3d4fa005",
+ "metadata": {},
+ "source": [
+ "## Manage the slice\n",
+ "\n",
+ "### Extend"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d4c55dd8-6b83-4711-80e1-4d73d3de04d1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Set end host to now plus 14 days\n",
+ "end_date = (datetime.now(timezone.utc) + timedelta(days=14)).strftime(\"%Y-%m-%d %H:%M:%S %z\")\n",
+ "\n",
+ "try:\n",
+ " slice = fablib.get_slice(name=slice_name)\n",
+ "\n",
+ " slice.renew(end_date)\n",
+ "except Exception as e:\n",
+ " print(f\"Exception: {e}\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "3edd1dbd-d502-461b-ad40-c589d7e32826",
+ "metadata": {},
+ "source": [
+ "### Delete"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "9f88bb58-a197-4853-9294-c90e2b250cc8",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "slice = fablib.get_slice(slice_name)\n",
+ "slice.delete()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "cc7b909f-107f-4ff6-9fe1-baa14377fffe",
+ "metadata": {},
+ "outputs": [],
+ "source": []
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.8"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/scripts/notebooks/EJFAT/post-boot/cpnode.sh b/scripts/notebooks/EJFAT/post-boot/cpnode.sh
index c4155b3..9bd77d4 100644
--- a/scripts/notebooks/EJFAT/post-boot/cpnode.sh
+++ b/scripts/notebooks/EJFAT/post-boot/cpnode.sh
@@ -6,7 +6,7 @@ distro=`awk 'BEGIN { FS = "=" } ; /^ID=/ {gsub(/"/, ""); print $2}' /etc/*-relea
if [[ ${distro} == 'ubuntu' ]]; then
# install missing software
sudo apt-get -yq update
- sudo apt-get -yq install unzip
+ sudo apt-get -yq install unzip docker.io
# make sure we are in the 'docker' group
sudo usermod -a -G docker ubuntu
diff --git a/scripts/notebooks/EJFAT/post-boot/recver.sh b/scripts/notebooks/EJFAT/post-boot/recver.sh
index 8179416..0eff9e9 100644
--- a/scripts/notebooks/EJFAT/post-boot/recver.sh
+++ b/scripts/notebooks/EJFAT/post-boot/recver.sh
@@ -5,6 +5,21 @@ distro=`awk 'BEGIN { FS = "=" } ; /^ID=/ {gsub(/"/, ""); print $2}' /etc/*-relea
if [[ ${distro} == 'ubuntu' ]]; then
echo Installing for Ubuntu
+
+ # need to install python3.10 on ubuntu20.04
+ version=`awk 'BEGIN { FS = "=" } ; /^DISTRIB_RELEASE=/ {gsub(/"/, ""); print $2}' /etc/*-release`
+
+ if [[ ${version} == '20.04' ]]; then
+ echo Installing python3.10 for Ubuntu 20.04
+ sudo apt-get -yq update
+ sudo apt-get install -yq software-properties-common
+ sudo add-apt-repository -y ppa:deadsnakes/ppa
+ sudo apt-get -yq update
+ sudo apt-get -yq install python3.10
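+        # register both interpreters with update-alternatives; priority 2 makes python3.10 the default, and 'sudo update-alternatives --config python3' can switch back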
+ sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1
+ sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.10 2
+ fi
+
# install missing software
sudo apt-get -yq update
sudo apt-get -yq install python3-pip build-essential autoconf cmake libtool pkg-config libglib2.0-dev ninja-build openssl libssl-dev libsystemd-dev protobuf-compiler libre2-dev gdb docker.io
@@ -23,6 +38,7 @@ if [[ ${distro} == 'ubuntu' ]]; then
# enable apport so core dumps can be caught under /var/lib/apport/coredump/
sudo systemctl enable apport.service
+
fi
if [[ ${distro} == 'rocky' ]]; then
diff --git a/scripts/notebooks/EJFAT/post-boot/sender.sh b/scripts/notebooks/EJFAT/post-boot/sender.sh
index 163e19a..e60b8a8 100644
--- a/scripts/notebooks/EJFAT/post-boot/sender.sh
+++ b/scripts/notebooks/EJFAT/post-boot/sender.sh
@@ -5,6 +5,21 @@ distro=`awk 'BEGIN { FS = "=" } ; /^ID=/ {gsub(/"/, ""); print $2}' /etc/*-relea
if [[ ${distro} == 'ubuntu' ]]; then
echo Installing for Ubuntu
+
+ # need to install python3.10 on ubuntu20.04
+ version=`awk 'BEGIN { FS = "=" } ; /^DISTRIB_RELEASE=/ {gsub(/"/, ""); print $2}' /etc/*-release`
+
+ if [[ ${version} == '20.04' ]]; then
+ echo Installing python3.10 for Ubuntu 20.04
+ sudo apt-get -yq update
+ sudo apt-get install -yq software-properties-common
+ sudo add-apt-repository -y ppa:deadsnakes/ppa
+ sudo apt-get -yq update
+ sudo apt-get -yq install python3.10
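+        # register both interpreters with update-alternatives; priority 2 makes python3.10 the default, and 'sudo update-alternatives --config python3' can switch back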
+ sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.8 1
+ sudo update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.10 2
+ fi
+
# install missing software
sudo apt-get -yq update
sudo apt-get -yq install python3-pip build-essential autoconf cmake libtool pkg-config libglib2.0-dev ninja-build openssl libssl-dev libsystemd-dev protobuf-compiler libre2-dev gdb docker.io
@@ -23,6 +38,7 @@ if [[ ${distro} == 'ubuntu' ]]; then
# enable apport so core dumps can be caught under /var/lib/apport/coredump/
sudo systemctl enable apport.service
+
fi
if [[ ${distro} == 'rocky' ]]; then
@@ -44,7 +60,7 @@ if [[ ${distro} == 'rocky' ]]; then
# install git-lfs
curl -s https://packagecloud.io/install/repositories/github/git-lfs/script.rpm.sh | sudo bash
sudo dnf -yq install git-lfs
-
+
fi
# install docker compose and buildx bits