From 448240caf21d87161dc38840a42e2dfc00e60cb2 Mon Sep 17 00:00:00 2001
From: Ynon Flum
Date: Tue, 19 Nov 2024 15:44:07 +0200
Subject: [PATCH] Add PyTorch MNIST Workflow tutorial

Signed-off-by: Ynon Flum
---
 ...Prox_PyTorch_MNIST_Workflow_Tutorial.ipynb | 625 ++++++++++++++++++
 ...> 402_FedProx_with_Synthetic_nonIID.ipynb} |   0
 ...regator_Validation_Ray_Watermarking.ipynb} |   0
 3 files changed, 625 insertions(+)
 create mode 100644 openfl-tutorials/experimental/401_Federated_FedProx_PyTorch_MNIST_Workflow_Tutorial.ipynb
 rename openfl-tutorials/experimental/{401_FedProx_with_Synthetic_nonIID.ipynb => 402_FedProx_with_Synthetic_nonIID.ipynb} (100%)
 rename openfl-tutorials/experimental/{401_MNIST_Aggregator_Validation_Ray_Watermarking.ipynb => 402_MNIST_Aggregator_Validation_Ray_Watermarking.ipynb} (100%)

diff --git a/openfl-tutorials/experimental/401_Federated_FedProx_PyTorch_MNIST_Workflow_Tutorial.ipynb b/openfl-tutorials/experimental/401_Federated_FedProx_PyTorch_MNIST_Workflow_Tutorial.ipynb
new file mode 100644
index 0000000000..6b6d8225a0
--- /dev/null
+++ b/openfl-tutorials/experimental/401_Federated_FedProx_PyTorch_MNIST_Workflow_Tutorial.ipynb
@@ -0,0 +1,625 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# FedProx PyTorch MNIST Tutorial using Workflow API\n",
+    "This notebook sets up a distributed training federation that runs the [FedProx](https://arxiv.org/abs/1812.06127) algorithm using OpenFL's [Workflow API](https://openfl.readthedocs.io/en/latest/about/features_index/workflowinterface.html). The flow runs locally on a [LocalRuntime](https://openfl.readthedocs.io/en/latest/about/features_index/workflowinterface.html#runtimes) and can later be scaled out to a distributed federated deployment.\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Import the relevant libraries:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 20,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import torch\n",
+    "import torch.nn as nn\n",
+    "import torch.nn.functional as F\n",
+    "\n",
+    "import torch.utils.data\n",
+    "import torchvision\n",
+    "import torchvision.transforms as transforms\n",
+    "\n",
+    "from openfl.utilities.optimizers.torch.fedprox import FedProxAdam\n",
+    "\n",
+    "from openfl.experimental.interface import FLSpec, Aggregator, Collaborator\n",
+    "from openfl.experimental.runtime import LocalRuntime\n",
+    "from openfl.experimental.placement import aggregator, collaborator"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Define the model:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 21,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "class Net(nn.Module):\n",
+    "    def __init__(self):\n",
+    "        super(Net, self).__init__()\n",
+    "        self.conv1 = nn.Conv2d(1, 16, 3)\n",
+    "        self.pool = nn.MaxPool2d(2, 2)\n",
+    "        self.conv2 = nn.Conv2d(16, 32, 3)\n",
+    "        self.fc1 = nn.Linear(32 * 5 * 5, 32)\n",
+    "        self.fc2 = nn.Linear(32, 84)\n",
+    "        self.fc3 = nn.Linear(84, 10)\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        x = self.pool(F.relu(self.conv1(x)))\n",
+    "        x = self.pool(F.relu(self.conv2(x)))\n",
+    "        x = x.view(x.size(0), -1)\n",
+    "        x = F.relu(self.fc1(x))\n",
+    "        x = F.relu(self.fc2(x))\n",
+    "        x = self.fc3(x)\n",
+    "        return F.log_softmax(x, dim=1)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Set up the dataset:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 22,
+   "metadata": {},
+   "outputs": [],
+   "source": [
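+    "# MNIST images are single-channel (1 x 28 x 28), so the normalization stats\n",
+    "# are one-element tuples. Note that the partitioning code below reads the raw\n",
+    "# data tensors directly, so this transform is not applied during training.\n",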
"transform = transforms.Compose(\n", + " [transforms.ToTensor(),\n", + " transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n", + "\n", + "mnist_train = torchvision.datasets.MNIST(\n", + " \"./files/\",\n", + " train=True,\n", + " download=True,\n", + " transform=transform,\n", + ")\n", + "\n", + "mnist_test = torchvision.datasets.MNIST(\n", + " \"./files/\",\n", + " train=False,\n", + " download=True,\n", + " transform=transform,\n", + ")\n", + "\n", + "class CustomDataset(torch.utils.data.Dataset):\n", + " \"\"\"Dataset enumeration as tensors\"\"\"\n", + " def __init__(self, images, labels):\n", + " self.images = images\n", + " self.labels = labels\n", + "\n", + " def __len__(self):\n", + " return len(self.images)\n", + "\n", + " def __getitem__(self, idx):\n", + " image = self.images[idx]\n", + " label = self.labels[idx]\n", + " return image, label" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The next step is setting up the participants, an `Aggregator` and a few `Collaborator`s which will train the model, partition the dataset between the collaborators, and pass them to the appropriate runtime environment (in our case, a `LocalRuntime`).\n" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "def one_hot(labels, classes):\n", + " return np.eye(classes)[labels]\n", + "\n", + "# Setup participants\n", + "aggregator_ = Aggregator()\n", + "aggregator_.private_attributes = {}\n", + "\n", + "# Setup collaborators with private attributes\n", + "collaborator_names = [f'collaborator{i}' for i in range(4)]\n", + "collaborators = [Collaborator(name=name) for name in collaborator_names]\n", + "batch_size_train = 1024\n", + "batch_size_test = 1024\n", + "log_interval = 10\n", + "\n", + "for idx, collaborator_ in enumerate(collaborators):\n", + " train_images, train_labels = mnist_train.train_data, np.array(mnist_train.train_labels)\n", + " train_images = torch.from_numpy(np.expand_dims(train_images, axis=1)).float()\n", + " train_labels = one_hot(train_labels, 10)\n", + "\n", + " valid_images, valid_labels = mnist_test.test_data, np.array(mnist_test.test_labels)\n", + " valid_images = torch.from_numpy(np.expand_dims(valid_images, axis=1)).float()\n", + "\n", + " collaborator_.private_attributes = {\n", + " 'train_loader': torch.utils.data.DataLoader(\n", + " CustomDataset(train_images[idx::len(collaborators)], \n", + " train_labels[idx::len(collaborators)]), \n", + " batch_size=batch_size_train, \n", + " shuffle=True),\n", + " 'test_loader': torch.utils.data.DataLoader(\n", + " CustomDataset(valid_images[idx::len(collaborators)], \n", + " valid_labels[idx::len(collaborators)]), \n", + " batch_size=batch_size_test, \n", + " shuffle=True)\n", + " }\n", + "\n", + "local_runtime = LocalRuntime(aggregator=aggregator_, collaborators=collaborators, backend='single_process')\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Define an aggregation algorithm, optimizer and a loss function:" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [], + "source": [ + "# Aggregation algorithm\n", + "def FedAvg(models, weights=None):\n", + " new_model = models[0]\n", + " new_state_dict = dict()\n", + " for key in new_model.state_dict().keys():\n", + " new_state_dict[key] = torch.from_numpy(np.average([model.state_dict()[key].numpy() for model in models],\n", + " axis=0, \n", + " weights=weights))\n", + "\n", + " 
+    "    new_model.load_state_dict(new_state_dict)\n",
+    "    return new_model\n",
+    "\n",
+    "def get_optimizer(model):\n",
+    "    # FedProxAdam applies the FedProx proximal term (mu / 2) * ||w - w_t||^2,\n",
+    "    # which penalizes local weights that drift far from the global weights w_t\n",
+    "    return FedProxAdam(model.parameters(), lr=1e-3, mu=0.01)\n",
+    "\n",
+    "def cross_entropy(output, target):\n",
+    "    \"\"\"Binary cross-entropy loss on one-hot targets\"\"\"\n",
+    "    return F.binary_cross_entropy_with_logits(input=output, target=target.float())"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Extend `FLSpec` to define the work to be executed by the aggregator and the collaborators:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 25,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Aggregator step \"start\" registered\n",
+      "Collaborator step \"aggregated_model_validation\" registered\n",
+      "Collaborator step \"train\" registered\n",
+      "Collaborator step \"local_model_validation\" registered\n",
+      "Aggregator step \"join\" registered\n",
+      "Aggregator step \"end\" registered\n"
+     ]
+    }
+   ],
+   "source": [
+    "class FederatedFlow(FLSpec):\n",
+    "    def __init__(self, model=None, optimizer=None, rounds=10, **kwargs):\n",
+    "        super().__init__(**kwargs)\n",
+    "        self.model = model\n",
+    "        self.optimizer = optimizer\n",
+    "        self.rounds = rounds\n",
+    "        self.loss = 0.\n",
+    "\n",
+    "    @aggregator\n",
+    "    def start(self):\n",
+    "        print('Performing initialization for model')\n",
+    "        self.collaborators = self.runtime.collaborators\n",
+    "        self.current_round = 0\n",
+    "        self.next(self.aggregated_model_validation, foreach='collaborators')\n",
+    "\n",
+    "    def compute_accuracy(self, data_loader):\n",
+    "        self.model.eval()\n",
+    "        test_loss = 0\n",
+    "        correct = 0\n",
+    "        with torch.no_grad():\n",
+    "            for data, target in data_loader:\n",
+    "                output = self.model(data)\n",
+    "                test_loss += F.cross_entropy(output, target, reduction='sum').item()\n",
+    "                pred = output.data.max(1, keepdim=True)[1]\n",
+    "                correct += pred.eq(target.data.view_as(pred)).sum()\n",
+    "\n",
+    "        test_loss /= len(data_loader.dataset)\n",
+    "        print('\\nTest set: Avg. loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n",
+    "            test_loss, correct, len(data_loader.dataset),\n",
+    "            100. * correct / len(data_loader.dataset)))\n",
+    "        accuracy = float(correct / len(data_loader.dataset))\n",
+    "        return accuracy\n",
+    "\n",
+    "    @collaborator\n",
+    "    def aggregated_model_validation(self):\n",
+    "        print(f'Performing aggregated model validation for collaborator {self.input}, model: {id(self.model)}')\n",
+    "        self.agg_validation_score = self.compute_accuracy(self.test_loader)\n",
+    "        self.next(self.train)\n",
+    "\n",
+    "    @collaborator\n",
+    "    def train(self):\n",
+    "        # Log after each additional quarter of the samples is processed\n",
+    "        log_threshold = .25\n",
+    "\n",
+    "        self.model.train()\n",
+    "        self.optimizer = get_optimizer(self.model)\n",
+    "        for batch_idx, (data, target) in enumerate(self.train_loader):\n",
+    "            self.optimizer.zero_grad()\n",
+    "            output = self.model(data)\n",
+    "            # Targets are one-hot encoded; F.cross_entropy accepts class\n",
+    "            # probabilities as targets in PyTorch >= 1.10\n",
+    "            loss = F.cross_entropy(output, target)\n",
+    "            loss.backward()\n",
+    "            self.optimizer.step()\n",
+    "\n",
+    "            if (len(data) * batch_idx) / len(self.train_loader.dataset) >= log_threshold:\n",
+    "                print('Train Epoch: [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n",
+    "                    batch_idx * len(data), len(self.train_loader.dataset),\n",
+    "                    100. 
* batch_idx / len(self.train_loader), loss.item()))\n", + " self.loss = loss.item()\n", + " log_threshold += .25\n", + " torch.save(self.model.state_dict(), 'model.pth')\n", + " torch.save(self.optimizer.state_dict(), 'optimizer.pth')\n", + " \n", + " self.next(self.local_model_validation)\n", + "\n", + " @collaborator\n", + " def local_model_validation(self):\n", + " print(f'Performing local model validation for collaborator {self.input}')\n", + " self.local_validation_score = self.compute_accuracy(self.test_loader)\n", + " print(\n", + " f'Done with local model validation for collaborator {self.input}, Accuracy: {self.local_validation_score}')\n", + " self.next(self.join)\n", + "\n", + " @aggregator\n", + " def join(self, inputs):\n", + " self.model = FedAvg([input.model for input in inputs])\n", + " self.optimizer = inputs[0].optimizer\n", + " self.current_round += 1\n", + "\n", + " self.average_loss = sum(input.loss for input in inputs) / len(inputs)\n", + " self.aggregated_model_accuracy = sum(\n", + " input.agg_validation_score for input in inputs) / len(inputs)\n", + " self.local_model_accuracy = sum(\n", + " input.local_validation_score for input in inputs) / len(inputs)\n", + " print(f'Average aggregated model accuracy = {self.aggregated_model_accuracy}')\n", + " print(f'Average training loss = {self.average_loss}')\n", + " print(f'Average local model validation values = {self.local_model_accuracy}')\n", + "\n", + " if self.current_round < self.rounds:\n", + " self.next(self.aggregated_model_validation, foreach='collaborators')\n", + " else:\n", + " self.next(self.end)\n", + "\n", + " @aggregator\n", + " def end(self):\n", + " print(f'Flow ended')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, run the federation:" + ] + }, + { + "cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "Calling start\n", + "\u001b[94mPerforming initialization for model\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator0, model: 140162497619616\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 4.6833, Accuracy: 171/2500 (7%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 1.889274\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 1.279191\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.994200\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator0\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.7548, Accuracy: 1929/2500 (77%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator0, Accuracy: 0.7716000080108643\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator1, model: 140158910463952\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. 
loss: 4.7259, Accuracy: 173/2500 (7%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 1.675623\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 1.068585\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.687561\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator1\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.6366, Accuracy: 2004/2500 (80%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator1, Accuracy: 0.8015999794006348\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator2, model: 140162497661872\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 4.6549, Accuracy: 215/2500 (9%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 1.879489\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 1.325507\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.968176\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator2\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.7462, Accuracy: 1901/2500 (76%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator2, Accuracy: 0.7603999972343445\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator3, model: 140162498346528\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 4.7129, Accuracy: 193/2500 (8%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 1.720635\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 1.061211\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.762026\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator3\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. 
loss: 0.6378, Accuracy: 1992/2500 (80%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator3, Accuracy: 0.7968000173568726\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling join\n", + "\u001b[94mAverage aggregated model accuracy = 0.07520000264048576\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mAverage training loss = 0.8529909627063148\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mAverage local model validation values = 0.782600000500679\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator0, model: 140158910552480\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.6740, Accuracy: 1996/2500 (80%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 0.974921\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 0.633429\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.591566\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator0\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.3951, Accuracy: 2214/2500 (89%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator0, Accuracy: 0.8855999708175659\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator1, model: 140162497608672\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.6877, Accuracy: 1981/2500 (79%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 0.824028\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 0.515538\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.410188\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator1\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.4668, Accuracy: 2166/2500 (87%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator1, Accuracy: 0.8664000034332275\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator2, model: 140162498107328\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. 
loss: 0.6919, Accuracy: 1981/2500 (79%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 1.025180\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 0.616896\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.483282\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator2\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.4406, Accuracy: 2163/2500 (87%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator2, Accuracy: 0.8651999831199646\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator3, model: 140162498345664\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.6698, Accuracy: 2000/2500 (80%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 0.725868\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 0.450241\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.388554\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator3\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.4106, Accuracy: 2211/2500 (88%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator3, Accuracy: 0.8844000101089478\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling join\n", + "\u001b[94mAverage aggregated model accuracy = 0.7958000004291534\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mAverage training loss = 0.4683974838455107\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mAverage local model validation values = 0.8753999918699265\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator0, model: 140162648091376\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.3590, Accuracy: 2230/2500 (89%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 0.406638\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 0.313662\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.326520\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator0\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. 
loss: 0.2096, Accuracy: 2338/2500 (94%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator0, Accuracy: 0.9351999759674072\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator1, model: 140162646717344\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.3773, Accuracy: 2228/2500 (89%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 0.392126\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 0.228912\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.200197\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator1\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.2601, Accuracy: 2317/2500 (93%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator1, Accuracy: 0.926800012588501\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator2, model: 140162498503728\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.3683, Accuracy: 2240/2500 (90%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 0.583415\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 0.407979\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.299050\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator2\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.2664, Accuracy: 2305/2500 (92%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator2, Accuracy: 0.921999990940094\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling aggregated_model_validation\n", + "\u001b[94mPerforming aggregated model validation for collaborator collaborator3, model: 140162497621488\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. loss: 0.3626, Accuracy: 2226/2500 (89%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling train\n", + "\u001b[94mTrain Epoch: [4096/15000 (27%)]\tLoss: 0.371595\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [8192/15000 (53%)]\tLoss: 0.234668\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mTrain Epoch: [11264/15000 (73%)]\tLoss: 0.177007\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling local_model_validation\n", + "\u001b[94mPerforming local model validation for collaborator collaborator3\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94m\n", + "Test set: Avg. 
loss: 0.2615, Accuracy: 2305/2500 (92%)\n", + "\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mDone with local model validation for collaborator collaborator3, Accuracy: 0.921999990940094\u001b[0m\u001b[94m\n", + "\u001b[0mShould transfer from local_model_validation to join\n", + "\n", + "Calling join\n", + "\u001b[94mAverage aggregated model accuracy = 0.8924000114202499\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mAverage training loss = 0.250693485351252\u001b[0m\u001b[94m\n", + "\u001b[0m\u001b[94mAverage local model validation values = 0.926499992609024\u001b[0m\u001b[94m\n", + "\u001b[0m\n", + "Calling end\n", + "\u001b[94mFlow ended\u001b[0m\u001b[94m\n", + "\u001b[0m" + ] + } + ], + "source": [ + "model = Net()\n", + "flflow = FederatedFlow(model, get_optimizer(model), rounds=3, checkpoint=False)\n", + "flflow.runtime = local_runtime\n", + "flflow.run()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.10" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/openfl-tutorials/experimental/401_FedProx_with_Synthetic_nonIID.ipynb b/openfl-tutorials/experimental/402_FedProx_with_Synthetic_nonIID.ipynb similarity index 100% rename from openfl-tutorials/experimental/401_FedProx_with_Synthetic_nonIID.ipynb rename to openfl-tutorials/experimental/402_FedProx_with_Synthetic_nonIID.ipynb diff --git a/openfl-tutorials/experimental/401_MNIST_Aggregator_Validation_Ray_Watermarking.ipynb b/openfl-tutorials/experimental/402_MNIST_Aggregator_Validation_Ray_Watermarking.ipynb similarity index 100% rename from openfl-tutorials/experimental/401_MNIST_Aggregator_Validation_Ray_Watermarking.ipynb rename to openfl-tutorials/experimental/402_MNIST_Aggregator_Validation_Ray_Watermarking.ipynb