diff --git a/README.md b/README.md
index 0b039a1..3a1946d 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,12 @@
-[![PyPI version](https://badge.fury.io/py/traker.svg)](https://badge.fury.io/py/traker)
[![arXiv](https://img.shields.io/badge/arXiv-2303.14186-b31b1b.svg?style=flat-square)](https://arxiv.org/abs/2303.14186)
+[![PyPI version](https://badge.fury.io/py/traker.svg)](https://badge.fury.io/py/traker)
+
+# TRAK: Attributing Model Behavior at Scale
[[docs & tutorials]](https://trak.readthedocs.io/en/latest/)
-[[paper]](https://arxiv.org/abs/2303.14186)
[[blog post]](https://gradientscience.org/trak/)
[[website]](https://trak.csail.mit.edu)
-# TRAK: Attributing Model Behavior at Scale
-
In our [paper](https://arxiv.org/abs/2303.14186), we introduce a new data attribution method called `TRAK` (Tracing with the
Randomly-Projected After Kernel). Using `TRAK`, you can make accurate
counterfactual predictions (e.g., answers to questions of the form “what would
@@ -17,21 +16,10 @@ comparably effective methods, e.g., see our evaluation on:
![Main figure](/docs/assets/main_figure.png)
-## Citation
-If you use this code in your work, please cite using the following BibTeX entry:
-```
-@inproceedings{park2023trak,
- title = {TRAK: Attributing Model Behavior at Scale},
- author = {Sung Min Park and Kristian Georgiev and Andrew Ilyas and Guillaume Leclerc and Aleksander Madry},
- booktitle = {Arxiv preprint arXiv:2303.14186},
- year = {2023}
-}
-```
-
## Usage
-
-[[Quickstart]](https://trak.readthedocs.io/en/latest/quickstart.html)
+[[quickstart]](https://trak.readthedocs.io/en/latest/quickstart.html)
+[[pre-computed TRAK scores for CIFAR-10]](https://colab.research.google.com/drive/1Mlpzno97qpI3UC1jpOATXEHPD-lzn9Wg?usp=sharing)
Check [our docs](https://trak.readthedocs.io/en/latest/) for more detailed examples and
tutorials on how to use `TRAK`. Below, we provide a brief blueprint of using `TRAK`'s API to compute attribution scores.
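The blueprint itself falls between these hunks; as a sketch assembled from the calls visible elsewhere in this diff (`model`, `ckpts`, `loader_train`, `loader_targets`, and `num_targets` are placeholders you supply):

```python
from trak import TRAKer

traker = TRAKer(model=model,
                task='image_classification',
                train_set_size=10_000)

# featurize the training set under each trained checkpoint
for model_id, ckpt in enumerate(ckpts):
    traker.load_checkpoint(ckpt, model_id=model_id)
    for batch in loader_train:
        traker.featurize(batch=batch, num_samples=batch[0].shape[0])
traker.finalize_features()

# score the targets against the featurized training set
for model_id, ckpt in enumerate(ckpts):
    traker.start_scoring_checkpoint(ckpt, model_id=model_id,
                                    num_targets=num_targets)
    for batch in loader_targets:
        traker.score(batch=batch, num_samples=batch[0].shape[0])

scores = traker.finalize_scores()
```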
@@ -74,6 +62,17 @@ scores = traker.finalize_scores()
## Examples
You can find several end-to-end examples in the `examples/` directory.
+## Citation
+If you use this code in your work, please cite using the following BibTeX entry:
+```
+@inproceedings{park2023trak,
+ title = {TRAK: Attributing Model Behavior at Scale},
+ author = {Sung Min Park and Kristian Georgiev and Andrew Ilyas and Guillaume Leclerc and Aleksander Madry},
+  booktitle = {arXiv preprint arXiv:2303.14186},
+ year = {2023}
+}
+```
+
## Installation
To install the version of our package which contains a fast, custom `CUDA`
@@ -93,9 +92,8 @@ pip install traker
Please send an email to trak@mit.edu
-## Maintainers:
+## Maintainers
[Kristian Georgiev](https://twitter.com/kris_georgiev1)
[Andrew Ilyas](https://twitter.com/andrew_ilyas)
-[Guillaume Leclerc](https://twitter.com/gpoleclerc)
[Sung Min Park](https://twitter.com/smsampark)
diff --git a/docs/source/conf.py b/docs/source/conf.py
index 1d94000..7b0248f 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -22,8 +22,8 @@
author = 'Kristian Georgiev'
# The full version, including alpha/beta/rc tags
-release = '0.1.2'
-version = '0.1.2'
+release = '0.1.3'
+version = '0.1.3'
# -- General configuration ---------------------------------------------------
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst
index e6dabda..9d009eb 100644
--- a/docs/source/quickstart.rst
+++ b/docs/source/quickstart.rst
@@ -92,21 +92,36 @@ classification task of your choice.)
)
return model
- def get_dataloader(batch_size=256, num_workers=8, split='train'):
-
- transforms = torchvision.transforms.Compose(
- [torchvision.transforms.RandomHorizontalFlip(),
- torchvision.transforms.RandomAffine(0),
- torchvision.transforms.ToTensor(),
- torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.201))])
-
+ def get_dataloader(batch_size=256, num_workers=8, split='train', shuffle=False, augment=True):
+ if augment:
+ transforms = torchvision.transforms.Compose(
+ [torchvision.transforms.RandomHorizontalFlip(),
+ torchvision.transforms.RandomAffine(0),
+ torchvision.transforms.ToTensor(),
+ torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465),
+ (0.2023, 0.1994, 0.201))])
+ else:
+ transforms = torchvision.transforms.Compose([
+ torchvision.transforms.ToTensor(),
+ torchvision.transforms.Normalize((0.4914, 0.4822, 0.4465),
+ (0.2023, 0.1994, 0.201))])
+
is_train = (split == 'train')
- dataset = torchvision.datasets.CIFAR10(root='/tmp/cifar/', download=True, train=is_train, transform=transforms)
- loader = torch.utils.data.DataLoader(dataset=dataset, shuffle=False, batch_size=batch_size, num_workers=num_workers)
-
+ dataset = torchvision.datasets.CIFAR10(root='/tmp/cifar/',
+ download=True,
+ train=is_train,
+ transform=transforms)
+
+ loader = torch.utils.data.DataLoader(dataset=dataset,
+ shuffle=shuffle,
+ batch_size=batch_size,
+ num_workers=num_workers)
+
return loader
- def train(model, loader, lr=0.4, epochs=24, momentum=0.9, weight_decay=5e-4, lr_peak_epoch=5, label_smoothing=0.0):
+ def train(model, loader, lr=0.4, epochs=24, momentum=0.9,
+ weight_decay=5e-4, lr_peak_epoch=5, label_smoothing=0.0, model_id=0):
+
opt = SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
iters_per_epoch = len(loader)
# Cyclic LR with single triangle
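(The schedule construction falls between these hunks; a sketch of the usual single-triangle schedule, assuming `np`, `lr_scheduler`, and `GradScaler` are imported earlier in the quickstart, with `opt`, `epochs`, `iters_per_epoch`, and `lr_peak_epoch` coming from the surrounding `train`:)

```python
# LR ramps linearly from 0 to its peak at lr_peak_epoch, then back to 0
lr_schedule = np.interp(np.arange((epochs + 1) * iters_per_epoch),
                        [0, lr_peak_epoch * iters_per_epoch, epochs * iters_per_epoch],
                        [0, 1, 0])
scheduler = lr_scheduler.LambdaLR(opt, lr_schedule.__getitem__)
scaler = GradScaler()
```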
@@ -118,9 +133,8 @@ classification task of your choice.)
loss_fn = CrossEntropyLoss(label_smoothing=label_smoothing)
for ep in range(epochs):
- model_count = 0
for it, (ims, labs) in enumerate(loader):
- ims = ims.float().cuda()
+ ims = ims.cuda()
labs = labs.cuda()
opt.zero_grad(set_to_none=True)
with autocast():
@@ -131,15 +145,19 @@ classification task of your choice.)
scaler.step(opt)
scaler.update()
scheduler.step()
+ if ep in [12, 15, 18, 21, 23]:
+ torch.save(model.state_dict(), f'./checkpoints/sd_{model_id}_epoch_{ep}.pt')
+
+ return model
os.makedirs('./checkpoints', exist_ok=True)
+ loader_for_training = get_dataloader(batch_size=512, split='train', shuffle=True)
- for i in tqdm(range(3), desc='Training models..'):
+ # you can modify the for loop below to train more models
+ for i in tqdm(range(1), desc='Training models..'):
model = construct_rn9().to(memory_format=torch.channels_last).cuda()
- loader_train = get_dataloader(batch_size=512, split='train')
- train(model, loader_train)
+ model = train(model, loader_for_training, model_id=i)
- torch.save(model.state_dict(), f'./checkpoints/sd_{i}.pt')
.. raw:: html
@@ -311,4 +329,4 @@ The final line above returns :code:`TRAK` scores as a :code:`numpy.array` from t
That's it!
Once you have your model(s) and your data, just a few API-calls to TRAK
-let's you compute data attribution scores.
\ No newline at end of file
+let you compute data attribution scores.
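With the quickstart changes above, each model now writes one checkpoint per epoch in `[12, 15, 18, 21, 23]` under `./checkpoints/`. Collecting them afterwards takes only `pathlib` and `torch` (a sketch):

```python
from pathlib import Path
import torch

ckpt_files = sorted(Path('./checkpoints').rglob('*.pt'))
ckpts = [torch.load(f, map_location='cpu') for f in ckpt_files]
```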
diff --git a/examples/cifar2_correlation.ipynb b/examples/cifar2_correlation.ipynb
index 0a4e462..9c7aad0 100644
--- a/examples/cifar2_correlation.ipynb
+++ b/examples/cifar2_correlation.ipynb
@@ -10,7 +10,7 @@
},
{
"cell_type": "code",
- "execution_count": 2,
+ "execution_count": null,
"id": "5ec6fa04-de0a-4e73-8b35-a4f08bf871c4",
"metadata": {
"tags": []
@@ -38,12 +38,9 @@
},
{
"cell_type": "code",
- "execution_count": 4,
+ "execution_count": null,
"id": "f949d611-c408-4fe0-a00a-e68006e766fe",
"metadata": {
- "jupyter": {
- "source_hidden": true
- },
"tags": []
},
"outputs": [],
@@ -62,6 +59,8 @@
" num_workers=8,\n",
" split='train', # split \\in [train, val]\n",
" aug_seed=0,\n",
+ " order='sequential',\n",
+ " subsample=False,\n",
" should_augment=True,\n",
" indices=None):\n",
" label_pipeline: List[Operation] = [IntDecoder(),\n",
@@ -74,7 +73,7 @@
" image_pipeline.extend([\n",
" RandomHorizontalFlip(),\n",
" RandomTranslate(padding=2, fill=tuple(map(int, STATS['mean']))),\n",
- " Cutout(4, tuple(map(int, STATS['std']))),\n",
+ " Cutout(4, tuple(map(int, STATS['mean']))),\n",
" ])\n",
"\n",
" image_pipeline.extend([\n",
@@ -88,11 +87,19 @@
" beton_url = BETONS[split]\n",
" beton_path = f'./{split}.beton'\n",
" wget.download(beton_url, out=str(beton_path), bar=None)\n",
+ " \n",
+ " if subsample and split == 'train':\n",
+ " indices = np.random.choice(np.arange(10_000), replace=False, size=5_000)\n",
"\n",
+ " if order == 'sequential':\n",
+ " order = OrderOption.SEQUENTIAL\n",
+ " else:\n",
+ " order = OrderOption.RANDOM\n",
+ " \n",
" return Loader(beton_path,\n",
" batch_size=batch_size,\n",
" num_workers=num_workers,\n",
- " order=OrderOption.SEQUENTIAL,\n",
+ " order=order,\n",
" drop_last=False,\n",
" seed=aug_seed,\n",
" indices=indices,\n",
@@ -153,7 +160,6 @@
" loss_fn = CrossEntropyLoss(label_smoothing=label_smoothing)\n",
"\n",
" for ep in range(epochs):\n",
- " model_count = 0\n",
" for it, (ims, labs) in enumerate(loader):\n",
" opt.zero_grad(set_to_none=True)\n",
" with autocast():\n",
@@ -163,37 +169,69 @@
" scaler.scale(loss).backward()\n",
" scaler.step(opt)\n",
" scaler.update()\n",
- " scheduler.step()"
+ " scheduler.step()\n",
+ "\n",
+ "def evaluate(model, loader_val):\n",
+ " model.eval()\n",
+ " with torch.no_grad():\n",
+ " total_correct, total_num = 0., 0.\n",
+    "        for ims, labs in tqdm(loader_val):\n",
+ " with autocast():\n",
+ " out = model(ims)\n",
+ " total_correct += out.argmax(1).eq(labs).sum().cpu().item()\n",
+ " total_num += ims.shape[0]\n",
+ " print(f'Test accuracy: {total_correct / total_num * 100:.1f}%')"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cfb400b1-9646-4352-a803-ae8cb1ee7b1a",
- "metadata": {},
+ "metadata": {
+ "tags": []
+ },
"outputs": [],
"source": [
- "os.makedirs('./checkpoints', exist_ok=True)\n",
+ "os.makedirs('./checkpoints_cifar2', exist_ok=True)\n",
"\n",
- "for i in tqdm(range(20), desc='Training models..'):\n",
+ "for i in tqdm(range(5), desc='Training models..'):\n",
" model = construct_rn9().to(memory_format=torch.channels_last).cuda()\n",
- " loader_train = get_dataloader(batch_size=512, split='train')\n",
+ " loader_train = get_dataloader(batch_size=512, split='train', order='random', subsample=True)\n",
" train(model, loader_train)\n",
" \n",
- " torch.save(model.state_dict(), f'./checkpoints/sd_{i}.pt')"
+ " torch.save(model.state_dict(), f'./checkpoints_cifar2/sd_{i}.pt')"
]
},
{
"cell_type": "code",
- "execution_count": 40,
+ "execution_count": null,
"id": "73561d60-eb99-462a-9c1b-93fa6db800b7",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
- "ckpt_files = list(Path('./checkpoints').rglob('*.pt'))\n",
- "ckpts = [torch.load(ckpt, map_location='cpu') for ckpt in ckpt_files]"
+    "ckpt_files = list(Path('./checkpoints_cifar2').rglob('*.pt'))\n",
+    "ckpts = [torch.load(ckpt, map_location='cpu') for ckpt in ckpt_files]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "34185f06-482c-47a8-b51c-020fd13df7b9",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "loader = get_dataloader(split='val')\n",
+ "model = construct_rn9().to(memory_format=torch.channels_last).cuda()\n",
+ "model.load_state_dict(ckpts[1])\n",
+ "\n",
+ "evaluate(model, loader)"
]
},
{
@@ -206,7 +244,7 @@
},
{
"cell_type": "code",
- "execution_count": 41,
+ "execution_count": null,
"id": "19cf723c-d1b2-425b-a1e6-265bdedbae77",
"metadata": {
"tags": []
@@ -219,27 +257,30 @@
},
{
"cell_type": "code",
- "execution_count": 44,
+ "execution_count": null,
+ "id": "beb047af-0c14-425d-b1ea-5b2ba7e6b7e8",
+ "metadata": {
+ "tags": []
+ },
+ "outputs": [],
+ "source": [
+ "from trak.projectors import BasicProjector\n",
+ "from trak import TRAKer"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
"id": "c9804779-3229-47a2-84d8-771eb7568e1b",
"metadata": {
"tags": []
},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Existing IDs in /tmp/trak_scores: []\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
- "from trak import TRAKer\n",
- "\n",
"traker = TRAKer(model=model,\n",
" task='image_classification',\n",
- " proj_dim=1024,\n",
- " save_dir='/tmp/trak_scores',\n",
+ " proj_dim=2048,\n",
+    "                 save_dir='./trak_results_cifar2',\n",
" train_set_size=10_000)"
]
},
@@ -253,7 +294,7 @@
},
{
"cell_type": "code",
- "execution_count": 45,
+ "execution_count": null,
"id": "89886b24-27f2-4876-9e56-a7ed368fe3c0",
"metadata": {
"tags": []
@@ -266,25 +307,14 @@
},
{
"cell_type": "code",
- "execution_count": 46,
+ "execution_count": null,
"id": "48bbce69-67d0-49d6-8467-6f8602c89154",
"metadata": {
"tags": []
},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:12<00:00, 4.18s/it]\n",
- "Finalizing features for all model IDs..: 100%|████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 11.23it/s]\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
- "from tqdm import tqdm # for progress tracking\n",
- "\n",
- "for model_id, ckpt in enumerate(tqdm(ckpts[:3])):\n",
+ "for model_id, ckpt in enumerate(tqdm(ckpts)):\n",
" traker.load_checkpoint(ckpt, model_id=model_id)\n",
" for batch in loader_train:\n",
" traker.featurize(batch=batch, num_samples=batch[0].shape[0])\n",
@@ -302,49 +332,26 @@
},
{
"cell_type": "code",
- "execution_count": 47,
+ "execution_count": null,
"id": "96d6fbe7-2d76-47b5-8f35-a42d63705480",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
- "loader_targets = get_dataloader(batch_size=batch_size, split='val')"
+ "loader_targets = get_dataloader(batch_size=batch_size, split='val', should_augment=False)"
]
},
{
"cell_type": "code",
- "execution_count": 48,
+ "execution_count": null,
"id": "a6629868-7026-4e9f-bbea-1d69515d6944",
"metadata": {
"tags": []
},
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|█████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:04<00:00, 1.38s/it]\n",
- "Finalizing scores for all model IDs..: 100%|██████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████████| 3/3 [00:00<00:00, 94.60it/s]"
- ]
- },
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Saving scores in /tmp/trak_scores/scores/scores_394ca197-a6dc-436b-8863-7b800c29140d.npy\n"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "\n"
- ]
- }
- ],
+ "outputs": [],
"source": [
- "for model_id, ckpt in enumerate(tqdm(ckpts[:3])):\n",
+ "for model_id, ckpt in enumerate(tqdm(ckpts)):\n",
" traker.start_scoring_checkpoint(ckpt,\n",
" model_id=model_id,\n",
" num_targets=len(loader_targets.indices))\n",
@@ -359,12 +366,12 @@
"id": "a2d76d04-8b6d-4c46-b616-d1e5f4ca488e",
"metadata": {},
"source": [
- "# Visualize the attributions!"
+ "# Visualize the attributions"
]
},
{
"cell_type": "code",
- "execution_count": 49,
+ "execution_count": null,
"id": "a9d477cf-f12d-4a04-b480-f16c72e7d121",
"metadata": {
"tags": []
@@ -376,46 +383,25 @@
},
{
"cell_type": "code",
- "execution_count": 50,
+ "execution_count": null,
"id": "8417c903-f291-4185-b0e5-ba32dd45b346",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
- "targets = [1, 2] # let's look at two validation images\n",
+ "targets = [85, 100] # let's look at two validation images\n",
"loader_targets = get_dataloader(batch_size=2, split='val', indices=targets, should_augment=False)"
]
},
{
"cell_type": "code",
- "execution_count": 51,
+ "execution_count": null,
"id": "b2678eba-4d19-406c-ad2a-440d85c9b55b",
"metadata": {
"tags": []
},
- "outputs": [
- {
- "data": {
-      "image/png": "<base64-encoded PNG output omitted>",
- "text/plain": [
- "