From 602ec7e0dd12e13f314528b0860b8e759de0b55e Mon Sep 17 00:00:00 2001
From: Philipp Wirth <65946090+philippmwirth@users.noreply.github.com>
Date: Fri, 12 Mar 2021 14:56:31 +0100
Subject: [PATCH 01/16] Add filter for lightly_outputs (#215)

---
 lightly/data/_helpers.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/lightly/data/_helpers.py b/lightly/data/_helpers.py
index fb44ed0ee..8a21ed801 100644
--- a/lightly/data/_helpers.py
+++ b/lightly/data/_helpers.py
@@ -37,6 +37,19 @@ def _contains_videos(root: str, extensions: tuple):
     return any(is_video)
 
 
+def _is_lightly_output_dir(dirname: str):
+    """Checks whether the directory is a lightly_output directory.
+
+    Args:
+        dirname: Directory to check.
+
+    Returns:
+        True if dirname is "lightly_outputs" else False.
+
+    """
+    return 'lightly_outputs' in dirname
+
+
 def _contains_subdirs(root: str):
     """Checks whether directory contains subdirectories.
 
@@ -48,6 +61,7 @@ def _contains_subdirs(root: str):
 
     """
     list_dir = os.listdir(root)
+    list_dir = list(filter(lambda x: not _is_lightly_output_dir(x), list_dir))
    is_dir = \
        [os.path.isdir(os.path.join(root, f)) for f in list_dir]
    return any(is_dir)
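The new helper matches by substring, so any directory whose name contains `lightly_outputs` is skipped when counting subdirectories. A minimal standalone sketch of the resulting behavior, using a hypothetical directory listing:

```python
def _is_lightly_output_dir(dirname: str):
    # mirrors the patched helper: matches any name containing "lightly_outputs"
    return 'lightly_outputs' in dirname

# hypothetical listing with a leftover output folder next to real class folders
list_dir = ['lightly_outputs', 'class_a', 'class_b']
filtered = [d for d in list_dir if not _is_lightly_output_dir(d)]
print(filtered)  # ['class_a', 'class_b'] -- the output folder no longer counts
```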
+ + """ + self.dataset.transform = t diff --git a/tests/data/test_LightlyDataset.py b/tests/data/test_LightlyDataset.py index 06562bac2..ac5222e94 100644 --- a/tests/data/test_LightlyDataset.py +++ b/tests/data/test_LightlyDataset.py @@ -199,3 +199,16 @@ def test_video_dataset(self): out_dir = tempfile.mkdtemp() dataset.dump(out_dir) self.assertEqual(len(os.listdir(out_dir)), len(dataset)) + + def test_transform_setter(self): + + tmp_dir, _, _ = self.create_dataset() + dataset = LightlyDataset(input_dir=tmp_dir) + # the transform of both datasets should be None + self.assertIsNone(dataset.transform) + self.assertIsNone(dataset.dataset.transform) + # use the setter + dataset.transform = torchvision.transforms.ToTensor() + # assert that the transform is set in the nested dataset + self.assertIsNotNone(dataset.transform) + self.assertIsNotNone(dataset.dataset.transform) From 7ed12cca642ece5edad2559361f248642695fbd4 Mon Sep 17 00:00:00 2001 From: MalteEbner Date: Mon, 15 Mar 2021 11:19:04 +0100 Subject: [PATCH 03/16] github actions: set LIGHTLY_SERVER_LOCATION="localhost:-1" (#224) --- .github/workflows/test.yml | 1 + .github/workflows/test_setup.yml | 2 ++ 2 files changed, 3 insertions(+) diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index b8b699ed6..dd9e0f0d0 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -27,6 +27,7 @@ jobs: run: pip install -e '.[all]' - name: Run Pytest run: | + LIGHTLY_SERVER_LOCATION="localhost:-1" pip install pytest-cov python -m pytest -s -v --runslow --cov=./lightly --cov-report=xml --ignore=./lightly/openapi_generated/ - name: Upload coverage to Codecov diff --git a/.github/workflows/test_setup.yml b/.github/workflows/test_setup.yml index 60027dcda..b416dcafa 100644 --- a/.github/workflows/test_setup.yml +++ b/.github/workflows/test_setup.yml @@ -29,6 +29,7 @@ jobs: pip install "git+https://github.com/lightly-ai/lightly.git@$BRANCH_NAME" - name: basic tests of CLI run: | + LIGHTLY_SERVER_LOCATION="localhost:-1" lightly-train --help lightly-embed --help lightly-upload --help @@ -36,6 +37,7 @@ jobs: lightly-download --help - name: test of CLI on a real dataset run: | + LIGHTLY_SERVER_LOCATION="localhost:-1" git clone https://github.com/alexeygrigorev/clothing-dataset-small clothing_dataset_small INPUT_DIR_1="clothing_dataset_small/test/dress" lightly-train input_dir=$INPUT_DIR_1 trainer.max_epochs=1 loader.num_workers=6 From 7b9f1f25d930e10ca114bdb53d2b7f62fd04869b Mon Sep 17 00:00:00 2001 From: MalteEbner Date: Mon, 15 Mar 2021 12:09:27 +0100 Subject: [PATCH 04/16] new endpoint to get quota (#221) * add quota api * added generated code for TagCreator --- .gitignore | 1 + lightly/api/api_workflow_client.py | 3 +- lightly/api/api_workflow_upload_dataset.py | 19 +-- lightly/api/constants.py | 3 - lightly/api/routes/users/__init__.py | 3 - lightly/api/routes/users/service.py | 1 - .../.swagger-codegen/VERSION | 2 +- .../swagger_client/__init__.py | 2 + .../swagger_client/api/__init__.py | 1 + .../swagger_client/api/quota_api.py | 121 ++++++++++++++++++ .../swagger_client/models/__init__.py | 1 + .../models/initial_tag_create_request.py | 28 +++- .../models/tag_create_request.py | 28 +++- .../swagger_client/models/tag_creator.py | 105 +++++++++++++++ .../mocked_api_workflow_client.py | 14 +- .../test_api_workflow_upload_dataset.py | 19 ++- .../test_api_workflow_upload_embeddings.py | 3 +- tests/imports/test_from_imports.py | 1 - tests/imports/test_nested_imports.py | 1 - tests/imports/test_seminested_imports.py | 1 - 20 
From 7ed12cca642ece5edad2559361f248642695fbd4 Mon Sep 17 00:00:00 2001
From: MalteEbner
Date: Mon, 15 Mar 2021 11:19:04 +0100
Subject: [PATCH 03/16] github actions: set LIGHTLY_SERVER_LOCATION="localhost:-1" (#224)

---
 .github/workflows/test.yml       | 1 +
 .github/workflows/test_setup.yml | 2 ++
 2 files changed, 3 insertions(+)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index b8b699ed6..dd9e0f0d0 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -27,6 +27,7 @@ jobs:
         run: pip install -e '.[all]'
       - name: Run Pytest
         run: |
+          LIGHTLY_SERVER_LOCATION="localhost:-1"
           pip install pytest-cov
           python -m pytest -s -v --runslow --cov=./lightly --cov-report=xml --ignore=./lightly/openapi_generated/
       - name: Upload coverage to Codecov
diff --git a/.github/workflows/test_setup.yml b/.github/workflows/test_setup.yml
index 60027dcda..b416dcafa 100644
--- a/.github/workflows/test_setup.yml
+++ b/.github/workflows/test_setup.yml
@@ -29,6 +29,7 @@ jobs:
           pip install "git+https://github.com/lightly-ai/lightly.git@$BRANCH_NAME"
       - name: basic tests of CLI
         run: |
+          LIGHTLY_SERVER_LOCATION="localhost:-1"
          lightly-train --help
          lightly-embed --help
          lightly-upload --help
@@ -36,6 +37,7 @@
           lightly-download --help
       - name: test of CLI on a real dataset
         run: |
+          LIGHTLY_SERVER_LOCATION="localhost:-1"
          git clone https://github.com/alexeygrigorev/clothing-dataset-small clothing_dataset_small
          INPUT_DIR_1="clothing_dataset_small/test/dress"
          lightly-train input_dir=$INPUT_DIR_1 trainer.max_epochs=1 loader.num_workers=6
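One caveat worth noting: in a multi-line `run:` script, a plain `VAR=value` line creates a shell variable that is not exported to child processes, so `export VAR=value` or the workflow's `env:` key would be needed for `pytest` or the `lightly-*` commands to see it through the environment. A minimal sketch of the getenv-with-default pattern presumably used on the Python side (the helper below is a local stand-in mirroring `lightly.api.utils.getenv`; the default URL is an assumption):

```python
import os

def getenv(name: str, default: str) -> str:
    # return the environment variable if set and non-empty, else the default
    value = os.environ.get(name)
    return value if value else default

server_location = getenv('LIGHTLY_SERVER_LOCATION', 'https://api.lightly.ai')
print(server_location)  # 'localhost:-1' inside the patched CI jobs, if exported
```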
From 7b9f1f25d930e10ca114bdb53d2b7f62fd04869b Mon Sep 17 00:00:00 2001
From: MalteEbner
Date: Mon, 15 Mar 2021 12:09:27 +0100
Subject: [PATCH 04/16] new endpoint to get quota (#221)

* add quota api

* added generated code for TagCreator
---
 .gitignore                                   |   1 +
 lightly/api/api_workflow_client.py           |   3 +-
 lightly/api/api_workflow_upload_dataset.py   |  19 +--
 lightly/api/constants.py                     |   3 -
 lightly/api/routes/users/__init__.py         |   3 -
 lightly/api/routes/users/service.py          |   1 -
 .../.swagger-codegen/VERSION                 |   2 +-
 .../swagger_client/__init__.py               |   2 +
 .../swagger_client/api/__init__.py           |   1 +
 .../swagger_client/api/quota_api.py          | 121 ++++++++++++++++++
 .../swagger_client/models/__init__.py        |   1 +
 .../models/initial_tag_create_request.py     |  28 +++-
 .../models/tag_create_request.py             |  28 +++-
 .../swagger_client/models/tag_creator.py     | 105 +++++++++++++++
 .../mocked_api_workflow_client.py            |  14 +-
 .../test_api_workflow_upload_dataset.py      |  19 ++-
 .../test_api_workflow_upload_embeddings.py   |   3 +-
 tests/imports/test_from_imports.py           |   1 -
 tests/imports/test_nested_imports.py         |   1 -
 tests/imports/test_seminested_imports.py     |   1 -
 20 files changed, 314 insertions(+), 43 deletions(-)
 delete mode 100644 lightly/api/constants.py
 create mode 100644 lightly/openapi_generated/swagger_client/api/quota_api.py
 create mode 100644 lightly/openapi_generated/swagger_client/models/tag_creator.py

diff --git a/.gitignore b/.gitignore
index bb503d19e..1f27256c6 100644
--- a/.gitignore
+++ b/.gitignore
@@ -32,3 +32,4 @@ lightly_outputs
 #ignore eggs
 .eggs
 tests/UNMOCKED_end2end_tests/call_test_api.py
+tests/UNMOCKED_end2end_tests/get_versions_all_apis.py
diff --git a/lightly/api/api_workflow_client.py b/lightly/api/api_workflow_client.py
index bb256495b..0e5ae582c 100644
--- a/lightly/api/api_workflow_client.py
+++ b/lightly/api/api_workflow_client.py
@@ -17,7 +17,7 @@
 from lightly.api.api_workflow_upload_dataset import _UploadDatasetMixin
 from lightly.api.api_workflow_upload_embeddings import _UploadEmbeddingsMixin
 from lightly.api.api_workflow_sampling import _SamplingMixin
-from lightly.openapi_generated.swagger_client import TagData, ScoresApi
+from lightly.openapi_generated.swagger_client import TagData, ScoresApi, QuotaApi
 from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
 from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
 from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
@@ -71,6 +71,7 @@ def __init__(self, token: str, dataset_id: str = None, embedding_id: str = None)
         self.mappings_api = MappingsApi(api_client=api_client)
         self.scores_api = ScoresApi(api_client=api_client)
         self.samples_api = SamplesApi(api_client=api_client)
+        self.quota_api = QuotaApi(api_client=api_client)
 
     def check_version_compatibility(self):
         minimum_version = get_minimum_compatible_version()
diff --git a/lightly/api/api_workflow_upload_dataset.py b/lightly/api/api_workflow_upload_dataset.py
index 8e0a7e5b1..0c0e201c7 100644
--- a/lightly/api/api_workflow_upload_dataset.py
+++ b/lightly/api/api_workflow_upload_dataset.py
@@ -1,17 +1,13 @@
 import warnings
 from concurrent.futures.thread import ThreadPoolExecutor
 from typing import Union
+import tqdm
 
+from lightly.openapi_generated.swagger_client import TagCreator
 from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest
-
 from lightly.api.utils import check_filename, check_image, get_thumbnail_from_img, PIL_to_bytes
-
 from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest
-import tqdm
-
-from lightly.api.constants import LIGHTLY_MAXIMUM_DATASET_SIZE
 from lightly.data.dataset import LightlyDataset
-from lightly.api.routes.users.service import get_quota
 
 
 class _UploadDatasetMixin:
@@ -54,18 +50,13 @@ def upload_dataset(self, input: Union[str, LightlyDataset], max_workers: int = 8
                              f"but is of type {type(input)}")
 
         # check the allowed dataset size
-        api_max_dataset_size, status_code = get_quota(self.token)
-        max_dataset_size = min(api_max_dataset_size, LIGHTLY_MAXIMUM_DATASET_SIZE)
+        max_dataset_size_str = self.quota_api.get_quota_maximum_dataset_size()
+        max_dataset_size = int(max_dataset_size_str)
         if len(dataset) > max_dataset_size:
             msg = f'Your dataset has {len(dataset)} samples which'
             msg += f' is more than the allowed maximum of {max_dataset_size}'
             raise ValueError(msg)
 
-        # check whether connection to server was possible
-        if status_code != 200:
-            msg = f'Connection to server failed with status code {status_code}.'
-            raise RuntimeError(msg)
-
         # handle the case where len(dataset) < max_workers
         max_workers = min(len(dataset), max_workers)
@@ -118,7 +109,7 @@ def lambda_(i):
         else:
             img_type = 'meta'
 
-        initial_tag_create_request = InitialTagCreateRequest(img_type=img_type)
+        initial_tag_create_request = InitialTagCreateRequest(img_type=img_type, creator=TagCreator.USER_PIP)
         self.tags_api.create_initial_tag_by_dataset_id(body=initial_tag_create_request, dataset_id=self.dataset_id)
 
     def _upload_single_image(self, image, label, filename: str, mode):
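A minimal sketch of the new quota check in isolation, assuming a valid API token and dataset id (`MY_TOKEN` and `MY_DATASET_ID` are placeholders); note that the endpoint returns the quota as a string:

```python
from lightly.api.api_workflow_client import ApiWorkflowClient

client = ApiWorkflowClient(token='MY_TOKEN', dataset_id='MY_DATASET_ID')
# the endpoint returns the quota as a string, hence the int() conversion
max_dataset_size = int(client.quota_api.get_quota_maximum_dataset_size())

n_samples = 30_000  # hypothetical size of the dataset to upload
if n_samples > max_dataset_size:
    raise ValueError(
        f'Dataset has {n_samples} samples which is more than '
        f'the allowed maximum of {max_dataset_size}'
    )
```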
diff --git a/lightly/api/constants.py b/lightly/api/constants.py
deleted file mode 100644
index 07dd21099..000000000
--- a/lightly/api/constants.py
+++ /dev/null
@@ -1,3 +0,0 @@
-""" Constants """
-
-LIGHTLY_MAXIMUM_DATASET_SIZE = 25_000
diff --git a/lightly/api/routes/users/__init__.py b/lightly/api/routes/users/__init__.py
index 4d2fedbe2..30daae4a7 100644
--- a/lightly/api/routes/users/__init__.py
+++ b/lightly/api/routes/users/__init__.py
@@ -4,8 +4,5 @@
 # All Rights Reserved
 
 
-# provided functions
-from lightly.api.routes.users.service import get_quota # noqa: F401, E402
-
 # submodules
 from . import docker # noqa: F401, E402
diff --git a/lightly/api/routes/users/service.py b/lightly/api/routes/users/service.py
index d0160298a..e0ba1b7ef 100644
--- a/lightly/api/routes/users/service.py
+++ b/lightly/api/routes/users/service.py
@@ -5,7 +5,6 @@
 
 import requests
 
-from lightly.api.constants import LIGHTLY_MAXIMUM_DATASET_SIZE
 from lightly.api.utils import getenv
 
 
diff --git a/lightly/openapi_generated/.swagger-codegen/VERSION b/lightly/openapi_generated/.swagger-codegen/VERSION
index b39b0b9e0..9b77657dc 100644
--- a/lightly/openapi_generated/.swagger-codegen/VERSION
+++ b/lightly/openapi_generated/.swagger-codegen/VERSION
@@ -1 +1 @@
-3.0.24
\ No newline at end of file
+3.0.25
\ No newline at end of file
diff --git a/lightly/openapi_generated/swagger_client/__init__.py b/lightly/openapi_generated/swagger_client/__init__.py
index abcda15aa..e0205a7b1 100644
--- a/lightly/openapi_generated/swagger_client/__init__.py
+++ b/lightly/openapi_generated/swagger_client/__init__.py
@@ -20,6 +20,7 @@
 from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
 from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
 from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
+from lightly.openapi_generated.swagger_client.api.quota_api import QuotaApi
 from lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi
 from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi
 from lightly.openapi_generated.swagger_client.api.scores_api import ScoresApi
@@ -68,6 +69,7 @@
 from lightly.openapi_generated.swagger_client.models.tag_bit_mask_data import TagBitMaskData
 from lightly.openapi_generated.swagger_client.models.tag_change_data import TagChangeData
 from lightly.openapi_generated.swagger_client.models.tag_create_request import TagCreateRequest
+from lightly.openapi_generated.swagger_client.models.tag_creator import TagCreator
 from lightly.openapi_generated.swagger_client.models.tag_data import TagData
 from lightly.openapi_generated.swagger_client.models.tag_filenames_data import TagFilenamesData
 from lightly.openapi_generated.swagger_client.models.tag_name import TagName
diff --git a/lightly/openapi_generated/swagger_client/api/__init__.py b/lightly/openapi_generated/swagger_client/api/__init__.py
index 0440733e3..4264cfb4d 100644
--- a/lightly/openapi_generated/swagger_client/api/__init__.py
+++ b/lightly/openapi_generated/swagger_client/api/__init__.py
@@ -7,6 +7,7 @@
 from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
 from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
 from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
+from lightly.openapi_generated.swagger_client.api.quota_api import QuotaApi
 from lightly.openapi_generated.swagger_client.api.samples_api import SamplesApi
 from lightly.openapi_generated.swagger_client.api.samplings_api import SamplingsApi
 from lightly.openapi_generated.swagger_client.api.scores_api import ScoresApi
diff --git a/lightly/openapi_generated/swagger_client/api/quota_api.py b/lightly/openapi_generated/swagger_client/api/quota_api.py
new file mode 100644
index 000000000..4aa9a6c5b
--- /dev/null
+++ b/lightly/openapi_generated/swagger_client/api/quota_api.py
@@ -0,0 +1,121 @@
+# coding: utf-8
+
+"""
+    Lightly API
+
+    Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai  # noqa: E501
+
+    OpenAPI spec version: 1.0.0
+    Contact: support@lightly.ai
+    Generated by: https://github.com/swagger-api/swagger-codegen.git
+"""
+
+
+from __future__ import absolute_import
+
+import re  # noqa: F401
+
+# python 2 and python 3 compatibility library
+import six
+
+from lightly.openapi_generated.swagger_client.api_client import ApiClient
+
+
+class QuotaApi(object):
+    """NOTE: This class is auto generated by the swagger code generator program.
+
+    Do not edit the class manually.
+    Ref: https://github.com/swagger-api/swagger-codegen
+    """
+
+    def __init__(self, api_client=None):
+        if api_client is None:
+            api_client = ApiClient()
+        self.api_client = api_client
+
+    def get_quota_maximum_dataset_size(self, **kwargs):  # noqa: E501
+        """get_quota_maximum_dataset_size  # noqa: E501
+
+        Get quota of the current user for the maximum dataset size  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_quota_maximum_dataset_size(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :return: str
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+        kwargs['_return_http_data_only'] = True
+        if kwargs.get('async_req'):
+            return self.get_quota_maximum_dataset_size_with_http_info(**kwargs)  # noqa: E501
+        else:
+            (data) = self.get_quota_maximum_dataset_size_with_http_info(**kwargs)  # noqa: E501
+            return data
+
+    def get_quota_maximum_dataset_size_with_http_info(self, **kwargs):  # noqa: E501
+        """get_quota_maximum_dataset_size  # noqa: E501
+
+        Get quota of the current user for the maximum dataset size  # noqa: E501
+        This method makes a synchronous HTTP request by default. To make an
+        asynchronous HTTP request, please pass async_req=True
+        >>> thread = api.get_quota_maximum_dataset_size_with_http_info(async_req=True)
+        >>> result = thread.get()
+
+        :param async_req bool
+        :return: str
+                 If the method is called asynchronously,
+                 returns the request thread.
+        """
+
+        all_params = []  # noqa: E501
+        all_params.append('async_req')
+        all_params.append('_return_http_data_only')
+        all_params.append('_preload_content')
+        all_params.append('_request_timeout')
+
+        params = locals()
+        for key, val in six.iteritems(params['kwargs']):
+            if key not in all_params:
+                raise TypeError(
+                    "Got an unexpected keyword argument '%s'"
+                    " to method get_quota_maximum_dataset_size" % key
+                )
+            params[key] = val
+        del params['kwargs']
+
+        collection_formats = {}
+
+        path_params = {}
+
+        query_params = []
+
+        header_params = {}
+
+        form_params = []
+        local_var_files = {}
+
+        body_params = None
+        # HTTP header `Accept`
+        header_params['Accept'] = self.api_client.select_header_accept(
+            ['application/json'])  # noqa: E501
+
+        # Authentication setting
+        auth_settings = ['ApiKeyAuth', 'auth0Bearer']  # noqa: E501
+
+        return self.api_client.call_api(
+            '/v1/quota', 'GET',
+            path_params,
+            query_params,
+            header_params,
+            body=body_params,
+            post_params=form_params,
+            files=local_var_files,
+            response_type='str',  # noqa: E501
+            auth_settings=auth_settings,
+            async_req=params.get('async_req'),
+            _return_http_data_only=params.get('_return_http_data_only'),
+            _preload_content=params.get('_preload_content', True),
+            _request_timeout=params.get('_request_timeout'),
+            collection_formats=collection_formats)
+ """ + + all_params = [] # noqa: E501 + all_params.append('async_req') + all_params.append('_return_http_data_only') + all_params.append('_preload_content') + all_params.append('_request_timeout') + + params = locals() + for key, val in six.iteritems(params['kwargs']): + if key not in all_params: + raise TypeError( + "Got an unexpected keyword argument '%s'" + " to method get_quota_maximum_dataset_size" % key + ) + params[key] = val + del params['kwargs'] + + collection_formats = {} + + path_params = {} + + query_params = [] + + header_params = {} + + form_params = [] + local_var_files = {} + + body_params = None + # HTTP header `Accept` + header_params['Accept'] = self.api_client.select_header_accept( + ['application/json']) # noqa: E501 + + # Authentication setting + auth_settings = ['ApiKeyAuth', 'auth0Bearer'] # noqa: E501 + + return self.api_client.call_api( + '/v1/quota', 'GET', + path_params, + query_params, + header_params, + body=body_params, + post_params=form_params, + files=local_var_files, + response_type='str', # noqa: E501 + auth_settings=auth_settings, + async_req=params.get('async_req'), + _return_http_data_only=params.get('_return_http_data_only'), + _preload_content=params.get('_preload_content', True), + _request_timeout=params.get('_request_timeout'), + collection_formats=collection_formats) diff --git a/lightly/openapi_generated/swagger_client/models/__init__.py b/lightly/openapi_generated/swagger_client/models/__init__.py index 51def06f2..542e137d0 100644 --- a/lightly/openapi_generated/swagger_client/models/__init__.py +++ b/lightly/openapi_generated/swagger_client/models/__init__.py @@ -53,6 +53,7 @@ from lightly.openapi_generated.swagger_client.models.tag_bit_mask_data import TagBitMaskData from lightly.openapi_generated.swagger_client.models.tag_change_data import TagChangeData from lightly.openapi_generated.swagger_client.models.tag_create_request import TagCreateRequest +from lightly.openapi_generated.swagger_client.models.tag_creator import TagCreator from lightly.openapi_generated.swagger_client.models.tag_data import TagData from lightly.openapi_generated.swagger_client.models.tag_filenames_data import TagFilenamesData from lightly.openapi_generated.swagger_client.models.tag_name import TagName diff --git a/lightly/openapi_generated/swagger_client/models/initial_tag_create_request.py b/lightly/openapi_generated/swagger_client/models/initial_tag_create_request.py index 2db3d6385..9d08b65b0 100644 --- a/lightly/openapi_generated/swagger_client/models/initial_tag_create_request.py +++ b/lightly/openapi_generated/swagger_client/models/initial_tag_create_request.py @@ -34,26 +34,31 @@ class InitialTagCreateRequest(object): """ swagger_types = { 'name': 'TagName', + 'creator': 'TagCreator', 'img_type': 'ImageType' } attribute_map = { 'name': 'name', + 'creator': 'creator', 'img_type': 'imgType' } - def __init__(self, name=None, img_type=None, _configuration=None): # noqa: E501 + def __init__(self, name=None, creator=None, img_type=None, _configuration=None): # noqa: E501 """InitialTagCreateRequest - a model defined in Swagger""" # noqa: E501 if _configuration is None: _configuration = Configuration() self._configuration = _configuration self._name = None + self._creator = None self._img_type = None self.discriminator = None if name is not None: self.name = name + if creator is not None: + self.creator = creator self.img_type = img_type @property @@ -77,6 +82,27 @@ def name(self, name): self._name = name + @property + def creator(self): + """Gets the creator of 
diff --git a/lightly/openapi_generated/swagger_client/models/tag_creator.py b/lightly/openapi_generated/swagger_client/models/tag_creator.py
new file mode 100644
index 000000000..529e689e7
--- /dev/null
+++ b/lightly/openapi_generated/swagger_client/models/tag_creator.py
@@ -0,0 +1,105 @@
+# coding: utf-8
+
+"""
+    Lightly API
+
+    Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai  # noqa: E501
+
+    OpenAPI spec version: 1.0.0
+    Contact: support@lightly.ai
+    Generated by: https://github.com/swagger-api/swagger-codegen.git
+"""
+
+
+import pprint
+import re  # noqa: F401
+
+import six
+
+from lightly.openapi_generated.swagger_client.configuration import Configuration
+
+
+class TagCreator(object):
+    """NOTE: This class is auto generated by the swagger code generator program.
+
+    Do not edit the class manually.
+    """
+
+    """
+    allowed enum values
+    """
+    UNKNOWN = "UNKNOWN"
+    USER_WEBAPP = "USER_WEBAPP"
+    USER_PIP = "USER_PIP"
+    SAMPLER_CORAL = "SAMPLER_CORAL"
+    SAMPLER_CORESET = "SAMPLER_CORESET"
+    SAMPLER_RANDOM = "SAMPLER_RANDOM"
+
+    """
+    Attributes:
+      swagger_types (dict): The key is attribute name
+                            and the value is attribute type.
+      attribute_map (dict): The key is attribute name
+                            and the value is json key in definition.
+    """
+    swagger_types = {
+    }
+
+    attribute_map = {
+    }
+
+    def __init__(self, _configuration=None):  # noqa: E501
+        """TagCreator - a model defined in Swagger"""  # noqa: E501
+        if _configuration is None:
+            _configuration = Configuration()
+        self._configuration = _configuration
+        self.discriminator = None
+
+    def to_dict(self):
+        """Returns the model properties as a dict"""
+        result = {}
+
+        for attr, _ in six.iteritems(self.swagger_types):
+            value = getattr(self, attr)
+            if isinstance(value, list):
+                result[attr] = list(map(
+                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
+                    value
+                ))
+            elif hasattr(value, "to_dict"):
+                result[attr] = value.to_dict()
+            elif isinstance(value, dict):
+                result[attr] = dict(map(
+                    lambda item: (item[0], item[1].to_dict())
+                    if hasattr(item[1], "to_dict") else item,
+                    value.items()
+                ))
+            else:
+                result[attr] = value
+        if issubclass(TagCreator, dict):
+            for key, value in self.items():
+                result[key] = value
+
+        return result
+
+    def to_str(self):
+        """Returns the string representation of the model"""
+        return pprint.pformat(self.to_dict())
+
+    def __repr__(self):
+        """For `print` and `pprint`"""
+        return self.to_str()
+
+    def __eq__(self, other):
+        """Returns true if both objects are equal"""
+        if not isinstance(other, TagCreator):
+            return False
+
+        return self.to_dict() == other.to_dict()
+
+    def __ne__(self, other):
+        """Returns true if both objects are not equal"""
+        if not isinstance(other, TagCreator):
+            return True
+
+        return self.to_dict() != other.to_dict()
diff --git a/tests/api_workflow/mocked_api_workflow_client.py b/tests/api_workflow/mocked_api_workflow_client.py
index b1a9c7621..0b06815dc 100644
--- a/tests/api_workflow/mocked_api_workflow_client.py
+++ b/tests/api_workflow/mocked_api_workflow_client.py
@@ -14,7 +14,7 @@
 from typing import *
 
 from lightly.openapi_generated.swagger_client import ScoresApi, CreateEntityResponse, SamplesApi, SampleCreateRequest, \
-    InitialTagCreateRequest, ApiClient, VersioningApi
+    InitialTagCreateRequest, ApiClient, VersioningApi, QuotaApi
 from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi
 from lightly.openapi_generated.swagger_client.api.jobs_api import JobsApi
 from lightly.openapi_generated.swagger_client.api.mappings_api import MappingsApi
@@ -169,18 +169,16 @@ def get_latest_pip_version(self, **kwargs):
     def get_minimum_compatible_pip_version(self, **kwargs):
         return "1.0.0"
 
+class MockedQuotaApi(QuotaApi):
+    def get_quota_maximum_dataset_size(self, **kwargs):
+        return "60000"
+
 
 def mocked_upload_file_with_signed_url(file: str, url: str, mocked_return_value=True) -> bool:
     assert isinstance(file, BufferedReader)
     assert isinstance(url, str)
     return mocked_return_value
 
 
-def mocked_get_quota(token: str) -> Tuple[int, int]:
-    quota = 25000
-    status = 200
-    return quota, status
-
-
 def mocked_put_request(dst_url, data=None, params=None, json=None, max_backoff=32, max_retries=5) -> bool:
     assert isinstance(dst_url, str)
     success = True
@@ -216,8 +214,8 @@ def __init__(self, *args, **kwargs):
         self.scores_api = MockedScoresApi(api_client=self.api_client)
         self.samples_api = MockedSamplesApi(api_client=self.api_client)
         self.datasets_api = MockedDatasetsApi(api_client=self.api_client)
+        self.quota_api = MockedQuotaApi(api_client=self.api_client)
 
-        lightly.api.api_workflow_upload_dataset.get_quota = mocked_get_quota
         lightly.api.api_workflow_client.put_request = mocked_put_request
 
         self.wait_time_till_next_poll = 0.001  # for api_workflow_sampling
diff --git a/tests/api_workflow/test_api_workflow_upload_dataset.py b/tests/api_workflow/test_api_workflow_upload_dataset.py
index 5af11a82c..0de21083a 100644
--- a/tests/api_workflow/test_api_workflow_upload_dataset.py
+++ b/tests/api_workflow/test_api_workflow_upload_dataset.py
@@ -1,23 +1,21 @@
 import os
 import tempfile
-from typing import Tuple
 
 import torchvision
 
 from lightly.data.dataset import LightlyDataset
 
-import lightly
-
-from tests.api_workflow.mocked_api_workflow_client import MockedApiWorkflowSetup, MockedSamplesApi, mocked_get_quota, \
-    mocked_put_request
+from tests.api_workflow.mocked_api_workflow_client import MockedApiWorkflowSetup
 
 
 class TestApiWorkflowUploadDataset(MockedApiWorkflowSetup):
     def setUp(self) -> None:
         MockedApiWorkflowSetup.setUp(self)
+        self.n_data = 100
         self.create_fake_dataset()
         self.api_workflow_client.tags_api.no_tags = 0
 
-    def create_fake_dataset(self, n_data=1000):
+    def create_fake_dataset(self):
+        n_data = self.n_data
         self.dataset = torchvision.datasets.FakeData(size=n_data, image_size=(3, 32, 32))
@@ -29,6 +27,15 @@ def create_fake_dataset(self):
             path = os.path.join(self.folder_path, sample_names[sample_idx])
             data[0].save(path)
 
+    def test_upload_dataset_over_quota(self):
+        quota = self.n_data-1
+        def get_quota_reduced():
+            return str(quota)
+        self.api_workflow_client.quota_api.get_quota_maximum_dataset_size = get_quota_reduced
+        with self.assertRaises(ValueError):
+            self.api_workflow_client.upload_dataset(input=self.folder_path)
+
+
     def test_upload_dataset_from_folder(self):
         self.api_workflow_client.upload_dataset(input=self.folder_path)
diff --git a/tests/api_workflow/test_api_workflow_upload_embeddings.py b/tests/api_workflow/test_api_workflow_upload_embeddings.py
index 92b8314e2..943613632 100644
--- a/tests/api_workflow/test_api_workflow_upload_embeddings.py
+++ b/tests/api_workflow/test_api_workflow_upload_embeddings.py
@@ -2,6 +2,7 @@
 import tempfile
 
 import numpy as np
+from lightly.utils.io import save_embeddings
 
 import lightly
 from tests.api_workflow.mocked_api_workflow_client import MockedApiWorkflowSetup
@@ -19,7 +20,7 @@ def t_ester_upload_embedding(self, n_data, special_name_first_sample: bool = Fal
         if special_name_first_sample:
             sample_names[0] = "bliblablub"
         labels = [0] * len(sample_names)
-        lightly.utils.save_embeddings(
+        save_embeddings(
             path_to_embeddings,
             np.random.randn(n_data, 16),
             labels,
diff --git a/tests/imports/test_from_imports.py b/tests/imports/test_from_imports.py
index b7cd536a0..29b6d8cd3 100644
--- a/tests/imports/test_from_imports.py
+++ b/tests/imports/test_from_imports.py
@@ -13,7 +13,6 @@ def test_from_imports(self):
         from lightly.active_learning.scorers.classification import ScorerClassification
 
         # api imports
-        from lightly.api.routes.users import get_quota
         from lightly.api.routes.users.docker import get_authorization
         from lightly.api.routes.users.docker import get_soft_authorization
         from lightly.api.routes.users.docker import post_diagnostics
diff --git a/tests/imports/test_nested_imports.py b/tests/imports/test_nested_imports.py
index 704392f58..29c344cf8 100644
--- a/tests/imports/test_nested_imports.py
+++ b/tests/imports/test_nested_imports.py
@@ -14,7 +14,6 @@ def test_nested_imports(self):
         #lightly.active_learning.scorers.classification.ScorerClassification
 
         # api imports
-        lightly.api.routes.users.get_quota
         lightly.api.routes.users.docker.get_authorization
         lightly.api.routes.users.docker.get_soft_authorization
         lightly.api.routes.users.docker.post_diagnostics
diff --git a/tests/imports/test_seminested_imports.py b/tests/imports/test_seminested_imports.py
index bcdbe8763..09cd931bb 100644
--- a/tests/imports/test_seminested_imports.py
+++ b/tests/imports/test_seminested_imports.py
@@ -16,7 +16,6 @@ def test_seminested_imports(self):
 
         # api imports
         from lightly import api
-        api.routes.users.get_quota
         api.routes.users.docker.get_authorization
         api.routes.users.docker.get_soft_authorization
         api.routes.users.docker.post_diagnostics
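For reference, a minimal sketch of the `save_embeddings` helper that the updated embeddings test imports; it writes a CSV with one row per sample (the output path and filenames below are hypothetical):

```python
import numpy as np
from lightly.utils.io import save_embeddings

n_data = 4
save_embeddings(
    'embeddings.csv',                         # hypothetical output path
    np.random.randn(n_data, 16),              # one 16-dimensional embedding per sample
    [0] * n_data,                             # dummy labels
    [f'img_{i}.jpg' for i in range(n_data)],  # hypothetical filenames
)
```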
From ce0603ced2824334ded318fa572f93c4a3fc44c3 Mon Sep 17 00:00:00 2001
From: IgorSusmelj
Date: Mon, 15 Mar 2021 12:50:10 +0100
Subject: [PATCH 05/16] Add benchmarking module for kNN evaluation of models
 (#222)

* Add benchmarking module with kNN evaluator

* Update benchmark to use new BenchmarkModule from lightly

* Adapt benchmark to run on multiple GPUs

* Implemented feedback
---
 .../benchmarks/cifar10_benchmark.py | 132 ++---------
 docs/source/lightly.utils.rst       |   5 +
 lightly/utils/__init__.py           |   2 +
 lightly/utils/benchmarking.py       | 221 ++++++++++++++++++
 4 files changed, 251 insertions(+), 109 deletions(-)
 create mode 100644 lightly/utils/benchmarking.py

diff --git a/docs/source/getting_started/benchmarks/cifar10_benchmark.py b/docs/source/getting_started/benchmarks/cifar10_benchmark.py
index 2e36e1bd2..baad6f134 100644
--- a/docs/source/getting_started/benchmarks/cifar10_benchmark.py
+++ b/docs/source/getting_started/benchmarks/cifar10_benchmark.py
@@ -1,6 +1,13 @@
 # -*- coding: utf-8 -*-
 """
+
+Note that this benchmark also supports a multi-GPU setup. If you run it on
+a system with multiple GPUs make sure that you kill all the processes when
+killing the application. Due to the way we set up this benchmark the distributed
+processes might continue the benchmark if one of the nodes is killed.
+If you know how to fix this don't hesitate to create an issue or PR :)
+
 Code to reproduce the benchmark results:
 
 | Model   | Epochs | Batch Size | Test Accuracy | Peak GPU usage |
@@ -23,6 +30,7 @@
 import numpy as np
 import pytorch_lightning as pl
 import lightly
+from lightly.utils import BenchmarkModule
 
 num_workers = 8
 memory_bank_size = 4096
@@ -38,7 +46,8 @@
 batch_sizes = [128, 512]
 
 # use a GPU if available
-gpus = 1 if torch.cuda.is_available() else 0
+gpus = -1 if torch.cuda.is_available() else 0
+distributed_backend = 'ddp' if torch.cuda.device_count() > 1 else None
 
 # Adapted from our MoCo Tutorial on CIFAR-10
 #
@@ -128,107 +137,11 @@ def get_data_loaders(batch_size: int):
     )
     return dataloader_train_ssl, dataloader_train_kNN, dataloader_test
 
-
-# code for kNN prediction from here:
-# https://colab.research.google.com/github/facebookresearch/moco/blob/colab-notebook/colab/moco_cifar10_demo.ipynb
-def knn_predict(feature, feature_bank, feature_labels, classes: int, knn_k: int, knn_t: float):
-    """Helper method to run kNN predictions on features based on a feature bank
-
-    Args:
-        feature: Tensor of shape [N, D] consisting of N D-dimensional features
-        feature_bank: Tensor of a database of features used for kNN
-        feature_labels: Labels for the features in our feature_bank
-        classes: Number of classes (e.g. 10 for CIFAR-10)
-        knn_k: Number of k neighbors used for kNN
-        knn_t:
-
-    """
-
-    # compute cos similarity between each feature vector and feature bank ---> [B, N]
-    sim_matrix = torch.mm(feature, feature_bank)
-    # [B, K]
-    sim_weight, sim_indices = sim_matrix.topk(k=knn_k, dim=-1)
-    # [B, K]
-    sim_labels = torch.gather(feature_labels.expand(feature.size(0), -1), dim=-1, index=sim_indices)
-
-    # we do a reweighting of the similarities
-    sim_weight = (sim_weight / knn_t).exp()
-
-    # counts for each class
-    one_hot_label = torch.zeros(feature.size(0) * knn_k, classes, device=sim_labels.device)
-    # [B*K, C]
-    one_hot_label = one_hot_label.scatter(dim=-1, index=sim_labels.view(-1, 1), value=1.0)
-    # weighted score ---> [B, C]
-    pred_scores = torch.sum(one_hot_label.view(feature.size(0), -1, classes) * sim_weight.unsqueeze(dim=-1), dim=1)
-
-    pred_labels = pred_scores.argsort(dim=-1, descending=True)
-    return pred_labels
-
-
-class BenchmarkModule(pl.LightningModule):
-    """A PyTorch Lightning Module for automated kNN callback
-
-    At the end of every training epoch we create a feature bank by inferencing
-    the backbone on the dataloader passed to the module.
-    At every validation step we predict features on the validation data.
-    After all predictions on validation data (validation_epoch_end) we evaluate
-    the predictions on a kNN classifier on the validation data using the
-    feature_bank features from the train data.
-    We can access the highest accuracy during a kNN prediction using the
-    max_accuracy attribute.
-    """
-    def __init__(self, dataloader_kNN):
-        super().__init__()
-        self.backbone = nn.Module()
-        self.max_accuracy = 0.0
-        self.dataloader_kNN = dataloader_kNN
-
-    def training_epoch_end(self, outputs):
-        # update feature bank at the end of each training epoch
-        self.backbone.eval()
-        self.feature_bank = []
-        self.targets_bank = []
-        with torch.no_grad():
-            for data in self.dataloader_kNN:
-                img, target, _ = data
-                if gpus > 0:
-                    img = img.cuda()
-                    target = target.cuda()
-                feature = self.backbone(img).squeeze()
-                feature = F.normalize(feature, dim=1)
-                self.feature_bank.append(feature)
-                self.targets_bank.append(target)
-        self.feature_bank = torch.cat(self.feature_bank, dim=0).t().contiguous()
-        self.targets_bank = torch.cat(self.targets_bank, dim=0).t().contiguous()
-        self.backbone.train()
-
-    def validation_step(self, batch, batch_idx):
-        # we can only do kNN predictions once we have a feature bank
-        if hasattr(self, 'feature_bank') and hasattr(self, 'targets_bank'):
-            images, targets, _ = batch
-            feature = self.backbone(images).squeeze()
-            feature = F.normalize(feature, dim=1)
-            pred_labels = knn_predict(feature, self.feature_bank, self.targets_bank, classes, knn_k, knn_t)
-            num = images.size(0)
-            top1 = (pred_labels[:, 0] == targets).float().sum().item()
-            return (num, top1)
-
-    def validation_epoch_end(self, outputs):
-        if outputs:
-            total_num = 0
-            total_top1 = 0.
-            for (num, top1) in outputs:
-                total_num += num
-                total_top1 += top1
-            acc = float(total_top1 / total_num)
-            if acc > self.max_accuracy:
-                self.max_accuracy = acc
-            self.log('kNN_accuracy', acc * 100.0)
-
 
 class MocoModel(BenchmarkModule):
-    def __init__(self, dataloader_kNN):
-        super().__init__(dataloader_kNN)
+    def __init__(self, dataloader_kNN, num_classes):
+        super().__init__(dataloader_kNN, num_classes)
         # create a ResNet backbone and remove the classification head
         resnet = lightly.models.ResNetGenerator('resnet-18', num_splits=8)
         self.backbone = nn.Sequential(
@@ -264,8 +177,8 @@ def configure_optimizers(self):
 
 
 class SimCLRModel(BenchmarkModule):
-    def __init__(self, dataloader_kNN):
-        super().__init__(dataloader_kNN)
+    def __init__(self, dataloader_kNN, num_classes):
+        super().__init__(dataloader_kNN, num_classes)
         # create a ResNet backbone and remove the classification head
         resnet = lightly.models.ResNetGenerator('resnet-18')
         self.backbone = nn.Sequential(
@@ -295,8 +208,8 @@ def configure_optimizers(self):
 
 
 class SimSiamModel(BenchmarkModule):
-    def __init__(self, dataloader_kNN):
-        super().__init__(dataloader_kNN)
+    def __init__(self, dataloader_kNN, num_classes):
+        super().__init__(dataloader_kNN, num_classes)
         # create a ResNet backbone and remove the classification head
         resnet = lightly.models.ResNetGenerator('resnet-18')
         self.backbone = nn.Sequential(
@@ -333,25 +246,26 @@ def configure_optimizers(self):
 
 # loop through configurations and train models
 for batch_size in batch_sizes:
-    for Model in models:
+    for BenchmarkModel in models:
         runs = []
         for seed in range(n_runs):
             pl.seed_everything(seed)
             dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(batch_size)
-            model = Model(dataloader_train_kNN)
+            benchmark_model = BenchmarkModel(dataloader_train_kNN, classes)
 
             trainer = pl.Trainer(max_epochs=max_epochs, gpus=gpus,
-                                 progress_bar_refresh_rate=100)
+                                 progress_bar_refresh_rate=100,
+                                 distributed_backend=distributed_backend)
             trainer.fit(
-                model,
+                benchmark_model,
                 train_dataloader=dataloader_train_ssl,
                 val_dataloaders=dataloader_test
             )
             gpu_memory_usage.append(torch.cuda.max_memory_allocated())
             torch.cuda.reset_peak_memory_stats()
-            runs.append(model.max_accuracy)
+            runs.append(benchmark_model.max_accuracy)
 
             # delete model and trainer + free up cuda memory
-            del model
+            del benchmark_model
             del trainer
             torch.cuda.empty_cache()
         bench_results.append(runs)
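A small sketch (not part of the patch) of how the per-seed accuracies collected in `runs` and `bench_results` could be summarized, assuming one list of accuracies per (model, batch size) configuration:

```python
import numpy as np

# hypothetical per-seed accuracies for two configurations
bench_results = [[0.87, 0.88, 0.88], [0.83, 0.84, 0.83]]
for runs in bench_results:
    print(f'accuracy: {np.mean(runs):.3f} +- {np.std(runs):.3f}')
```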
diff --git a/docs/source/lightly.utils.rst b/docs/source/lightly.utils.rst
index 12620d5a7..8aac98ba2 100644
--- a/docs/source/lightly.utils.rst
+++ b/docs/source/lightly.utils.rst
@@ -13,3 +13,8 @@ lightly.utils
 .. automodule:: lightly.utils.embeddings_2d
    :members:
 
+.benchmarking
+---------------
+.. automodule:: lightly.utils.benchmarking
+   :members:
+
diff --git a/lightly/utils/__init__.py b/lightly/utils/__init__.py
index e49fbb63e..53e37e8d6 100644
--- a/lightly/utils/__init__.py
+++ b/lightly/utils/__init__.py
@@ -13,3 +13,5 @@
 from lightly.utils.io import load_embeddings
 from lightly.utils.io import load_embeddings_as_dict
 from lightly.utils.embeddings_2d import fit_pca
+from lightly.utils.benchmarking import BenchmarkModule
+from lightly.utils.benchmarking import knn_predict
- """ - def __init__(self, dataloader_kNN): - super().__init__() - self.backbone = nn.Module() - self.max_accuracy = 0.0 - self.dataloader_kNN = dataloader_kNN - - def training_epoch_end(self, outputs): - # update feature bank at the end of each training epoch - self.backbone.eval() - self.feature_bank = [] - self.targets_bank = [] - with torch.no_grad(): - for data in self.dataloader_kNN: - img, target, _ = data - if gpus > 0: - img = img.cuda() - target = target.cuda() - feature = self.backbone(img).squeeze() - feature = F.normalize(feature, dim=1) - self.feature_bank.append(feature) - self.targets_bank.append(target) - self.feature_bank = torch.cat(self.feature_bank, dim=0).t().contiguous() - self.targets_bank = torch.cat(self.targets_bank, dim=0).t().contiguous() - self.backbone.train() - - def validation_step(self, batch, batch_idx): - # we can only do kNN predictions once we have a feature bank - if hasattr(self, 'feature_bank') and hasattr(self, 'targets_bank'): - images, targets, _ = batch - feature = self.backbone(images).squeeze() - feature = F.normalize(feature, dim=1) - pred_labels = knn_predict(feature, self.feature_bank, self.targets_bank, classes, knn_k, knn_t) - num = images.size(0) - top1 = (pred_labels[:, 0] == targets).float().sum().item() - return (num, top1) - def validation_epoch_end(self, outputs): - if outputs: - total_num = 0 - total_top1 = 0. - for (num, top1) in outputs: - total_num += num - total_top1 += top1 - acc = float(total_top1 / total_num) - if acc > self.max_accuracy: - self.max_accuracy = acc - self.log('kNN_accuracy', acc * 100.0) - class MocoModel(BenchmarkModule): - def __init__(self, dataloader_kNN): - super().__init__(dataloader_kNN) + def __init__(self, dataloader_kNN, num_classes): + super().__init__(dataloader_kNN, num_classes) # create a ResNet backbone and remove the classification head resnet = lightly.models.ResNetGenerator('resnet-18', num_splits=8) self.backbone = nn.Sequential( @@ -264,8 +177,8 @@ def configure_optimizers(self): class SimCLRModel(BenchmarkModule): - def __init__(self, dataloader_kNN): - super().__init__(dataloader_kNN) + def __init__(self, dataloader_kNN, num_classes): + super().__init__(dataloader_kNN, num_classes) # create a ResNet backbone and remove the classification head resnet = lightly.models.ResNetGenerator('resnet-18') self.backbone = nn.Sequential( @@ -295,8 +208,8 @@ def configure_optimizers(self): class SimSiamModel(BenchmarkModule): - def __init__(self, dataloader_kNN): - super().__init__(dataloader_kNN) + def __init__(self, dataloader_kNN, num_classes): + super().__init__(dataloader_kNN, num_classes) # create a ResNet backbone and remove the classification head resnet = lightly.models.ResNetGenerator('resnet-18') self.backbone = nn.Sequential( @@ -333,25 +246,26 @@ def configure_optimizers(self): # loop through configurations and train models for batch_size in batch_sizes: - for Model in models: + for BenchmarkModel in models: runs = [] for seed in range(n_runs): pl.seed_everything(seed) dataloader_train_ssl, dataloader_train_kNN, dataloader_test = get_data_loaders(batch_size) - model = Model(dataloader_train_kNN) + benchmark_model = BenchmarkModel(dataloader_train_kNN, classes) trainer = pl.Trainer(max_epochs=max_epochs, gpus=gpus, - progress_bar_refresh_rate=100) + progress_bar_refresh_rate=100, + distributed_backend=distributed_backend) trainer.fit( - model, + benchmark_model, train_dataloader=dataloader_train_ssl, val_dataloaders=dataloader_test ) 
+class BenchmarkModule(pl.LightningModule):
+    """A PyTorch Lightning Module for automated kNN callback
+
+    At the end of every training epoch we create a feature bank by feeding the
+    `dataloader_kNN` passed to the module through the backbone.
+    At every validation step we predict features on the validation data.
+    After all predictions on validation data (validation_epoch_end) we evaluate
+    the predictions on a kNN classifier on the validation data using the
+    feature_bank features from the train data.
+
+    We can access the highest test accuracy during a kNN prediction
+    using the `max_accuracy` attribute.
+
+    Attributes:
+        backbone:
+            The backbone model used for kNN validation. Make sure that you set the
+            backbone when inheriting from `BenchmarkModule`.
+        max_accuracy:
+            Floating point number between 0.0 and 1.0 representing the maximum
+            test accuracy the benchmarked model has achieved.
+        dataloader_kNN:
+            Dataloader to be used after each training epoch to create feature bank.
+        num_classes:
+            Number of classes. E.g. for cifar10 we have 10 classes. (default: 10)
+        knn_k:
+            Number of nearest neighbors for kNN
+        knn_t:
+            Temperature parameter for kNN
+
+    Examples:
+        >>> class SimSiamModel(BenchmarkModule):
+        >>>     def __init__(dataloader_kNN, num_classes):
+        >>>         super().__init__(dataloader_kNN, num_classes)
+        >>>         resnet = lightly.models.ResNetGenerator('resnet-18')
+        >>>         self.backbone = nn.Sequential(
+        >>>             *list(resnet.children())[:-1],
+        >>>             nn.AdaptiveAvgPool2d(1),
+        >>>         )
+        >>>         self.resnet_simsiam =
+        >>>             lightly.models.SimSiam(self.backbone, num_ftrs=512)
+        >>>         self.criterion = lightly.loss.SymNegCosineSimilarityLoss()
+        >>>
+        >>>     def forward(self, x):
+        >>>         self.resnet_simsiam(x)
+        >>>
+        >>>     def training_step(self, batch, batch_idx):
+        >>>         (x0, x1), _, _ = batch
+        >>>         x0, x1 = self.resnet_simsiam(x0, x1)
+        >>>         loss = self.criterion(x0, x1)
+        >>>         return loss
+        >>>     def configure_optimizers(self):
+        >>>         optim = torch.optim.SGD(
+        >>>             self.resnet_simsiam.parameters(), lr=6e-2, momentum=0.9
+        >>>         )
+        >>>         return [optim]
+        >>>
+        >>> model = SimSiamModel(dataloader_train_kNN)
+        >>> trainer = pl.Trainer()
+        >>> trainer.fit(
+        >>>     model,
+        >>>     train_dataloader=dataloader_train_ssl,
+        >>>     val_dataloaders=dataloader_test
+        >>> )
+        >>> # you can get the peak accuracy using
+        >>> print(model.max_accuracy)
+
+    """
+
+    def __init__(self,
+                 dataloader_kNN: DataLoader,
+                 num_classes: int,
+                 knn_k: int=200,
+                 knn_t: float=0.1):
+        super().__init__()
+        self.backbone = nn.Module()
+        self.max_accuracy = 0.0
+        self.dataloader_kNN = dataloader_kNN
+        self.num_classes = num_classes
+        self.knn_k = knn_k
+        self.knn_t = knn_t
+
+        # create dummy param to keep track of the device the model is using
+        self.dummy_param = nn.Parameter(torch.empty(0))
+
+    def training_epoch_end(self, outputs):
+        # update feature bank at the end of each training epoch
+        self.backbone.eval()
+        self.feature_bank = []
+        self.targets_bank = []
+        with torch.no_grad():
+            for data in self.dataloader_kNN:
+                img, target, _ = data
+                img = img.to(self.dummy_param.device)
+                target = target.to(self.dummy_param.device)
+                feature = self.backbone(img).squeeze()
+                feature = F.normalize(feature, dim=1)
+                self.feature_bank.append(feature)
+                self.targets_bank.append(target)
+        self.feature_bank = torch.cat(
+            self.feature_bank, dim=0).t().contiguous()
+        self.targets_bank = torch.cat(
+            self.targets_bank, dim=0).t().contiguous()
+        self.backbone.train()
+
+    def validation_step(self, batch, batch_idx):
+        # we can only do kNN predictions once we have a feature bank
+        if hasattr(self, 'feature_bank') and hasattr(self, 'targets_bank'):
+            images, targets, _ = batch
+            feature = self.backbone(images).squeeze()
+            feature = F.normalize(feature, dim=1)
+            pred_labels = knn_predict(
+                feature,
+                self.feature_bank,
+                self.targets_bank,
+                self.num_classes,
+                self.knn_k,
+                self.knn_t
+            )
+            num = images.size()
+            top1 = (pred_labels[:, 0] == targets).float().sum()
+            return (num, top1)
+
+    def validation_epoch_end(self, outputs):
+        device = self.dummy_param.device
+        if outputs:
+            total_num = torch.Tensor([0]).to(device)
+            total_top1 = torch.Tensor([0.]).to(device)
+            for (num, top1) in outputs:
+                total_num += num[0]
+                total_top1 += top1
+
+            # only all-reduce when distributed training is actually initialized,
+            # otherwise dist.get_world_size() raises in single-process runs
+            if dist.is_initialized() and dist.get_world_size() > 1:
+                dist.all_reduce(total_num)
+                dist.all_reduce(total_top1)
+
+            acc = float(total_top1.item() / total_num.item())
+            if acc > self.max_accuracy:
+                self.max_accuracy = acc
+            self.log('kNN_accuracy', acc * 100.0, prog_bar=True)

From 2a48f076ec974e82d6667487742c5bf4c2a721bd Mon Sep 17 00:00:00 2001
From: IgorSusmelj
Date: Mon, 15 Mar 2021 13:11:33 +0100
Subject: [PATCH 06/16] General documentation updates (#217)

* Make it clear that random rotate is only +90

* Add example to run docs with new AL tutorial

* Update readme information

* Update documentation

* Update docstrings

* Minor changes to tutorial

* Implement feedback in docs

* Implement feedback in docs

* Fixed typo
---
 README.md                                    | 10 +++-
 docs/README.md                               |  5 ++
 docs/source/docker/advanced/pretagging.rst   |  2 +-
 .../docker/getting_started/first_steps.rst   | 11 +++-
 docs/source/docker/getting_started/setup.rst |  2 +-
 docs/source/docker/known_issues_faq.rst      | 19 +++++++
 docs/source/docker/overview.rst              |  8 +++
 .../getting_started/active_learning.rst      |  8 ++-
 .../getting_started/command_line_tool.rst    | 12 ++--
 .../images/al_accuracy_plot.png              | Bin 0 -> 52893 bytes
 .../getting_started/lightly_at_a_glance.rst  |  1 +
 docs/source/index.rst                        | 18 +++++-
 .../platform/tutorial_active_learning.py     | 20 +++++--
 lightly/api/api_workflow_client.py           |  2 +-
 lightly/data/collate.py                      |  6 +-
 lightly/data/dataset.py                      | 53 +++++++++++++-----
 lightly/embedding/embedding.py               | 16 +++---
 lightly/loss/ntx_ent_loss.py                 | 12 +++-
 lightly/loss/sym_neg_cos_sim_loss.py         |  4 +-
 lightly/models/__init__.py                   |  4 +-
 lightly/models/moco.py                       |  7 ++-
 lightly/models/resnet.py                     | 15 ++++-
 lightly/models/simclr.py                     |  6 +-
 lightly/models/simsiam.py                    | 30 +++++---
 lightly/transforms/rotation.py               | 11 +++-
 25 files changed, 217 insertions(+), 65 deletions(-)
 create mode 100644 docs/source/getting_started/images/al_accuracy_plot.png

diff --git a/README.md b/README.md
index bf2b768bc..76d19b83c 100644
--- a/README.md
+++ b/README.md
@@ -24,11 +24,13 @@ Want to jump to the tutorials and see lightly in action?
 - [Train MoCo on CIFAR-10](https://docs.lightly.ai/tutorials/package/tutorial_moco_memory_bank.html)
 - [Train SimCLR on clothing data](https://docs.lightly.ai/tutorials/package/tutorial_simclr_clothing.html)
 - [Train SimSiam on satellite images](https://docs.lightly.ai/tutorials/package/tutorial_simsiam_esa.html)
+- [Use lightly with custom augmentations](https://docs.lightly.ai/tutorials/package/tutorial_custom_augmentations.html)
 
 ### Benchmarks
 
 Currently implemented models and their accuracy on cifar10. All models have been evaluated using kNN. We report the max test accuracy over the epochs as well as the maximum GPU memory consumption. All models in this benchmark use the same augmentations as well as the same ResNet-18 backbone. Training precision is set to FP32 and SGD is used as an optimizer with cosineLR.
+One epoch on cifar10 takes ~35 seconds on a V100 GPU. [Learn more about the cifar10 benchmark here](https://docs.lightly.ai/getting_started/benchmarks.html)
 
 | Model   | Epochs | Batch Size | Test Accuracy | Peak GPU usage |
 |---------|--------|------------|---------------|----------------|
@@ -38,6 +40,9 @@
 | MoCo    | 200    | 512        | 0.85          | 7.4 GBytes     |
 | SimCLR  | 200    | 512        | 0.83          | 7.8 GBytes     |
 | SimSiam | 200    | 512        | 0.81          | 7.0 GBytes     |
+| MoCo    | 800    | 128        | 0.89          | 2.1 GBytes     |
+| SimCLR  | 800    | 128        | 0.87          | 1.9 GBytes     |
+| SimSiam | 800    | 128        | 0.80          | 2.0 GBytes     |
 | MoCo    | 800    | 512        | 0.90          | 7.2 GBytes     |
 | SimCLR  | 800    | 512        | 0.89          | 7.7 GBytes     |
 | SimSiam | 800    | 512        | 0.91          | 6.9 GBytes     |
@@ -59,7 +64,7 @@ Lightly requires **Python 3.6+**. We recommend installing Lightly in a **Linux** environment.
 - hydra-core>=1.0.0
 - numpy>=1.18.1
-- pytorch_lightning>=0.10.0
+- pytorch_lightning>=1.0.4
 - requests>=2.23.0
 - torchvision
 - tqdm
@@ -88,7 +93,8 @@ To create an embedding of a dataset you can use:
 lightly-embed input_dir=/mydataset checkpoint=/mycheckpoint
 ```
 
-The embeddings with the corresponding filename are stored in a human-readable .csv file.
+The embeddings with the corresponding filename are stored in a
+[human-readable .csv file](https://docs.lightly.ai/getting_started/command_line_tool.html#create-embeddings-using-the-cli).
 
 ### Next Steps
 Head to the [documentation](https://docs.lightly.ai) and see the things you can achieve with Lightly!
diff --git a/docs/README.md b/docs/README.md
index 353a4240b..7c30293fe 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -13,6 +13,11 @@
 pip install sphinx_rtd_theme
 make html
 ```
 
+Shortcut to build the docs (with the env variables for the active-learning tutorial):
+```
+LIGHTLY_SERVER_LOCATION='https://api.lightly.ai' TOKEN='YOUR_TOKEN' AL_TUTORIAL_DATASET_ID='YOUR_DATASET_ID' make html && python -m http.server 1234 -d build/html
+```
+
 You can host the docs after building using the following python command
 `python -m http.server 1234 -d build/html` from the docs folder.
 
 Open a browser and go to `http://localhost:1234` to see the documentation.
diff --git a/docs/source/docker/advanced/pretagging.rst b/docs/source/docker/advanced/pretagging.rst
index 3f1ad2fef..8ca85563d 100644
--- a/docs/source/docker/advanced/pretagging.rst
+++ b/docs/source/docker/advanced/pretagging.rst
@@ -45,7 +45,7 @@ before filtering.
 For every docker run with pretagging enabled we also dump all model predictions
 into a json file with the following format:
 
-.. code-block:: json
+.. code-block:: javascript
 
     // boxes have format x1, y1, x2, y2
     [
diff --git a/docs/source/docker/getting_started/first_steps.rst b/docs/source/docker/getting_started/first_steps.rst
index e3f019bc2..5ca7c53d6 100644
--- a/docs/source/docker/getting_started/first_steps.rst
+++ b/docs/source/docker/getting_started/first_steps.rst
@@ -57,7 +57,7 @@ There are **three** types of volume mappings:
 * **Input Directory:**
   The input directory contains the dataset we want to process. The format of the
   input data should be either a single folder containing all the images or a folder
   containing a subfolder which holds the images.
-  See the tutorial "Structure Your Input" for more information.
+  See the tutorial :ref:`input-structure-label` for more information.
   The container has only **read access** to this directory (note the *:ro* at
   the end of the volume mapping).
 * **Shared Directory:**
@@ -124,6 +124,15 @@ The command above does the following:
   will be 30% of the initial dataset size. You can also specify the exact number
   of remaining images by setting **n_samples** to an integer value.
 
+  This allows you to specify the minimum allowed distance between two image
+  embeddings in the output dataset. After normalizing the input embeddings
+  to unit length, this value should be between 0 and 2. This is often a more
+  convenient method when working with different data sources and trying to
+  combine them in a balanced way.
+
+- **stopping_condition.min_distance=0.2** would remove all samples which are
+  closer to each other than 0.2.
+
 
 Train a Self-Supervised Model
 -----------------------------------
diff --git a/docs/source/docker/getting_started/setup.rst b/docs/source/docker/getting_started/setup.rst
index b877a9fe6..e950db370 100644
--- a/docs/source/docker/getting_started/setup.rst
+++ b/docs/source/docker/getting_started/setup.rst
@@ -23,7 +23,7 @@ container has a working internet connection and has access to
 https://api.lightly.ai.
 
 
-Download image
+Download the Docker Image
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 Ask your account manager from Lightly for the credentials
diff --git a/docs/source/docker/known_issues_faq.rst b/docs/source/docker/known_issues_faq.rst
index 16f76054b..08a19acb6 100644
--- a/docs/source/docker/known_issues_faq.rst
+++ b/docs/source/docker/known_issues_faq.rst
@@ -3,6 +3,25 @@
 Known Issues and FAQ
 ===================================
 
+Docker is slow when working with long videos
+---------------------------------------------------
+
+We are working on this issue internally. For now we suggest to split the large
+videos into chunks. You can do this using ffmpeg and without losing quality.
+The following code just breaks up the video in a way that no re-encoding is needed.
+
+.. code-block:: console
+
+    ffmpeg -i input.mp4 -c copy -map 0 -segment_time 01:00:00 -f segment -reset_timestamps 1 output%03d.mp4
+
+What exactly happens here?
+
+- `input.mp4`, this is your input video
+- `-c copy -map 0`, this makes sure we just copy and don't re-encode the video
+- `-segment_time 01:00:00 -f segment`, defines that we want chunks of 1h each
+- `-reset_timestamps 1`, makes sure we reset the timestamps (each video starts from 0)
+- `output%03d.mp4`, name of the output videos (output001.mp4, output002.mp4, ...)
+
 Shared Memory Error when running Lightly Docker
 -----------------------------------------------
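A small sketch (not part of the patch) that drives the same ffmpeg command from Python; the flags match the FAQ entry above and the paths are placeholders:

```python
import subprocess

def split_video(path: str, segment_time: str = '01:00:00') -> None:
    # stream-copy into fixed-length segments, no re-encoding
    subprocess.run([
        'ffmpeg', '-i', path,
        '-c', 'copy', '-map', '0',
        '-segment_time', segment_time, '-f', 'segment',
        '-reset_timestamps', '1',
        'output%03d.mp4',
    ], check=True)

split_video('input.mp4')  # hypothetical input file
```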
+ Train a Self-Supervised Model ----------------------------------- diff --git a/docs/source/docker/getting_started/setup.rst b/docs/source/docker/getting_started/setup.rst index b877a9fe6..e950db370 100644 --- a/docs/source/docker/getting_started/setup.rst +++ b/docs/source/docker/getting_started/setup.rst @@ -23,7 +23,7 @@ container has a working internet connection and has access to https://api.lightly.ai. -Download image +Download the Docker Image ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Ask your account manager from Lightly for the credentials diff --git a/docs/source/docker/known_issues_faq.rst b/docs/source/docker/known_issues_faq.rst index 16f76054b..08a19acb6 100644 --- a/docs/source/docker/known_issues_faq.rst +++ b/docs/source/docker/known_issues_faq.rst @@ -3,6 +3,25 @@ Known Issues and FAQ =================================== +Docker is slow when working with long videos +--------------------------------------------------- + +We are working on this issue internally. For now we suggest to split the large +videos into chunks. You can do this using ffmpeg and without losing quality. +The following code just breaks up the video in a way that no re-encoding is needed. + +.. code-block:: console + + ffmpeg -i input.mp4 -c copy -map 0 -segment_time 01:00:00 -f segment -reset_timestamps 1 output%03d.mp4 + +What exactly happens here? + +- `input.mp4`, this is your input video +- `-c copy -map 0`, this makes sure we just copy and don't re-encode the video +- `-segment_time 01:00:00 -f segment`, defines that we want chunks of 1h each +- `-reset_timestamps 1`, makes sure we reset the timestamps (each video starts from 0) +- `output%03d.mp4`, name of the output vidoes (output001.mp4, output002.mp4, ...) + Shared Memory Error when running Lightly Docker ----------------------------------------------- diff --git a/docs/source/docker/overview.rst b/docs/source/docker/overview.rst index e52f0934f..ff43756bd 100644 --- a/docs/source/docker/overview.rst +++ b/docs/source/docker/overview.rst @@ -12,8 +12,16 @@ and an easy way to work with lightly. But there is more! With the introduction of our on-premise solution, you can process larger datasets completely on your end without data leaving your infrastructure. We worked hard to make this happen and are very proud to present you with the following specs: +* **NEW** Lightly Docker has built-in pretagging models (see :ref:`ref-docker-pretagging` ) + + * Use this feature to pre-label your dataset or to only select images which contain certain objects + + * Supported object categories are: bicycle, bus, car, motorcycle, person, train, truck + * Sample more than 1 Million samples within a few hours! +* Runs directly with videos without prior extraction of the frames! + * Wrapped in a docker container (no setup required if your system supports docker) * Configurable diff --git a/docs/source/getting_started/active_learning.rst b/docs/source/getting_started/active_learning.rst index f923be8a7..27a795657 100644 --- a/docs/source/getting_started/active_learning.rst +++ b/docs/source/getting_started/active_learning.rst @@ -1,11 +1,15 @@ .. _lightly-active-learning: -Active Learning +Active learning =================== Lightly enables active learning with only a few lines of additional code. Learn here, how to get the most out of your data by maximizing the available information in your annotated dataset. +.. figure:: images/al_accuracy_plot.png + + Plot showing the different samples and how they perform on the clothing dataset. 
+
 Preparations
 -----------------
 Before you read on, make sure you have read the section on the :ref:`lightly-platform`.
@@ -20,7 +24,7 @@ Lightly makes use of the following concepts for active learning:
 * **ApiWorkflowClient:** :py:class:`lightly.api.api_workflow_client.ApiWorkflowClient`
    The `ApiWorkflowClient` is used to connect to our API. The API handles the
-   selection of the images based on embeddings and active-learning scores. To initialize
+   selection of the images based on embeddings and active learning scores. To initialize
    the `ApiWorkflowClient`, you will need the `datasetId` and the `token` from the
    :ref:`lightly-platform`.
 
diff --git a/docs/source/getting_started/command_line_tool.rst b/docs/source/getting_started/command_line_tool.rst
index d7c196538..6bcffe963 100644
--- a/docs/source/getting_started/command_line_tool.rst
+++ b/docs/source/getting_started/command_line_tool.rst
@@ -90,8 +90,8 @@ You will get a *token* and *dataset_id* which can be used to upload your dataset
    # upload only the dataset
    lightly-upload input_dir=cat token=your_token dataset_id=your_dataset_id
 
-   # you can upload the dataset together with the embedding
-   lightly-upload input_dir=cat embedding=your_embedding.csv \
+   # you can upload the dataset together with the embeddings
+   lightly-upload input_dir=cat embeddings=your_embedding.csv \
     token=your_token dataset_id=your_dataset_id
 
 .. note:: To obtain your *token* and *dataset_id* check:
@@ -107,12 +107,12 @@ You can upload embeddings directly to the Lightly Platform using the CLI.
 
 .. code-block:: bash
 
-   # upload only the embedding
-   lightly-upload embedding=your_embedding.csv token=your_token \
+   # upload only the embeddings
+   lightly-upload embeddings=your_embedding.csv token=your_token \
     dataset_id=your_dataset_id
 
-   # you can upload the dataset together with the embedding
-   lightly-upload input_dir=cat embedding=your_embedding.csv \
+   # you can upload the dataset together with the embeddings
+   lightly-upload input_dir=cat embeddings=your_embedding.csv \
     token=your_token dataset_id=your_dataset_id
 
 
diff --git a/docs/source/getting_started/images/al_accuracy_plot.png b/docs/source/getting_started/images/al_accuracy_plot.png
new file mode 100644
index 0000000000000000000000000000000000000000..e54cc6afd917b9497291ef6bf3e4892cffd0dbf5
GIT binary patch
literal 52893
[... 52893 bytes of GIT binary patch data omitted ...]

diff --git a/lightly/data/dataset.py b/lightly/data/dataset.py
-        >>> # load cifar10 from a local folder
+        >>> # load a dataset consisting of images from a local folder
+        >>> # mydata/
+        >>> # `- img1.png
+        >>> # `- img2.png
+        >>> # `- ...
>>> import lightly.data as data - >>> dataset = data.LightlyDataset(input_dir='path/to/cifar10/') + >>> dataset = data.LightlyDataset(input_dir='path/to/mydata/') >>> sample, target, fname = dataset[0] - + >>> + >>> # also works with subfolders + >>> # mydata/ + >>> # `- subfolder1 + >>> # `- img1.png + >>> # `- subfolder2 + >>> # ... + >>> + >>> # also works with videos + >>> # mydata/ + >>> # `- video1.mp4 + >>> # `- video2.mp4 + >>> # `- ... """ def __init__(self, @@ -150,11 +174,11 @@ def from_torch_dataset(cls, A LightlyDataset object. Examples: - >>> # load cifar10 from torchvision - >>> import torchvision - >>> import lightly.data as data - >>> base = torchvision.datasets.CIFAR10(root='./') - >>> dataset = data.LightlyDataset.from_torch_dataset(base) + >>> # load cifar10 from torchvision + >>> import torchvision + >>> import lightly.data as data + >>> base = torchvision.datasets.CIFAR10(root='./') + >>> dataset = data.LightlyDataset.from_torch_dataset(base) """ # create an "empty" dataset object @@ -223,7 +247,10 @@ def dump(self, filenames: Filenames of the images to store. If None, stores all images. format: - Image format. + Image format. Can be any pillow image format (png, jpg, ...). + By default we try to use the same format as the input data. If + not possible (e.g. for videos) we dump the image + as a png image to prevent compression artifacts. """ diff --git a/lightly/embedding/embedding.py b/lightly/embedding/embedding.py index 91c75e795..2e5e4069b 100644 --- a/lightly/embedding/embedding.py +++ b/lightly/embedding/embedding.py @@ -20,15 +20,13 @@ class SelfSupervisedEmbedding(BaseEmbedding): Implements an embedding strategy based on self-supervised learning. A model backbone, self-supervised criterion, optimizer, and dataloader are passed to the constructor. The embedding itself is a pytorch-lightning - module which can be trained very easily: - - https://pytorch-lightning.readthedocs.io/en/stable/ + module. The implementation is based on contrastive learning. - SimCLR: https://arxiv.org/abs/2002.05709 - - MoCo: https://arxiv.org/abs/1911.05722 + * SimCLR: https://arxiv.org/abs/2002.05709 + * MoCo: https://arxiv.org/abs/1911.05722 + * SimSiam: https://arxiv.org/abs/2011.10566 Attributes: model: @@ -76,14 +74,14 @@ def embed(self, Args: dataloader: - A torchvision dataloader. + A PyTorch dataloader. device: - Selected device (see PyTorch documentation) + Selected device (`cpu`, `cuda`, see PyTorch documentation) to_numpy: Whether to return the embeddings as numpy array. Returns: - A tuple consisting of a tensor or ndarray of embeddings + A tuple consisting of a tensor or ndarray of embeddings with shape n_images x num_ftrs and labels, fnames Examples: diff --git a/lightly/loss/ntx_ent_loss.py b/lightly/loss/ntx_ent_loss.py index 1f08079dc..a5d62541c 100644 --- a/lightly/loss/ntx_ent_loss.py +++ b/lightly/loss/ntx_ent_loss.py @@ -11,6 +11,13 @@ class NTXentLoss(MemoryBankModule): """Implementation of the Contrastive Cross Entropy Loss. + + This implementation follows the SimCLR[0] paper. If you enable the memory + bank by setting the `memory_bank_size` value > 0 the loss behaves like + the one described in the MoCo[1] paper. + + [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709 + [1] MoCo, 2020, https://arxiv.org/abs/1911.05722 Attributes: temperature: @@ -18,7 +25,8 @@ class NTXentLoss(MemoryBankModule): use_cosine_similarity: Whether to use cosine similarity over L2 distance. memory_bank_size: - Number of samples to store in the memory bank. 
+            Number of negative samples to store in the memory bank.
+            Use 0 for SimCLR. For MoCo we typically use numbers like 4096 or 65536.
 
     Raises:
         ValueError if abs(temperature) < 1e-8 to prevent divide by zero.
@@ -100,7 +108,7 @@ def _cosine_simililarity(self, x, y):
 
     def forward(self, out0: torch.Tensor, out1: torch.Tensor):
-        """Forward pass through Contrastive Cross Entropy Loss.
+        """Forward pass through Contrastive Cross-Entropy Loss.
 
         If used with a memory bank, the samples from the memory bank are used
         as negative examples. Otherwise, within-batch samples are used as
diff --git a/lightly/loss/sym_neg_cos_sim_loss.py b/lightly/loss/sym_neg_cos_sim_loss.py
index a5dfbb468..61f29abbb 100644
--- a/lightly/loss/sym_neg_cos_sim_loss.py
+++ b/lightly/loss/sym_neg_cos_sim_loss.py
@@ -6,7 +6,9 @@ import torch
 
 class SymNegCosineSimilarityLoss(torch.nn.Module):
-    """Implementation of the Symmetrized Loss.
+    """Implementation of the Symmetrized Loss used in the SimSiam[0] paper.
+
+    [0] SimSiam, 2020, https://arxiv.org/abs/2011.10566
 
     Examples:
 
diff --git a/lightly/models/__init__.py b/lightly/models/__init__.py
index d31400741..091a435fc 100644
--- a/lightly/models/__init__.py
+++ b/lightly/models/__init__.py
@@ -1,9 +1,9 @@
 """The lightly.models package provides model implementations.
 
-The package contains an implementation of the commonly used ResNet and
+The package contains an implementation of the commonly used ResNet and
 adaptations of the architecture which make self-supervised learning simpler.
 
-The package also hosts the Lightly model zoo - a list of downloadable ResNet
+The package also hosts the Lightly model zoo - a list of downloadable ResNet
 checkpoints.
 """
 
diff --git a/lightly/models/moco.py b/lightly/models/moco.py
index 9837c1627..9af8b5f8e 100644
--- a/lightly/models/moco.py
+++ b/lightly/models/moco.py
@@ -25,7 +25,12 @@ def _get_moco_projection_head(num_ftrs: int, out_dim: int):
 
 class MoCo(nn.Module, _MomentumEncoderMixin):
-    """Implementation of the MoCo (Momentum Contrast) architecture.
+    """Implementation of the MoCo (Momentum Contrast)[0] architecture.
+
+    Recommended loss: :py:class:`lightly.loss.ntx_ent_loss.NTXentLoss` with
+    a memory bank.
+
+    [0] MoCo, 2020, https://arxiv.org/abs/1911.05722
 
     Attributes:
         backbone:
diff --git a/lightly/models/resnet.py b/lightly/models/resnet.py
index e0f97ae76..f50a52923 100644
--- a/lightly/models/resnet.py
+++ b/lightly/models/resnet.py
@@ -1,4 +1,12 @@
-""" ResNet Implementation """
+"""Custom ResNet Implementation
+
+Note that the architecture we present here differs from the one used in
+torchvision. We replace the first 7x7 convolution by a 3x3 convolution to make
+the model faster and perform better on smaller input image resolutions.
+
+Furthermore, we introduce a resnet-9 variant for extra small models. These can
+run, for example, on a microcontroller with 100 kBytes of storage.
+"""
 
 # Copyright (c) 2020. Lightly AG and its affiliates.
 # All Rights Reserved
@@ -239,7 +247,10 @@ def ResNetGenerator(name: str = 'resnet-18',
         num_classes:
             Output dim of the last layer.
         num_splits:
-            Number of splits to use for SplitBatchNorm.
+            Number of splits to use for SplitBatchNorm (for the MoCo model).
+            Increase this number to simulate multi-GPU behavior.
+            E.g. `num_splits=8` simulates an 8-GPU cluster.
+            `num_splits=0` uses normal PyTorch BatchNorm.
 
     Returns:
         ResNet as nn.Module.
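+
+    Examples:
+        >>> # usage sketch added by the editor, not part of this commit;
+        >>> # it assumes only the ResNetGenerator signature documented above.
+        >>> # Simulate the batch-norm statistics of an 8-GPU run on one GPU:
+        >>> from lightly.models import ResNetGenerator
+        >>> resnet = ResNetGenerator(name='resnet-18', num_splits=8)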
diff --git a/lightly/models/simclr.py b/lightly/models/simclr.py
index aed66c245..a6c6c550e 100644
--- a/lightly/models/simclr.py
+++ b/lightly/models/simclr.py
@@ -24,7 +24,11 @@ def _get_simclr_projection_head(num_ftrs: int, out_dim: int):
 
 class SimCLR(nn.Module):
-    """Implementation of the SimCLR architecture.
+    """Implementation of the SimCLR[0] architecture.
+
+    Recommended loss: :py:class:`lightly.loss.ntx_ent_loss.NTXentLoss`
+
+    [0] SimCLR, 2020, https://arxiv.org/abs/2002.05709
 
     Attributes:
         backbone:
diff --git a/lightly/models/simsiam.py b/lightly/models/simsiam.py
index e1cfbc5d6..79fe6d3af 100644
--- a/lightly/models/simsiam.py
+++ b/lightly/models/simsiam.py
@@ -85,7 +85,11 @@ def _projection_mlp(in_dims: int,
 
 class SimSiam(nn.Module):
-    """ Implementation of SimSiam network
+    """Implementation of the SimSiam[0] network.
+
+    Recommended loss: :py:class:`lightly.loss.sym_neg_cos_sim_loss.SymNegCosineSimilarityLoss`
+
+    [0] SimSiam, 2020, https://arxiv.org/abs/2011.10566
 
     Attributes:
         backbone:
@@ -132,8 +136,8 @@ def forward(self,
         """Forward pass through SimSiam.
 
         Extracts features with the backbone and applies the projection
-        head and prediction head to the output space. If both x0 and x1 are not
-        None, both will be passed through the backbone, projection, and
+        head and prediction head to the output space. If both x0 and x1 are not
+        None, both will be passed through the backbone, projection, and
         prediction head. If x1 is None, only x0 will be forwarded.
 
         Args:
@@ -145,11 +149,23 @@ def forward(self,
             Whether or not to return the intermediate features backbone(x).
 
         Returns:
-            The output prediction and projection of x0 and (if x1 is not None)
-            the output prediction and projection of x1. If return_features is
-            True, the output for each x is a tuple (out, f) where f are the
+            The output prediction and projection of x0 and (if x1 is not None)
+            the output prediction and projection of x1. If return_features is
+            True, the output for each x is a tuple (out, f) where f are the
             features before the projection head.
-
+
+        Examples:
+            >>> # single input, single output
+            >>> out = model(x)
+            >>>
+            >>> # single input with return_features=True
+            >>> out, f = model(x, return_features=True)
+            >>>
+            >>> # two inputs, two outputs
+            >>> out0, out1 = model(x0, x1)
+            >>>
+            >>> # two inputs, two outputs with return_features=True
+            >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
         """
         f0 = self.backbone(x0).squeeze()
         z0 = self.projection_mlp(f0)
diff --git a/lightly/transforms/rotation.py b/lightly/transforms/rotation.py
index 5f62ce3a5..00f84b831 100644
--- a/lightly/transforms/rotation.py
+++ b/lightly/transforms/rotation.py
@@ -10,13 +10,20 @@
 class RandomRotate(object):
     """Implementation of random rotation.
 
-    Randomly rotates an input image by an angle.
+    Randomly rotates an input image by a fixed angle. By default, we rotate
+    the image by 90 degrees with a probability of 50%.
+
+    This augmentation can be very useful for rotation-invariant images such as
+    those in medical imaging or satellite imagery.
 
     Attributes:
         prob:
            Probability with which image is rotated.
        angle:
-            Angle by which the image is rotated.
+            Angle by which the image is rotated. We recommend multiples of 90
+            to prevent rasterization artifacts. If you pick angles like
+            90, 180, or 270, the tensor will be rotated without introducing
+            any artifacts.
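+
+    Examples:
+        >>> # usage sketch added by the editor, not part of this commit;
+        >>> # it assumes lightly.transforms re-exports RandomRotate.
+        >>> from lightly.transforms import RandomRotate
+        >>> rotate = RandomRotate(prob=0.5, angle=90)
+        >>> rotated_image = rotate(image)  # rotates the PIL image half the time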
""" From f558b0f0fb4db61c7aa575638fa27e873275adc7 Mon Sep 17 00:00:00 2001 From: MalteEbner Date: Tue, 16 Mar 2021 14:13:08 +0100 Subject: [PATCH 07/16] lightly dataset.subset from filenames me (#226) * Create lighty_subset.py * LightlySubset: added tests and bugfixes * typehints for LightlyDataset.__init__ * add docstrings to lightly_subset --- lightly/data/dataset.py | 13 +++--- lightly/data/lighty_subset.py | 74 +++++++++++++++++++++++++++++++ tests/data/test_LightlyDataset.py | 39 ++++++++-------- tests/data/test_LightlySubset.py | 51 +++++++++++++++++++++ 4 files changed, 153 insertions(+), 24 deletions(-) create mode 100644 lightly/data/lighty_subset.py create mode 100644 tests/data/test_LightlySubset.py diff --git a/lightly/data/dataset.py b/lightly/data/dataset.py index 65918c139..0983abeb7 100644 --- a/lightly/data/dataset.py +++ b/lightly/data/dataset.py @@ -6,10 +6,11 @@ import os import shutil from PIL import Image -from typing import List, Union +from typing import List, Union, Callable import torch.utils.data as data import torchvision.datasets as datasets +from torchvision import transforms from lightly.data._helpers import _load_dataset from lightly.data._helpers import DatasetFolder @@ -53,6 +54,7 @@ def _copy_image(input_dir, output_dir, filename): _ensure_dir(target) shutil.copyfile(source, target) + def _save_image(image, output_dir, filename, fmt): """Saves an image in the output directory. @@ -141,8 +143,8 @@ class LightlyDataset: def __init__(self, input_dir: str, - transform=None, - index_to_filename=None): + transform: transforms.Compose = None, + index_to_filename: Callable[[datasets.VisionDataset, int], str] = None): # can pass input_dir=None to create an "empty" dataset self.input_dir = input_dir @@ -151,7 +153,7 @@ def __init__(self, # initialize function to get filename of image self.index_to_filename = _get_filename_by_index - if index_to_filename is not None: + if index_to_filename is not None: self.index_to_filename = index_to_filename @classmethod @@ -205,9 +207,8 @@ def __getitem__(self, index: int): """ fname = self.index_to_filename(self.dataset, index) sample, target = self.dataset.__getitem__(index) - - return sample, target, fname + return sample, target, fname def __len__(self): """Returns the length of the dataset. diff --git a/lightly/data/lighty_subset.py b/lightly/data/lighty_subset.py new file mode 100644 index 000000000..3bae3cad7 --- /dev/null +++ b/lightly/data/lighty_subset.py @@ -0,0 +1,74 @@ +from typing import List, Dict, Tuple + +from lightly.data.dataset import LightlyDataset + + +class LightlySubset(LightlyDataset): + def __init__(self, base_dataset: LightlyDataset, filenames_subset: List[str]): + """Creates a subset of a LightlyDataset. + + Args: + base_dataset: + The dataset to subset from. + filenames_subset: + The filenames of the samples to be part of the subset. + """ + self.base_dataset = base_dataset + self.filenames_subset = filenames_subset + + dict_base_dataset_filename_index: Dict[str, int] = dict() + for index in range(len(base_dataset)): + fname = base_dataset.index_to_filename(self.dataset, index) + dict_base_dataset_filename_index[fname] = index + + self.mapping_subset_index_to_baseset_index = \ + [dict_base_dataset_filename_index[filename] for filename in filenames_subset] + + def __getitem__(self, index_subset: int) -> Tuple[object, object, str]: + """An overwrite for indexing. + + Args: + index_subset: + The index of a sample w.r.t. to the subset. + E.g. 
if index_subset == 0, the sample belonging to + the first filename in self.filenames_subset is returned. + + Returns: + A tuple of the sample, its target and its filename. + + """ + index_baseset = self.mapping_subset_index_to_baseset_index[index_subset] + sample, target, fname = self.base_dataset.__getitem__(index_baseset) + return sample, target, fname + + def __len__(self) -> int: + """Overwrites the len(...) function. + + Returns: + The number of samples in the subset. + """ + return len(self.filenames_subset) + + def index_to_filename(self, dataset, index_subset: int): + """Maps from an index of a sample to its filename. + + Args: + dataset: + Unused, but specified by the overwritten + function of the parent class. + index_subset: + The index of the sample w.r.t. the subset. + + Returns: + The filename of the sample. + """ + fname = self.filenames_subset[index_subset] + return fname + + @property + def input_dir(self): + return self.base_dataset.input_dir + + @property + def dataset(self): + return self.base_dataset.dataset diff --git a/tests/data/test_LightlyDataset.py b/tests/data/test_LightlyDataset.py index ac5222e94..cd5ad3346 100644 --- a/tests/data/test_LightlyDataset.py +++ b/tests/data/test_LightlyDataset.py @@ -15,6 +15,7 @@ from lightly.data._video import VideoDataset import av import cv2 + VIDEO_DATASET_AVAILABLE = True except Exception: VIDEO_DATASET_AVAILABLE = False @@ -27,13 +28,13 @@ def ensure_dir(self, path_to_folder: str): def setUp(self): self.available_dataset_names = ['cifar10', - #'cifar100', - #'cityscapes', - #'stl10', - #'voc07-seg', - #'voc12-seg', - #'voc07-det', - #'voc12-det] + # 'cifar100', + # 'cityscapes', + # 'stl10', + # 'voc07-seg', + # 'voc12-seg', + # 'voc07-det', + # 'voc12-det] ] def create_dataset(self, n_subfolders=5, n_samples_per_subfolder=20): @@ -63,7 +64,7 @@ def create_video_dataset(self, n_videos=5, n_frames_per_video=10, w=32, h=32, c= self.n_videos = n_videos self.n_frames_per_video = n_frames_per_video - + self.input_dir = tempfile.mkdtemp() self.ensure_dir(self.input_dir) self.frames = (np.random.randn(n_frames_per_video, w, h, c) * 255).astype(np.uint8) @@ -119,7 +120,6 @@ def test_create_lightly_dataset_from_folder_nosubdir(self): tmp_dir = tempfile.mkdtemp() sample_names = [f'img_{i}.jpg' for i in range(n_tot)] for sample_idx in range(n_tot): - data = dataset[sample_idx] path = os.path.join(tmp_dir, sample_names[sample_idx]) data[0].save(path) @@ -150,7 +150,7 @@ def test_check_images(self): path = os.path.join(tmp_dir, sample_name) data[0].save(path) - corrupt_sample_names = [f'img_{i}.jpg' for i in range(n_healthy,n_healthy+n_corrupt)] + corrupt_sample_names = [f'img_{i}.jpg' for i in range(n_healthy, n_healthy + n_corrupt)] for sample_name in corrupt_sample_names: path = os.path.join(tmp_dir, sample_name) with open(path, 'a') as f: @@ -158,8 +158,8 @@ def test_check_images(self): # tests healthy_images, corrupt_images = check_images(tmp_dir) - assert(len(healthy_images) == n_healthy) - assert(len(corrupt_images) == n_corrupt) + assert (len(healthy_images) == n_healthy) + assert (len(corrupt_images) == n_corrupt) def test_not_existing_folder_dataset(self): with self.assertRaises(ValueError): @@ -173,6 +173,8 @@ def test_from_torch_dataset(self): self.assertEqual(len(_dataset), len(dataset)) self.assertEqual(len(dataset.get_filenames()), len(dataset)) + + def test_video_dataset(self): if not VIDEO_DATASET_AVAILABLE: @@ -189,10 +191,10 @@ def test_video_dataset(self): dataset = LightlyDataset(input_dir=tmp_dir) warnings.warn( - 
From 53e6a0cd83c0c03f3c36afd0f555dd525ed2bb74 Mon Sep 17 00:00:00 2001
From: Philipp Wirth <65946090+philippmwirth@users.noreply.github.com>
Date: Tue, 16 Mar 2021 16:48:12 +0100
Subject: [PATCH 08/16] Change rebase from master to develop (#229)

---
 CONTRIBUTING.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 5e75e3823..c486abf2b 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -118,7 +118,7 @@ Follow these steps to start contributing:

    ```bash
    $ git fetch upstream
-   $ git rebase upstream/master
+   $ git rebase upstream/develop
    ```

    Push the changes to your account using:

From 28271e0b1bbc60cc23bba5a03719ce427815a61b Mon Sep 17 00:00:00 2001
From: MalteEbner
Date: Wed, 17 Mar 2021 09:34:48 +0100
Subject: [PATCH 09/16] 234 no ci triggered on p rs from forks me (#237)

* trigger CI on pull_request_target
---
 .github/workflows/test.yml       | 2 +-
 .github/workflows/test_setup.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index dd9e0f0d0..82a6c66bc 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,6 +1,6 @@
 name: Unit Tests

-on: [push]
+on: [push, pull_request_target]

 jobs:
   test:
diff --git a/.github/workflows/test_setup.yml b/.github/workflows/test_setup.yml
index b416dcafa..ced0dbf4f 100644
--- a/.github/workflows/test_setup.yml
+++ b/.github/workflows/test_setup.yml
@@ -1,5 +1,5 @@
 name: check setup.py
-on: [push]
+on: [push, pull_request_target]

 jobs:
   test:

From 502ac532ccbb374c6d79fbc27fc531df03023af9 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Adri=C3=A1n=20Arnaiz-Rodr=C3=ADguez?=
Date: Wed, 17 Mar 2021 09:46:05 +0100
Subject: [PATCH 10/16] Barlow twins loss and architecture (#230)

* Add barlow twins loss
* Add barlow architecture based on SimSiam #211
* Add barlow architecture based on SimSiam #211
* Add custom BarlowTwins Architecture #211
* Add barlow twins loss
* Add barlow architecture based on SimSiam #211
* Add barlow architecture based on SimSiam #211
* Add custom BarlowTwins Architecture #211
* change device setting #211
---
 lightly/loss/__init__.py            |   3 +-
 lightly/loss/barlow_twins_loss.py   |  55 ++++++++++
 lightly/models/__init__.py          |   1 +
 lightly/models/barlow_twins_arch.py | 156 ++++++++++++++++++++++
 4 files changed, 214 insertions(+), 1 deletion(-)
 create mode 100644 lightly/loss/barlow_twins_loss.py
 create mode 100644 lightly/models/barlow_twins_arch.py

diff --git a/lightly/loss/__init__.py b/lightly/loss/__init__.py
index aabf40c5a..9c4838a62 100644
--- a/lightly/loss/__init__.py
+++ b/lightly/loss/__init__.py
@@ -4,4 +4,5 @@
 # All Rights Reserved

 from lightly.loss.ntx_ent_loss import NTXentLoss
-from lightly.loss.sym_neg_cos_sim_loss import SymNegCosineSimilarityLoss
\ No newline at end of file
+from lightly.loss.sym_neg_cos_sim_loss import SymNegCosineSimilarityLoss
+from lightly.loss.barlow_twins_loss import BarlowTwinsLoss
diff --git a/lightly/loss/barlow_twins_loss.py b/lightly/loss/barlow_twins_loss.py
new file mode 100644
index 000000000..087b43b36
--- /dev/null
+++ b/lightly/loss/barlow_twins_loss.py
@@ -0,0 +1,55 @@
+import torch
+
+class BarlowTwinsLoss(torch.nn.Module):
+    """Implementation of the Barlow Twins Loss from the Barlow Twins[0] paper.
+    This code specifically implements Algorithm 1 from [0].
+    [0] Zbontar, J. et al., 2021, Barlow Twins... https://arxiv.org/abs/2103.03230
+
+    Examples:
+
+        >>> # initialize loss function
+        >>> loss_fn = BarlowTwinsLoss()
+        >>>
+        >>> # generate two random transforms of images
+        >>> t0 = transforms(images)
+        >>> t1 = transforms(images)
+        >>>
+        >>> # feed through the BarlowTwins model
+        >>> out0, out1 = model(t0, t1)
+        >>>
+        >>> # calculate loss
+        >>> loss = loss_fn(out0, out1)
+
+    """
+
+    def __init__(self, lambda_param=5e-3):
+        """Lambda param configuration with the default value used in [0].
+
+        Args:
+            lambda_param (float, optional): Trade-off parameter weighting the
+                importance of the redundancy reduction term.
+                Defaults to 5e-3 as in [0].
+        """
+        super(BarlowTwinsLoss, self).__init__()
+        self.lambda_param = lambda_param
+
+    def forward(self, z_a: torch.Tensor, z_b: torch.Tensor):
+
+        device = z_a.device
+
+        # normalize repr. along the batch dimension
+        z_a_norm = (z_a - z_a.mean(0)) / z_a.std(0) # NxD
+        z_b_norm = (z_b - z_b.mean(0)) / z_b.std(0) # NxD
+
+        N = z_a.size(0)
+        D = z_a.size(1)
+
+        # cross-correlation matrix
+        c = torch.mm(z_a_norm.T, z_b_norm) / N # DxD
+        # loss
+        c_diff = (c - torch.eye(D, device=device)).pow(2) # DxD
+        # multiply off-diagonal elems of c_diff by lambda (mask on same device)
+        c_diff[~torch.eye(D, dtype=torch.bool, device=device)] *= self.lambda_param
+        loss = c_diff.sum()
+
+        return loss
diff --git a/lightly/models/__init__.py b/lightly/models/__init__.py
index 091a435fc..6e4906101 100644
--- a/lightly/models/__init__.py
+++ b/lightly/models/__init__.py
@@ -17,3 +17,4 @@
 from lightly.models.moco import MoCo
 from lightly.models.zoo import ZOO
 from lightly.models.zoo import checkpoints
+from lightly.models.barlow_twins_arch import BarlowTwins
diff --git a/lightly/models/barlow_twins_arch.py b/lightly/models/barlow_twins_arch.py
new file mode 100644
index 000000000..998bbab2b
--- /dev/null
+++ b/lightly/models/barlow_twins_arch.py
@@ -0,0 +1,156 @@
+""" Barlow Twins resnet-based Model [0]
+[0] Zbontar, J. et al. 2021. Barlow Twins... https://arxiv.org/abs/2103.03230
+"""
+
+# Copyright (c) 2020. Lightly AG and its affiliates.
+# All Rights Reserved
+
+import torch
+import torch.nn as nn
+from . import ResNetGenerator
+# from . since it is imported in __init__ : '.'=lightly.models.resnet
+
+def _projection_head_barlow(in_dims: int,
+                            h_dims: int = 8192,
+                            out_dims: int = 8192,
+                            num_layers: int = 3) -> nn.Sequential:
+    """
+    Projection MLP. The original paper's implementation [0] has 3 layers with
+    8192 output units each. BN and ReLU are applied to the first and second layer.
+
+    Args:
+        in_dims:
+            Input dimension of the first linear layer.
+        h_dims:
+            Hidden dimension of all the fully connected layers.
+            8192 in [0].
+        out_dims:
+            Output dimension of the final linear layer.
+            Dimension of the latent space. 8192 in [0].
+        num_layers:
+            Controls the number of layers; must be 2 or 3. Defaults to 3.
+
+    Returns:
+        nn.Sequential:
+            The projection head.
+    """
+    l1 = nn.Sequential(nn.Linear(in_dims, h_dims),
+                       nn.BatchNorm1d(h_dims),
+                       nn.ReLU(inplace=True))
+
+    l2 = nn.Sequential(nn.Linear(h_dims, h_dims),
+                       nn.BatchNorm1d(h_dims),
+                       nn.ReLU(inplace=True))
+
+    l3 = nn.Sequential(nn.Linear(h_dims, out_dims))
+    # SimSiam and BarlowTwins differ only in one BN layer
+
+    if num_layers == 3:
+        projection = nn.Sequential(l1, l2, l3)
+    elif num_layers == 2:
+        projection = nn.Sequential(l1, l3)
+    else:
+        raise NotImplementedError("Only MLPs with 2 and 3 layers are implemented.")
+
+    return projection
+
+class BarlowTwins(nn.Module):
+    """Implementation of Barlow twins[0] network.
+    ResNet-50 backbone with projection head.
+
+    Recommended loss: :py:class:`lightly.loss.barlow_twins_loss.BarlowTwinsLoss`
+
+    Default params are the ones explained in the original paper [0].
+    [0] Zbontar, J. et al. 2021. Barlow Twins... https://arxiv.org/abs/2103.03230
+
+    Attributes:
+        backbone:
+            Backbone model to extract features from images.
+            ResNet-50 in original paper [0].
+        num_ftrs:
+            Dimension of the embedding (before the projection head).
+        proj_hidden_dim:
+            Dimension of the hidden layers of the projection head.
+            8192 in the original paper [0].
+        out_dim:
+            Dimension of the output (after the projection head).
+
+    """
+
+    def __init__(self,
+                 backbone: nn.Module = ResNetGenerator('resnet-50'),
+                 num_ftrs: int = 2048,
+                 proj_hidden_dim: int = 8192,
+                 out_dim: int = 8192,
+                 num_mlp_layers: int = 3):
+
+        super(BarlowTwins, self).__init__()
+
+        self.backbone = backbone
+        self.num_ftrs = num_ftrs
+        self.proj_hidden_dim = proj_hidden_dim
+        self.out_dim = out_dim
+
+        self.projection_mlp = \
+            _projection_head_barlow(num_ftrs, proj_hidden_dim, out_dim, num_mlp_layers)
+
+    def forward(self,
+                x0: torch.Tensor,
+                x1: torch.Tensor = None,
+                return_features: bool = False):
+
+        """Forward pass through BarloTwins.
+
+        Extracts features with the backbone and applies the projection
+        head to the output space. If both x0 and x1 are not None, both will be
+        passed through the backbone and projection. If x1 is None, only x0 will
+        be forwarded.
+        Barlow Twins only implements a projection head, unlike SimSiam.
+
+        Args:
+            x0:
+                Tensor of shape bsz x channels x W x H.
+            x1:
+                Tensor of shape bsz x channels x W x H.
+            return_features:
+                Whether or not to return the intermediate features backbone(x).
+
+        Returns:
+            The output projection of x0 and (if x1 is not None)
+            the output projection of x1. If return_features is
+            True, the output for each x is a tuple (out, f) where f are the
+            features before the projection head.
+
+        Examples:
+            >>> # single input, single output
+            >>> out = model(x)
+            >>>
+            >>> # single input with return_features=True
+            >>> out, f = model(x, return_features=True)
+            >>>
+            >>> # two inputs, two outputs
+            >>> out0, out1 = model(x0, x1)
+            >>>
+            >>> # two inputs, two outputs with return_features=True
+            >>> (out0, f0), (out1, f1) = model(x0, x1, return_features=True)
+        """
+        # forward pass first input
+        f0 = self.backbone(x0).squeeze()
+        out0 = self.projection_mlp(f0)
+
+        # append features if requested
+        if return_features:
+            out0 = (out0, f0)
+
+        if x1 is None:
+            return out0
+
+        # forward pass second input
+        f1 = self.backbone(x1).squeeze()
+        out1 = self.projection_mlp(f1)
+
+        # append features if requested
+        if return_features:
+            out1 = (out1, f1)
+
+        return out0, out1
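
Not part of the patch, but a minimal end-to-end sketch of how the new model and
loss are meant to be combined. Assumptions: a torchvision ResNet-18 backbone
with its classification head stripped (hence num_ftrs=512), deliberately small
projection dimensions to keep the example light, and random tensors standing in
for two augmented views of the same images:

    import torch
    import torchvision
    from lightly.models import BarlowTwins
    from lightly.loss import BarlowTwinsLoss

    # assumption: strip the final fc layer so the backbone outputs raw features
    resnet = torchvision.models.resnet18()
    backbone = torch.nn.Sequential(*list(resnet.children())[:-1])

    model = BarlowTwins(backbone, num_ftrs=512, proj_hidden_dim=1024, out_dim=1024)
    criterion = BarlowTwinsLoss()

    # two augmented views of the same batch (random stand-ins here)
    x0 = torch.randn(8, 3, 64, 64)
    x1 = torch.randn(8, 3, 64, 64)

    # the model projects both views; the loss decorrelates the embeddings
    z0, z1 = model(x0, x1)
    loss = criterion(z0, z1)
    loss.backward()
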
From a9c22c81681e4f69bcb9961b32970519af6f0bad Mon Sep 17 00:00:00 2001
From: MalteEbner
Date: Wed, 17 Mar 2021 13:26:18 +0100
Subject: [PATCH 11/16] trigger CI on workflow_dispatch (#239)

---
 .github/workflows/test.yml       | 2 +-
 .github/workflows/test_setup.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 82a6c66bc..0936d87e8 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,6 +1,6 @@
 name: Unit Tests

-on: [push, pull_request_target]
+on: [push, workflow_dispatch]

 jobs:
   test:
diff --git a/.github/workflows/test_setup.yml b/.github/workflows/test_setup.yml
index ced0dbf4f..e3bf6d422 100644
--- a/.github/workflows/test_setup.yml
+++ b/.github/workflows/test_setup.yml
@@ -1,5 +1,5 @@
 name: check setup.py
-on: [push, pull_request_target]
+on: [push, workflow_dispatch]

 jobs:
   test:

From 50e543fbb381966ff1749955a1c4376b2c21469d Mon Sep 17 00:00:00 2001
From: IgorSusmelj
Date: Wed, 17 Mar 2021 17:17:49 +0100
Subject: [PATCH 12/16] update readme with barlow twins (#241)

* Add supported models to readme
* Add barlowtwins to docs
* Rename barlow twins file. Remove default backbone.
* Rename barlow twins * Make tests run with only 1 workers --- Makefile | 2 +- README.md | 7 +++++++ docs/source/lightly.models.rst | 5 +++++ lightly/models/__init__.py | 2 +- lightly/models/{barlow_twins_arch.py => barlowtwins.py} | 7 +++---- 5 files changed, 17 insertions(+), 6 deletions(-) rename lightly/models/{barlow_twins_arch.py => barlowtwins.py} (96%) diff --git a/Makefile b/Makefile index 2c28cb283..e813d2541 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ lint-tests: ## run tests test: - pytest tests -n 4 --runslow + pytest tests --runslow ## build source and wheel package dist: clean diff --git a/README.md b/README.md index 76d19b83c..b50524e53 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,13 @@ Lightly is a computer vision framework for self-supervised learning. - [Github](https://github.com/lightly-ai/lightly) - [Discord](https://discord.gg/xvNJW94) +### Supported Models + +- [MoCo, 2019](https://arxiv.org/abs/1911.05722) +- [SimCLR, 2020](https://arxiv.org/abs/2002.05709) +- [SimSiam, 2021](https://arxiv.org/abs/2011.10566) +- [Barlow Twins, 2021](https://arxiv.org/abs/2103.03230) + ### Tutorials diff --git a/docs/source/lightly.models.rst b/docs/source/lightly.models.rst index 239e407c8..46087d007 100644 --- a/docs/source/lightly.models.rst +++ b/docs/source/lightly.models.rst @@ -8,6 +8,11 @@ lightly.models .. automodule:: lightly.models.resnet :members: +.barlowtwins +--------------- +.. automodule:: lightly.models.barlowtwins + :members: + .simclr --------------- .. automodule:: lightly.models.simclr diff --git a/lightly/models/__init__.py b/lightly/models/__init__.py index 6e4906101..56d7a0e09 100644 --- a/lightly/models/__init__.py +++ b/lightly/models/__init__.py @@ -12,9 +12,9 @@ # All Rights Reserved from lightly.models.resnet import ResNetGenerator +from lightly.models.barlowtwins import BarlowTwins from lightly.models.simclr import SimCLR from lightly.models.simsiam import SimSiam from lightly.models.moco import MoCo from lightly.models.zoo import ZOO from lightly.models.zoo import checkpoints -from lightly.models.barlow_twins_arch import BarlowTwins diff --git a/lightly/models/barlow_twins_arch.py b/lightly/models/barlowtwins.py similarity index 96% rename from lightly/models/barlow_twins_arch.py rename to lightly/models/barlowtwins.py index 998bbab2b..fc9da95c3 100644 --- a/lightly/models/barlow_twins_arch.py +++ b/lightly/models/barlowtwins.py @@ -55,8 +55,7 @@ def _projection_head_barlow(in_dims: int, return projection class BarlowTwins(nn.Module): - """Implementation of Barlow twins[0] network. - ResNet-50 backbone with projection head. + """Implementation of BarlowTwins[0] network. Recommended loss: :py:class:`lightly.loss.barlow_twins_loss.BarlowTwinsLoss` @@ -78,7 +77,7 @@ class BarlowTwins(nn.Module): """ def __init__(self, - backbone: nn.Module = ResNetGenerator('resnet-50'), + backbone: nn.Module, num_ftrs: int = 2048, proj_hidden_dim: int = 8192, out_dim: int = 8192, @@ -99,7 +98,7 @@ def forward(self, x1: torch.Tensor = None, return_features: bool = False): - """Forward pass through BarloTwins. + """Forward pass through BarlowTwins. Extracts features with the backbone and applies the projection head to the output space. 
If both x0 and x1 are not None, both will be From e72589475927056103352c1644b415407494c246 Mon Sep 17 00:00:00 2001 From: Philipp Wirth <65946090+philippmwirth@users.noreply.github.com> Date: Wed, 17 Mar 2021 17:38:27 +0100 Subject: [PATCH 13/16] 6 full dataset download pw (#233) * Add api workflow download client * Add download option to CLI * Add download tests and fix tox * Add new download to cli instructions * Minor change+ * Fix tests --- .../getting_started/command_line_tool.rst | 21 +++- lightly/api/__init__.py | 2 + lightly/api/api_workflow_client.py | 3 +- lightly/api/api_workflow_download_dataset.py | 111 ++++++++++++++++++ lightly/cli/download_cli.py | 12 +- .../mocked_api_workflow_client.py | 13 +- tests/api_workflow/test_api_workflow.py | 3 +- .../test_api_workflow_download_dataset.py | 46 ++++++++ tox.ini | 2 +- 9 files changed, 199 insertions(+), 14 deletions(-) create mode 100644 lightly/api/api_workflow_download_dataset.py create mode 100644 tests/api_workflow/test_api_workflow_download_dataset.py diff --git a/docs/source/getting_started/command_line_tool.rst b/docs/source/getting_started/command_line_tool.rst index 6bcffe963..82bc9ab66 100644 --- a/docs/source/getting_started/command_line_tool.rst +++ b/docs/source/getting_started/command_line_tool.rst @@ -119,16 +119,27 @@ You can upload embeddings directly to the Lightly Platform using the CLI. Download data using the CLI ----------------------------------------------- You can download a dataset with a given tag from the Lightly Platform using the -following CLI command. The CLI provides you with two options. Either you -download just a list or copy the files from the original dataset into a new -folder. The second option is very handy for quick prototyping. +following CLI command. The CLI provides you with three options: + +* Download the list of filenames for a given tag in the dataset. + +* Download the images for a given tag in the dataset. + +* Copy the images for a given tag from an input directory to a target directory. + +The last option allows you to very quickly extract only the images in a given tag +without the need to download them explicitly. .. code-block:: bash # download a list of files lightly-download tag_name=my_tag_name dataset_id=your_dataset_id token=your_token - # copy files in a tag to a new folder + # download the images and store them in an output directory + lightly-download tag_name=my_tag_name dataset_id=your_dataset_id token=your_token \ + output_dir=path/to/output/dir + + # copy images from an input directory to an output directory lightly-download tag_name=my_tag_name dataset_id=your_dataset_id token=your_token \ - input_dir=cat output_dir=cat_curated + input_dir=path/to/input/dir output_dir=path/to/output/dir diff --git a/lightly/api/__init__.py b/lightly/api/__init__.py index be93fa1f9..86570f7b8 100644 --- a/lightly/api/__init__.py +++ b/lightly/api/__init__.py @@ -2,3 +2,5 @@ # Copyright (c) 2020. Lightly AG and its affiliates. 
# All Rights Reserved + +from lightly.api import routes diff --git a/lightly/api/api_workflow_client.py b/lightly/api/api_workflow_client.py index 06400a3a5..f54616f0a 100644 --- a/lightly/api/api_workflow_client.py +++ b/lightly/api/api_workflow_client.py @@ -16,6 +16,7 @@ from lightly.api.api_workflow_upload_dataset import _UploadDatasetMixin from lightly.api.api_workflow_upload_embeddings import _UploadEmbeddingsMixin +from lightly.api.api_workflow_download_dataset import _DownloadDatasetMixin from lightly.api.api_workflow_sampling import _SamplingMixin from lightly.openapi_generated.swagger_client import TagData, ScoresApi, QuotaApi from lightly.openapi_generated.swagger_client.api.embeddings_api import EmbeddingsApi @@ -27,7 +28,7 @@ from lightly.openapi_generated.swagger_client.configuration import Configuration -class ApiWorkflowClient(_UploadEmbeddingsMixin, _SamplingMixin, _UploadDatasetMixin, _DatasetsMixin): +class ApiWorkflowClient(_UploadEmbeddingsMixin, _SamplingMixin, _UploadDatasetMixin, _DownloadDatasetMixin, _DatasetsMixin): """Provides a uniform interface to communicate with the api The APIWorkflowClient is used to communicaate with the Lightly API. The client diff --git a/lightly/api/api_workflow_download_dataset.py b/lightly/api/api_workflow_download_dataset.py new file mode 100644 index 000000000..a743b8302 --- /dev/null +++ b/lightly/api/api_workflow_download_dataset.py @@ -0,0 +1,111 @@ +import warnings +from concurrent.futures.thread import ThreadPoolExecutor +from typing import Union +import io +import os +import tqdm +from urllib.request import Request, urlopen +from PIL import Image + +from lightly.openapi_generated.swagger_client import TagCreator +from lightly.openapi_generated.swagger_client.models.sample_create_request import SampleCreateRequest +from lightly.api.utils import check_filename, check_image, get_thumbnail_from_img, PIL_to_bytes +from lightly.api.bitmask import BitMask +from lightly.openapi_generated.swagger_client.models.initial_tag_create_request import InitialTagCreateRequest +from lightly.openapi_generated.swagger_client.models.image_type import ImageType +from lightly.data.dataset import LightlyDataset + + + +def _make_dir_and_save_image(output_dir: str, filename: str, img: Image): + """Saves the images and creates necessary subdirectories. + + """ + path = os.path.join(output_dir, filename) + + head = os.path.split(path)[0] + if not os.path.exists(head): + os.makedirs(head) + + img.save(path) + img.close() + + +def _get_image_from_read_url(read_url: str): + """Makes a get request to the signed read url and returns the image. + + """ + request = Request(read_url, method='GET') + with urlopen(request) as response: + blob = response.read() + img = Image.open(io.BytesIO(blob)) + return img + + +class _DownloadDatasetMixin: + + def download_dataset(self, + output_dir: str, + tag_name: str = 'initial-tag', + verbose: bool = True): + """Downloads images from the web-app and stores them in output_dir. + + Args: + output_dir: + Where to store the downloaded images. + tag_name: + Name of the tag which should be downloaded. + verbose: + Whether or not to show the progress bar. + + Raises: + ValueError if the specified tag does not exist on the dataset. + RuntimeError if the connection to the server failed. 
+ + """ + + # check if images are available + dataset = self.datasets_api.get_dataset_by_id(self.dataset_id) + if dataset.img_type != ImageType.FULL: + # only thumbnails or metadata available + raise ValueError( + f"Dataset with id {self.dataset_id} has no downloadable images!" + ) + + # check if tag exists + available_tags = self._get_all_tags() + try: + print(available_tags) + tag = next(tag for tag in available_tags if tag.name == tag_name) + except StopIteration: + raise ValueError( + f"Dataset with id {self.dataset_id} has no tag {tag_name}!" + ) + + # get sample ids + sample_ids = self.mappings_api.get_sample_mappings_by_dataset_id( + self.dataset_id, + field='_id' + ) + + indices = BitMask.from_hex(tag.bit_mask_data).to_indices() + sample_ids = [sample_ids[i] for i in indices] + filenames = [self.filenames_on_server[i] for i in indices] + + if verbose: + print(f'Downloading {len(sample_ids)} images:', flush=True) + pbar = tqdm.tqdm(unit='imgs', total=len(sample_ids)) + + # download images + for sample_id, filename in zip(sample_ids, filenames): + read_url = self.samples_api.get_sample_image_read_url_by_id( + self.dataset_id, + sample_id, + type="full", + ) + + img = _get_image_from_read_url(read_url) + _make_dir_and_save_image(output_dir, filename, img) + + if verbose: + pbar.update(1) diff --git a/lightly/cli/download_cli.py b/lightly/cli/download_cli.py index 57e0b732e..feacb2a37 100644 --- a/lightly/cli/download_cli.py +++ b/lightly/cli/download_cli.py @@ -67,8 +67,12 @@ def _download_cli(cfg, is_cli_call=True): msg += os.path.join(os.getcwd(), cfg['tag_name'] + '.txt') print(msg, flush=True) - if cfg['input_dir'] and cfg['output_dir']: + if not cfg['input_dir'] and cfg['output_dir']: + # download full images from api + output_dir = fix_input_path(cfg['output_dir']) + api_workflow_client.download_dataset(output_dir, tag_name=tag_name) + elif cfg['input_dir'] and cfg['output_dir']: input_dir = fix_input_path(cfg['input_dir']) output_dir = fix_input_path(cfg['output_dir']) print(f'Copying files from {input_dir} to {output_dir}.') @@ -118,9 +122,11 @@ def download_cli(cfg): >>> # download list of all files in tag 'my-tag' from the Lightly platform >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag' >>> + >>> # download all images in tag 'my-tag' from the Lightly platform + >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag' output_dir='my_data/' + >>> >>> # copy all files in 'my-tag' to a new directory - >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag' \\ - >>> input_dir=data/ output_dir=new_data/ + >>> lightly-download token='123' dataset_id='XYZ' tag_name='my-tag' input_dir='data/' output_dir='my_data/' """ diff --git a/tests/api_workflow/mocked_api_workflow_client.py b/tests/api_workflow/mocked_api_workflow_client.py index 0b06815dc..aa0a1505d 100644 --- a/tests/api_workflow/mocked_api_workflow_client.py +++ b/tests/api_workflow/mocked_api_workflow_client.py @@ -133,12 +133,16 @@ def get_sample_image_write_url_by_id(self, dataset_id, sample_id, is_thumbnail, url = f"{sample_id}_write_url" return url + def get_sample_image_read_url_by_id(self, dataset_id, sample_id, type, **kwargs): + url = f"{sample_id}_write_url" + return url + class MockedDatasetsApi(DatasetsApi): def __init__(self, api_client): no_datasets = 3 self.default_datasets = [DatasetData(name=f"dataset_{i}", id=f"dataset_{i}_id", last_modified_at=i, - type="", size_in_bytes=-1, n_samples=-1, created_at=-1) + type="", img_type="full", size_in_bytes=-1, n_samples=-1, 
created_at=-1) for i in range(no_datasets)] self.reset() @@ -157,6 +161,9 @@ def create_dataset(self, body: DatasetCreateRequest, **kwargs): response_ = CreateEntityResponse(id=id) return response_ + def get_dataset_by_id(self, dataset_id): + return next(dataset for dataset in self.default_datasets if dataset_id == dataset.id) + def delete_dataset_by_id(self, dataset_id, **kwargs): datasets_without_that_id = [dataset for dataset in self.datasets if dataset.id != dataset_id] assert len(datasets_without_that_id) == len(self.datasets) - 1 @@ -222,5 +229,5 @@ def __init__(self, *args, **kwargs): class MockedApiWorkflowSetup(unittest.TestCase): - def setUp(self) -> None: - self.api_workflow_client = MockedApiWorkflowClient(token="token_xyz", dataset_id="dataset_id_xyz") + def setUp(self, token="token_xyz", dataset_id="dataset_id_xyz") -> None: + self.api_workflow_client = MockedApiWorkflowClient(token=token, dataset_id=dataset_id) diff --git a/tests/api_workflow/test_api_workflow.py b/tests/api_workflow/test_api_workflow.py index 43b6af6ae..03763daa6 100644 --- a/tests/api_workflow/test_api_workflow.py +++ b/tests/api_workflow/test_api_workflow.py @@ -16,13 +16,14 @@ class TestApiWorkflow(MockedApiWorkflowSetup): def setUp(self) -> None: - lightly.api.api_workflow_client.__version__ = "1.1.1" + lightly.api.api_workflow_client.__version__ = lightly.__version__ self.api_workflow_client = MockedApiWorkflowClient(token="token_xyz") def test_error_if_version_is_incompatible(self): lightly.api.api_workflow_client.__version__ = "0.0.0" with self.assertRaises(ValueError): MockedApiWorkflowClient(token="token_xyz") + lightly.api.api_workflow_client.__version__ = lightly.__version__ def test_dataset_id_nonexisting(self): self.api_workflow_client.datasets_api.reset() diff --git a/tests/api_workflow/test_api_workflow_download_dataset.py b/tests/api_workflow/test_api_workflow_download_dataset.py new file mode 100644 index 000000000..c2ded73aa --- /dev/null +++ b/tests/api_workflow/test_api_workflow_download_dataset.py @@ -0,0 +1,46 @@ +import os +import shutil + +from unittest.mock import patch + +import PIL +import numpy as np + +import torchvision + +import lightly +from lightly.data.dataset import LightlyDataset + +from tests.api_workflow.mocked_api_workflow_client import MockedApiWorkflowSetup +from lightly.openapi_generated.swagger_client.models.dataset_data import DatasetData + + + +class TestApiWorkflowDownloadDataset(MockedApiWorkflowSetup): + def setUp(self) -> None: + MockedApiWorkflowSetup.setUp(self, dataset_id='dataset_0_id') + self.api_workflow_client.tags_api.no_tags = 3 + + def test_download_non_existing_tag(self): + with self.assertRaises(ValueError): + self.api_workflow_client.download_dataset('path/to/dir', tag_name='this_is_not_a_real_tag_name') + + def test_download_thumbnails(self): + def get_thumbnail_dataset_by_id(*args): + return DatasetData(name=f'dataset', id='dataset_id', last_modified_at=0, + type='thumbnails', size_in_bytes=-1, n_samples=-1, created_at=-1) + self.api_workflow_client.datasets_api.get_dataset_by_id = get_thumbnail_dataset_by_id + with self.assertRaises(ValueError): + self.api_workflow_client.download_dataset('path/to/dir') + + def test_download_dataset(self): + def my_func(read_url): + return PIL.Image.fromarray(np.zeros((32, 32))).convert('RGB') + #mock_get_image_from_readurl.return_value = PIL.Image.fromarray(np.zeros((32, 32))) + lightly.api.api_workflow_download_dataset._get_image_from_read_url = my_func + 
self.api_workflow_client.download_dataset('path-to-dir-remove-me', tag_name='initial-tag') + shutil.rmtree('path-to-dir-remove-me') + + + + diff --git a/tox.ini b/tox.ini index 8f007592d..d3fac701b 100644 --- a/tox.ini +++ b/tox.ini @@ -107,4 +107,4 @@ commands = pip install torch==1.7.0+cu101 torchvision==0.8.1+cu101 -f https://download.pytorch.org/whl/torch_stable.html pip install .[all] echo "Running video test" - make test + make test \ No newline at end of file From 7cef5f4898e33217fc5f8429b4b67511d97fccfe Mon Sep 17 00:00:00 2001 From: IgorSusmelj Date: Wed, 17 Mar 2021 20:49:03 +0100 Subject: [PATCH 14/16] Bump version to 1.1.2 (#243) --- lightly/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lightly/__init__.py b/lightly/__init__.py index fd2c6c63c..bf0871418 100644 --- a/lightly/__init__.py +++ b/lightly/__init__.py @@ -70,7 +70,7 @@ # All Rights Reserved __name__ = 'lightly' -__version__ = '1.1.1' +__version__ = '1.1.2' try: From 50f873ea48a2d0c5d11b027fc8da03d4abb913bf Mon Sep 17 00:00:00 2001 From: Philipp Wirth <65946090+philippmwirth@users.noreply.github.com> Date: Thu, 18 Mar 2021 10:36:23 +0100 Subject: [PATCH 15/16] Remove print statement (#247) --- lightly/api/api_workflow_download_dataset.py | 1 - 1 file changed, 1 deletion(-) diff --git a/lightly/api/api_workflow_download_dataset.py b/lightly/api/api_workflow_download_dataset.py index a743b8302..4cb4cf902 100644 --- a/lightly/api/api_workflow_download_dataset.py +++ b/lightly/api/api_workflow_download_dataset.py @@ -75,7 +75,6 @@ def download_dataset(self, # check if tag exists available_tags = self._get_all_tags() try: - print(available_tags) tag = next(tag for tag in available_tags if tag.name == tag_name) except StopIteration: raise ValueError( From 212a5b4325ee42d8abd4a4370176bc659bee8870 Mon Sep 17 00:00:00 2001 From: IgorSusmelj Date: Thu, 18 Mar 2021 12:04:30 +0100 Subject: [PATCH 16/16] Add coverage config to ignore generated files (#246) --- .coveragerc | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 .coveragerc diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 000000000..7aaf308ea --- /dev/null +++ b/.coveragerc @@ -0,0 +1,3 @@ +[run] +omit = + lightly/openapi_generated/* \ No newline at end of file
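
As a closing illustration, not part of any commit above: the full-dataset
download added in [PATCH 13/16] can be driven from Python as well as from the
CLI. A minimal sketch, assuming a valid API token, the id of a dataset that was
uploaded with full images, and the default 'initial-tag' (token, dataset id,
and output path are placeholders):

    from lightly.api.api_workflow_client import ApiWorkflowClient

    # token and dataset_id are placeholders for your own credentials
    client = ApiWorkflowClient(token='MY_TOKEN', dataset_id='MY_DATASET_ID')

    # downloads all images in the tag into the output directory;
    # raises ValueError if the dataset holds only thumbnails/metadata
    # or if the tag does not exist
    client.download_dataset('path/to/output/dir', tag_name='initial-tag')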