From 4970eaa914311a90baad4c566a391a24466017fe Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Fri, 5 Jun 2020 15:38:56 +0200 Subject: [PATCH 01/54] TwitterReader v0 --- README.md | 1 + nck/helpers/twitter_helper.py | 77 +++++ nck/readers/README.md | 58 ++++ nck/readers/__init__.py | 4 +- nck/readers/twitter_reader.py | 466 +++++++++++++++++++++++++++ requirements.txt | 4 +- tests/readers/test_twitter_reader.py | 213 ++++++++++++ 7 files changed, 821 insertions(+), 2 deletions(-) create mode 100644 nck/helpers/twitter_helper.py create mode 100644 nck/readers/twitter_reader.py create mode 100644 tests/readers/test_twitter_reader.py diff --git a/README.md b/README.md index 91f5dde7..3d58d54f 100644 --- a/README.md +++ b/README.md @@ -14,6 +14,7 @@ Nautilus connectors kit is a tool which aim is getting raw data from different s - Google Adwords - Google Search Console - Facebook Business Manager +- Twitter Ads - Amazon S3 - Oracle - SalesForce diff --git a/nck/helpers/twitter_helper.py b/nck/helpers/twitter_helper.py new file mode 100644 index 00000000..25cc5e57 --- /dev/null +++ b/nck/helpers/twitter_helper.py @@ -0,0 +1,77 @@ +# License as published by the Free Software Foundation; either +# version 3 of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+ +import logging + +from twitter_ads.campaign import FundingInstrument, Campaign, LineItem +from twitter_ads.creative import MediaCreative, PromotedTweet + + +class JobTimeOutError(Exception): + def __init__(self, message): + super().__init__(message) + logging.error(message) + + +REPORT_TYPES = ["ANALYTICS", "REACH", "ENTITY"] + +ENTITY_OBJECTS = { + "FUNDING_INSTRUMENT": FundingInstrument, + "CAMPAIGN": Campaign, + "LINE_ITEM": LineItem, + "MEDIA_CREATIVE": MediaCreative, + "PROMOTED_TWEET": PromotedTweet, +} + +ENTITY_ATTRIBUTES = { + entity: list(ENTITY_OBJECTS[entity].__dict__["PROPERTIES"].keys()) + for entity in ENTITY_OBJECTS +} + +GRANULARITIES = ["DAY", "TOTAL"] + +METRIC_GROUPS = [ + "ENGAGEMENT", + "BILLING", + "VIDEO", + "MEDIA", + "MOBILE_CONVERSION", + "WEB_CONVERSION", + "LIFE_TIME_VALUE_MOBILE_CONVERSION", +] + +PLACEMENTS = [ + "ALL_ON_TWITTER", + "PUBLISHER_NETWORK", +] + +SEGMENTATION_TYPES = [ + "AGE", + "APP_STORE_CATEGORY", + "AUDIENCES", + "CONVERSATIONS", + "CONVERSION_TAGS", + "DEVICES", + "EVENTS", + "GENDER", + "INTERESTS", + "KEYWORDS", + "LANGUAGES", + "LOCATIONS", + "METROS", + "PLATFORMS", + "PLATFORM_VERSIONS", + "POSTAL_CODES", + "REGIONS", + "SIMILAR_TO_FOLLOWERS_OF_USER", + "TV_SHOWS", +] diff --git a/nck/readers/README.md b/nck/readers/README.md index fd730835..97871bdb 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -50,6 +50,64 @@ See the documents below for a better understanding of the parameters: - [API Reference for Ad Insights](https://developers.facebook.com/docs/marketing-api/reference/adgroup/insights/) - [Available Fields for Nautilus](../helpers/facebook_helper.py) +## Twitter Ads Reader + +#### How to obtain credentials + +* **Apply for a developper account** trough [this link](https://developer.twitter.com/en/apply). +* **Create a Twitter app** on the developper portal: it will generate your authentication credentials. 
+* **Apply for Twitter Ads API access** by filling out [this form](https://developer.twitter.com/en/docs/ads/general/overview/adsapi-application). Receiving Twitter approval may take up to 7 business days. +* **Get a Campaign Analyst access to the Twitter Ads account** you wish to retrieve data for, on the @handle that you used to create your Twitter App. + +#### Quickstart + +The Twitter Ads Reader can collect **3 types of reports**, making calls to 3 endpoints of the Twitter Ads API: +* **ANALYTICS reports**, making calls to the [Asynchronous Analytics endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous). These reports return performance data for a wide range of metrics, that **can be aggregated over time**. Output data **can be split by day** when requested over a larger time period. +* **REACH reports**, making calls to the [Reach and Average Frequency endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/reach). These reports return performance data with a focus on reach and frequency metrics, that **cannot be aggregated over time** (*e.g. the reach of day A and B is not equal to the reach of day A + the reach of day B, as it counts unique individuals*). Output data **cannot be split by day** when requested over a larger time period. These reports are available **only for the Funding Instrument and Campaign entities**. +* **ENTITY reports**, making calls to [Campaign Management endpoints](https://developer.twitter.com/en/docs/ads/campaign-management/api-reference). These reports return details on entity configuration since the creation of the Twitter Ads account. 
+ +*Call example for ANALYTICS reports* +This call will collect engagement metrics for Line Item entities, splitting the results by day, from 2020-01-01 to 2020-01-03: +``` +python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type ANALYTICS --twitter-entity LINE_ITEM --twitter-metric-group ENGAGEMENT --twitter-segmentation-type AGE --twitter-granularity DAY --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console +``` + +*Call example for REACH reports* +This call will collect reach metrics (*total_audience_reach, average_frequency*) for Campaign entities, from 2020-01-01 to 2020-01-03: +``` +python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type REACH --twitter-entity CAMPAIGN --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console +``` + +*Call example for ENTITY reports* +This call collects details on the configuration of Campaign entities (id, name, total_budget_amount_local_micro, currency), since the creation of the Twitter Ads account: +``` +python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type ENTITY --twitter-entity CAMPAIGN --twitter-entity-attribute id --twitter-entity-attribute name --twitter-entity-attribute total_budget_amount_local_micro --twitter-entity-attribute currency write_console +``` + +#### Parameters + +|CLI option|Documentation| +|--|--| +|`--twitter-consumer-key`|API key, available in the 'Keys and tokens' section of your Twitter Developper App.| +|`--twitter-consumer-secret`|API secret key, available in the 'Keys and tokens' section of your Twitter Developper App.| +|`--twitter-access-token`|Access token, available in the 
'Keys and tokens' section of your Twitter Developper App.| +|`--twitter-access-token-secret`|Access token secret, available in the 'Keys and tokens' section of your Twitter Developper App.| +|`--twitter-account-id`|Specifies the Twitter Account ID for which the data should be returned.| +|`--twitter-report-type`|Specifies the type of report to collect. *Possible values: ANALYTICS, REACH, ENTITY.*| +|`--twitter-entity`|Specifies the entity type to retrieve data for. *Possible values: FUNDING_INSTRUMENT, CAMPAIGN, LINE_ITEM, MEDIA_CREATIVE, PROMOTED_TWEET.*| +|`--twitter-entity-attribute`|Specific to ENTITY reports. Specifies the entity attribute (configuration detail) that should be returned.| +|`--twitter-granularity`|Specific to ANALYTICS reports. Specifies how granular the retrieved data should be. *Possible values: TOTAL (default), DAY.*| +|`--twitter-metric-group`|Specific to ANALYTICS reports. Specifies the list of metrics (as a group) that should be returned. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | +|`--twitter-placement`|Specific to ANALYTICS reports. Scopes the retrieved data to a particular placement. *Possible values: ALL_ON_TWITTER (default), PUBLISHER_NETWORK.*| +|`--twitter-segmentation-type`|Specific to ANALYTICS reports. Specifies how the retrieved data should be segmented. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | +|`--twitter-platform`|Specific to ANALYTICS reports. Required if segmentation_type is set to DEVICES or PLATFORM_VERSIONS. *Possible values can be identified through the GET targeting_criteria/platforms endpoint.*| +|`--twitter-country`|Specific to ANALYTICS reports. Required if segmentation_type is set to CITIES, POSTAL_CODES, or REGIONS. 
*Possible values can be identified through the GET targeting_criteria/platforms endpoint.*| +|`--twitter-start-date`|Specifies report start date (format: YYYY-MM-DD).| +|`--twitter-end-date`|Specifies report end date (format: YYYY-MM-DD).| +|`--twitter-add-request-date-to-report`|If set to *True* (default: *False*), the date on which the request is made will appear on each report record.| + +If you need any further information, the documentation of Twitter Ads API can be found [here](https://developer.twitter.com/en/docs/ads/general/overview). + ## Google Readers diff --git a/nck/readers/__init__.py b/nck/readers/__init__.py index 7d9993bb..3081303a 100644 --- a/nck/readers/__init__.py +++ b/nck/readers/__init__.py @@ -26,6 +26,7 @@ from nck.readers.gsheets_reader import gsheets from nck.readers.salesforce_reader import salesforce from nck.readers.facebook_reader import facebook_marketing +from nck.readers.twitter_reader import twitter from nck.readers.dbm_reader import dbm from nck.readers.dcm_reader import dcm from nck.readers.ga_reader import ga @@ -44,6 +45,7 @@ s3, sa360_reader, facebook_marketing, + twitter, oracle, dbm, dcm, @@ -52,7 +54,7 @@ adobe, radarly, yandex_campaigns, - yandex_statistics + yandex_statistics, ] diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py new file mode 100644 index 00000000..d1851bfe --- /dev/null +++ b/nck/readers/twitter_reader.py @@ -0,0 +1,466 @@ +# License as published by the Free Software Foundation; either +# version 3 of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +import logging +import click +from click import ClickException +import pandas as pd +from time import sleep +from itertools import chain +from datetime import datetime, timedelta + +from nck.utils.args import extract_args +from nck.commands.command import processor +from nck.readers.reader import Reader +from nck.streams.json_stream import JSONStream +from nck.helpers.twitter_helper import ( + REPORT_TYPES, + ENTITY_OBJECTS, + ENTITY_ATTRIBUTES, + GRANULARITIES, + METRIC_GROUPS, + PLACEMENTS, + SEGMENTATION_TYPES, + JobTimeOutError, +) + +from twitter_ads.client import Client +from twitter_ads.utils import split_list +from twitter_ads import API_VERSION +from twitter_ads.http import Request +from twitter_ads.cursor import Cursor + +logging.basicConfig(level="INFO") +logger = logging.getLogger() + +API_DATEFORMAT = "%Y-%m-%dT%H:%M:%SZ" +REP_DATEFORMAT = "%Y-%m-%d" +MAX_WAITING_SEC = 3600 +MAX_ENTITY_IDS_PER_JOB = 20 +MAX_CONCURRENT_JOBS = 100 + + +@click.command(name="read_twitter") +@click.option( + "--twitter-consumer-key", + required=True, + help="API key, available in the 'Keys and tokens' section of your Twitter Developper App.", +) +@click.option( + "--twitter-consumer-secret", + required=True, + help="API secret key, available in the 'Keys and tokens' section of your Twitter Developper App.", +) +@click.option( + "--twitter-access-token", + required=True, + help="Access token, available in the 'Keys and tokens' section of your Twitter Developper App.", +) +@click.option( + "--twitter-access-token-secret", + required=True, + help="Access token secret, available in the 'Keys and tokens' section of your Twitter Developper App.", +) +@click.option( + "--twitter-account-id", + required=True, + help="Specifies the Twitter Account ID for which 
the data should be returned.", +) +@click.option( + "--twitter-report-type", + required=True, + type=click.Choice(REPORT_TYPES), + help="Specifies the type of report to collect: " + "ANALYTICS (performance report, any kind of metrics), " + "REACH (performance report, focus on reach and frequency metrics), " + "ENTITY (entity configuration report)", +) +@click.option( + "--twitter-entity", + required=True, + type=click.Choice(list(ENTITY_OBJECTS.keys())), + help="Specifies the entity type to retrieve data for.", +) +@click.option( + "--twitter-entity-attribute", + multiple=True, + help="Specific to 'ENTITY' reports. Specifies the entity attribute (a.k.a. dimension) that should be returned.", +) +@click.option( + "--twitter-granularity", + type=click.Choice(GRANULARITIES), + default="TOTAL", + help="Specific to 'ANALYTICS' reports. Specifies how granular the retrieved data should be.", +) +@click.option( + "--twitter-metric-group", + multiple=True, + type=click.Choice(METRIC_GROUPS), + help="Specific to 'ANALYTICS' reports. Specifies the list of metrics (as a group) that should be returned: " + "https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation", +) +@click.option( + "--twitter-placement", + type=click.Choice(PLACEMENTS), + default="ALL_ON_TWITTER", + help="Specific to 'ANALYTICS' reports. Scopes the retrieved data to a particular placement.", +) +@click.option( + "--twitter-segmentation-type", + type=click.Choice(SEGMENTATION_TYPES), + help="Specific to 'ANALYTICS' reports. Specifies how the retrieved data should be segmented: " + "https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation", +) +@click.option( + "--twitter-platform", + help="Specific to 'ANALYTICS' reports. Required if segmentation_type is set to 'DEVICES' or 'PLATFORM_VERSIONS'. " + "To get possible values: GET targeting_criteria/platforms", +) +@click.option( + "--twitter-country", + help="Specific to 'ANALYTICS' reports. 
Required if segmentation_type is set to 'CITIES', 'POSTAL_CODES', or 'REGION'. " + "To get possible values: GET targeting_criteria/locations", +) +@click.option( + "--twitter-start-date", type=click.DateTime(), help="Specifies report start date." +) +@click.option( + "--twitter-end-date", + type=click.DateTime(), + help="Specifies report end date (inclusive).", +) +@click.option( + "--twitter-add-request-date-to-report", + type=click.BOOL, + default=False, + help="If set to 'True', the date on which the request is made will appear on each report record.", +) +@processor( + "twitter_consumer_key", + "twitter_consumer_secret", + "twitter_access_token", + "twitter_access_token_secret", +) +def twitter(**kwargs): + return TwitterReader(**extract_args("twitter_", kwargs)) + + +class TwitterReader(Reader): + def __init__( + self, + consumer_key, + consumer_secret, + access_token, + access_token_secret, + account_id, + report_type, + entity, + entity_attribute, + granularity, + metric_group, + placement, + segmentation_type, + platform, + country, + start_date, + end_date, + add_request_date_to_report, + ): + # Authentication params + self.client = Client( + consumer_key, consumer_secret, access_token, access_token_secret + ) + self.account = self.client.accounts(account_id) + + # General params + self.report_type = report_type + self.entity = entity + self.start_date = start_date + self.end_date = end_date + timedelta(days=1) + self.add_request_date_to_report = add_request_date_to_report + + # Report params: ENTITY + self.entity_attributes = list(entity_attribute) + + # Report params: ANALYTICS + self.granularity = granularity + self.metric_groups = list(metric_group) + self.placement = placement + self.segmentation_type = segmentation_type + self.platform = platform + self.country = country + + # Check input parameters + + if self.report_type == "ANALYTICS": + + if ( + self.segmentation_type in ["DEVICES", "PLATFORM_VERSIONS"] + and not self.platform + ): + raise 
ClickException("Please provide a value for 'platform'.") + + elif ( + self.segmentation_type in ["CITIES", "POSTAL_CODES", "REGIONS"] + and not self.country + ): + raise ClickException("Please provide a value for 'country'.") + + if self.entity == "FUNDING_INSTRUMENT" and any( + [ + metric_group not in ["ENGAGEMENT", "BILLING"] + for metric_group in self.metric_groups + ] + ): + raise ClickException( + "'FUNDING_INSTRUMENT' only accept the 'ENGAGEMENT' and 'BILLING' metric groups." + ) + + if "MOBILE_CONVERSION" in self.metric_groups and len( + self.metric_groups + ) > 1: + raise ClickException( + "'MOBILE_CONVERSION' data should be requested separately." + ) + + elif self.report_type == "REACH": + + if self.entity not in ["CAMPAIGN", "FUNDING_INSTRUMENT"]: + raise ClickException( + "'REACH' reports only accept the 'CAMPAIGN' and 'FUNDING_INSTRUMENT' entities." + ) + + elif self.report_type == "ENTITY": + + if not all( + [ + attr in ENTITY_ATTRIBUTES[self.entity] + for attr in self.entity_attributes + ] + ): + raise ClickException( + f"Available attributes for '{self.entity}' are: {ENTITY_ATTRIBUTES[self.entity]}" + ) + + def get_daily_period_items(self): + """ + Returns a list of datetime instances representing each date contained + in the requested period. Useful when granularity is set to 'DAY'. 
+ """ + + period_items = [] + current_date = self.start_date + + while current_date < self.end_date: + period_items.append(current_date) + current_date += timedelta(days=1) + + return period_items + + def get_active_entity_ids(self): + """ + Step 1 of 'ANALYTICS' report generation process: + Returns a list containing the ids of active entities over the requested time period + Documentation: https://developer.twitter.com/en/docs/ads/analytics/api-reference/active-entities + """ + + active_entities = ENTITY_OBJECTS[self.entity].active_entities( + self.account, self.start_date, self.end_date + ) + return [obj["entity_id"] for obj in active_entities] + + def get_job_ids(self, entity_ids): + """ + Step 2 of 'ANALYTICS' report generation process: + Create asynchronous analytics jobs and return their ids for progress tracking + Documentation: https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous + """ + + return [ + ENTITY_OBJECTS[self.entity] + .queue_async_stats_job( + self.account, + chunk_entity_ids, + self.metric_groups, + granularity=self.granularity, + placement=self.placement, + start_time=self.start_date, + end_time=self.end_date, + segmentation_type=self.segmentation_type, + platform=self.platform, + country=self.country, + ) + .id + for chunk_entity_ids in split_list(entity_ids, MAX_ENTITY_IDS_PER_JOB) + ] + + def get_job_result(self, job_id): + """ + Step 3 of 'ANALYTICS' report generation process: + Get job info to track its progress (job_result.status) and download report once completed (job_result.url) + Documentation: https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous + """ + + return ( + ENTITY_OBJECTS[self.entity] + .async_stats_job_result(self.account, job_ids=[job_id]) + .first + ) + + def get_raw_analytics_response(self, job_result): + """ + Step 4 of 'ANALYTICS' report generation process: + Download raw response from job once completed + Documentation: 
https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous + """ + + return ENTITY_OBJECTS[self.entity].async_stats_job_data( + self.account, url=job_result.url + ) + + def parse(self, raw_analytics_response): + """ + Parse a single raw response into a generator of JSON-like records. + """ + + for entity_resp in raw_analytics_response["data"]: + + for entity_data in entity_resp["id_data"]: + + entity_df = pd.DataFrame(entity_data["metrics"]).fillna(0) + entity_df["id"] = entity_resp["id"] + if self.granularity == "DAY": + entity_df["date"] = [ + item.strftime(REP_DATEFORMAT) + for item in self.get_daily_period_items() + ] + if self.segmentation_type: + entity_df[self.segmentation_type.lower()] = entity_data["segment"][ + "segment_name" + ] + + yield from entity_df.to_dict("records") + + def get_analytics_report(self, job_ids): + """ + Get 'ANALYTICS' report through the 'Asynchronous Analytics' endpoint of Twitter Ads API. + Documentation: https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous + """ + + all_responses = [] + + for job_id in job_ids: + + logging.info(f"Processing job_id: {job_id}") + + job_result = self.get_job_result(job_id) + waiting_sec = 2 + + while job_result.status == "PROCESSING": + logging.info(f"Waiting {waiting_sec} seconds for job to be completed") + sleep(waiting_sec) + if waiting_sec > MAX_WAITING_SEC: + raise JobTimeOutError("Waited too long for job to be completed") + waiting_sec *= 2 + job_result = self.get_job_result(job_id) + + raw_analytics_response = self.get_raw_analytics_response(job_result) + all_responses.append(self.parse(raw_analytics_response)) + + return chain(*all_responses) + + def get_entity_report(self): + """ + Get 'ENTITY' report through 'Core Entity' endpoints of Twitter Ads API. 
+        Documentation: https://developer.twitter.com/en/docs/ads/campaign-management/api-reference + """ + + ACCOUNT_CHILD_OBJECTS = { + "FUNDING_INSTRUMENT": self.account.funding_instruments(), + "CAMPAIGN": self.account.campaigns(), + "LINE_ITEM": self.account.line_items(), + "MEDIA_CREATIVE": self.account.media_creatives(), + "PROMOTED_TWEET": self.account.promoted_tweets(), + } + + yield from [ + {attr: getattr(entity_obj, attr, None) for attr in self.entity_attributes} + for entity_obj in ACCOUNT_CHILD_OBJECTS[self.entity] + ] + + def get_reach_report(self): + """ + Get 'REACH' report through the 'Reach and Average Frequency' endpoint of Twitter Ads API. + Documentation: https://developer.twitter.com/en/docs/ads/analytics/api-reference/reach + """ + + resource = ( + "/" + + API_VERSION + + f"/stats/accounts/{self.account.id}/reach/{self.entity.lower()}s" + ) + entity_ids = self.get_active_entity_ids() + + for chunk_entity_ids in split_list(entity_ids, MAX_ENTITY_IDS_PER_JOB): + params = { + "account_id": self.account.id, + f"{self.entity.lower()}_ids": ",".join(chunk_entity_ids), + "start_time": self.start_date.strftime(API_DATEFORMAT), + "end_time": self.end_date.strftime(API_DATEFORMAT), + } + request = Request(self.client, "get", resource, params=params) + yield from Cursor(None, request) + + def add_date_if_necessary(self, record): + """ + Add request_date, period_start_date and/or period_end_date to a JSON-like record. 
+ """ + + if self.add_request_date_to_report: + record["request_date"] = datetime.today().strftime(REP_DATEFORMAT) + + if ( + self.report_type == "ANALYTICS" and self.granularity == "TOTAL" + ) or self.report_type == "REACH": + record["period_start_date"] = self.start_date.strftime(REP_DATEFORMAT) + record["period_end_date"] = (self.end_date - timedelta(days=1)).strftime( + REP_DATEFORMAT + ) + + return record + + def read(self): + + if self.report_type == "ANALYTICS": + entity_ids = self.get_active_entity_ids() + + total_jobs = (len(entity_ids) // MAX_ENTITY_IDS_PER_JOB) + 1 + logging.info(f"Processing a total of {total_jobs} jobs") + + data = [] + for chunk_entity_ids in split_list( + entity_ids, MAX_ENTITY_IDS_PER_JOB * MAX_CONCURRENT_JOBS + ): + job_ids = self.get_job_ids(chunk_entity_ids) + data += self.get_analytics_report(job_ids) + + elif self.report_type == "REACH": + data = self.get_reach_report() + + elif self.report_type == "ENTITY": + data = self.get_entity_report() + + def result_generator(): + for record in data: + yield self.add_date_if_necessary(record) + + yield JSONStream("results_" + self.account.id, result_generator()) diff --git a/requirements.txt b/requirements.txt index 71a60889..e72f7c25 100644 --- a/requirements.txt +++ b/requirements.txt @@ -60,4 +60,6 @@ Unidecode==1.1.1 uritemplate==3.0.0 urllib3==1.25.7 Werkzeug==0.16.0 -googleads==22.0.0 \ No newline at end of file +googleads==22.0.0 +twitter-ads==7.0.1 +pandas==1.0.3 \ No newline at end of file diff --git a/tests/readers/test_twitter_reader.py b/tests/readers/test_twitter_reader.py new file mode 100644 index 00000000..b57d52de --- /dev/null +++ b/tests/readers/test_twitter_reader.py @@ -0,0 +1,213 @@ +# GNU Lesser General Public License v3.0 only +# Copyright (C) 2020 Artefact +# licence-information@artefact.com +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software 
Foundation; either +# version 3 of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +from unittest import TestCase, mock +from freezegun import freeze_time + +from datetime import datetime, timedelta + +from nck.readers.twitter_reader import TwitterReader + + +class TwitterReaderTest(TestCase): + def mock_twitter_reader(self, **kwargs): + for param, value in kwargs.items(): + if param == "end_date": + setattr(self, param, value + timedelta(days=1)) + else: + setattr(self, param, value) + setattr(self, "account", mock.MagicMock()) + + @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + def test_get_daily_period_items(self): + kwargs = {"start_date": datetime(2020, 1, 1), "end_date": datetime(2020, 1, 3)} + output = TwitterReader(**kwargs).get_daily_period_items() + expected = [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)] + self.assertEqual(output, expected) + + @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + def test_parse_with_total_granularity(self): + kwargs = {"granularity": "TOTAL", "segmentation_type": None} + raw_analytics_response = { + "data": [ + { + "id": "XXXXX", + "id_data": [ + {"segment": None, "metrics": {"retweets": [11], "likes": [12]}} + ], + }, + { + "id": "YYYYY", + "id_data": [ + {"segment": None, "metrics": {"retweets": [21], "likes": [22]}} + ], + }, + ], + } + output = TwitterReader(**kwargs).parse(raw_analytics_response) + expected = [ + {"id": "XXXXX", "retweets": 11, "likes": 12}, + {"id": "YYYYY", 
"retweets": 21, "likes": 22}, + ] + for output_record, expected_record in zip(output, expected): + self.assertEqual(output_record, expected_record) + + @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + def test_parse_with_day_granularity(self): + kwargs = { + "granularity": "DAY", + "segmentation_type": None, + "start_date": datetime(2020, 1, 1), + "end_date": datetime(2020, 1, 3), + } + raw_analytics_response = { + "data": [ + { + "id": "XXXXX", + "id_data": [ + { + "segment": None, + "metrics": { + "retweets": [11, 12, 13], + "likes": [14, 15, 16], + }, + } + ], + }, + { + "id": "YYYYY", + "id_data": [ + { + "segment": None, + "metrics": { + "retweets": [21, 22, 23], + "likes": [24, 25, 26], + }, + } + ], + }, + ], + } + output = TwitterReader(**kwargs).parse(raw_analytics_response) + expected = [ + {"date": "2020-01-01", "id": "XXXXX", "retweets": 11, "likes": 14}, + {"date": "2020-01-02", "id": "XXXXX", "retweets": 12, "likes": 15}, + {"date": "2020-01-03", "id": "XXXXX", "retweets": 13, "likes": 16}, + {"date": "2020-01-01", "id": "YYYYY", "retweets": 21, "likes": 24}, + {"date": "2020-01-02", "id": "YYYYY", "retweets": 22, "likes": 25}, + {"date": "2020-01-03", "id": "YYYYY", "retweets": 23, "likes": 26}, + ] + for output_record, expected_record in zip(output, expected): + self.assertEqual(output_record, expected_record) + + @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + def test_parse_with_segment(self): + kwargs = {"granularity": "TOTAL", "segmentation_type": "GENDER"} + raw_analytics_response = { + "data": [ + { + "id": "XXXXX", + "id_data": [ + { + "segment": {"segment_name": "Male"}, + "metrics": {"retweets": [11], "likes": [12]}, + }, + { + "segment": {"segment_name": "Female"}, + "metrics": {"retweets": [13], "likes": [14]}, + }, + ], + }, + { + "id": "YYYYY", + "id_data": [ + { + "segment": {"segment_name": "Male"}, + "metrics": {"retweets": [21], "likes": [22]}, + }, + { + "segment": {"segment_name": 
"Female"}, + "metrics": {"retweets": [23], "likes": [24]}, + }, + ], + }, + ], + } + output = TwitterReader(**kwargs).parse(raw_analytics_response) + expected = [ + {"id": "XXXXX", "gender": "Male", "retweets": 11, "likes": 12}, + {"id": "XXXXX", "gender": "Female", "retweets": 13, "likes": 14}, + {"id": "YYYYY", "gender": "Male", "retweets": 21, "likes": 22}, + {"id": "YYYYY", "gender": "Female", "retweets": 23, "likes": 24}, + ] + for output_record, expected_record in zip(output, expected): + self.assertDictEqual(output_record, expected_record) + + @freeze_time("2020-01-01") + @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + def test_add_date_if_necessary(self): + kwargs = { + "report_type": "ANALYTICS", + "granularity": "TOTAL", + "start_date": datetime(2020, 1, 1), + "end_date": datetime(2020, 1, 3), + "add_request_date_to_report": True, + } + record = {"id": "XXXXX", "name": "Artefact Campaign"} + output = TwitterReader(**kwargs).add_date_if_necessary(record) + expected = { + "id": "XXXXX", + "name": "Artefact Campaign", + "period_start_date": "2020-01-01", + "period_end_date": "2020-01-03", + "request_date": "2020-01-01", + } + self.assertEqual(output, expected) + + def mock_get_job_result(*args): + job_result = mock.MagicMock() + job_result.status = "SUCCESS" + return job_result + + def mock_parse(*args): + yield from [ + {"id": "XXXXX", "retweets": 11, "likes": 12}, + {"id": "YYYYY", "retweets": 21, "likes": 22}, + ] + + @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + @mock.patch.object( + TwitterReader, "get_active_entity_ids", lambda *args: ["XXXXX", "YYYYYY"] + ) + @mock.patch.object(TwitterReader, "get_job_ids", lambda *args: ["123456789"]) + @mock.patch.object(TwitterReader, "get_job_result", mock_get_job_result) + @mock.patch.object(TwitterReader, "get_raw_analytics_response", lambda *args: {}) + @mock.patch.object(TwitterReader, "parse", mock_parse) + def test_read_analytics_report(self): + kwargs = { + 
"report_type": "ANALYTICS", + "granularity": "DAY", + "add_request_date_to_report": False, + } + output = next(TwitterReader(**kwargs).read()) + expected = [ + {"id": "XXXXX", "retweets": 11, "likes": 12}, + {"id": "YYYYY", "retweets": 21, "likes": 22}, + ] + for output_record, expected_record in zip(output.readlines(), iter(expected)): + self.assertEqual(output_record, expected_record) From dbdf63f60843b61dfccaba0c8f8bf99866dae851 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gabrielle=20B=C3=A9ranger?= <56034720+gabrielleberanger@users.noreply.github.com> Date: Fri, 5 Jun 2020 16:38:21 +0200 Subject: [PATCH 02/54] Harmonizing Readers README file --- nck/readers/README.md | 500 ++++++++++++++++++++++++------------------ 1 file changed, 287 insertions(+), 213 deletions(-) diff --git a/nck/readers/README.md b/nck/readers/README.md index f265c255..306f98d4 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -2,6 +2,28 @@ Each reader role is to read data from external source and transform it into a Stream understable format to be written on GCS and BQ thanks to the corresponding writers. +## List of Readers + +- Adobe Analytics 1.4 +- Adobe Analytics 2.0 +- Amazon S3 +- Facebook Marketing +- Google Ads +- Google Analytics +- Google Cloud Storage +- Google Campaign Manager +- Google Display & Video 360 +- Google Search Ads 360 +- Google Search Console +- Google Sheets +- Oracle +- MySQL +- Radarly +- SalesForce +- Twitter Ads +- Yandex Campaign +- Yandex Statistics + ## Step to create a new Reader 1. Create python module following naming nomenclature ``` [command]_reader.py ``` @@ -10,11 +32,92 @@ Each reader role is to read data from external source and transform it into a St 4. Reference click command into [commands list](./__init__.py) 5. Update current README.md -## Facebook Reader +## Adobe Analytics Readers + +As of May 2020 (last update of this section of the documentation), **two versions of Adobe Analytics Reporting API are coexisting: 1.4 and 2.0**. 
As some functionalities of API 1.4 have not been made available in API 2.0 yet (Data Warehouse reports in particular), our Adobe Analytics Readers are also available in these two versions. + +### Adobe Analytics Reader 1.4 + +#### How to obtain credentials + +Our Adobe Analytics Reader 1.4 uses the **WSSE authentication framework**. This authentication framework is now deprecated, so you won't be able to generate new WSSE authentication credentials (Username, Password) on Adobe Developper Console if you don't already have them. #### Quickstart -The Facebook Reader handles calls to 2 endpoints of the Facebook Marketing API: **Facebook Ad Insights** (to retrieve performance data), and **Facebook Object Node** (to retrieve configuration data). +Call example to Adobe Analytics Reader 1.4, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: + +``` +python nck/entrypoint.py read_adobe --adobe-username --adobe-password --adobe-report-suite-id --adobe-date-granularity day --adobe-report-element-id trackingcode --adobe-report-metric-id visits --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 write_console +``` + +#### Parameters + +|CLI option|Documentation| +|--|--| +|`--adobe-username`|Username used for WSSE authentication| +|`--adobe-password`|Password used for WSSE authentication| +|`--adobe-list-report-suite`|Should be set to *True* if you wish to request the list of available Adobe Report Suites (*default: False*). If set to *True*, the below parameters should be left empty.| +|`--adobe-report-suite-id`|ID of the requested Adobe Report Suite| +|`--adobe-report-element-id`|ID of the element (i.e. dimension) to include in the report| +|`--adobe-report-metric-id`|ID of the metric to include in the report| +|`--adobe-date-granularity`|Granularity of the report. 
*Possible values: PREVIOUS_DAY, LAST_30_DAYS, LAST_7_DAYS, LAST_90_DAYS*|
+|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)|
+|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)|
+
+#### Additional information
+- **The full list of available elements and metrics** can be retrieved with the [GetElements](https://github.com/AdobeDocs/analytics-1.4-apis/blob/master/docs/reporting-api/methods/r_GetElements.md) and [GetMetrics](https://github.com/AdobeDocs/analytics-1.4-apis/blob/master/docs/reporting-api/methods/r_GetMetrics.md) methods.
+- **Adobe Analytics Reader 1.4 requests Data Warehouse reports** (the "source" parameter is set to "warehouse" in the report description), allowing it to efficiently process multiple-dimension requests.
+- **If you need further information**, the documentation of Adobe APIs 1.4 can be found [here](https://github.com/AdobeDocs/analytics-1.4-apis).
+
+### Adobe Analytics Reader 2.0
+
+#### How to obtain credentials
+
+Adobe Analytics Reader 2.0 uses the **JWT authentication framework**.
+- Get developer access to Adobe Analytics (documentation can be found [here](https://helpx.adobe.com/enterprise/using/manage-developers.html))
+- Create a Service Account integration to Adobe Analytics on [Adobe Developer Console](https://console.adobe.io/)
+- Use the generated JWT credentials (Client ID, Client Secret, Technical Account ID, Organization ID and private.key file) to retrieve your Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md)). All these parameters will be passed to Adobe Analytics Reader 2.0. 
+ +#### Quickstart + +Call example to Adobe Analytics Reader 2.0, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: + +``` +python nck/entrypoint.py read_adobe_2_0 --adobe-client-id --adobe-client-secret --adobe-tech-account-id --adobe-org-id --adobe-private-key --adobe-global-company-id --adobe-report-suite-id --adobe-dimension daterangeday --adobe-dimension campaign --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 --adobe-metric visits write_console +``` + +#### Parameters + +|CLI option|Documentation| +|--|--| +|`--adobe-client-id`|Client ID, that you can find on Adobe Developper Console| +|`--adobe-client-secret`|Client Secret, that you can find on Adobe Developper Console| +|`--adobe-tech-account-id`|Technical Account ID, that you can find on Adobe Developper Console| +|`--adobe-org-id`|Organization ID, that you can find on Adobe Developper Console| +|`--adobe-private-key`|Content of the private.key file, that you had to provide to create the integration. Make sure to enter the parameter in quotes, include headers, and indicate newlines as \n.| +|`--adobe-global-company-id`|Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md'))| +|`--adobe-report-suite-id`|ID of the requested Adobe Report Suite| +|`--adobe-dimension`|Dimension to include in the report| +|`--adobe-metric`|Metric to include in the report| +|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)| +|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)| + +#### Additional information + +- **In API 2.0, dimension and metric names are slightly different from API 1.4**. 
To get new metric and dimension names and reproduce the behavior of Adobe Analytics UI as closely as possible, [enable the Debugger feature in Adobe Analytics Workspace](https://github.com/AdobeDocs/analytics-2.0-apis/blob/master/reporting-tricks.md): it allow you to visualize the back-end JSON requests made by Adobe Analytics UI to Reporting API 2.0. +- **In API 2.0, the date granularity parameter was removed, and should now be handled as a dimension**: a request featuring `--adobe-dimension daterangeday` will produce a report with a day granularity. +- **API 2.0 does not feature Data Warehouse reports yet** (along with other features, that are indicated on the "Current limitations" section of [this page](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/migration-guide.md)). For this reason, if you wish to collect multiple-dimension reports, Adobe Analytics Reader 1.4 might be a more efficient solution in terms of processing time. +- **If you need any further information**, the documentation of Adobe APIs 2.0 can be found [here](https://github.com/AdobeDocs/analytics-2.0-apis). + +## Amazon S3 Reader + +*Not documented yet.* + +## Facebook Marketing Reader + +#### Quickstart + +The Facebook Marketing Reader handles calls to 2 endpoints of the Facebook Marketing API: **Facebook Ad Insights** (to retrieve performance data), and **Facebook Object Node** (to retrieve configuration data). *Example of Facebook Ad Insights Request* ``` @@ -46,9 +149,9 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- |`--facebook-breakdown`|How to break down the result. *This parameter is only relevant for Facebook Ad Insights Requests.*| |`--facebook-action-breakdown`|How to break down action results. 
*This parameter is only relevant for Facebook Ad Insights Requests.*| -#### Additional details for a relevant use of the Facebook Reader +#### Additional information -**#1: Make sure to select the appropriate `--facebook-level`** +**1. Make sure to select the appropriate `--facebook-level`** |If Facebook Object Type is...|Facebook Level can be...| |:--|:--| @@ -58,7 +161,7 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- |`ad`|ad, creative| |`creative`|creative| -**#2: Format Facebook Reader response using `--facebook-fields`** +**2. Format Facebook Marketing Reader response using `--facebook-fields`** 2.1. The list of **applicable fields** can be found on the links below: @@ -67,7 +170,7 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- 2.2. If you want to select **a nested field value**, simply indicate the path to this value within the request field. -*Facebook Reader Request* +*Facebook Marketing Reader Request* ``` --facebook-field object_story_spec[video_data][call_to_action][value][link] ``` @@ -87,14 +190,14 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- } ``` -*Facebook Reader Response* +*Facebook Marketing Reader Response* ``` {"object_story_spec_video_data_call_to_action_value_link": "https://www.artefact.com"} ``` (2.3) **Action Breakdown filters** can be applied to the fields of ***Facebook Ad Insights* Requests** using the following syntax: [:]. You can combine multiple Action Breakdown filters on the same field by adding them in cascade next to each other. 
-*Facebook Reader Request* +*Facebook Marketing Reader Request* ``` --facebook-action-breakdown action_type --facebook-field actions[action_type:video_view][action_type:post_engagement] @@ -122,69 +225,11 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- ] ``` -*Facebook Reader Response* +*Facebook Marketing Reader Response* ``` {"actions_action_type_video_view": "17", "actions_action_type_post_engagement": "25"} ``` -## Twitter Ads Reader - -#### How to obtain credentials - -* **Apply for a developper account** trough [this link](https://developer.twitter.com/en/apply). -* **Create a Twitter app** on the developper portal: it will generate your authentication credentials. -* **Apply for Twitter Ads API access** by filling out [this form]([https://developer.twitter.com/en/docs/ads/general/overview/adsapi-application](https://developer.twitter.com/en/docs/ads/general/overview/adsapi-application)). Receiving Twitter approval may take up to 7 business days. -* **Get a Campaign Analyst access to the Twitter Ads account** you wish to retrieve data for, on the @handle that you used to create your Twitter App. - -#### Quickstart - -The Twitter Ads Reader can collect **3 types of reports**, making calls to 3 endpoints of the Twitter Ads API: -* **ANALYTICS reports**, making calls to the [Asynchronous Analytics endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous). These reports return performance data for a wide range of metrics, that **can be aggregated over time**. Output data **can be splitted by day** when requested over a larger time period. -* **REACH reports**, making calls to the [Reach and Average Frequency endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/reach). These reports return performance data with a focus on reach and frequency metrics, that **cannot be aggregated over time** (*e.g. 
the reach of day A and B is not equal to the reach of day A + the reach of day B, as it counts unique individuals*). Output data **cannot be splitted by day** when requested over a larger time period. These reports are available **only for the Funding Instrument and Campaign entities**. -* **ENTITY reports**, making calls to [Campaign Management endpoints](https://developer.twitter.com/en/docs/ads/campaign-management/api-reference). These reports return details on entity configuration since the creation of the Twitter Ads account. - -*Call example for ANALYTICS reports* -This call will collect engagement metrics for Line Item entities, splitting the results by day, from 2020-01-01 to 2020-01-03: -``` -python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type ANALYTICS --twitter-entity LINE_ITEM --twitter-metric-group ENGAGEMENT --twitter-segmentation-type AGE --twitter-granularity DAY --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console -``` - -*Call example for REACH reports* -This call will collect reach metrics (*total_audience_reach, average_frequency*) for Campaign entities, from 2020-01-01 to 2020-01-03: -``` -python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type REACH --twitter-entity CAMPAIGN --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console -``` - -*Call example for ENTITY reports* -This call collects details on the configuration of Campaign entities (id, name, total_budget_amount_local_micro, currency), since the creation of the Twitter Ads account: -``` -python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type REACH --twitter-entity 
CAMPAIGN --twitter-entity-attribute id --twitter-entity-attribute name --twitter-entity-attribute total_budget_amount_local_micro --twitter-entity-attribute currency write_console -``` - -#### Parameters - -|CLI option|Documentation| -|--|--| -|`--twitter-consumer-key`|API key, available in the 'Keys and tokens' section of your Twitter Developper App.| -|`--twitter-consumer-secret`|API secret key, available in the 'Keys and tokens' section of your Twitter Developper App.| -|`--twitter-access-token`|Access token, available in the 'Keys and tokens' section of your Twitter Developper App.| -|`--twitter-access-token-secret`|Access token secret, available in the 'Keys and tokens' section of your Twitter Developper App.| -|`--twitter-account-id`|Specifies the Twitter Account ID for which the data should be returned.| -|`--twitter-report-type`|Specifies the type of report to collect. *Possible values: ANALYTICS, REACH, ENTITY.*| -|`--twitter-entity`|Specifies the entity type to retrieve data for. *Possible values: FUNDING_INSTRUMENT, CAMPAIGN, LINE_ITEM, MEDIA_CREATIVE, PROMOTED_TWEET.*| -|`--twitter-entity-attribute`|Specific to ENTITY reports. Specifies the entity attribute (configuration detail) that should be returned.| -|`--twitter-granularity`|Specific to ANALYTICS reports. Specifies how granular the retrieved data should be. *Possible values: TOTAL (default), DAY.*| -|`--twitter-metric-group`|Specific to ANALYTICS reports. Specifies the list of metrics (as a group) that should be returned. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | -|`--twitter-placement`|Specific to ANALYTICS reports. Scopes the retrieved data to a particular placement. *Possible values: ALL_ON_TWITTER (default), PUBLISHER_NETWORK.*| -|`--twitter-segmentation-type`|Specific to ANALYTICS reports. Specifies how the retrieved data should be segmented. 
*Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | -|`--twitter-platform`|Specific to ANALYTICS reports. Required if segmentation_type is set to DEVICES or PLATFORM_VERSION. *Possible values can be identified through the targeting_criteria/locations*| -|`--twitter-country`|Specific to ANALYTICS reports. Required if segmentation_type is set to CITIES, POSTAL_CODES, or REGION. *Possible values can be identified through the GET targeting_criteria/platforms endpoint.*| -|`--twitter-start-date`|Specifies report start date (format: YYYY-MM-DD).| -|`--twitter-end-date`|Specifies report end date (format: YYYY-MM-DD).| -|`--twitter-add-request-date-to-report`|If set to *True* (default: *False*), the date on which the request is made will appear on each report record.| - -If you need any further information, the documentation of Twitter Ads API can be found [here](https://developer.twitter.com/en/docs/ads/general/overview). - ## Google Readers ### Authentication @@ -195,11 +240,9 @@ via the oAuth flow. A full script to do this can be found here: [Refresh token generator](https://github.com/artefactory/Refresh-token-generator-for-google-oauth) - ### Google Ads Reader -#### How to obtain Credentials - +#### How to obtain credentials Using the Google Ads API requires four things: - A developer token (Generated at a company level - one per company -, takes around 2 days to be approved by Google) which can be completely independant from the Google Ads Account you will be calling (though you need a Manager Google Ads Account to request a token for your company) @@ -216,8 +259,7 @@ to apply for access if your Company does not already have a developer token (gra See the [documentation here](https://developers.google.com/adwords/api/docs/guides/first-api-call "Make your first API call") to set-up your OAuth2 credentials and refresh token specifically for your Google Ads Accounts. 
- -#### Which Reports and Metrics are available in the API +#### Which reports and metrics are available in the API The list of available reports for the API, and the associated metrics, can be [found here](https://developers.google.com/adwords/api/docs/appendix/reports#available-reports "Report Types") @@ -225,7 +267,6 @@ The list of available reports for the API, and the associated metrics, can be [f - Call Example - The following command retrieves insights about the Ads of *my_first_campaign* and *my_second_campaign* in the Google Ads Account thanks to your company , and your , and with the necessary permissions to access your Accounts. @@ -236,12 +277,26 @@ python nck/entrypoint.py read_googleads --googleads-developer-token and +*Not documented yet.* -- A refresh token, created with the email address able to access to your Google Search Console Account. +### Google Campaign Manager Reader -- The URLs whose performance you want to see. +*Not documented yet.* -See the [documentation here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/prereqs "Search Console API") -to see an Overview of the Search Console API. +### Google Display & Video Reader +*Not documented yet.* + +### Google Search Console Reader -#### Search Analytics +#### Which reports and metrics are available in the API The list of available dimensions and metrics in the API can be [found here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query "Search Analytics") @@ -280,19 +337,27 @@ with the necessary permissions to access your Accounts. 
python nck/entrypoint.py read_search_console --search-console-client-id --search-console-refresh-token --search-console-site-url --search-console-dimensions country --search-console-dimensions device --search-console-start-date 2020-01-01 --search-console-end-date 2020-01-01 write_console ``` -- Parameters of the Google Search Console Readers +- Parameters of the Google Search Console Reader -| --search-console-client-id | --search-console-client-secret | --search-console-access-token | --search-console-refresh-token | --search-console-dimensions | --search-console-site-url | --search-console-start-date | --search-console-end-date | --search-console-date-column | --search-console-row-limit | -|:-----------------:|:---------------------:|:-----------------------:|:-----------------------:|:-----------------------:|:-----------------------:|:--------------------:|:---------------------------:|:----------------------:|:----------------------:| -|OAuth2 ID| OAuth2 Secret| Access token | Refresh token for OAuth2 | [Dimensions to request](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query#dimensionFilterGroups.filters.dimension) |Site URL whose performance you want to request| Start Date for the request | End Date for the request | If true, include date column in the report | Row number by report page | +|CLI option|Documentation| +|--|--| +|`--search-console-client-id`|OAuth2 ID| +|`--search-console-client-secret`|OAuth2 Secret| +|`--search-console-access-token`|Access token| +|`--search-console-refresh-token`|Refresh token for OAuth2| +|`--search-console-dimensions`|[Dimensions to request](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query#dimensionFilterGroups.filters.dimension)| +|`--search-console-site-url`|Site URL whose performance you want to request| +|`--search-console-start-date`|Start Date for the request| +|`--search-console-end-date`|End Date for the request| 
+|`--search-console-date-column`|If true, include date column in the report| +|`--search-console-row-limit`|Row number by report page| See the documents below for a better understanding of the parameters: - [Google Search Console API](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query) +### Google Search Ads 360 Reader -### Search Ads 360 Reader (SA360) - -#### How to obtain Credentials +#### How to obtain credentials Using the Search Ads API requires two things: @@ -303,8 +368,7 @@ Using the Search Ads API requires two things: See the [documentation here](https://developers.google.com/search-ads/v2/authorizing "SA360 Authentication") to set-up your OAuth2 credentials and refresh token specifically for Search Ads 360 Reporting. - -#### Which Reports and Metrics are available in the API +#### Which reports and metrics are available in the API The list of available reports for the API, and the associated metrics, can be [found here](https://developers.google.com/search-ads/v2/report-types "Report Types") @@ -312,7 +376,6 @@ The list of available reports for the API, and the associated metrics, can be [f - Call Example - The following command retrieves insights about the Ads in the Search Ads 360 Account from the agency thanks to your , and with the necessary permissions to access your Accounts. 
@@ -323,179 +386,190 @@ python nck/entrypoint.py read_sa360 --sa360-client-id --sa360-client *If it doesn't work, try to* `export PYTHONPATH="."` *in the nautilus-connector-kit folder (to be sure Python is reading correctly)* *If you want the output to be printed in your console, add* `write_console` *at the end of your command (see writers for more details)* - - Parameters of the SA360 Reader -| CLI option | Documentation | -| ---------- | ------------- | -|`--sa360-access-token` | (Optional) Access token | -|`--sa360-client-id` | OAuth2 ID | -|`--sa360-client-secret` | OAuth2 ID Secret | -|`--sa360-refresh-token` | Refresh token | -|`--sa360-agency-id` | Agency ID to request in SA360 | -|`--sa360-advertiser-id` | (Optional) Advertiser ids to request. If not provided, every advertiser of the agency will be requested| -|`--sa360-report-name` | (Optional) Name of the output report | -|`--sa360-report-type` | Type of the report to request. List [here](https://developers.google.com/search-ads/v2/report-types)| -|`--sa360-column` | Dimensions and metrics to request in the report | -|`--sa360-saved-column` | (Optional) Saved columns to report. See [documentation](https://developers.google.com/search-ads/v2/how-tos/reporting/saved-columns)| -|`--sa360-start-date` | Start date of the period to request | -|`--sa360-end-date` | End date of the period to request | +|CLI option|Documentation| +|--|--| +|`--sa360-access-token`|(Optional) Access token| +|`--sa360-client-id`|OAuth2 ID| +|`--sa360-client-secret`|OAuth2 ID Secret| +|`--sa360-refresh-token`|Refresh token| +|`--sa360-agency-id`|Agency ID to request in SA360| +|`--sa360-advertiser-id`|(Optional) Advertiser ids to request. 
If not provided, every advertiser of the agency will be requested| +|`--sa360-report-name`|(Optional) Name of the output report| +|`--sa360-report-type` Type of the report to request (list [here](https://developers.google.com/search-ads/v2/report-types))| +|`--sa360-column`|Dimensions and metrics to request in the report| +|`--sa360-saved-column`|(Optional) Saved columns to report (see [documentation](https://developers.google.com/search-ads/v2/how-tos/reporting/saved-columns))| +|`--sa360-start-date`|Start date of the period to request| +|`--sa360-end-date`|End date of the period to request| See the documents below for a better understanding of the parameters: - [SA360 Reporting](https://developers.google.com/search-ads/v2/how-tos/reporting) +#### How to obtain credentials -## Yandex readers +Using the Google Search Console API requires three main parameters: +- OAuth2 credentials: and -For now, there is only one Yandex API you can access through Nautilus connectors: [Direct API](https://tech.yandex.com/direct/). -This API allows you to collect display metrics. +- A refresh token, created with the email address able to access to your Google Search Console Account. -### Access Yandex Direct API +- The URLs whose performance you want to see. -In order to access Yandex Direct API, you need two accounts: an advertiser account and a developer account. -Here is the process: +See the [documentation here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/prereqs "Search Console API") +to see an Overview of the Search Console API. -1. Create a developer account if you don't already have one. Click on the *Get started* button on this [page](https://direct.yandex.com/). -2. Create and register an app that will access Yandex Direct API via [Yandex OAuth](https://oauth.yandex.com/client/new). -3. Keep app client id safe. 
Log in with your advertiser account and [give permission to the app to access your data](https://tech.yandex.com/oauth/doc/dg/tasks/get-oauth-token-docpage/). -4. Store your token very carefully. -5. Log out and log in as a developer and [ask permission to access Yandex Direct API](https://direct.yandex.com/registered/main.pl?cmd=apiSettings) (ask for Full access). Fill in the form. -6. Wait for Yandex support to reply but it should be within a week. +### Google Sheets Reader -### Yandex campaign reader +*Not documented yet.* -[Official documentation](https://tech.yandex.com/direct/doc/ref-v5/campaigns/get-docpage/) +## Oracle Reader -#### Quickstart +*Not documented yet.* -If you want to quickly get to the point, here is a simple command that get the daily budget for all your campaigns. +## MySQL Reader -```bash -python nck/entrypoint.py read_yandex_campaigns --yandex-token --yandex-field-name Id --yandex-field-name Name --yandex-field-name DailyBudget write_console -``` +*Not documented yet.* -Didn't work? See [troubleshooting](#troubleshooting) section. +## Radarly Reader -#### Parameters +*Not documented yet.* -| CLI option | Documentation | -| ---------- | ------------- | -| `--yandex-token` | Bear token that allows you to authenticate to the API | -| `--yandex-campaign-id` | (Optional) Selects campaigns with the specified IDs. | -| `--yandex-campaign-state` | (Optional) Selects campaigns with the specified [states](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status). | -| `--yandex-campaign-status` | (Optional) Selects campaigns with the specified [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status). | -| `--yandex-campaign-payment-status` | (Optional) Selects campaigns with the specified payment [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status). | -| `--yandex-field-name` | Parameters to get that are common to all types of campaigns. 
|
+## Salesforce Reader

-### Yandex statistics reader
+*Not documented yet.*

-[Official documentation](https://tech.yandex.com/direct/doc/reports/reports-docpage/)
+## Twitter Ads Reader
+
+#### How to obtain credentials
+
+* **Apply for a developer account** through [this link](https://developer.twitter.com/en/apply).
+* **Create a Twitter app** on the developer portal: it will generate your authentication credentials.
+* **Apply for Twitter Ads API access** by filling out [this form](https://developer.twitter.com/en/docs/ads/general/overview/adsapi-application). Receiving Twitter approval may take up to 7 business days.
+* **Get a Campaign Analyst access to the Twitter Ads account** you wish to retrieve data for, on the @handle that you used to create your Twitter App.

 #### Quickstart

-The command below gives you a performance report for all your campaigns and since the beginning.
+The Twitter Ads Reader can collect **3 types of reports**, making calls to 3 endpoints of the Twitter Ads API:
+* **ANALYTICS reports**, making calls to the [Asynchronous Analytics endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous). These reports return performance data for a wide range of metrics, that **can be aggregated over time**. Output data **can be split by day** when requested over a larger time period.
+* **REACH reports**, making calls to the [Reach and Average Frequency endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/reach). These reports return performance data with a focus on reach and frequency metrics, that **cannot be aggregated over time** (*e.g. the reach of day A and B is not equal to the reach of day A + the reach of day B, as it counts unique individuals*). Output data **cannot be split by day** when requested over a larger time period. 
These reports are available **only for the Funding Instrument and Campaign entities**. +* **ENTITY reports**, making calls to [Campaign Management endpoints](https://developer.twitter.com/en/docs/ads/campaign-management/api-reference). These reports return details on entity configuration since the creation of the Twitter Ads account. -```bash -python nck/entrypoint.py read_yandex_statistics --yandex-token --yandex-report-type AD_PERFORMANCE_REPORT --yandex-field-name AdFormat --yandex-field-name AdId --yandex-field-name Impressions --yandex-include-vat True --yandex-report-language en --yandex-field-name AdGroupName --yandex-field-name AdGroupId --yandex-field-name AdNetworkType --yandex-field-name CampaignId --yandex-field-name CampaignName --yandex-field-name CampaignType --yandex-field-name Date --yandex-field-name Device --yandex-field-name Clicks --yandex-field-name Conversions --yandex-field-name Cost --yandex-date-range ALL_TIME write_console +*Call example for ANALYTICS reports* +This call will collect engagement metrics for Line Item entities, splitting the results by day, from 2020-01-01 to 2020-01-03: +``` +python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type ANALYTICS --twitter-entity LINE_ITEM --twitter-metric-group ENGAGEMENT --twitter-segmentation-type AGE --twitter-granularity DAY --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console ``` -Didn't work? See [troubleshooting](#troubleshooting) section. 
+*Call example for REACH reports*
+This call will collect reach metrics (*total_audience_reach, average_frequency*) for Campaign entities, from 2020-01-01 to 2020-01-03:
+```
+python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type REACH --twitter-entity CAMPAIGN --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console
+```

-#### Parameters
+*Call example for ENTITY reports*
+This call collects details on the configuration of Campaign entities (id, name, total_budget_amount_local_micro, currency), since the creation of the Twitter Ads account:
+```
+python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type ENTITY --twitter-entity CAMPAIGN --twitter-entity-attribute id --twitter-entity-attribute name --twitter-entity-attribute total_budget_amount_local_micro --twitter-entity-attribute currency write_console
+```

-Detailed version [here](https://tech.yandex.com/direct/doc/reports/spec-docpage/).
+#### Parameters

-| CLI option | Documentation |
-| ---------- | ------------- |
-| `--yandex-token` | Bear token that allows you to authenticate to the API |
-| `--yandex-report-language` | (Optional) Language of the report. See all options [here](https://tech.yandex.com/direct/doc/dg/concepts/headers-docpage/#headers__accept-language). |
-| `--yandex-filter` | (Optional) Filters on a particular field. |
-| `--yandex-max-rows` | (Optional) The maximum number of rows in the report. |
-| `--yandex-field-name` | Information you want to collect. Complete list [here](https://tech.yandex.com/direct/doc/reports/fields-list-docpage/). |
-| `--yandex-report-type` | Type of report. Linked to the fields you want to select. 
| -| `--yandex-date-range` | List [here](https://tech.yandex.com/direct/doc/reports/period-docpage/). | -| `--yandex-include-vat` | Adds VAT to your expenses if set to `True`| -| `--yandex-date-start` | (Optional) Selects data on a specific period of time. Combined with `--yandex-date-stop` and `--yandex-date-range` set to `CUSTOM_DATE`. | -| `--yandex-date-stop` | (Optional) Selects data on a specific period of time. Combined with `--yandex-date-start` and `--yandex-date-range` set to `CUSTOM_DATE`. | +|CLI option|Documentation| +|--|--| +|`--twitter-consumer-key`|API key, available in the 'Keys and tokens' section of your Twitter Developper App.| +|`--twitter-consumer-secret`|API secret key, available in the 'Keys and tokens' section of your Twitter Developper App.| +|`--twitter-access-token`|Access token, available in the 'Keys and tokens' section of your Twitter Developper App.| +|`--twitter-access-token-secret`|Access token secret, available in the 'Keys and tokens' section of your Twitter Developper App.| +|`--twitter-account-id`|Specifies the Twitter Account ID for which the data should be returned.| +|`--twitter-report-type`|Specifies the type of report to collect. *Possible values: ANALYTICS, REACH, ENTITY.*| +|`--twitter-entity`|Specifies the entity type to retrieve data for. *Possible values: FUNDING_INSTRUMENT, CAMPAIGN, LINE_ITEM, MEDIA_CREATIVE, PROMOTED_TWEET.*| +|`--twitter-entity-attribute`|Specific to ENTITY reports. Specifies the entity attribute (configuration detail) that should be returned.| +|`--twitter-granularity`|Specific to ANALYTICS reports. Specifies how granular the retrieved data should be. *Possible values: TOTAL (default), DAY.*| +|`--twitter-metric-group`|Specific to ANALYTICS reports. Specifies the list of metrics (as a group) that should be returned. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | +|`--twitter-placement`|Specific to ANALYTICS reports. 
Scopes the retrieved data to a particular placement. *Possible values: ALL_ON_TWITTER (default), PUBLISHER_NETWORK.*| +|`--twitter-segmentation-type`|Specific to ANALYTICS reports. Specifies how the retrieved data should be segmented. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | +|`--twitter-platform`|Specific to ANALYTICS reports. Required if segmentation_type is set to DEVICES or PLATFORM_VERSION. *Possible values can be identified through the GET targeting_criteria/platforms endpoint.*| +|`--twitter-country`|Specific to ANALYTICS reports. Required if segmentation_type is set to CITIES, POSTAL_CODES, or REGION. *Possible values can be identified through the GET targeting_criteria/locations endpoint.*| +|`--twitter-start-date`|Specifies report start date (format: YYYY-MM-DD).| +|`--twitter-end-date`|Specifies report end date (format: YYYY-MM-DD).| +|`--twitter-add-request-date-to-report`|If set to *True* (default: *False*), the date on which the request is made will appear on each report record.| -## Adobe Analytics Readers +If you need any further information, the documentation of Twitter Ads API can be found [here](https://developer.twitter.com/en/docs/ads/general/overview). -As of May 2020 (last update of this section of the documentation), **two versions of Adobe Analytics Reporting API are coexisting: 1.4 and 2.0**. As some functionalities of API 1.4 have not been made available in API 2.0 yet (Data Warehouse reports in particular), our Adobe Analytics Readers are also available in these two versions. +## Yandex Readers -### Adobe Analytics Reader 1.4 +For now, there is only one Yandex API you can access through Nautilus connectors: [Direct API](https://tech.yandex.com/direct/). +This API allows you to collect display metrics. #### How to obtain credentials -Our Adobe Analytics Reader 1.4 uses the **WSSE authentication framework**. 
This authentication framework is now deprecated, so you won't be able to generate new WSSE authentication credentials (Username, Password) on Adobe Developper Console if you don't already have them. +In order to access Yandex Direct API, you need two accounts: an advertiser account and a developer account. +Here is the process: + +1. Create a developer account if you don't already have one. Click on the *Get started* button on this [page](https://direct.yandex.com/). +2. Create and register an app that will access Yandex Direct API via [Yandex OAuth](https://oauth.yandex.com/client/new). +3. Keep app client id safe. Log in with your advertiser account and [give permission to the app to access your data](https://tech.yandex.com/oauth/doc/dg/tasks/get-oauth-token-docpage/). +4. Store your token very carefully. +5. Log out and log in as a developer and [ask permission to access Yandex Direct API](https://direct.yandex.com/registered/main.pl?cmd=apiSettings) (ask for Full access). Fill in the form. +6. Wait for Yandex support to reply but it should be within a week. + +### Yandex Campaign Reader + +[Official documentation](https://tech.yandex.com/direct/doc/ref-v5/campaigns/get-docpage/) #### Quickstart -Call example to Adobe Analytics Reader 1.4, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: +If you want to quickly get to the point, here is a simple command that get the daily budget for all your campaigns. +```bash +python nck/entrypoint.py read_yandex_campaigns --yandex-token --yandex-field-name Id --yandex-field-name Name --yandex-field-name DailyBudget write_console ``` -python nck/entrypoint.py read_adobe --adobe-username --adobe-password --adobe-report-suite-id --adobe-date-granularity day --adobe-report-element-id trackingcode --adobe-report-metric-id visits --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 write_console -``` + +Didn't work? 
See [troubleshooting](#troubleshooting) section. #### Parameters |CLI option|Documentation| -|--|--| -|`--adobe-username`|Username used for WSSE authentication| -|`--adobe-password`|Password used for WSSE authentication| -|`--adobe-list-report-suite`|Should be set to *True* if you wish to request the list of available Adobe Report Suites (*default: False*). If set to *True*, the below parameters should be left empty.| -|`--adobe-report-suite-id`|ID of the requested Adobe Report Suite| -|`--adobe-report-element-id`|ID of the element (i.e. dimension) to include in the report| -|`--adobe-report-metric-id`|ID of the metric to include in the report| -|`--adobe-date-granularity`|Granularity of the report. *Possible values: PREVIOUS_DAY, LAST_30_DAYS, LAST_7_DAYS, LAST_90_DAYS*| -|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)| -|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)| - -#### Addtional information -- **The full list of available elements and metrics** can be retrieved with the [GetElements](https://github.com/AdobeDocs/analytics-1.4-apis/blob/master/docs/reporting-api/methods/r_GetElements.md) and [GetMetrics](https://github.com/AdobeDocs/analytics-1.4-apis/blob/master/docs/reporting-api/methods/r_GetMetrics.md) methods. -- **Adobe Analytics Reader 1.4 requests Data Warehouse reports** (the "source" parameter is set to "warehouse" in the report description), allowing it to efficiently process multiple-dimension requests. -- **If you need further information**, the documentation of Adobe APIs 1.4 can be found [here](https://github.com/AdobeDocs/analytics-1.4-apis). 
- -### Adobe Analytics Reader 2.0 +|--| -| +|`--yandex-token`|Bear token that allows you to authenticate to the API| +|`--yandex-campaign-id`|(Optional) Selects campaigns with the specified IDs.| +|`--yandex-campaign-state`|(Optional) Selects campaigns with the specified [states](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| +|`--yandex-campaign-status`|(Optional) Selects campaigns with the specified [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| +|`--yandex-campaign-payment-status`|(Optional) Selects campaigns with the specified payment [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| +|`--yandex-field-name`|Parameters to get that are common to all types of campaigns.| -#### How to obtain credentials +### Yandex Statistics Reader -Adobe Analytics Reader 2.0 uses the **JWT authentication framework**. -- Get developper access to Adobe Analytics (documentation can be found [here](https://helpx.adobe.com/enterprise/using/manage-developers.html)) -- Create a Service Account integration to Adobe Analytics on [Adobe Developper Console](https://console.adobe.io/) -- Use the generated JWT credentials (Client ID, Client Secret, Technical Account ID, Organization ID and private.key file) to retrieve your Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md')). All these parameters will be passed to Adobe Analytics Reader 2.0. +[Official documentation](https://tech.yandex.com/direct/doc/reports/reports-docpage/) #### Quickstart -Call example to Adobe Analytics Reader 2.0, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: +The command below gives you a performance report for all your campaigns and since the beginning. 
-``` -python nck/entrypoint.py read_adobe_2_0 --adobe-client-id --adobe-client-secret --adobe-tech-account-id --adobe-org-id --adobe-private-key --adobe-global-company-id --adobe-report-suite-id --adobe-dimension daterangeday --adobe-dimension campaign --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 --adobe-metric visits write_console +```bash +python nck/entrypoint.py read_yandex_statistics --yandex-token --yandex-report-type AD_PERFORMANCE_REPORT --yandex-field-name AdFormat --yandex-field-name AdId --yandex-field-name Impressions --yandex-include-vat True --yandex-report-language en --yandex-field-name AdGroupName --yandex-field-name AdGroupId --yandex-field-name AdNetworkType --yandex-field-name CampaignId --yandex-field-name CampaignName --yandex-field-name CampaignType --yandex-field-name Date --yandex-field-name Device --yandex-field-name Clicks --yandex-field-name Conversions --yandex-field-name Cost --yandex-date-range ALL_TIME write_console ``` +Didn't work? See [troubleshooting](#troubleshooting) section. + #### Parameters +Detailed version [here](https://tech.yandex.com/direct/doc/reports/spec-docpage/). + |CLI option|Documentation| |--|--| -|`--adobe-client-id`|Client ID, that you can find on Adobe Developper Console| -|`--adobe-client-secret`|Client Secret, that you can find on Adobe Developper Console| -|`--adobe-tech-account-id`|Technical Account ID, that you can find on Adobe Developper Console| -|`--adobe-org-id`|Organization ID, that you can find on Adobe Developper Console| -|`--adobe-private-key`|Content of the private.key file, that you had to provide to create the integration. 
Make sure to enter the parameter in quotes, include headers, and indicate newlines as \n.| -|`--adobe-global-company-id`|Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md'))| -|`--adobe-report-suite-id`|ID of the requested Adobe Report Suite| -|`--adobe-dimension`|Dimension to include in the report| -|`--adobe-metric`|Metric to include in the report| -|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)| -|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)| - -#### Additional information - -- **In API 2.0, dimension and metric names are slightly different from API 1.4**. To get new metric and dimension names and reproduce the behavior of Adobe Analytics UI as closely as possible, [enable the Debugger feature in Adobe Analytics Workspace](https://github.com/AdobeDocs/analytics-2.0-apis/blob/master/reporting-tricks.md): it allow you to visualize the back-end JSON requests made by Adobe Analytics UI to Reporting API 2.0. -- **In API 2.0, the date granularity parameter was removed, and should now be handled as a dimension**: a request featuring `--adobe-dimension daterangeday` will produce a report with a day granularity. -- **API 2.0 does not feature Data Warehouse reports yet** (along with other features, that are indicated on the "Current limitations" section of [this page](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/migration-guide.md)). For this reason, if you wish to collect multiple-dimension reports, Adobe Analytics Reader 1.4 might be a more efficient solution in terms of processing time. -- **If you need any further information**, the documentation of Adobe APIs 2.0 can be found [here](https://github.com/AdobeDocs/analytics-2.0-apis). 
- -### Troubleshooting +|`--yandex-token`|Bearer token that allows you to authenticate to the API| +|`--yandex-report-language`|(Optional) Language of the report. See all options [here](https://tech.yandex.com/direct/doc/dg/concepts/headers-docpage/#headers__accept-language).| +|`--yandex-filter`|(Optional) Filters on a particular field.| +|`--yandex-max-rows`|(Optional) The maximum number of rows in the report.| +|`--yandex-field-name`|Information you want to collect. Complete list [here](https://tech.yandex.com/direct/doc/reports/fields-list-docpage/).| +|`--yandex-report-type`|Type of report. Linked to the fields you want to select.| +|`--yandex-date-range`|List [here](https://tech.yandex.com/direct/doc/reports/period-docpage/).| +|`--yandex-include-vat`|Adds VAT to your expenses if set to `True`| +|`--yandex-date-start`|(Optional) Selects data on a specific period of time. Combined with `--yandex-date-stop` and `--yandex-date-range` set to `CUSTOM_DATE`.| +|`--yandex-date-stop`|(Optional) Selects data on a specific period of time. Combined with `--yandex-date-start` and `--yandex-date-range` set to `CUSTOM_DATE`.| + +## Troubleshooting You encountered an error and you don't know what's going on. You may find an answer in the troubleshooting guide below. 
From 06ab6d812affb0fdd0f2e05d6678483a32745783 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Gabrielle=20B=C3=A9ranger?= <56034720+gabrielleberanger@users.noreply.github.com> Date: Fri, 5 Jun 2020 16:45:52 +0200 Subject: [PATCH 03/54] Harmonizing General README file --- README.md | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 3559638d..e61b141c 100644 --- a/README.md +++ b/README.md @@ -6,25 +6,25 @@ Nautilus connectors kit is a tool which aim is getting raw data from different s ### Readers -- Google DoubleClick Manager (DBM / DV360) -- Google Campaign Manager (CM / DCM) -- Google Search Ads 360 (SA360) +- Adobe Analytics 1.4 +- Adobe Analytics 2.0 +- Amazon S3 +- Facebook Marketing +- Google Ads - Google Analytics -- Google Search Console -- Google Sheets - Google Cloud Storage -- Google Adwords +- Google Campaign Manager +- Google Display & Video 360 +- Google Search Ads 360 - Google Search Console -- Facebook Business Manager -- Twitter Ads -- Amazon S3 +- Google Sheets - Oracle -- SalesForce - MySQL - Radarly -- Adobe Analytics 1.4 -- Adobe Analytics 2.0 -- Yandex +- SalesForce +- Twitter Ads +- Yandex Campaign +- Yandex Statistics ### Writers @@ -99,4 +99,4 @@ It is advised to do the following in a virtual env * https://manikos.github.io/a-tour-on-python-packaging * http://lucumr.pocoo.org/2014/1/27/python-on-wheels/ -* https://pip.readthedocs.io/en/1.4.1/cookbook.html#controlling-setup-requires \ No newline at end of file +* https://pip.readthedocs.io/en/1.4.1/cookbook.html#controlling-setup-requires From fcc6a98ce42514ed40133d5f362fc38227d816c3 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Thu, 11 Jun 2020 13:13:28 +0200 Subject: [PATCH 04/54] TwitterReader minor fixes --- README.md | 28 +- nck/readers/README.md | 497 +++++++++++++++------------ nck/readers/twitter_reader.py | 125 ++++--- requirements.txt | 1 - tests/readers/test_twitter_reader.py | 180 ++++++++-- 5 files 
changed, 531 insertions(+), 300 deletions(-) diff --git a/README.md b/README.md index 3559638d..e61b141c 100644 --- a/README.md +++ b/README.md @@ -6,25 +6,25 @@ Nautilus connectors kit is a tool which aim is getting raw data from different s ### Readers -- Google DoubleClick Manager (DBM / DV360) -- Google Campaign Manager (CM / DCM) -- Google Search Ads 360 (SA360) +- Adobe Analytics 1.4 +- Adobe Analytics 2.0 +- Amazon S3 +- Facebook Marketing +- Google Ads - Google Analytics -- Google Search Console -- Google Sheets - Google Cloud Storage -- Google Adwords +- Google Campaign Manager +- Google Display & Video 360 +- Google Search Ads 360 - Google Search Console -- Facebook Business Manager -- Twitter Ads -- Amazon S3 +- Google Sheets - Oracle -- SalesForce - MySQL - Radarly -- Adobe Analytics 1.4 -- Adobe Analytics 2.0 -- Yandex +- SalesForce +- Twitter Ads +- Yandex Campaign +- Yandex Statistics ### Writers @@ -99,4 +99,4 @@ It is advised to do the following in a virtual env * https://manikos.github.io/a-tour-on-python-packaging * http://lucumr.pocoo.org/2014/1/27/python-on-wheels/ -* https://pip.readthedocs.io/en/1.4.1/cookbook.html#controlling-setup-requires \ No newline at end of file +* https://pip.readthedocs.io/en/1.4.1/cookbook.html#controlling-setup-requires diff --git a/nck/readers/README.md b/nck/readers/README.md index f265c255..26649c46 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -2,6 +2,28 @@ Each reader role is to read data from external source and transform it into a Stream understable format to be written on GCS and BQ thanks to the corresponding writers. 
+## List of Readers + +- Adobe Analytics 1.4 +- Adobe Analytics 2.0 +- Amazon S3 +- Facebook Marketing +- Google Ads +- Google Analytics +- Google Cloud Storage +- Google Campaign Manager +- Google Display & Video 360 +- Google Search Ads 360 +- Google Search Console +- Google Sheets +- Oracle +- MySQL +- Radarly +- SalesForce +- Twitter Ads +- Yandex Campaign +- Yandex Statistics + ## Step to create a new Reader 1. Create python module following naming nomenclature ``` [command]_reader.py ``` @@ -10,11 +32,92 @@ Each reader role is to read data from external source and transform it into a St 4. Reference click command into [commands list](./__init__.py) 5. Update current README.md -## Facebook Reader +## Adobe Analytics Readers + +As of May 2020 (last update of this section of the documentation), **two versions of Adobe Analytics Reporting API are coexisting: 1.4 and 2.0**. As some functionalities of API 1.4 have not been made available in API 2.0 yet (Data Warehouse reports in particular), our Adobe Analytics Readers are also available in these two versions. + +### Adobe Analytics Reader 1.4 + +#### How to obtain credentials + +Our Adobe Analytics Reader 1.4 uses the **WSSE authentication framework**. This authentication framework is now deprecated, so you won't be able to generate new WSSE authentication credentials (Username, Password) on Adobe Developper Console if you don't already have them. 
+ +#### Quickstart + +Call example to Adobe Analytics Reader 1.4, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: + +``` +python nck/entrypoint.py read_adobe --adobe-username --adobe-password --adobe-report-suite-id --adobe-date-granularity day --adobe-report-element-id trackingcode --adobe-report-metric-id visits --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 write_console +``` + +#### Parameters + +|CLI option|Documentation| +|--|--| +|`--adobe-username`|Username used for WSSE authentication| +|`--adobe-password`|Password used for WSSE authentication| +|`--adobe-list-report-suite`|Should be set to *True* if you wish to request the list of available Adobe Report Suites (*default: False*). If set to *True*, the below parameters should be left empty.| +|`--adobe-report-suite-id`|ID of the requested Adobe Report Suite| +|`--adobe-report-element-id`|ID of the element (i.e. dimension) to include in the report| +|`--adobe-report-metric-id`|ID of the metric to include in the report| +|`--adobe-date-granularity`|Granularity of the report. *Possible values: PREVIOUS_DAY, LAST_30_DAYS, LAST_7_DAYS, LAST_90_DAYS*| +|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)| +|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)| + +#### Addtional information +- **The full list of available elements and metrics** can be retrieved with the [GetElements](https://github.com/AdobeDocs/analytics-1.4-apis/blob/master/docs/reporting-api/methods/r_GetElements.md) and [GetMetrics](https://github.com/AdobeDocs/analytics-1.4-apis/blob/master/docs/reporting-api/methods/r_GetMetrics.md) methods. +- **Adobe Analytics Reader 1.4 requests Data Warehouse reports** (the "source" parameter is set to "warehouse" in the report description), allowing it to efficiently process multiple-dimension requests. 
+- **If you need further information**, the documentation of Adobe APIs 1.4 can be found [here](https://github.com/AdobeDocs/analytics-1.4-apis). + +### Adobe Analytics Reader 2.0 + +#### How to obtain credentials + +Adobe Analytics Reader 2.0 uses the **JWT authentication framework**. +- Get developper access to Adobe Analytics (documentation can be found [here](https://helpx.adobe.com/enterprise/using/manage-developers.html)) +- Create a Service Account integration to Adobe Analytics on [Adobe Developper Console](https://console.adobe.io/) +- Use the generated JWT credentials (Client ID, Client Secret, Technical Account ID, Organization ID and private.key file) to retrieve your Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md')). All these parameters will be passed to Adobe Analytics Reader 2.0. + +#### Quickstart + +Call example to Adobe Analytics Reader 2.0, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: + +``` +python nck/entrypoint.py read_adobe_2_0 --adobe-client-id --adobe-client-secret --adobe-tech-account-id --adobe-org-id --adobe-private-key --adobe-global-company-id --adobe-report-suite-id --adobe-dimension daterangeday --adobe-dimension campaign --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 --adobe-metric visits write_console +``` + +#### Parameters + +|CLI option|Documentation| +|--|--| +|`--adobe-client-id`|Client ID, that you can find on Adobe Developper Console| +|`--adobe-client-secret`|Client Secret, that you can find on Adobe Developper Console| +|`--adobe-tech-account-id`|Technical Account ID, that you can find on Adobe Developper Console| +|`--adobe-org-id`|Organization ID, that you can find on Adobe Developper Console| +|`--adobe-private-key`|Content of the private.key file, that you had to provide to create the integration. 
Make sure to enter the parameter in quotes, include headers, and indicate newlines as \n.| +|`--adobe-global-company-id`|Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md'))| +|`--adobe-report-suite-id`|ID of the requested Adobe Report Suite| +|`--adobe-dimension`|Dimension to include in the report| +|`--adobe-metric`|Metric to include in the report| +|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)| +|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)| + +#### Additional information + +- **In API 2.0, dimension and metric names are slightly different from API 1.4**. To get new metric and dimension names and reproduce the behavior of Adobe Analytics UI as closely as possible, [enable the Debugger feature in Adobe Analytics Workspace](https://github.com/AdobeDocs/analytics-2.0-apis/blob/master/reporting-tricks.md): it allow you to visualize the back-end JSON requests made by Adobe Analytics UI to Reporting API 2.0. +- **In API 2.0, the date granularity parameter was removed, and should now be handled as a dimension**: a request featuring `--adobe-dimension daterangeday` will produce a report with a day granularity. +- **API 2.0 does not feature Data Warehouse reports yet** (along with other features, that are indicated on the "Current limitations" section of [this page](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/migration-guide.md)). For this reason, if you wish to collect multiple-dimension reports, Adobe Analytics Reader 1.4 might be a more efficient solution in terms of processing time. +- **If you need any further information**, the documentation of Adobe APIs 2.0 can be found [here](https://github.com/AdobeDocs/analytics-2.0-apis). 
+ +## Amazon S3 Reader + +*Not documented yet.* + +## Facebook Marketing Reader #### Quickstart -The Facebook Reader handles calls to 2 endpoints of the Facebook Marketing API: **Facebook Ad Insights** (to retrieve performance data), and **Facebook Object Node** (to retrieve configuration data). +The Facebook Marketing Reader handles calls to 2 endpoints of the Facebook Marketing API: **Facebook Ad Insights** (to retrieve performance data), and **Facebook Object Node** (to retrieve configuration data). *Example of Facebook Ad Insights Request* ``` @@ -46,9 +149,9 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- |`--facebook-breakdown`|How to break down the result. *This parameter is only relevant for Facebook Ad Insights Requests.*| |`--facebook-action-breakdown`|How to break down action results. *This parameter is only relevant for Facebook Ad Insights Requests.*| -#### Additional details for a relevant use of the Facebook Reader +#### Additional information -**#1: Make sure to select the appropriate `--facebook-level`** +**1. Make sure to select the appropriate `--facebook-level`** |If Facebook Object Type is...|Facebook Level can be...| |:--|:--| @@ -58,7 +161,7 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- |`ad`|ad, creative| |`creative`|creative| -**#2: Format Facebook Reader response using `--facebook-fields`** +**2. Format Facebook Marketing Reader response using `--facebook-fields`** 2.1. The list of **applicable fields** can be found on the links below: @@ -67,7 +170,7 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- 2.2. If you want to select **a nested field value**, simply indicate the path to this value within the request field. 
-*Facebook Reader Request* +*Facebook Marketing Reader Request* ``` --facebook-field object_story_spec[video_data][call_to_action][value][link] ``` @@ -87,14 +190,14 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- } ``` -*Facebook Reader Response* +*Facebook Marketing Reader Response* ``` {"object_story_spec_video_data_call_to_action_value_link": "https://www.artefact.com"} ``` (2.3) **Action Breakdown filters** can be applied to the fields of ***Facebook Ad Insights* Requests** using the following syntax: [:]. You can combine multiple Action Breakdown filters on the same field by adding them in cascade next to each other. -*Facebook Reader Request* +*Facebook Marketing Reader Request* ``` --facebook-action-breakdown action_type --facebook-field actions[action_type:video_view][action_type:post_engagement] @@ -122,69 +225,11 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- ] ``` -*Facebook Reader Response* +*Facebook Marketing Reader Response* ``` {"actions_action_type_video_view": "17", "actions_action_type_post_engagement": "25"} ``` -## Twitter Ads Reader - -#### How to obtain credentials - -* **Apply for a developper account** trough [this link](https://developer.twitter.com/en/apply). -* **Create a Twitter app** on the developper portal: it will generate your authentication credentials. -* **Apply for Twitter Ads API access** by filling out [this form]([https://developer.twitter.com/en/docs/ads/general/overview/adsapi-application](https://developer.twitter.com/en/docs/ads/general/overview/adsapi-application)). Receiving Twitter approval may take up to 7 business days. -* **Get a Campaign Analyst access to the Twitter Ads account** you wish to retrieve data for, on the @handle that you used to create your Twitter App. 
- -#### Quickstart - -The Twitter Ads Reader can collect **3 types of reports**, making calls to 3 endpoints of the Twitter Ads API: -* **ANALYTICS reports**, making calls to the [Asynchronous Analytics endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous). These reports return performance data for a wide range of metrics, that **can be aggregated over time**. Output data **can be splitted by day** when requested over a larger time period. -* **REACH reports**, making calls to the [Reach and Average Frequency endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/reach). These reports return performance data with a focus on reach and frequency metrics, that **cannot be aggregated over time** (*e.g. the reach of day A and B is not equal to the reach of day A + the reach of day B, as it counts unique individuals*). Output data **cannot be splitted by day** when requested over a larger time period. These reports are available **only for the Funding Instrument and Campaign entities**. -* **ENTITY reports**, making calls to [Campaign Management endpoints](https://developer.twitter.com/en/docs/ads/campaign-management/api-reference). These reports return details on entity configuration since the creation of the Twitter Ads account. 
- -*Call example for ANALYTICS reports* -This call will collect engagement metrics for Line Item entities, splitting the results by day, from 2020-01-01 to 2020-01-03: -``` -python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type ANALYTICS --twitter-entity LINE_ITEM --twitter-metric-group ENGAGEMENT --twitter-segmentation-type AGE --twitter-granularity DAY --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console -``` - -*Call example for REACH reports* -This call will collect reach metrics (*total_audience_reach, average_frequency*) for Campaign entities, from 2020-01-01 to 2020-01-03: -``` -python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type REACH --twitter-entity CAMPAIGN --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console -``` - -*Call example for ENTITY reports* -This call collects details on the configuration of Campaign entities (id, name, total_budget_amount_local_micro, currency), since the creation of the Twitter Ads account: -``` -python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type REACH --twitter-entity CAMPAIGN --twitter-entity-attribute id --twitter-entity-attribute name --twitter-entity-attribute total_budget_amount_local_micro --twitter-entity-attribute currency write_console -``` - -#### Parameters - -|CLI option|Documentation| -|--|--| -|`--twitter-consumer-key`|API key, available in the 'Keys and tokens' section of your Twitter Developper App.| -|`--twitter-consumer-secret`|API secret key, available in the 'Keys and tokens' section of your Twitter Developper App.| -|`--twitter-access-token`|Access token, available in the 
'Keys and tokens' section of your Twitter Developper App.| -|`--twitter-access-token-secret`|Access token secret, available in the 'Keys and tokens' section of your Twitter Developper App.| -|`--twitter-account-id`|Specifies the Twitter Account ID for which the data should be returned.| -|`--twitter-report-type`|Specifies the type of report to collect. *Possible values: ANALYTICS, REACH, ENTITY.*| -|`--twitter-entity`|Specifies the entity type to retrieve data for. *Possible values: FUNDING_INSTRUMENT, CAMPAIGN, LINE_ITEM, MEDIA_CREATIVE, PROMOTED_TWEET.*| -|`--twitter-entity-attribute`|Specific to ENTITY reports. Specifies the entity attribute (configuration detail) that should be returned.| -|`--twitter-granularity`|Specific to ANALYTICS reports. Specifies how granular the retrieved data should be. *Possible values: TOTAL (default), DAY.*| -|`--twitter-metric-group`|Specific to ANALYTICS reports. Specifies the list of metrics (as a group) that should be returned. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | -|`--twitter-placement`|Specific to ANALYTICS reports. Scopes the retrieved data to a particular placement. *Possible values: ALL_ON_TWITTER (default), PUBLISHER_NETWORK.*| -|`--twitter-segmentation-type`|Specific to ANALYTICS reports. Specifies how the retrieved data should be segmented. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | -|`--twitter-platform`|Specific to ANALYTICS reports. Required if segmentation_type is set to DEVICES or PLATFORM_VERSION. *Possible values can be identified through the targeting_criteria/locations*| -|`--twitter-country`|Specific to ANALYTICS reports. Required if segmentation_type is set to CITIES, POSTAL_CODES, or REGION. 
*Possible values can be identified through the GET targeting_criteria/platforms endpoint.*| -|`--twitter-start-date`|Specifies report start date (format: YYYY-MM-DD).| -|`--twitter-end-date`|Specifies report end date (format: YYYY-MM-DD).| -|`--twitter-add-request-date-to-report`|If set to *True* (default: *False*), the date on which the request is made will appear on each report record.| - -If you need any further information, the documentation of Twitter Ads API can be found [here](https://developer.twitter.com/en/docs/ads/general/overview). - ## Google Readers ### Authentication @@ -195,11 +240,9 @@ via the oAuth flow. A full script to do this can be found here: [Refresh token generator](https://github.com/artefactory/Refresh-token-generator-for-google-oauth) - ### Google Ads Reader -#### How to obtain Credentials - +#### How to obtain credentials Using the Google Ads API requires four things: - A developer token (Generated at a company level - one per company -, takes around 2 days to be approved by Google) which can be completely independant from the Google Ads Account you will be calling (though you need a Manager Google Ads Account to request a token for your company) @@ -216,8 +259,7 @@ to apply for access if your Company does not already have a developer token (gra See the [documentation here](https://developers.google.com/adwords/api/docs/guides/first-api-call "Make your first API call") to set-up your OAuth2 credentials and refresh token specifically for your Google Ads Accounts. 
- -#### Which Reports and Metrics are available in the API +#### Which reports and metrics are available in the API The list of available reports for the API, and the associated metrics, can be [found here](https://developers.google.com/adwords/api/docs/appendix/reports#available-reports "Report Types") @@ -225,7 +267,6 @@ The list of available reports for the API, and the associated metrics, can be [f - Call Example - The following command retrieves insights about the Ads of *my_first_campaign* and *my_second_campaign* in the Google Ads Account thanks to your company , and your , and with the necessary permissions to access your Accounts. @@ -236,12 +277,26 @@ python nck/entrypoint.py read_googleads --googleads-developer-token and +*Not documented yet.* -- A refresh token, created with the email address able to access to your Google Search Console Account. +### Google Campaign Manager Reader -- The URLs whose performance you want to see. +*Not documented yet.* -See the [documentation here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/prereqs "Search Console API") -to see an Overview of the Search Console API. +### Google Display & Video Reader +*Not documented yet.* -#### Search Analytics +### Google Search Console Reader + +#### Which reports and metrics are available in the API The list of available dimensions and metrics in the API can be [found here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query "Search Analytics") @@ -280,19 +337,27 @@ with the necessary permissions to access your Accounts. 
python nck/entrypoint.py read_search_console --search-console-client-id --search-console-refresh-token --search-console-site-url --search-console-dimensions country --search-console-dimensions device --search-console-start-date 2020-01-01 --search-console-end-date 2020-01-01 write_console ``` -- Parameters of the Google Search Console Readers +- Parameters of the Google Search Console Reader -| --search-console-client-id | --search-console-client-secret | --search-console-access-token | --search-console-refresh-token | --search-console-dimensions | --search-console-site-url | --search-console-start-date | --search-console-end-date | --search-console-date-column | --search-console-row-limit | -|:-----------------:|:---------------------:|:-----------------------:|:-----------------------:|:-----------------------:|:-----------------------:|:--------------------:|:---------------------------:|:----------------------:|:----------------------:| -|OAuth2 ID| OAuth2 Secret| Access token | Refresh token for OAuth2 | [Dimensions to request](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query#dimensionFilterGroups.filters.dimension) |Site URL whose performance you want to request| Start Date for the request | End Date for the request | If true, include date column in the report | Row number by report page | +|CLI option|Documentation| +|--|--| +|`--search-console-client-id`|OAuth2 ID| +|`--search-console-client-secret`|OAuth2 Secret| +|`--search-console-access-token`|Access token| +|`--search-console-refresh-token`|Refresh token for OAuth2| +|`--search-console-dimensions`|[Dimensions to request](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query#dimensionFilterGroups.filters.dimension)| +|`--search-console-site-url`|Site URL whose performance you want to request| +|`--search-console-start-date`|Start Date for the request| +|`--search-console-end-date`|End Date for the request| 
+|`--search-console-date-column`|If true, include date column in the report| +|`--search-console-row-limit`|Row number by report page| See the documents below for a better understanding of the parameters: - [Google Search Console API](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query) +### Google Search Ads 360 Reader -### Search Ads 360 Reader (SA360) - -#### How to obtain Credentials +#### How to obtain credentials Using the Search Ads API requires two things: @@ -303,8 +368,7 @@ Using the Search Ads API requires two things: See the [documentation here](https://developers.google.com/search-ads/v2/authorizing "SA360 Authentication") to set-up your OAuth2 credentials and refresh token specifically for Search Ads 360 Reporting. - -#### Which Reports and Metrics are available in the API +#### Which reports and metrics are available in the API The list of available reports for the API, and the associated metrics, can be [found here](https://developers.google.com/search-ads/v2/report-types "Report Types") @@ -312,7 +376,6 @@ The list of available reports for the API, and the associated metrics, can be [f - Call Example - The following command retrieves insights about the Ads in the Search Ads 360 Account from the agency thanks to your , and with the necessary permissions to access your Accounts. 
@@ -323,179 +386,187 @@ python nck/entrypoint.py read_sa360 --sa360-client-id --sa360-client *If it doesn't work, try to* `export PYTHONPATH="."` *in the nautilus-connector-kit folder (to be sure Python is reading correctly)* *If you want the output to be printed in your console, add* `write_console` *at the end of your command (see writers for more details)* - - Parameters of the SA360 Reader -| CLI option | Documentation | -| ---------- | ------------- | -|`--sa360-access-token` | (Optional) Access token | -|`--sa360-client-id` | OAuth2 ID | -|`--sa360-client-secret` | OAuth2 ID Secret | -|`--sa360-refresh-token` | Refresh token | -|`--sa360-agency-id` | Agency ID to request in SA360 | -|`--sa360-advertiser-id` | (Optional) Advertiser ids to request. If not provided, every advertiser of the agency will be requested| -|`--sa360-report-name` | (Optional) Name of the output report | -|`--sa360-report-type` | Type of the report to request. List [here](https://developers.google.com/search-ads/v2/report-types)| -|`--sa360-column` | Dimensions and metrics to request in the report | -|`--sa360-saved-column` | (Optional) Saved columns to report. See [documentation](https://developers.google.com/search-ads/v2/how-tos/reporting/saved-columns)| -|`--sa360-start-date` | Start date of the period to request | -|`--sa360-end-date` | End date of the period to request | +|CLI option|Documentation| +|--|--| +|`--sa360-access-token`|(Optional) Access token| +|`--sa360-client-id`|OAuth2 ID| +|`--sa360-client-secret`|OAuth2 ID Secret| +|`--sa360-refresh-token`|Refresh token| +|`--sa360-agency-id`|Agency ID to request in SA360| +|`--sa360-advertiser-id`|(Optional) Advertiser ids to request. 
If not provided, every advertiser of the agency will be requested| +|`--sa360-report-name`|(Optional) Name of the output report| +|`--sa360-report-type`|Type of the report to request (list [here](https://developers.google.com/search-ads/v2/report-types))| +|`--sa360-column`|Dimensions and metrics to request in the report| +|`--sa360-saved-column`|(Optional) Saved columns to report (see [documentation](https://developers.google.com/search-ads/v2/how-tos/reporting/saved-columns))| +|`--sa360-start-date`|Start date of the period to request| +|`--sa360-end-date`|End date of the period to request| See the documents below for a better understanding of the parameters: - [SA360 Reporting](https://developers.google.com/search-ads/v2/how-tos/reporting) +#### How to obtain credentials -## Yandex readers +Using the Google Search Console API requires three main parameters: +- OAuth2 credentials: and -For now, there is only one Yandex API you can access through Nautilus connectors: [Direct API](https://tech.yandex.com/direct/). -This API allows you to collect display metrics. +- A refresh token, created with the email address able to access to your Google Search Console Account. -### Access Yandex Direct API +- The URLs whose performance you want to see. -In order to access Yandex Direct API, you need two accounts: an advertiser account and a developer account. -Here is the process: +See the [documentation here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/prereqs "Search Console API") +to see an Overview of the Search Console API. -1. Create a developer account if you don't already have one. Click on the *Get started* button on this [page](https://direct.yandex.com/). -2. Create and register an app that will access Yandex Direct API via [Yandex OAuth](https://oauth.yandex.com/client/new). -3. Keep app client id safe.
Log in with your advertiser account and [give permission to the app to access your data](https://tech.yandex.com/oauth/doc/dg/tasks/get-oauth-token-docpage/). -4. Store your token very carefully. -5. Log out and log in as a developer and [ask permission to access Yandex Direct API](https://direct.yandex.com/registered/main.pl?cmd=apiSettings) (ask for Full access). Fill in the form. -6. Wait for Yandex support to reply but it should be within a week. +### Google Sheets Reader -### Yandex campaign reader +*Not documented yet.* -[Official documentation](https://tech.yandex.com/direct/doc/ref-v5/campaigns/get-docpage/) +## Oracle Reader -#### Quickstart +*Not documented yet.* -If you want to quickly get to the point, here is a simple command that get the daily budget for all your campaigns. +## MySQL Reader -```bash -python nck/entrypoint.py read_yandex_campaigns --yandex-token --yandex-field-name Id --yandex-field-name Name --yandex-field-name DailyBudget write_console -``` +*Not documented yet.* -Didn't work? See [troubleshooting](#troubleshooting) section. +## Radarly Reader -#### Parameters +*Not documented yet.* -| CLI option | Documentation | -| ---------- | ------------- | -| `--yandex-token` | Bear token that allows you to authenticate to the API | -| `--yandex-campaign-id` | (Optional) Selects campaigns with the specified IDs. | -| `--yandex-campaign-state` | (Optional) Selects campaigns with the specified [states](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status). | -| `--yandex-campaign-status` | (Optional) Selects campaigns with the specified [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status). | -| `--yandex-campaign-payment-status` | (Optional) Selects campaigns with the specified payment [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status). | -| `--yandex-field-name` | Parameters to get that are common to all types of campaigns. 
| +## Salesforce Reader -### Yandex statistics reader +*Not documented yet.* -[Official documentation](https://tech.yandex.com/direct/doc/reports/reports-docpage/) +## Twitter Ads Reader + +#### How to obtain credentials + +* **Apply for a developer account** through [this link](https://developer.twitter.com/en/apply). +* **Create a Twitter app** on the developer portal: it will generate your authentication credentials. +* **Apply for Twitter Ads API access** by filling out [this form](https://developer.twitter.com/en/docs/ads/general/overview/adsapi-application). Receiving Twitter approval may take up to 7 business days. +* **Get a Campaign Analyst access to the Twitter Ads account** you wish to retrieve data for, on the @handle that you used to create your Twitter App. #### Quickstart -The command below gives you a performance report for all your campaigns and since the beginning. +The Twitter Ads Reader can collect **3 types of reports**, making calls to 3 endpoints of the Twitter Ads API: +* **ANALYTICS reports**, making calls to the [Asynchronous Analytics endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous). These reports return performance data for a wide range of metrics, that **can be aggregated over time**. Output data **can be splitted by day** when requested over a larger time period. +* **REACH reports**, making calls to the [Reach and Average Frequency endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/reach). These reports return performance data with a focus on reach and frequency metrics, that **cannot be aggregated over time** (*e.g. the reach of day A and B is not equal to the reach of day A + the reach of day B, as it counts unique individuals*). Output data **cannot be splitted by day** when requested over a larger time period. These reports are available **only for the Funding Instrument and Campaign entities**. 
+* **ENTITY reports**, making calls to [Campaign Management endpoints](https://developer.twitter.com/en/docs/ads/campaign-management/api-reference). These reports return details on entity configuration since the creation of the Twitter Ads account. -```bash -python nck/entrypoint.py read_yandex_statistics --yandex-token --yandex-report-type AD_PERFORMANCE_REPORT --yandex-field-name AdFormat --yandex-field-name AdId --yandex-field-name Impressions --yandex-include-vat True --yandex-report-language en --yandex-field-name AdGroupName --yandex-field-name AdGroupId --yandex-field-name AdNetworkType --yandex-field-name CampaignId --yandex-field-name CampaignName --yandex-field-name CampaignType --yandex-field-name Date --yandex-field-name Device --yandex-field-name Clicks --yandex-field-name Conversions --yandex-field-name Cost --yandex-date-range ALL_TIME write_console +*Call example for ANALYTICS reports*: this call will collect engagement metrics for Line Item entities, splitting the results by day, from 2020-01-01 to 2020-01-03: +``` +python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type ANALYTICS --twitter-entity LINE_ITEM --twitter-metric-group ENGAGEMENT --twitter-segmentation-type AGE --twitter-granularity DAY --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console ``` -Didn't work? See [troubleshooting](#troubleshooting) section. 
+*Call example for REACH reports*: this call will collect reach metrics (*total_audience_reach, average_frequency*) for Campaign entities, from 2020-01-01 to 2020-01-03: +``` +python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type REACH --twitter-entity CAMPAIGN --twitter-start-date 2020-01-01 --twitter-end-date 2020-01-03 write_console +``` -#### Parameters +*Call example for ENTITY reports*: this call collects details on the configuration of Campaign entities (id, name, total_budget_amount_local_micro, currency), since the creation of the Twitter Ads account: +``` +python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type ENTITY --twitter-entity CAMPAIGN --twitter-entity-attribute id --twitter-entity-attribute name --twitter-entity-attribute total_budget_amount_local_micro --twitter-entity-attribute currency write_console +``` -Detailed version [here](https://tech.yandex.com/direct/doc/reports/spec-docpage/). +#### Parameters -| CLI option | Documentation | -| ---------- | ------------- | -| `--yandex-token` | Bear token that allows you to authenticate to the API | -| `--yandex-report-language` | (Optional) Language of the report. See all options [here](https://tech.yandex.com/direct/doc/dg/concepts/headers-docpage/#headers__accept-language). | -| `--yandex-filter` | (Optional) Filters on a particular field. | -| `--yandex-max-rows` | (Optional) The maximum number of rows in the report. | -| `--yandex-field-name` | Information you want to collect. Complete list [here](https://tech.yandex.com/direct/doc/reports/fields-list-docpage/). | -| `--yandex-report-type` | Type of report. Linked to the fields you want to select.
| -| `--yandex-date-range` | List [here](https://tech.yandex.com/direct/doc/reports/period-docpage/). | -| `--yandex-include-vat` | Adds VAT to your expenses if set to `True`| -| `--yandex-date-start` | (Optional) Selects data on a specific period of time. Combined with `--yandex-date-stop` and `--yandex-date-range` set to `CUSTOM_DATE`. | -| `--yandex-date-stop` | (Optional) Selects data on a specific period of time. Combined with `--yandex-date-start` and `--yandex-date-range` set to `CUSTOM_DATE`. | +|CLI option|Documentation| +|--|--| +|`--twitter-consumer-key`|API key, available in the 'Keys and tokens' section of your Twitter Developer App.| +|`--twitter-consumer-secret`|API secret key, available in the 'Keys and tokens' section of your Twitter Developer App.| +|`--twitter-access-token`|Access token, available in the 'Keys and tokens' section of your Twitter Developer App.| +|`--twitter-access-token-secret`|Access token secret, available in the 'Keys and tokens' section of your Twitter Developer App.| +|`--twitter-account-id`|Specifies the Twitter Account ID for which the data should be returned.| +|`--twitter-report-type`|Specifies the type of report to collect. *Possible values: ANALYTICS, REACH, ENTITY.*| +|`--twitter-entity`|Specifies the entity type to retrieve data for. *Possible values: FUNDING_INSTRUMENT, CAMPAIGN, LINE_ITEM, MEDIA_CREATIVE, PROMOTED_TWEET.*| +|`--twitter-entity-attribute`|Specific to ENTITY reports. Specifies the entity attribute (configuration detail) that should be returned.| +|`--twitter-granularity`|Specific to ANALYTICS reports. Specifies how granular the retrieved data should be. *Possible values: TOTAL (default), DAY.*| +|`--twitter-metric-group`|Specific to ANALYTICS reports. Specifies the list of metrics (as a group) that should be returned. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | +|`--twitter-placement`|Specific to ANALYTICS reports. 
Scopes the retrieved data to a particular placement. *Possible values: ALL_ON_TWITTER (default), PUBLISHER_NETWORK.*| +|`--twitter-segmentation-type`|Specific to ANALYTICS reports. Specifies how the retrieved data should be segmented. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | +|`--twitter-platform`|Specific to ANALYTICS reports. Required if segmentation_type is set to DEVICES or PLATFORM_VERSION. *Possible values can be identified through the targeting_criteria/locations*| +|`--twitter-country`|Specific to ANALYTICS reports. Required if segmentation_type is set to CITIES, POSTAL_CODES, or REGION. *Possible values can be identified through the GET targeting_criteria/platforms endpoint.*| +|`--twitter-start-date`|Specifies report start date (format: YYYY-MM-DD).| +|`--twitter-end-date`|Specifies report end date (format: YYYY-MM-DD).| +|`--twitter-add-request-date-to-report`|If set to *True* (default: *False*), the date on which the request is made will appear on each report record.| -## Adobe Analytics Readers +If you need any further information, the documentation of Twitter Ads API can be found [here](https://developer.twitter.com/en/docs/ads/general/overview). -As of May 2020 (last update of this section of the documentation), **two versions of Adobe Analytics Reporting API are coexisting: 1.4 and 2.0**. As some functionalities of API 1.4 have not been made available in API 2.0 yet (Data Warehouse reports in particular), our Adobe Analytics Readers are also available in these two versions. +## Yandex Readers -### Adobe Analytics Reader 1.4 +For now, there is only one Yandex API you can access through Nautilus connectors: [Direct API](https://tech.yandex.com/direct/). +This API allows you to collect display metrics. #### How to obtain credentials -Our Adobe Analytics Reader 1.4 uses the **WSSE authentication framework**. 
This authentication framework is now deprecated, so you won't be able to generate new WSSE authentication credentials (Username, Password) on Adobe Developper Console if you don't already have them. +In order to access Yandex Direct API, you need two accounts: an advertiser account and a developer account. +Here is the process: + +1. Create a developer account if you don't already have one. Click on the *Get started* button on this [page](https://direct.yandex.com/). +2. Create and register an app that will access Yandex Direct API via [Yandex OAuth](https://oauth.yandex.com/client/new). +3. Keep app client id safe. Log in with your advertiser account and [give permission to the app to access your data](https://tech.yandex.com/oauth/doc/dg/tasks/get-oauth-token-docpage/). +4. Store your token very carefully. +5. Log out and log in as a developer and [ask permission to access Yandex Direct API](https://direct.yandex.com/registered/main.pl?cmd=apiSettings) (ask for Full access). Fill in the form. +6. Wait for Yandex support to reply but it should be within a week. + +### Yandex Campaign Reader + +[Official documentation](https://tech.yandex.com/direct/doc/ref-v5/campaigns/get-docpage/) #### Quickstart -Call example to Adobe Analytics Reader 1.4, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: +If you want to quickly get to the point, here is a simple command that get the daily budget for all your campaigns. +```bash +python nck/entrypoint.py read_yandex_campaigns --yandex-token --yandex-field-name Id --yandex-field-name Name --yandex-field-name DailyBudget write_console ``` -python nck/entrypoint.py read_adobe --adobe-username --adobe-password --adobe-report-suite-id --adobe-date-granularity day --adobe-report-element-id trackingcode --adobe-report-metric-id visits --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 write_console -``` + +Didn't work? 
See [troubleshooting](#troubleshooting) section. #### Parameters |CLI option|Documentation| -|--|--| -|`--adobe-username`|Username used for WSSE authentication| -|`--adobe-password`|Password used for WSSE authentication| -|`--adobe-list-report-suite`|Should be set to *True* if you wish to request the list of available Adobe Report Suites (*default: False*). If set to *True*, the below parameters should be left empty.| -|`--adobe-report-suite-id`|ID of the requested Adobe Report Suite| -|`--adobe-report-element-id`|ID of the element (i.e. dimension) to include in the report| -|`--adobe-report-metric-id`|ID of the metric to include in the report| -|`--adobe-date-granularity`|Granularity of the report. *Possible values: PREVIOUS_DAY, LAST_30_DAYS, LAST_7_DAYS, LAST_90_DAYS*| -|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)| -|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)| - -#### Addtional information -- **The full list of available elements and metrics** can be retrieved with the [GetElements](https://github.com/AdobeDocs/analytics-1.4-apis/blob/master/docs/reporting-api/methods/r_GetElements.md) and [GetMetrics](https://github.com/AdobeDocs/analytics-1.4-apis/blob/master/docs/reporting-api/methods/r_GetMetrics.md) methods. -- **Adobe Analytics Reader 1.4 requests Data Warehouse reports** (the "source" parameter is set to "warehouse" in the report description), allowing it to efficiently process multiple-dimension requests. -- **If you need further information**, the documentation of Adobe APIs 1.4 can be found [here](https://github.com/AdobeDocs/analytics-1.4-apis). 
+|--| -| +|`--yandex-token`|Bear token that allows you to authenticate to the API| +|`--yandex-campaign-id`|(Optional) Selects campaigns with the specified IDs.| +|`--yandex-campaign-state`|(Optional) Selects campaigns with the specified [states](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| +|`--yandex-campaign-status`|(Optional) Selects campaigns with the specified [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| +|`--yandex-campaign-payment-status`|(Optional) Selects campaigns with the specified payment [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| +|`--yandex-field-name`|Parameters to get that are common to all types of campaigns.| -### Adobe Analytics Reader 2.0 - -#### How to obtain credentials +### Yandex Statistics Reader -Adobe Analytics Reader 2.0 uses the **JWT authentication framework**. -- Get developper access to Adobe Analytics (documentation can be found [here](https://helpx.adobe.com/enterprise/using/manage-developers.html)) -- Create a Service Account integration to Adobe Analytics on [Adobe Developper Console](https://console.adobe.io/) -- Use the generated JWT credentials (Client ID, Client Secret, Technical Account ID, Organization ID and private.key file) to retrieve your Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md')). All these parameters will be passed to Adobe Analytics Reader 2.0. +[Official documentation](https://tech.yandex.com/direct/doc/reports/reports-docpage/) #### Quickstart -Call example to Adobe Analytics Reader 2.0, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: +The command below gives you a performance report for all your campaigns and since the beginning. 
-``` -python nck/entrypoint.py read_adobe_2_0 --adobe-client-id --adobe-client-secret --adobe-tech-account-id --adobe-org-id --adobe-private-key --adobe-global-company-id --adobe-report-suite-id --adobe-dimension daterangeday --adobe-dimension campaign --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 --adobe-metric visits write_console +```bash +python nck/entrypoint.py read_yandex_statistics --yandex-token --yandex-report-type AD_PERFORMANCE_REPORT --yandex-field-name AdFormat --yandex-field-name AdId --yandex-field-name Impressions --yandex-include-vat True --yandex-report-language en --yandex-field-name AdGroupName --yandex-field-name AdGroupId --yandex-field-name AdNetworkType --yandex-field-name CampaignId --yandex-field-name CampaignName --yandex-field-name CampaignType --yandex-field-name Date --yandex-field-name Device --yandex-field-name Clicks --yandex-field-name Conversions --yandex-field-name Cost --yandex-date-range ALL_TIME write_console ``` +Didn't work? See [troubleshooting](#troubleshooting) section. + #### Parameters +Detailed version [here](https://tech.yandex.com/direct/doc/reports/spec-docpage/). + |CLI option|Documentation| |--|--| -|`--adobe-client-id`|Client ID, that you can find on Adobe Developper Console| -|`--adobe-client-secret`|Client Secret, that you can find on Adobe Developper Console| -|`--adobe-tech-account-id`|Technical Account ID, that you can find on Adobe Developper Console| -|`--adobe-org-id`|Organization ID, that you can find on Adobe Developper Console| -|`--adobe-private-key`|Content of the private.key file, that you had to provide to create the integration. 
Make sure to enter the parameter in quotes, include headers, and indicate newlines as \n.| -|`--adobe-global-company-id`|Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md'))| -|`--adobe-report-suite-id`|ID of the requested Adobe Report Suite| -|`--adobe-dimension`|Dimension to include in the report| -|`--adobe-metric`|Metric to include in the report| -|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)| -|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)| - -#### Additional information - -- **In API 2.0, dimension and metric names are slightly different from API 1.4**. To get new metric and dimension names and reproduce the behavior of Adobe Analytics UI as closely as possible, [enable the Debugger feature in Adobe Analytics Workspace](https://github.com/AdobeDocs/analytics-2.0-apis/blob/master/reporting-tricks.md): it allow you to visualize the back-end JSON requests made by Adobe Analytics UI to Reporting API 2.0. -- **In API 2.0, the date granularity parameter was removed, and should now be handled as a dimension**: a request featuring `--adobe-dimension daterangeday` will produce a report with a day granularity. -- **API 2.0 does not feature Data Warehouse reports yet** (along with other features, that are indicated on the "Current limitations" section of [this page](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/migration-guide.md)). For this reason, if you wish to collect multiple-dimension reports, Adobe Analytics Reader 1.4 might be a more efficient solution in terms of processing time. -- **If you need any further information**, the documentation of Adobe APIs 2.0 can be found [here](https://github.com/AdobeDocs/analytics-2.0-apis). 
- -### Troubleshooting +|`--yandex-token`|Bear token that allows you to authenticate to the API| +|`--yandex-report-language`|(Optional) Language of the report. See all options [here](https://tech.yandex.com/direct/doc/dg/concepts/headers-docpage/#headers__accept-language).| +|`--yandex-filter`|(Optional) Filters on a particular field.| +|`--yandex-max-rows`|(Optional) The maximum number of rows in the report.| +|`--yandex-field-name`|Information you want to collect. Complete list [here](https://tech.yandex.com/direct/doc/reports/fields-list-docpage/).| +|`--yandex-report-type`|Type of report. Linked to the fields you want to select.| +|`--yandex-date-range`|List [here](https://tech.yandex.com/direct/doc/reports/period-docpage/).| +|`--yandex-include-vat`|Adds VAT to your expenses if set to `True`| +|`--yandex-date-start`|(Optional) Selects data on a specific period of time. Combined with `--yandex-date-stop` and `--yandex-date-range` set to `CUSTOM_DATE`.| +|`--yandex-date-stop`|(Optional) Selects data on a specific period of time. Combined with `--yandex-date-start` and `--yandex-date-range` set to `CUSTOM_DATE`.| + +## Troubleshooting You encountered and you don't know what 's going on. You may find an answer in the troubleshooting guide below. 
diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py index d1851bfe..5ce3421e 100644 --- a/nck/readers/twitter_reader.py +++ b/nck/readers/twitter_reader.py @@ -13,7 +13,6 @@ import logging import click from click import ClickException -import pandas as pd from time import sleep from itertools import chain from datetime import datetime, timedelta @@ -39,9 +38,6 @@ from twitter_ads.http import Request from twitter_ads.cursor import Cursor -logging.basicConfig(level="INFO") -logger = logging.getLogger() - API_DATEFORMAT = "%Y-%m-%dT%H:%M:%SZ" REP_DATEFORMAT = "%Y-%m-%d" MAX_WAITING_SEC = 3600 @@ -199,9 +195,16 @@ def __init__( self.platform = platform self.country = country - # Check input parameters + # Define check functions - if self.report_type == "ANALYTICS": + def check_report_dates(): + + if end_date < start_date: + raise ClickException( + "Report end date should be equal or anterior to report start date." + ) + + def check_analytics_report_segmentation(): if ( self.segmentation_type in ["DEVICES", "PLATFORM VERSION"] @@ -215,6 +218,8 @@ def __init__( ): raise ClickException("Please provide a value for 'country'.") + def check_analytics_report_metric_groups(): + if self.entity == "FUNDING_INSTRUMENT" and any( [ metric_group not in ["ENGAGEMENT", "BILLING"] @@ -225,21 +230,22 @@ def __init__( "'FUNDING_INSTRUMENT' only accept the 'ENGAGEMENT' and 'BILLING' metric groups." ) - if "MOBILE_CONVERSION" in self.metric_groups and len( - self.metric_groups > 1 + if ( + "MOBILE_CONVERSION" in self.metric_groups + and len(self.metric_groups) > 1 ): raise ClickException( "'MOBILE_CONVERSION' data should be requested separately." ) - elif self.report_type == "REACH": + def check_reach_report_entities(): if self.entity not in ["CAMPAIGN", "FUNDING_INSTRUMENT"]: raise ClickException( "'REACH' reports only accept the 'CAMPAIGN' and 'FUNDING_INSTRUMENT' entities." 
) - elif self.report_type == "ENTITY": + def check_entity_report_entity_attributes(): if not all( [ @@ -251,20 +257,28 @@ def __init__( f"Available attributes for '{self.entity}' are: {ENTITY_ATTRIBUTES[self.entity]}" ) + # Check input parameters + + check_report_dates() + + if self.report_type == "ANALYTICS": + check_analytics_report_segmentation() + check_analytics_report_metric_groups() + + elif self.report_type == "REACH": + check_reach_report_entities() + + elif self.report_type == "ENTITY": + check_entity_report_entity_attributes() + def get_daily_period_items(self): """ Returns a list of datetime instances representing each date contained in the requested period. Useful when granularity is set to 'DAY'. """ - period_items = [] - current_date = self.start_date - - while current_date < self.end_date: - period_items.append(current_date) - current_date += timedelta(days=1) - - return period_items + delta = self.end_date - self.start_date + return [self.start_date + timedelta(days=i) for i in range(delta.days)] def get_active_entity_ids(self): """ @@ -333,22 +347,48 @@ def parse(self, raw_analytics_response): """ for entity_resp in raw_analytics_response["data"]: - for entity_data in entity_resp["id_data"]: + entity_records = [ + { + "id": entity_resp["id"], + **{ + mt: 0 + if entity_data["metrics"][mt] is None + else entity_data["metrics"][mt][i] + for mt in entity_data["metrics"] + }, + } + for i in range(raw_analytics_response["time_series_length"]) + ] + entity_records = self.add_daily_timestamps(entity_records) + entity_records = self.add_segment(entity_records, entity_data) + yield from entity_records - entity_df = pd.DataFrame(entity_data["metrics"]).fillna(0) - entity_df["id"] = entity_resp["id"] - if self.granularity == "DAY": - entity_df["date"] = [ - item.strftime(REP_DATEFORMAT) - for item in self.get_daily_period_items() - ] - if self.segmentation_type: - entity_df[self.segmentation_type.lower()] = entity_data["segment"][ - "segment_name" - ] + def 
add_daily_timestamps(self, entity_records): + """ + Add daily timestamps to a list of records, if granularity is 'DAY'. + """ + + if self.granularity == "DAY": + period_items = self.get_daily_period_items() + return [ + {**entity_records[i], "date": period_items[i].strftime(REP_DATEFORMAT)} + for i in range(len(entity_records)) + ] + return entity_records + + def add_segment(self, entity_records, entity_data): + """ + Add segment to a list of records, if a segmentation_type is requested. + """ - yield from entity_df.to_dict("records") + if self.segmentation_type: + entity_segment = entity_data["segment"]["segment_name"] + return [ + {**rec, self.segmentation_type.lower(): entity_segment} + for rec in entity_records + ] + return entity_records def get_analytics_report(self, job_ids): """ @@ -380,7 +420,7 @@ def get_analytics_report(self, job_ids): def get_entity_report(self): """ - Get 'ENTITY' report through 'Core Entity' endpoints of Twitter Ads API. + Get 'ENTITY' report through 'Campaign Management' endpoints of Twitter Ads API. Documentation: https://developer.twitter.com/en/docs/ads/campaign-management/api-reference """ @@ -403,11 +443,7 @@ def get_reach_report(self): Documentation: https://developer.twitter.com/en/docs/ads/analytics/api-reference/reach """ - resource = ( - "/" - + API_VERSION - + f"/stats/accounts/{self.account.id}/reach/{self.entity.lower()}s" - ) + resource = f"/{API_VERSION}/stats/accounts/{self.account.id}/reach/{self.entity.lower()}s" entity_ids = self.get_active_entity_ids() for chunk_entity_ids in split_list(entity_ids, MAX_ENTITY_IDS_PER_JOB): @@ -420,17 +456,22 @@ def get_reach_report(self): request = Request(self.client, "get", resource, params=params) yield from Cursor(None, request) - def add_date_if_necessary(self, record): + def add_request_or_period_dates(self, record): """ Add request_date, period_start_date and/or period_end_date to a JSON-like record. 
""" + def check_add_period_date_to_report(): + if self.report_type == "ANALYTICS" and self.granularity == "TOTAL": + return True + elif self.report_type == "REACH": + return True + return False + if self.add_request_date_to_report: record["request_date"] = datetime.today().strftime(REP_DATEFORMAT) - if ( - self.report_type == "ANALYTICS" and self.granularity == "TOTAL" - ) or self.report_type == "REACH": + if check_add_period_date_to_report(): record["period_start_date"] = self.start_date.strftime(REP_DATEFORMAT) record["period_end_date"] = (self.end_date - timedelta(days=1)).strftime( REP_DATEFORMAT @@ -461,6 +502,6 @@ def read(self): def result_generator(): for record in data: - yield self.add_date_if_necessary(record) + yield self.add_request_or_period_dates(record) yield JSONStream("results_" + self.account.id, result_generator()) diff --git a/requirements.txt b/requirements.txt index 9c00b1d0..f0e897f9 100644 --- a/requirements.txt +++ b/requirements.txt @@ -62,6 +62,5 @@ urllib3==1.25.7 Werkzeug==0.16.0 googleads==22.0.0 twitter-ads==7.0.1 -pandas==1.0.3 pyjwt==1.7.1 cryptography==2.9 diff --git a/tests/readers/test_twitter_reader.py b/tests/readers/test_twitter_reader.py index b57d52de..e19a478b 100644 --- a/tests/readers/test_twitter_reader.py +++ b/tests/readers/test_twitter_reader.py @@ -17,33 +17,137 @@ # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
from unittest import TestCase, mock +from click import ClickException from freezegun import freeze_time +from datetime import datetime -from datetime import datetime, timedelta +from twitter_ads.client import Client from nck.readers.twitter_reader import TwitterReader class TwitterReaderTest(TestCase): - def mock_twitter_reader(self, **kwargs): - for param, value in kwargs.items(): - if param == "end_date": - setattr(self, param, value + timedelta(days=1)) - else: - setattr(self, param, value) - setattr(self, "account", mock.MagicMock()) - - @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + + kwargs = { + "consumer_key": "", + "consumer_secret": "", + "access_token": "", + "access_token_secret": "", + "account_id": "", + "report_type": None, + "entity": None, + "entity_attribute": [], + "granularity": None, + "metric_group": [], + "placement": None, + "segmentation_type": None, + "platform": None, + "country": None, + "add_request_date_to_report": None, + "start_date": datetime(2020, 1, 1), + "end_date": datetime(2020, 1, 3), + } + + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) + def test_check_report_dates(self): + temp_kwargs = self.kwargs.copy() + params = {"start_date": datetime(2020, 1, 3), "end_date": datetime(2020, 1, 1)} + temp_kwargs.update(params) + with self.assertRaises(ClickException): + TwitterReader(**temp_kwargs) + + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) + def test_check_analytics_report_segmentation_if_missing_platform(self): + temp_kwargs = self.kwargs.copy() + params = { + "report_type": "ANALYTICS", + "segmentation_type": "DEVICES", + "platform": None, + } + temp_kwargs.update(params) + with self.assertRaises(ClickException): + TwitterReader(**temp_kwargs) + + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: 
None) + def test_check_analytics_report_segmentation_if_missing_country(self): + temp_kwargs = self.kwargs.copy() + params = { + "report_type": "ANALYTICS", + "segmentation_type": "CITIES", + "country": None, + } + temp_kwargs.update(params) + with self.assertRaises(ClickException): + TwitterReader(**temp_kwargs) + + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) + def test_check_analytics_report_metric_groups_if_funding_instrument(self): + temp_kwargs = self.kwargs.copy() + params = { + "report_type": "ANALYTICS", + "entity": "FUNDING_INSTRUMENT", + "metric_group": ["ENGAGEMENT", "VIDEO"], + } + temp_kwargs.update(params) + with self.assertRaises(ClickException): + TwitterReader(**temp_kwargs) + + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) + def test_check_analytics_report_metric_groups_if_mobile_conversion(self): + temp_kwargs = self.kwargs.copy() + params = { + "report_type": "ANALYTICS", + "metric_group": ["MOBILE_CONVERSION", "ENGAGEMENT"], + } + temp_kwargs.update(params) + with self.assertRaises(ClickException): + TwitterReader(**temp_kwargs) + + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) + def test_check_reach_report_entities(self): + temp_kwargs = self.kwargs.copy() + params = {"report_type": "REACH", "entity": "LINE_ITEM"} + temp_kwargs.update(params) + with self.assertRaises(ClickException): + TwitterReader(**temp_kwargs) + + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) + def test_check_entity_report_entity_attributes(self): + temp_kwargs = self.kwargs.copy() + params = { + "report_type": "ENTITY", + "entity": "CAMPAIGN", + "entity_attribute": ["id", "name", "XXXXX"], + } + temp_kwargs.update(params) + with self.assertRaises(ClickException): + 
TwitterReader(**temp_kwargs) + + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) def test_get_daily_period_items(self): - kwargs = {"start_date": datetime(2020, 1, 1), "end_date": datetime(2020, 1, 3)} - output = TwitterReader(**kwargs).get_daily_period_items() + temp_kwargs = self.kwargs.copy() + params = {"start_date": datetime(2020, 1, 1), "end_date": datetime(2020, 1, 3)} + temp_kwargs.update(params) + output = TwitterReader(**temp_kwargs).get_daily_period_items() expected = [datetime(2020, 1, 1), datetime(2020, 1, 2), datetime(2020, 1, 3)] self.assertEqual(output, expected) - @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) def test_parse_with_total_granularity(self): - kwargs = {"granularity": "TOTAL", "segmentation_type": None} + temp_kwargs = self.kwargs.copy() + params = {"granularity": "TOTAL", "segmentation_type": None} + temp_kwargs.update(params) raw_analytics_response = { + "time_series_length": 1, "data": [ { "id": "XXXXX", @@ -59,7 +163,7 @@ def test_parse_with_total_granularity(self): }, ], } - output = TwitterReader(**kwargs).parse(raw_analytics_response) + output = TwitterReader(**temp_kwargs).parse(raw_analytics_response) expected = [ {"id": "XXXXX", "retweets": 11, "likes": 12}, {"id": "YYYYY", "retweets": 21, "likes": 22}, @@ -67,15 +171,19 @@ def test_parse_with_total_granularity(self): for output_record, expected_record in zip(output, expected): self.assertEqual(output_record, expected_record) - @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) def test_parse_with_day_granularity(self): - kwargs = { + temp_kwargs = self.kwargs.copy() + params = { "granularity": "DAY", "segmentation_type": None, 
"start_date": datetime(2020, 1, 1), "end_date": datetime(2020, 1, 3), } + temp_kwargs.update(params) raw_analytics_response = { + "time_series_length": 3, "data": [ { "id": "XXXXX", @@ -103,7 +211,7 @@ def test_parse_with_day_granularity(self): }, ], } - output = TwitterReader(**kwargs).parse(raw_analytics_response) + output = TwitterReader(**temp_kwargs).parse(raw_analytics_response) expected = [ {"date": "2020-01-01", "id": "XXXXX", "retweets": 11, "likes": 14}, {"date": "2020-01-02", "id": "XXXXX", "retweets": 12, "likes": 15}, @@ -115,10 +223,14 @@ def test_parse_with_day_granularity(self): for output_record, expected_record in zip(output, expected): self.assertEqual(output_record, expected_record) - @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) def test_parse_with_segment(self): - kwargs = {"granularity": "TOTAL", "segmentation_type": "GENDER"} + temp_kwargs = self.kwargs.copy() + params = {"granularity": "TOTAL", "segmentation_type": "GENDER"} + temp_kwargs.update(params) raw_analytics_response = { + "time_series_length": 1, "data": [ { "id": "XXXXX", @@ -148,7 +260,7 @@ def test_parse_with_segment(self): }, ], } - output = TwitterReader(**kwargs).parse(raw_analytics_response) + output = TwitterReader(**temp_kwargs).parse(raw_analytics_response) expected = [ {"id": "XXXXX", "gender": "Male", "retweets": 11, "likes": 12}, {"id": "XXXXX", "gender": "Female", "retweets": 13, "likes": 14}, @@ -158,24 +270,27 @@ def test_parse_with_segment(self): for output_record, expected_record in zip(output, expected): self.assertDictEqual(output_record, expected_record) - @freeze_time("2020-01-01") - @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) - def test_add_date_if_necessary(self): - kwargs = { + @freeze_time("2020-01-03") + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, 
"accounts", lambda *args: None) + def test_add_request_or_period_dates(self): + temp_kwargs = self.kwargs.copy() + params = { "report_type": "ANALYTICS", "granularity": "TOTAL", "start_date": datetime(2020, 1, 1), "end_date": datetime(2020, 1, 3), "add_request_date_to_report": True, } + temp_kwargs.update(params) record = {"id": "XXXXX", "name": "Artefact Campaign"} - output = TwitterReader(**kwargs).add_date_if_necessary(record) + output = TwitterReader(**temp_kwargs).add_request_or_period_dates(record) expected = { "id": "XXXXX", "name": "Artefact Campaign", "period_start_date": "2020-01-01", "period_end_date": "2020-01-03", - "request_date": "2020-01-01", + "request_date": "2020-01-03", } self.assertEqual(output, expected) @@ -190,7 +305,8 @@ def mock_parse(*args): {"id": "YYYYY", "retweets": 21, "likes": 22}, ] - @mock.patch.object(TwitterReader, "__init__", mock_twitter_reader) + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) @mock.patch.object( TwitterReader, "get_active_entity_ids", lambda *args: ["XXXXX", "YYYYYY"] ) @@ -199,12 +315,16 @@ def mock_parse(*args): @mock.patch.object(TwitterReader, "get_raw_analytics_response", lambda *args: {}) @mock.patch.object(TwitterReader, "parse", mock_parse) def test_read_analytics_report(self): - kwargs = { + temp_kwargs = self.kwargs.copy() + params = { "report_type": "ANALYTICS", "granularity": "DAY", "add_request_date_to_report": False, } - output = next(TwitterReader(**kwargs).read()) + temp_kwargs.update(params) + reader = TwitterReader(**temp_kwargs) + reader.account = mock.MagicMock() + output = next(reader.read()) expected = [ {"id": "XXXXX", "retweets": 11, "likes": 12}, {"id": "YYYYY", "retweets": 21, "likes": 22}, From 0e9c980458110cd21c3ccc02e06201ae19ed301c Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Thu, 11 Jun 2020 14:32:08 +0200 Subject: [PATCH 05/54] Reduce complexity of TwitterReader constructor (input validations) --- 
nck/readers/twitter_reader.py | 53 +++++++++++++++------------- tests/readers/test_twitter_reader.py | 14 ++++---- 2 files changed, 36 insertions(+), 31 deletions(-) diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py index 5ce3421e..cbc66a1c 100644 --- a/nck/readers/twitter_reader.py +++ b/nck/readers/twitter_reader.py @@ -195,17 +195,30 @@ def __init__( self.platform = platform self.country = country - # Define check functions + # Validate input parameters + self.validate_params() - def check_report_dates(): + def validate_params(self): + """ + Validate combination of input parameters (triggered in TwitterReader constructor). + """ - if end_date < start_date: - raise ClickException( - "Report end date should be equal or anterior to report start date." - ) + self.validate_dates() + self.validate_analytics_segmentation() + self.validate_analytics_metric_groups() + self.validate_reach_entity() + self.validate_entity_attributes() + + def validate_dates(self): - def check_analytics_report_segmentation(): + if self.end_date - timedelta(days=1) < self.start_date: + raise ClickException( + "Report end date should be equal or ulterior to report start date." + ) + + def validate_analytics_segmentation(self): + if self.report_type == "ANALYTICS": if ( self.segmentation_type in ["DEVICES", "PLATFORM VERSION"] and not self.platform @@ -218,7 +231,9 @@ def check_analytics_report_segmentation(): ): raise ClickException("Please provide a value for 'country'.") - def check_analytics_report_metric_groups(): + def validate_analytics_metric_groups(self): + + if self.report_type == "ANALYTICS": if self.entity == "FUNDING_INSTRUMENT" and any( [ @@ -238,14 +253,18 @@ def check_analytics_report_metric_groups(): "'MOBILE_CONVERSION' data should be requested separately." 
) - def check_reach_report_entities(): + def validate_reach_entity(self): + + if self.report_type == "REACH": if self.entity not in ["CAMPAIGN", "FUNDING_INSTRUMENT"]: raise ClickException( "'REACH' reports only accept the 'CAMPAIGN' and 'FUNDING_INSTRUMENT' entities." ) - def check_entity_report_entity_attributes(): + def validate_entity_attributes(self): + + if self.report_type == "ENTITY": if not all( [ @@ -257,20 +276,6 @@ def check_entity_report_entity_attributes(): f"Available attributes for '{self.entity}' are: {ENTITY_ATTRIBUTES[self.entity]}" ) - # Check input parameters - - check_report_dates() - - if self.report_type == "ANALYTICS": - check_analytics_report_segmentation() - check_analytics_report_metric_groups() - - elif self.report_type == "REACH": - check_reach_report_entities() - - elif self.report_type == "ENTITY": - check_entity_report_entity_attributes() - def get_daily_period_items(self): """ Returns a list of datetime instances representing each date contained diff --git a/tests/readers/test_twitter_reader.py b/tests/readers/test_twitter_reader.py index e19a478b..7d3c832d 100644 --- a/tests/readers/test_twitter_reader.py +++ b/tests/readers/test_twitter_reader.py @@ -50,7 +50,7 @@ class TwitterReaderTest(TestCase): @mock.patch.object(Client, "__init__", lambda *args: None) @mock.patch.object(Client, "accounts", lambda *args: None) - def test_check_report_dates(self): + def test_validate_dates(self): temp_kwargs = self.kwargs.copy() params = {"start_date": datetime(2020, 1, 3), "end_date": datetime(2020, 1, 1)} temp_kwargs.update(params) @@ -59,7 +59,7 @@ def test_check_report_dates(self): @mock.patch.object(Client, "__init__", lambda *args: None) @mock.patch.object(Client, "accounts", lambda *args: None) - def test_check_analytics_report_segmentation_if_missing_platform(self): + def test_validate_analytics_segmentation_if_missing_platform(self): temp_kwargs = self.kwargs.copy() params = { "report_type": "ANALYTICS", @@ -72,7 +72,7 @@ def 
test_check_analytics_report_segmentation_if_missing_platform(self): @mock.patch.object(Client, "__init__", lambda *args: None) @mock.patch.object(Client, "accounts", lambda *args: None) - def test_check_analytics_report_segmentation_if_missing_country(self): + def test_validate_analytics_segmentation_if_missing_country(self): temp_kwargs = self.kwargs.copy() params = { "report_type": "ANALYTICS", @@ -85,7 +85,7 @@ def test_check_analytics_report_segmentation_if_missing_country(self): @mock.patch.object(Client, "__init__", lambda *args: None) @mock.patch.object(Client, "accounts", lambda *args: None) - def test_check_analytics_report_metric_groups_if_funding_instrument(self): + def test_validate_analytics_metric_groups_if_funding_instrument(self): temp_kwargs = self.kwargs.copy() params = { "report_type": "ANALYTICS", @@ -98,7 +98,7 @@ def test_check_analytics_report_metric_groups_if_funding_instrument(self): @mock.patch.object(Client, "__init__", lambda *args: None) @mock.patch.object(Client, "accounts", lambda *args: None) - def test_check_analytics_report_metric_groups_if_mobile_conversion(self): + def test_validate_analytics_metric_groups_if_mobile_conversion(self): temp_kwargs = self.kwargs.copy() params = { "report_type": "ANALYTICS", @@ -110,7 +110,7 @@ def test_check_analytics_report_metric_groups_if_mobile_conversion(self): @mock.patch.object(Client, "__init__", lambda *args: None) @mock.patch.object(Client, "accounts", lambda *args: None) - def test_check_reach_report_entities(self): + def test_validate_reach_entity(self): temp_kwargs = self.kwargs.copy() params = {"report_type": "REACH", "entity": "LINE_ITEM"} temp_kwargs.update(params) @@ -119,7 +119,7 @@ def test_check_reach_report_entities(self): @mock.patch.object(Client, "__init__", lambda *args: None) @mock.patch.object(Client, "accounts", lambda *args: None) - def test_check_entity_report_entity_attributes(self): + def test_validate_entity_attributes(self): temp_kwargs = self.kwargs.copy() params = 
{ "report_type": "ENTITY", From aed3fd0b9de89615ccaa052d8cb2f0cfcdf14180 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Fri, 12 Jun 2020 10:18:44 +0200 Subject: [PATCH 06/54] Minor fix --- nck/readers/twitter_reader.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py index cbc66a1c..aaf26eaa 100644 --- a/nck/readers/twitter_reader.py +++ b/nck/readers/twitter_reader.py @@ -467,11 +467,9 @@ def add_request_or_period_dates(self, record): """ def check_add_period_date_to_report(): - if self.report_type == "ANALYTICS" and self.granularity == "TOTAL": - return True - elif self.report_type == "REACH": - return True - return False + return ( + self.report_type == "ANALYTICS" and self.granularity == "TOTAL" + ) or self.report_type == "REACH" if self.add_request_date_to_report: record["request_date"] = datetime.today().strftime(REP_DATEFORMAT) From 25c2f265693871bc616aed346780b717f14c2f12 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Fri, 12 Jun 2020 20:10:17 +0200 Subject: [PATCH 07/54] Fix typo --- nck/readers/twitter_reader.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py index aaf26eaa..86e94d65 100644 --- a/nck/readers/twitter_reader.py +++ b/nck/readers/twitter_reader.py @@ -171,23 +171,23 @@ def __init__( end_date, add_request_date_to_report, ): - # Authentification params + # Authentication inputs self.client = Client( consumer_key, consumer_secret, access_token, access_token_secret ) self.account = self.client.accounts(account_id) - # General params + # General inputs self.report_type = report_type self.entity = entity self.start_date = start_date self.end_date = end_date + timedelta(days=1) self.add_request_date_to_report = add_request_date_to_report - # Report params: ENTITY + # Report inputs: ENTITY self.entity_attributes = list(entity_attribute) - # Report params: 
ANALYTICS + # Report inputs: ANALYTICS self.granularity = granularity self.metric_groups = list(metric_group) self.placement = placement @@ -195,10 +195,10 @@ def __init__( self.platform = platform self.country = country - # Validate input parameters - self.validate_params() + # Validate inputs + self.validate_inputs() - def validate_params(self): + def validate_inputs(self): """ Validate combination of input parameters (triggered in TwitterReader constructor). """ From 0224ce9d71b67cfdb32058da5bab215f4e3b46df Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Fri, 19 Jun 2020 19:36:18 +0200 Subject: [PATCH 08/54] Adding documentation for Google Analytics, DBM and DCM readers --- nck/readers/README.md | 328 +++++++++++++++++++++++++++--------------- 1 file changed, 209 insertions(+), 119 deletions(-) diff --git a/nck/readers/README.md b/nck/readers/README.md index 96fe0af5..aed1d8d0 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -45,6 +45,10 @@ Both Adobe Analytics Readers use the **JWT authentication framework**. ### Adobe Analytics Reader 1.4 +#### Source API + +[Analytics API v1.4](https://github.com/AdobeDocs/analytics-1.4-apis) + #### Quickstart Call example to Adobe Analytics Reader 1.4, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: @@ -53,6 +57,8 @@ Call example to Adobe Analytics Reader 1.4, getting the number of visits per day python nck/entrypoint.py read_adobe --adobe-client-id --adobe-client-secret --adobe-tech-account-id --adobe-org-id --adobe-private-key --adobe-global-company-id --adobe-report-suite-id --adobe-date-granularity day --adobe-report-element-id trackingcode --adobe-report-metric-id visits --adobe-start-date 2020-01-01 --adobe-end-date 2020-01-31 write_console ``` +Didn't work? See [troubleshooting](#troubleshooting) section. 
+ #### Parameters |CLI option|Documentation| @@ -68,8 +74,8 @@ python nck/entrypoint.py read_adobe --adobe-client-id --adobe-client |`--adobe-report-element-id`|ID of the element (i.e. dimension) to include in the report| |`--adobe-report-metric-id`|ID of the metric to include in the report| |`--adobe-date-granularity`|Granularity of the report. *Possible values: PREVIOUS_DAY, LAST_30_DAYS, LAST_7_DAYS, LAST_90_DAYS*| -|`--adobe-start-date`|Start date of the report (format: YYYY-MM-DD)| -|`--adobe-end-date`|End date of the report (format: YYYY-MM-DD)| +|`--adobe-start-date`|Start date of the period to request (format: YYYY-MM-DD)| +|`--adobe-end-date`|End date of the period to request (format: YYYY-MM-DD)| #### Addtional information @@ -79,6 +85,10 @@ python nck/entrypoint.py read_adobe --adobe-client-id --adobe-client ### Adobe Analytics Reader 2.0 +#### Source API + +[Analytics API v2.0](https://github.com/AdobeDocs/analytics-2.0-apis) + #### Quickstart Call example to Adobe Analytics Reader 2.0, getting the number of visits per day and tracking code for a specified Report Suite, between 2020-01-01 and 2020-01-31: @@ -87,6 +97,8 @@ Call example to Adobe Analytics Reader 2.0, getting the number of visits per day python nck/entrypoint.py read_adobe_2_0 --adobe-2-0-client-id --adobe-2-0-client-secret --adobe-2-0-tech-account-id --adobe-2-0-org-id --adobe-2-0-private-key --adobe-2-0-global-company-id --adobe-2-0-report-suite-id --adobe-2-0-dimension daterangeday --adobe-2-0-dimension campaign --adobe-2-0-start-date 2020-01-01 --adobe-2-0-end-date 2020-01-31 --adobe-2-0-metric visits write_console ``` +Didn't work? See [troubleshooting](#troubleshooting) section. 
+ #### Parameters |CLI option|Documentation| @@ -100,8 +112,8 @@ python nck/entrypoint.py read_adobe_2_0 --adobe-2-0-client-id --adob |`--adobe-2-0-report-suite-id`|ID of the requested Adobe Report Suite| |`--adobe-2-0-dimension`|Dimension to include in the report| |`--adobe-2-0-metric`|Metric to include in the report| -|`--adobe-2-0-start-date`|Start date of the report (format: YYYY-MM-DD)| -|`--adobe-2-0-end-date`|End date of the report (format: YYYY-MM-DD)| +|`--adobe-2-0-start-date`|Start date of the period to request (format: YYYY-MM-DD)| +|`--adobe-2-0-end-date`|End date of the period to request (format: YYYY-MM-DD)| #### Additional information @@ -116,6 +128,10 @@ python nck/entrypoint.py read_adobe_2_0 --adobe-2-0-client-id --adob ## Facebook Marketing Reader +#### Source API + +[Facebook Marketing API](https://developers.facebook.com/docs/marketing-api/reference/v7.0) + #### Quickstart The Facebook Marketing Reader handles calls to 2 endpoints of the Facebook Marketing API: **Facebook Ad Insights** (to retrieve performance data), and **Facebook Ad Management** (to retrieve configuration data). @@ -130,6 +146,8 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- python nck/entrypoint.py read_facebook --facebook-access-token --facebook-object-id --facebook-ad-insights False --facebook-level ad --facebook-field id --facebook-field creative[id] --facebook-add-date-to-report True --facebook-start-date 2020-01-01 --facebook-end-date 2019-01-01 write_console ``` +Didn't work? See [troubleshooting](#troubleshooting) section. + #### Parameters |CLI option|Documentation| @@ -137,13 +155,13 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- |`--facebook-app-id`|Facebook App ID. *Not mandatory if Facebook Access Token is provided.*| |`--facebook-app-secret`|Facebook App Secret. 
*Not mandatory if Facebook Access Token is provided.*| |`--facebook-access-token`|Facebook App Access Token.| -|`--facebook-object-type`|Nature of the root Facebook Object used to make the request. *Supported values: creative (available only for Ad Management requests), ad, adset, campaign, account (default).*| +|`--facebook-object-type`|Nature of the root Facebook Object used to make the request. *Possible values: creative (available only for Ad Management requests), ad, adset, campaign, account (default).*| |`--facebook-object-id`|ID of the root Facebook Object used to make the request.| -|`--facebook-level`|Granularity of the response. *Supported values: creative (available only for Ad Management requests), ad (default), adset, campaign or account.*| +|`--facebook-level`|Granularity of the response. *Possible values: creative (available only for Ad Management requests), ad (default), adset, campaign, account.*| |`--facebook-ad-insights`|*True* (default) if *Ad Insights* request, *False* if *Ad Management* request.| |`--facebook-field`|Fields to be retrieved.| -|`--facebook-start-date`|Start date of the requested time range. *This parameter is only relevant for Ad Insights Requests, and Ad Management requests at the Campaign, Adset and Ad levels.*| -|`--facebook-end-date`|End date of the requested time range. *This parameter is only relevant for Ad Insights Requests, and Ad Management requests at the Campaign, Adset and Ad levels.*| +|`--facebook-start-date`|Start date of the period to request (format: YYYY-MM-DD). *This parameter is only relevant for Ad Insights Requests, and Ad Management requests at the Campaign, Adset and Ad levels.*| +|`--facebook-end-date`|End date of the period to request (format: YYYY-MM-DD). *This parameter is only relevant for Ad Insights Requests, and Ad Management requests at the Campaign, Adset and Ad levels.*| |`--facebook-date-preset`|Relative time range. Ignored if *--facebook-start date* and *--facebook-end-date* are specified. 
*This parameter is only relevant for Ad Insights Requests, and Ad Management requests at the Campaign, Adset and Ad levels.*| |`--facebook-time-increment`|Cuts the results between smaller time slices within the specified time range. *This parameter is only relevant for Ad Insights Requests, and Ad Management requests at the Campaign, Adset and Ad levels.*| |`--facebook-add-date-to-report`|*True* if you wish to add the date of the request to each response record, *False* otherwise (default).| @@ -235,79 +253,97 @@ python nck/entrypoint.py read_facebook --facebook-access-token -- ### Authentication -You can authenticate to most of the readers of the google -suite following the same schema. You'll need to generate a **refresh token** to connect -via the oAuth flow. A full script to do this can be found here: - -[Refresh token generator](https://github.com/artefactory/Refresh-token-generator-for-google-oauth) +You can authenticate to most of the Readers of the Google Suite following the same schema. You'll need to generate a **refresh token** to connect via the OAuth flow. A full script to do this can be found in this [refresh token generator](https://github.com/artefactory/Refresh-token-generator-for-google-oauth). 
### Google Ads Reader +#### Source API + +[AdWords API](https://developers.google.com/adwords/api/docs/guides/start) + #### How to obtain credentials -Using the Google Ads API requires four things: +Using the AdWords API requires four things: - A developer token (Generated at a company level - one per company -, takes around 2 days to be approved by Google) which can be completely independant from the Google Ads Account you will be calling (though you need a Manager Google Ads Account to request a token for your company) - - OAuth2 credentials: and - - A refresh token, created with the email address able to access to all the Google Ads Account you will be calling +- The ID of the Google Ads Accounts you will be reading from (XXX-XXX-XXXX numbers, written right next to your Account Name) -- The ID of the GAds Accounts you will be reading from (XXX-XXX-XXXX numbers, written right next to your Account Name) - -See the [documentation here](https://developers.google.com/adwords/api/docs/guides/signup "Sign Up for Google Ads API") -to apply for access if your Company does not already have a developer token (granting you the right to use the API). - -See the [documentation here](https://developers.google.com/adwords/api/docs/guides/first-api-call "Make your first API call") -to set-up your OAuth2 credentials and refresh token specifically for your Google Ads Accounts. - -#### Which reports and metrics are available in the API - -The list of available reports for the API, and the associated metrics, can be [found here](https://developers.google.com/adwords/api/docs/appendix/reports#available-reports "Report Types") +See the [documentation here](https://developers.google.com/adwords/api/docs/guides/signup) to apply for access if your Company does not already have a developer token (granting you the right to use the API). 
-#### Simple API call example +See the [documentation here](https://developers.google.com/adwords/api/docs/guides/first-api-call) to set-up your OAuth2 credentials and refresh token specifically for your Google Ads Accounts. -- Call Example +#### Quickstart -The following command retrieves insights about the Ads of *my_first_campaign* and *my_second_campaign* in the Google Ads Account thanks to -your company , and your , and with the necessary permissions to access your Accounts. +The following command retrieves insights about the Ads of *my_first_campaign* and *my_second_campaign* in the Google Ads Account , thanks to your company , , and with the necessary permissions to access your Accounts. ``` -python nck/entrypoint.py read_googleads --googleads-developer-token --googleads-client-id --googleads-client-secret --googleads-refresh-token --googleads-client-customer-id --googleads-report-type AD_PERFORMANCE_REPORT --googleads-date-range-type LAST_7_DAYS --googleads-field CampaignName --googleads-field AdGroupName --googleads-field Headline --googleads-field Date --googleads-field Impressions --googleads-report-filter "{'field':'CampaignName','operator':'IN','values':['my_first_campaign','my_second_campaign']}" +python nck/entrypoint.py read_googleads --googleads-developer-token --googleads-client-id --googleads-client-secret --googleads-refresh-token --googleads-client-customer-id --googleads-report-type AD_PERFORMANCE_REPORT --googleads-date-range-type LAST_7_DAYS --googleads-field CampaignName --googleads-field AdGroupName --googleads-field Headline --googleads-field Date --googleads-field Impressions --googleads-report-filter "{'field':'CampaignName','operator':'IN','values':['my_first_campaign','my_second_campaign']}" ``` -*If it doesn't work, try to* `export PYTHONPATH="."` *in the nautilus-connector-kit folder (to be sure Python is reading correctly)* -*If you want the output to be printed in your console, add* `write_console` *at the end of your command (see 
writers for more details)* +Didn't work? See [troubleshooting](#troubleshooting) section. -- Parameters of the Google Ads Reader +#### Parameters |CLI option|Documentation| |--|--| |`--googleads-developer-token`|Company Developer token for Google Ads API| |`--googleads-client-id`|OAuth2 ID| -|`--googleads-client-secret`|OAuth2 Secret| +|`--googleads-client-secret`|OAuth2 secret| |`--googleads-refresh-token`|Refresh token for OAuth2| -|`--googleads-manager-id`|Manager_Account_ID (XXX-XXX-XXXX identifier) (optional)| +|`--googleads-manager-id`|(Optional) Manager_Account_ID (XXX-XXX-XXXX identifier)| |`--googleads-client-customer-id`|GAds_Account_ID (ignored if a manager account ID was given)| -|`--googleads-report-name`|Optional name for your output stream ("Custom Report" by default)| +|`--googleads-report-name`|(Optional) Name of your output stream ("Custom Report" by default)| |`--googleads-report-type`|Type of report to be called| -|`--googleads-date-range-type`|Type of date range to apply (if "CUSTOM_RANGE", a min and max date must be specified)| -|`--googleads-start-date`|Start date for "CUSTOM_RANGE" date range (optional)| -|`--googleads-end-date`|End date for "CUSTOM_RANGE" date range (optional)| -|`--googleads-field`|List of fields to request| +|`--googleads-date-range-type`|Type of date range to apply (if "CUSTOM_RANGE", a min and max date must be specified). 
*Possible values can be found [here](https://developers.google.com/adwords/api/docs/guides/reporting#date_ranges).*| +|`--googleads-start-date`|(Optional) Start date for "CUSTOM_RANGE" date range (format: YYYY-MM-DD)| +|`--googleads-end-date`|(Optional) End date for "CUSTOM_RANGE" date range (format: YYYY-MM-DD)| +|`--googleads-field`|Fields to include in the report| |`--googleads-report-filter`|Filter to apply on a chosen field (Dictionary as String "{'field':,'operator':,'values':}")| -|`--googleads-include-zero-impressions`|Boolean specifying whether or not rows with zero impressions should be included in report| -|`--googleads-filter-on-video-campaigns`|Boolean used to filter on Video Campaigns only (require CampaignId to be listed as a field)| -|`--googleads-include-client-customer-id`|Boolean used to add "AccountId" as a field in the output stream *| +|`--googleads-include-zero-impressions`|Boolean specifying whether or not rows with zero impressions should be included in the report| +|`--googleads-filter-on-video-campaigns`|Boolean used to filter the report on Video Campaigns only (require CampaignId to be listed as a field)| +|`--googleads-include-client-customer-id`|Boolean used to add "AccountId" as a field in the output stream. 
*AccountId is not available in the API, but is known since it's a requirement to call the API (= Client Customer ID)*| -\* *AccountId is not available in the API but is known since it's a requirement to call the API (= client customer ID)* - -See the documents below for a better understanding of the parameters: -- [Google Ads API Reporting Basics](https://developers.google.com/adwords/api/docs/guides/reporting#create_a_report_definition) -- [Possible Date Ranges](https://developers.google.com/adwords/api/docs/guides/reporting#date_ranges) +See documentation below for a better understanding of the parameters: +- [Reporting basics](https://developers.google.com/adwords/api/docs/guides/reporting#create_a_report_definition) +- [Available reports and associated fields](https://developers.google.com/adwords/api/docs/appendix/reports#available-reports) ### Google Analytics Reader -*Not documented yet.* +#### Source API + +[Analytics Reporting API](https://developers.google.com/analytics/devguides/reporting/core/v4) + +#### Quickstart + +The following command retrieves sessions, pageviews and bounces volumes by date from 2020-01-01 to 2020-01-03, for the Analytics View , thanks to your , and with the necessary permissions to access your accounts. + +``` +python nck/entrypoint.py read_ga --ga-client-id --ga-client-secret --ga-view-id --ga-refresh-token --ga-dimension ga:date --ga-metric ga:sessions --ga-metric ga:pageviews --ga-metric ga:bounces --ga-start-date 2020-01-01 --ga-end-date 2020-01-03 write_console +``` + +Didn't work? See [troubleshooting](#troubleshooting) section. + +#### Parameters + +|CLI option|Documentation| +|--|--| +|`--ga-client-id`|OAuth2 ID| +|`--ga-client-secret`|OAuth2 secret| +|`--ga-access-token`|(Optional) Access token for OAuth2| +|`--ga-refresh-token`|Refresh token for OAuth2| +|`--ga-view-id`|Analytics View ID from which to retrieve data. 
See documentation [here](https://support.google.com/analytics/answer/1009618) for a better understanding of Google Analytics hierarchy.| +|`--ga-account-id`|Analytics Account ID from which to retrieve data. See documentation [here](https://support.google.com/analytics/answer/1009618) for a better understanding of Google Analytics hierarchy.| +|`--ga-dimension`|Dimensions to include in the report (max 9). Possible values can be found [here](https://ga-dev-tools.appspot.com/dimensions-metrics-explorer/).| +|`--ga-metric`|Metrics to include in the report (min 1, max 10). Possible values can be found [here](https://ga-dev-tools.appspot.com/dimensions-metrics-explorer/).| +|`--ga-segment-id`|Segment ID of a built-in or custom segment (for example gaid::-3) on which report data should be segmented.| +|`--ga-start-date`|Start date of the period to request (format: YYYY-MM-DD)| +|`--ga-end-date`|End date of the period to request (format: YYYY-MM-DD)| +|`--ga-date-range`| of the period to request, specified as a unique argument (format: YYYY-MM-DD YYYY-MM-DD)| +|`--ga-day-range`|*Possible values: PREVIOUS_DAY, LAST_30_DAYS, LAST_7_DAYS, LAST_90_DAYS.*| +|`--ga-sampling-level`|Desired sample size. See documentation [here](https://support.google.com/analytics/answer/2637192) for a better understanding of Google Analytics sampling. *Possible values: SMALL, DEFAULT, LARGE (default).*| + +See documentation [here](https://developers.google.com/analytics/devguides/reporting/core/v4/basics) for a better understanding of the parameters. 
### Google Cloud Storage Reader @@ -315,109 +351,155 @@ See the documents below for a better understanding of the parameters: ### Google Campaign Manager Reader -*Not documented yet.* +#### Source API -### Google Display & Video Reader +[DCM/DFA Reporting and Trafficking API](https://developers.google.com/doubleclick-advertisers/v3.3) -*Not documented yet.* +#### Quickstart + +The following command retrieves impressions, clicks and cost volumes from 2020-01-01 to 2020-01-03, thanks to your , , and with the necessary permissions to access your accounts. + +``` +python nck/entrypoint.py read_dcm --dcm-client-id --dcm-client-secret --dcm-refresh-token --dcm-profile-id --dcm-dimension dfa:date --dcm-metric dfa:impressions --dcm-metric dfa:clicks --dcm-metric dfa:mediaCost --dcm-start-date 2020-01-01 --dcm-end-date 2020-01-03 write_console +``` + +Didn't work? See [troubleshooting](#troubleshooting) section. + +#### Parameters + +|CLI option|Documentation| +|--|--| +|`--dcm-client-id`|OAuth2 ID| +|`--dcm-client-secret`|OAuth2 secret| +|`--dcm-access-token`|(Optional) Access token for OAuth2| +|`--dcm-refresh-token`|Refresh token for OAuth2| +|`--dcm-profile-id`|ID of the DFA user profile that has been granted permissions to the CM account for which you want to retrieve data. You should have 1 DFA user profile per CM account that you can access. The associated ID can be found directly on your Campaign Manager UI (when accessing your list of CM accounts, on the top right hand corner).| +|`--dcm-report-name`|Name of the report, that will appear in CM UI.| +|`--dcm-report-type`|Type of the report. *Possible values: CROSS_DIMENSION_REACH, FLOODLIGHT, PATH_TO_CONVERSION, REACH, STANDARD.*| +|`--dcm-dimension`|Dimensions to include in the report. *Possible values can be found [here](https://developers.google.com/doubleclick-advertisers/v3.3/dimensions).*| +|`--dcm-metric`|Metrics to include in the report. 
*Possible values can be found [here](https://developers.google.com/doubleclick-advertisers/v3.3/dimensions).*| +|`--dcm-filter`| association, used to narrow the scope of the report. For instance "dfa:advertiserId XXXXX" will narrow report scope to the performance of Advertiser ID XXXXX. *Possible filter types can be found [here](https://developers.google.com/doubleclick-advertisers/v3.3/dimensions).*| +|`--dcm-start-date`|Start date of the period to request (format: YYYY-MM-DD)| +|`--dcm-end-date`|End date of the period to request (format: YYYY-MM-DD)| + +### Google Display & Video 360 Reader + +#### Source API + +[Doubleclick Bid Manager API](https://developers.google.com/bid-manager/v1) + +#### Quickstart + +The following command retrieves impressions, clicks and cost volumes filtered on a specific from 2020-01-01 to 2020-01-03, thanks to your , and with the necessary permissions to access your accounts. + +``` +python nck/entrypoint.py read_dbm --dbm-client-id --dbm-client-secret --dbm-refresh-token --dbm-filter FILTER_ADVERTISER --dbm-query-dimension FILTER_DATE --dbm-query-metric METRIC_IMPRESSIONS --dbm-query-metric METRIC_CLICKS --dbm-query-metric METRIC_MEDIA_COST_ADVERTISER --dbm-query-param-type TYPE_GENERAL --dbm-request-type custom_query_report --dbm-start-date 2020-01-01 --dbm-end-date 2020-01-03 write_console +``` + +Didn't work? See [troubleshooting](#troubleshooting) section. + +#### Parameters + +|CLI option|Documentation| +|--|--| +|`--dbm-client-id`|OAuth2 ID| +|`--dbm-client-secret`|OAuth2 secret| +|`--dbm-access-token`|(Optional) Access token for OAuth2| +|`--dbm-refresh-token`|Refresh token for OAuth2| +|`--dbm-query-request-type`|Doubleclick Bid Manager API request type. 
*Possible values: existing_query, custom_query, existing_query_report, custom_query_report, lineitems_objects, sdf_objects and list_reports.*| +|`--dbm-query-id`|Query ID.| +|`--dbm-query-title`|Query title, used to name the reports generated from this query in DV360 UI.| +|`--dbm-query-frequency`|How often the query is run. *Possible values can be found [here](https://developers.google.com/bid-manager/v1/queries#schedule.frequency). Default: ONE_TIME.*| +|`--dbm-filter`| association, used to narrow the scope of the report. For instance "FILTER_ADVERTISER XXXXX" will narrow report scope to the performance of Advertiser ID XXXXX. *Possible filter types can be found [here](https://developers.google.com/bid-manager/v1/filters-metrics#filters).*| +|`--dbm-query-dimension`|Dimensions to include in the report. *Possible values can be found [here](https://developers.google.com/bid-manager/v1/filters-metrics#filters).*| +|`--dbm-query-metric`|Metrics to include in the report. *Possible values can be found [here](https://developers.google.com/bid-manager/v1/filters-metrics#metrics).*| +|`--dbm-query-param-type`|Report type. *Possible values can be found [here](https://developers.google.com/bid-manager/v1/queries#params.type). 
Default: TYPE_TRUEVIEW.*| +|`--dbm-start-date`|Start date of the period to request (format: YYYY-MM-DD)| +|`--dbm-end-date`|End date of the period to request (format: YYYY-MM-DD)| ### Google Search Console Reader -#### Which reports and metrics are available in the API +#### Source API -The list of available dimensions and metrics in the API can be [found here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query "Search Analytics") +[Search Console API (Search Analytics endpoint)](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/) -#### Simple API call example +#### How to obtain credentials -- Call Example +Using the Google Search Console API requires three main parameters: +- OAuth2 credentials: and +- A refresh token, created with the email address able to access to your Google Search Console Account. +- The URLs whose performance you want to see -The following command retrieves insights about the URL thanks to your company and -with the necessary permissions to access your Accounts. +#### Quickstart + +The following command retrieves insights about the URL from 2020-01-01 to 2020-01-03, thanks to your and with the necessary permissions to access your accounts. ``` -python nck/entrypoint.py read_search_console --search-console-client-id --search-console-refresh-token --search-console-site-url --search-console-dimensions country --search-console-dimensions device --search-console-start-date 2020-01-01 --search-console-end-date 2020-01-01 write_console +python nck/entrypoint.py read_search_console --search-console-client-id --search-console-refresh-token --search-console-site-url --search-console-dimensions country --search-console-dimensions device --search-console-start-date 2020-01-01 --search-console-end-date 2020-01-03 write_console ``` -- Parameters of the Google Search Console Reader +Didn't work? See [troubleshooting](#troubleshooting) section. 
+ +#### Parameters |CLI option|Documentation| |--|--| |`--search-console-client-id`|OAuth2 ID| -|`--search-console-client-secret`|OAuth2 Secret| -|`--search-console-access-token`|Access token| +|`--search-console-client-secret`|OAuth2 secret| +|`--search-console-access-token`|Access token for OAuth2| |`--search-console-refresh-token`|Refresh token for OAuth2| -|`--search-console-dimensions`|[Dimensions to request](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query#dimensionFilterGroups.filters.dimension)| +|`--search-console-dimensions`|Dimensions of the report. *Possible values can be found [here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query#dimensionFilterGroups.filters.dimension).*| |`--search-console-site-url`|Site URL whose performance you want to request| -|`--search-console-start-date`|Start Date for the request| -|`--search-console-end-date`|End Date for the request| -|`--search-console-date-column`|If true, include date column in the report| +|`--search-console-start-date`|Start date of the period to request (format: YYYY-MM-DD)| +|`--search-console-end-date`|End date of the period to request (format: YYYY-MM-DD)| +|`--search-console-date-column`|If set to *True*, a date column will be included in the report| |`--search-console-row-limit`|Row number by report page| -See the documents below for a better understanding of the parameters: -- [Google Search Console API](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query) +See documentation [here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/searchanalytics/query) for a better understanding of the parameters. 
### Google Search Ads 360 Reader +#### Source API + +[Search Ads 360 API](https://developers.google.com/search-ads/v2/reference) + #### How to obtain credentials Using the Search Ads API requires two things: - - OAuth2 credentials: and - - A refresh token, created with the email address able to access to all the Search Ads 360 Account you will be calling See the [documentation here](https://developers.google.com/search-ads/v2/authorizing "SA360 Authentication") to set-up your OAuth2 credentials and refresh token specifically for Search Ads 360 Reporting. -#### Which reports and metrics are available in the API - -The list of available reports for the API, and the associated metrics, can be [found here](https://developers.google.com/search-ads/v2/report-types "Report Types") - -#### Simple API call example - -- Call Example +#### Quickstart -The following command retrieves insights about the Ads in the Search Ads 360 Account from the agency thanks to -your , and with the necessary permissions to access your Accounts. +The following command retrieves insights about the Ads in the Search Ads 360 Account from the agency thanks to your , and with the necessary permissions to access your accounts. ``` python nck/entrypoint.py read_sa360 --sa360-client-id --sa360-client-secret --sa360-refresh-token --sa360-agency-id --sa360-advertiser-id --sa360-report-type keyword --sa360-column date --sa360-column impr --sa360-column clicks --sa360-start-date 2020-01-01 --sa360-end-date 2020-01-01 ``` -*If it doesn't work, try to* `export PYTHONPATH="."` *in the nautilus-connector-kit folder (to be sure Python is reading correctly)* -*If you want the output to be printed in your console, add* `write_console` *at the end of your command (see writers for more details)* +Didn't work? See [troubleshooting](#troubleshooting) section. 
-- Parameters of the SA360 Reader +#### Parameters |CLI option|Documentation| |--|--| -|`--sa360-access-token`|(Optional) Access token| |`--sa360-client-id`|OAuth2 ID| -|`--sa360-client-secret`|OAuth2 ID Secret| +|`--sa360-client-secret`|OAuth2 secret| +|`--sa360-access-token`|(Optional) Access token| |`--sa360-refresh-token`|Refresh token| |`--sa360-agency-id`|Agency ID to request in SA360| |`--sa360-advertiser-id`|(Optional) Advertiser ids to request. If not provided, every advertiser of the agency will be requested| |`--sa360-report-name`|(Optional) Name of the output report| -|`--sa360-report-type` Type of the report to request (list [here](https://developers.google.com/search-ads/v2/report-types))| -|`--sa360-column`|Dimensions and metrics to request in the report| -|`--sa360-saved-column`|(Optional) Saved columns to report (see [documentation](https://developers.google.com/search-ads/v2/how-tos/reporting/saved-columns))| -|`--sa360-start-date`|Start date of the period to request| -|`--sa360-end-date`|End date of the period to request| - -See the documents below for a better understanding of the parameters: -- [SA360 Reporting](https://developers.google.com/search-ads/v2/how-tos/reporting) - -#### How to obtain credentials +|`--sa360-report-type`| Type of the report to request. *Possible values can be found [here](https://developers.google.com/search-ads/v2/report-types).*| +|`--sa360-column`|Dimensions and metrics to include in the report| +|`--sa360-saved-column`|(Optional) Saved columns to report. 
*Documentation can be found [here](https://developers.google.com/search-ads/v2/how-tos/reporting/saved-columns).*| +|`--sa360-start-date`|Start date of the period to request (format: YYYY-MM-DD)| +|`--sa360-end-date`|End date of the period to request (format: YYYY-MM-DD)| -Using the Google Search Console API requires three main parameters: -- OAuth2 credentials: and - -- A refresh token, created with the email address able to access to your Google Search Console Account. - -- The URLs whose performance you want to see. - -See the [documentation here](https://developers.google.com/webmaster-tools/search-console-api-original/v3/prereqs "Search Console API") -to see an Overview of the Search Console API. +See documentation [here](https://developers.google.com/search-ads/v2/how-tos/reporting) for a better understanding of the parameters. ### Google Sheets Reader @@ -441,6 +523,10 @@ to see an Overview of the Search Console API. ## Twitter Ads Reader +#### Source API + +[Twitter Ads API](https://developer.twitter.com/en/docs/ads/general/overview) + #### How to obtain credentials * **Apply for a developer account** through [this link](https://developer.twitter.com/en/apply). @@ -470,6 +556,8 @@ python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter-consumer-secret --twitter-access-token --twitter-access-token-secret --twitter-account-id --twitter-report-type REACH --twitter-entity CAMPAIGN --twitter-entity-attribute id --twitter-entity-attribute name --twitter-entity-attribute total_budget_amount_local_micro --twitter-entity-attribute currency write_console ``` +Didn't work? See [troubleshooting](#troubleshooting) section. + #### Parameters |CLI option|Documentation| @@ -488,16 +576,17 @@ python nck/entrypoint.py read_twitter --twitter-consumer-key --twitter |`--twitter-segmentation-type`|Specific to ANALYTICS reports. Specifies how the retrieved data should be segmented. 
*Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* | |`--twitter-platform`|Specific to ANALYTICS reports. Required if segmentation_type is set to DEVICES or PLATFORM_VERSION. *Possible values can be identified through the targeting_criteria/locations*| |`--twitter-country`|Specific to ANALYTICS reports. Required if segmentation_type is set to CITIES, POSTAL_CODES, or REGION. *Possible values can be identified through the GET targeting_criteria/platforms endpoint.*| -|`--twitter-start-date`|Specifies report start date (format: YYYY-MM-DD).| -|`--twitter-end-date`|Specifies report end date (format: YYYY-MM-DD).| +|`--twitter-start-date`|Start date of the period to request (format: YYYY-MM-DD).| +|`--twitter-end-date`|End date of the period to request (format: YYYY-MM-DD).| |`--twitter-add-request-date-to-report`|If set to *True* (default: *False*), the date on which the request is made will appear on each report record.| If you need any further information, the documentation of Twitter Ads API can be found [here](https://developer.twitter.com/en/docs/ads/general/overview). ## Yandex Readers -For now, there is only one Yandex API you can access through Nautilus connectors: [Direct API](https://tech.yandex.com/direct/). -This API allows you to collect display metrics. +#### Source API + +[Yandex Direct API](https://tech.yandex.com/direct/) #### How to obtain credentials @@ -517,9 +606,9 @@ Here is the process: #### Quickstart -If you want to quickly get to the point, here is a simple command that get the daily budget for all your campaigns. +The following command retrieves the daily budget of all your campaigns, since your account creation. -```bash +``` python nck/entrypoint.py read_yandex_campaigns --yandex-token --yandex-field-name Id --yandex-field-name Name --yandex-field-name DailyBudget write_console ``` @@ -531,8 +620,8 @@ Didn't work? See [troubleshooting](#troubleshooting) section. 
|--| -| |`--yandex-token`|Bear token that allows you to authenticate to the API| |`--yandex-campaign-id`|(Optional) Selects campaigns with the specified IDs.| -|`--yandex-campaign-state`|(Optional) Selects campaigns with the specified [states](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| -|`--yandex-campaign-status`|(Optional) Selects campaigns with the specified [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| +|`--yandex-campaign-state`|(Optional) Selects campaigns with the specified states. *Possible values can be found [here](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).*| +|`--yandex-campaign-status`|(Optional) Selects campaigns with the specified statuses. *Possible values can be found [here](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).*| |`--yandex-campaign-payment-status`|(Optional) Selects campaigns with the specified payment [statuses](https://tech.yandex.com/direct/doc/dg/objects/campaign-docpage/#status).| |`--yandex-field-name`|Parameters to get that are common to all types of campaigns.| @@ -542,9 +631,9 @@ Didn't work? See [troubleshooting](#troubleshooting) section. #### Quickstart -The command below gives you a performance report for all your campaigns and since the beginning. +The following command retrieves a performance report for all your campaigns, since your account creation. 
-```bash +``` python nck/entrypoint.py read_yandex_statistics --yandex-token --yandex-report-type AD_PERFORMANCE_REPORT --yandex-field-name AdFormat --yandex-field-name AdId --yandex-field-name Impressions --yandex-include-vat True --yandex-report-language en --yandex-field-name AdGroupName --yandex-field-name AdGroupId --yandex-field-name AdNetworkType --yandex-field-name CampaignId --yandex-field-name CampaignName --yandex-field-name CampaignType --yandex-field-name Date --yandex-field-name Device --yandex-field-name Clicks --yandex-field-name Conversions --yandex-field-name Cost --yandex-date-range ALL_TIME write_console ``` @@ -557,19 +646,20 @@ Detailed version [here](https://tech.yandex.com/direct/doc/reports/spec-docpage/ |CLI option|Documentation| |--|--| |`--yandex-token`|Bear token that allows you to authenticate to the API| -|`--yandex-report-language`|(Optional) Language of the report. See all options [here](https://tech.yandex.com/direct/doc/dg/concepts/headers-docpage/#headers__accept-language).| +|`--yandex-report-language`|(Optional) Language of the report. *Possible values can be found [here](https://tech.yandex.com/direct/doc/dg/concepts/headers-docpage/#headers__accept-language).*| |`--yandex-filter`|(Optional) Filters on a particular field.| |`--yandex-max-rows`|(Optional) The maximum number of rows in the report.| -|`--yandex-field-name`|Information you want to collect. Complete list [here](https://tech.yandex.com/direct/doc/reports/fields-list-docpage/).| +|`--yandex-field-name`|Information you want to collect. *Possible values can be found [here](https://tech.yandex.com/direct/doc/reports/fields-list-docpage/).*| |`--yandex-report-type`|Type of report. 
Linked to the fields you want to select.| -|`--yandex-date-range`|List [here](https://tech.yandex.com/direct/doc/reports/period-docpage/).| +|`--yandex-date-range`|*Possible values can be found [here](https://tech.yandex.com/direct/doc/reports/period-docpage/).*| |`--yandex-include-vat`|Adds VAT to your expenses if set to `True`| |`--yandex-date-start`|(Optional) Selects data on a specific period of time. Combined with `--yandex-date-stop` and `--yandex-date-range` set to `CUSTOM_DATE`.| |`--yandex-date-stop`|(Optional) Selects data on a specific period of time. Combined with `--yandex-date-start` and `--yandex-date-range` set to `CUSTOM_DATE`.| ## Troubleshooting -You encountered an issue and you don't know what's going on. You may find an answer in the troubleshooting guide below. +You encountered an issue when running a Reader command and you don't know what's going on? +You may find an answer in the troubleshooting guide below. 1. **Have you installed NCK dependencies?** In order to run NCK, you need to install all dependencies. First create a [virtual environment](https://docs.python.org/3/library/venv.html) and then run `pip install -r requirements.txt`. 2. **Have you set `PYTHONPATH` environment variable to the root of NCK folder?** From 91b88b5a9dfe880ab2901cf2837b9d4ab7e45a96 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Mon, 22 Jun 2020 20:01:47 +0200 Subject: [PATCH 09/54] Add details to Google Analytics reader --- nck/readers/README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/nck/readers/README.md b/nck/readers/README.md index aed1d8d0..7f87ae97 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -111,7 +111,7 @@ Didn't work? See [troubleshooting](#troubleshooting) section. 
|`--adobe-2-0-global-company-id`|Global Company ID (to be requested to [Discovery API](https://www.adobe.io/apis/experiencecloud/analytics/docs.html#!AdobeDocs/analytics-2.0-apis/master/discovery.md))| |`--adobe-2-0-report-suite-id`|ID of the requested Adobe Report Suite| |`--adobe-2-0-dimension`|Dimension to include in the report| -|`--adobe-2-0-metric`|Metric to include in the report| +|`--adobe-2-0-metric`|Metric to include in the report| |`--adobe-2-0-start-date`|Start date of the period to request (format: YYYY-MM-DD)| |`--adobe-2-0-end-date`|Start date of the period to request (format: YYYY-MM-DD)| @@ -340,8 +340,9 @@ Didn't work? See [troubleshooting](#troubleshooting) section. |`--ga-start-date`|Start date of the period to request (format: YYYY-MM-DD)| |`--ga-end-date`|End date of the period to request (format: YYYY-MM-DD)| |`--ga-date-range`| of the period to request, specified as a unique argument (format: YYYY-MM-DD YYYY-MM-DD)| -|`--ga-day-range`|*Possible values: PREVIOUS_DAY, LAST_30_DAYS, LAST_7_DAYS, LAST_90_DAYS.*| +|`--ga-day-range`|Relative time range. *Possible values: PREVIOUS_DAY, LAST_30_DAYS, LAST_7_DAYS, LAST_90_DAYS.*| |`--ga-sampling-level`|Desired sample size. See documentation [here](https://support.google.com/analytics/answer/2637192) for a better understanding of Google Analytics sampling. *Possible values: SMALL, DEFAULT, LARGE (default).*| +|`--ga-add-view`|If set to *True* (default: False)*, adds a "ga:viewId" field to the output stream.| See documentation [here](https://developers.google.com/analytics/devguides/reporting/core/v4/basics) for a better understanding of the parameters. 
From 2670caa2f542fe2fbbce06560b732bb040578ccb Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Tue, 23 Jun 2020 18:01:33 +0200 Subject: [PATCH 10/54] [WIP] twitter url --- nck/helpers/twitter_helper.py | 2 +- nck/readers/twitter_reader.py | 33 +++++++++++++++++++++++++++++++++ 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/nck/helpers/twitter_helper.py b/nck/helpers/twitter_helper.py index 25cc5e57..35494949 100644 --- a/nck/helpers/twitter_helper.py +++ b/nck/helpers/twitter_helper.py @@ -22,7 +22,7 @@ def __init__(self, message): logging.error(message) -REPORT_TYPES = ["ANALYTICS", "REACH", "ENTITY"] +REPORT_TYPES = ["ANALYTICS", "REACH", "ENTITY", "CREATIVE"] ENTITY_OBJECTS = { "FUNDING_INSTRUMENT": FundingInstrument, diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py index 86e94d65..0b46bd0b 100644 --- a/nck/readers/twitter_reader.py +++ b/nck/readers/twitter_reader.py @@ -38,6 +38,9 @@ from twitter_ads.http import Request from twitter_ads.cursor import Cursor +# from twitter_ads.creative import TweetPreview +from twitter_ads.creative import CardsFetch + API_DATEFORMAT = "%Y-%m-%dT%H:%M:%SZ" REP_DATEFORMAT = "%Y-%m-%d" MAX_WAITING_SEC = 3600 @@ -442,6 +445,33 @@ def get_entity_report(self): for entity_obj in ACCOUNT_CHILD_OBJECTS[self.entity] ] + def get_creative_report(self): + + # Step 1 - Get tweet_ids + tweet_ids = [ + promoted_tweet.tweet_id for promoted_tweet in self.account.promoted_tweets() + ] + + for chunk_tweet_ids in split_list(tweet_ids, 200): + + # Step 2 - Get card_uri + # https://developer.twitter.com/en/docs/ads/creatives/api-reference/tweets + resource = f"/{API_VERSION}/accounts/{self.account.id}/tweets" + params = {"tweet_type": "PUBLISHED"} + request = Request(self.client, "get", resource, params=params) + + # Step 3 - Get website_dest_url + for tweet in Cursor(None, request): + if "card_uri" in tweet: + record = { + "tweet_id": tweet["tweet_id"], + "card_uri": tweet["card_uri"], + } + 
record["website_dest_url"] = CardsFetch.load( + self.account, card_uris=[tweet["card_uri"]] + ).first + yield record + def get_reach_report(self): """ Get 'REACH' report through the 'Reach and Average Frequency' endpoint of Twitter Ads API. @@ -503,6 +533,9 @@ def read(self): elif self.report_type == "ENTITY": data = self.get_entity_report() + elif self.report_type == "CREATIVE": + data = self.get_creative_report() + def result_generator(): for record in data: yield self.add_request_or_period_dates(record) From 65f4afc8ed4dbda2b31e76a300562ca1edb19107 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Tue, 7 Jul 2020 19:13:39 +0200 Subject: [PATCH 11/54] Adding the 'Card' entity as an option of 'ENTITY reports --- nck/helpers/twitter_helper.py | 11 ++- nck/readers/README.md | 12 +-- nck/readers/twitter_reader.py | 143 ++++++++++++++++----------- tests/readers/test_twitter_reader.py | 9 ++ 4 files changed, 110 insertions(+), 65 deletions(-) diff --git a/nck/helpers/twitter_helper.py b/nck/helpers/twitter_helper.py index 35494949..d32cb3e7 100644 --- a/nck/helpers/twitter_helper.py +++ b/nck/helpers/twitter_helper.py @@ -13,7 +13,7 @@ import logging from twitter_ads.campaign import FundingInstrument, Campaign, LineItem -from twitter_ads.creative import MediaCreative, PromotedTweet +from twitter_ads.creative import MediaCreative, PromotedTweet, CardsFetch class JobTimeOutError(Exception): @@ -22,7 +22,7 @@ def __init__(self, message): logging.error(message) -REPORT_TYPES = ["ANALYTICS", "REACH", "ENTITY", "CREATIVE"] +REPORT_TYPES = ["ANALYTICS", "REACH", "ENTITY"] ENTITY_OBJECTS = { "FUNDING_INSTRUMENT": FundingInstrument, @@ -33,8 +33,11 @@ def __init__(self, message): } ENTITY_ATTRIBUTES = { - entity: list(ENTITY_OBJECTS[entity].__dict__["PROPERTIES"].keys()) - for entity in ENTITY_OBJECTS + **{ + entity: list(ENTITY_OBJECTS[entity].__dict__["PROPERTIES"].keys()) + for entity in ENTITY_OBJECTS + }, + "CARD": list(CardsFetch.__dict__["PROPERTIES"].keys()), } 
GRANULARITIES = ["DAY", "TOTAL"] diff --git a/nck/readers/README.md b/nck/readers/README.md index 7f87ae97..773d038f 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -533,14 +533,14 @@ See documentation [here](https://developers.google.com/search-ads/v2/how-tos/rep * **Apply for a developer account** through [this link](https://developer.twitter.com/en/apply). * **Create a Twitter app** on the developer portal: it will generate your authentication credentials. * **Apply for Twitter Ads API access** by filling out [this form](https://developer.twitter.com/en/docs/ads/general/overview/adsapi-application). Receiving Twitter approval may take up to 7 business days. -* **Get a Campaign Analyst access to the Twitter Ads account** you wish to retrieve data for, on the @handle that you used to create your Twitter App. +* **Get access to the Twitter Ads account** you wish to retrieve data for, on the @handle that you used to create your Twitter App. Be careful, access levels matter: with an *Ad Manager* access, you will be able to request all report types; with a *Campaign Analyst* access, you will be able to request all report types, except ENTITY reports on Card entities. #### Quickstart -The Twitter Ads Reader can collect **3 types of reports**, making calls to 3 endpoints of the Twitter Ads API: +The Twitter Ads Reader can collect **3 types of reports**, making calls to 4 endpoints of the Twitter Ads API: * **ANALYTICS reports**, making calls to the [Asynchronous Analytics endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous). These reports return performance data for a wide range of metrics, that **can be aggregated over time**. Output data **can be splitted by day** when requested over a larger time period. * **REACH reports**, making calls to the [Reach and Average Frequency endpoint](https://developer.twitter.com/en/docs/ads/analytics/api-reference/reach). 
These reports return performance data with a focus on reach and frequency metrics, that **cannot be aggregated over time** (*e.g. the reach of day A and B is not equal to the reach of day A + the reach of day B, as it counts unique individuals*). Output data **cannot be splitted by day** when requested over a larger time period. These reports are available **only for the Funding Instrument and Campaign entities**. -* **ENTITY reports**, making calls to [Campaign Management endpoints](https://developer.twitter.com/en/docs/ads/campaign-management/api-reference). These reports return details on entity configuration since the creation of the Twitter Ads account. +* **ENTITY reports**, making calls to [Campaign Management endpoints](https://developer.twitter.com/en/docs/ads/campaign-management/api-reference) if the selected entity is Funding Instrument, Campaign, Line Item, Media Creative or Promoted Tweet, and to the [Creative endpoint](https://developer.twitter.com/en/docs/ads/creatives/api-reference/) if the selected entity is Card. These reports return details on entity configuration since the creation of the Twitter Ads account. *Call example for ANALYTICS reports*: this call will collect engagement metrics for Line Item entities, splitting the results by day, from 2020-01-01 to 2020-01-03: ``` @@ -569,8 +569,8 @@ Didn't work? See [troubleshooting](#troubleshooting) section. |`--twitter-access-token-secret`|Access token secret, available in the 'Keys and tokens' section of your Twitter Developer App.| |`--twitter-account-id`|Specifies the Twitter Account ID for which the data should be returned.| |`--twitter-report-type`|Specifies the type of report to collect. *Possible values: ANALYTICS, REACH, ENTITY.*| -|`--twitter-entity`|Specifies the entity type to retrieve data for. *Possible values: FUNDING_INSTRUMENT, CAMPAIGN, LINE_ITEM, MEDIA_CREATIVE, PROMOTED_TWEET.*| -|`--twitter-entity-attribute`|Specific to ENTITY reports. 
Specifies the entity attribute (configuration detail) that should be returned.|
+|`--twitter-entity`|Specifies the entity type to retrieve data for. *Possible values: FUNDING_INSTRUMENT, CAMPAIGN, LINE_ITEM, MEDIA_CREATIVE, PROMOTED_TWEET, CARD.*|
+|`--twitter-entity-attribute`|Specific to ENTITY reports. Specifies the entity attribute (configuration detail) that should be returned. *To get possible values, print the ENTITY_ATTRIBUTES variable in nck/helpers/twitter_helper.py*|
|`--twitter-granularity`|Specific to ANALYTICS reports. Specifies how granular the retrieved data should be. *Possible values: TOTAL (default), DAY.*|
|`--twitter-metric-group`|Specific to ANALYTICS reports. Specifies the list of metrics (as a group) that should be returned. *Possible values can be found [here](https://developer.twitter.com/en/docs/ads/analytics/overview/metrics-and-segmentation).* |
|`--twitter-placement`|Specific to ANALYTICS reports. Scopes the retrieved data to a particular placement. *Possible values: ALL_ON_TWITTER (default), PUBLISHER_NETWORK.*|
@@ -581,7 +581,7 @@ Didn't work? See [troubleshooting](#troubleshooting) section.
|`--twitter-end-date`|End date of the period to request (format: YYYY-MM-DD).|
|`--twitter-add-request-date-to-report`|If set to *True* (default: *False*), the date on which the request is made will appear on each report record.|

-If you need any further information, the documentation of Twitter Ads API can be found [here](https://developer.twitter.com/en/docs/ads/general/overview).
+If you need any further information, the documentation of Twitter Ads API can be found [here](https://developer.twitter.com/en/docs/ads/general/overview). To get a better understanding of **Twitter Ads Hierarchy and Terminology**, we advise you to have a look at [this page](https://developer.twitter.com/en/docs/tutorials/ads-api-hierarchy-terminology). 
## Yandex Readers diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py index 0b46bd0b..0112f62c 100644 --- a/nck/readers/twitter_reader.py +++ b/nck/readers/twitter_reader.py @@ -86,13 +86,14 @@ @click.option( "--twitter-entity", required=True, - type=click.Choice(list(ENTITY_OBJECTS.keys())), + type=click.Choice(list(ENTITY_ATTRIBUTES.keys())), help="Specifies the entity type to retrieve data for.", ) @click.option( "--twitter-entity-attribute", multiple=True, - help="Specific to 'ENTITY' reports. Specifies the entity attribute (a.k.a. dimension) that should be returned.", + help="Specific to 'ENTITY' reports. " + "Specifies the entity attribute (a.k.a. dimension) that should be returned.", ) @click.option( "--twitter-granularity", @@ -209,6 +210,7 @@ def validate_inputs(self): self.validate_dates() self.validate_analytics_segmentation() self.validate_analytics_metric_groups() + self.validate_analytics_entity() self.validate_reach_entity() self.validate_entity_attributes() @@ -256,13 +258,22 @@ def validate_analytics_metric_groups(self): "'MOBILE_CONVERSION' data should be requested separately." ) + def validate_analytics_entity(self): + + if self.report_type == "ANALYTICS": + + if self.entity == "CARD": + raise ClickException( + f"'ANALYTICS' reports only accept following entities: {list(ENTITY_OBJECTS.keys())}." + ) + def validate_reach_entity(self): if self.report_type == "REACH": if self.entity not in ["CAMPAIGN", "FUNDING_INSTRUMENT"]: raise ClickException( - "'REACH' reports only accept the 'CAMPAIGN' and 'FUNDING_INSTRUMENT' entities." + "'REACH' reports only accept the following entities: CAMPAIGN, FUNDING_INSTRUMENT." 
) def validate_entity_attributes(self): @@ -279,14 +290,33 @@ def validate_entity_attributes(self): f"Available attributes for '{self.entity}' are: {ENTITY_ATTRIBUTES[self.entity]}" ) - def get_daily_period_items(self): + def get_analytics_report(self, job_ids): """ - Returns a list of datetime instances representing each date contained - in the requested period. Useful when granularity is set to 'DAY'. + Get 'ANALYTICS' report through the 'Asynchronous Analytics' endpoint of Twitter Ads API. + Documentation: https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous """ - delta = self.end_date - self.start_date - return [self.start_date + timedelta(days=i) for i in range(delta.days)] + all_responses = [] + + for job_id in job_ids: + + logging.info(f"Processing job_id: {job_id}") + + job_result = self.get_job_result(job_id) + waiting_sec = 2 + + while job_result.status == "PROCESSING": + logging.info(f"Waiting {waiting_sec} seconds for job to be completed") + sleep(waiting_sec) + if waiting_sec > MAX_WAITING_SEC: + raise JobTimeOutError("Waited too long for job to be completed") + waiting_sec *= 2 + job_result = self.get_job_result(job_id) + + raw_analytics_response = self.get_raw_analytics_response(job_result) + all_responses.append(self.parse(raw_analytics_response)) + + return chain(*all_responses) def get_active_entity_ids(self): """ @@ -385,6 +415,15 @@ def add_daily_timestamps(self, entity_records): ] return entity_records + def get_daily_period_items(self): + """ + Returns a list of datetime instances representing each date contained + in the requested period. Useful when granularity is set to 'DAY'. + """ + + delta = self.end_date - self.start_date + return [self.start_date + timedelta(days=i) for i in range(delta.days)] + def add_segment(self, entity_records, entity_data): """ Add segment to a list of records, if a segmentation_type is requested. 
@@ -398,37 +437,10 @@ def add_segment(self, entity_records, entity_data): ] return entity_records - def get_analytics_report(self, job_ids): - """ - Get 'ANALYTICS' report through the 'Asynchronous Analytics' endpoint of Twitter Ads API. - Documentation: https://developer.twitter.com/en/docs/ads/analytics/api-reference/asynchronous - """ - - all_responses = [] - - for job_id in job_ids: - - logging.info(f"Processing job_id: {job_id}") - - job_result = self.get_job_result(job_id) - waiting_sec = 2 - - while job_result.status == "PROCESSING": - logging.info(f"Waiting {waiting_sec} seconds for job to be completed") - sleep(waiting_sec) - if waiting_sec > MAX_WAITING_SEC: - raise JobTimeOutError("Waited too long for job to be completed") - waiting_sec *= 2 - job_result = self.get_job_result(job_id) - - raw_analytics_response = self.get_raw_analytics_response(job_result) - all_responses.append(self.parse(raw_analytics_response)) - - return chain(*all_responses) - - def get_entity_report(self): + def get_campaign_management_report(self): """ Get 'ENTITY' report through 'Campaign Management' endpoints of Twitter Ads API. + Supported entities: FUNDING_INSTRUMENT, CAMPAIGN, LINE_ITEM, MEDIA_CREATIVE, PROMOTED_TWEET Documentation: https://developer.twitter.com/en/docs/ads/campaign-management/api-reference """ @@ -445,33 +457,54 @@ def get_entity_report(self): for entity_obj in ACCOUNT_CHILD_OBJECTS[self.entity] ] - def get_creative_report(self): + def get_cards_report(self): + """ + Get 'ENTITY' report through the 'Creatives' endpoint of Twitter Ads API. 
+        Supported entities: CARD
+        Documentation: https://developer.twitter.com/en/docs/ads/creatives/api-reference/
+        """

-        # Step 1 - Get tweet_ids
         tweet_ids = [
             promoted_tweet.tweet_id for promoted_tweet in self.account.promoted_tweets()
         ]

         for chunk_tweet_ids in split_list(tweet_ids, 200):
-
-            # Step 2 - Get card_uri
-            # https://developer.twitter.com/en/docs/ads/creatives/api-reference/tweets
-            resource = f"/{API_VERSION}/accounts/{self.account.id}/tweets"
-            params = {"tweet_type": "PUBLISHED"}
-            request = Request(self.client, "get", resource, params=params)
-
-            # Step 3 - Get website_dest_url
-            for tweet in Cursor(None, request):
+            for tweet in self.get_tweets():
                 if "card_uri" in tweet:
+                    card_fetch = self.get_card_fetch(card_uri=tweet["card_uri"])
+                    card_attributes = {
+                        attr: getattr(card_fetch, attr, None)
+                        for attr in self.entity_attributes
+                    }
                     record = {
                         "tweet_id": tweet["tweet_id"],
                         "card_uri": tweet["card_uri"],
+                        **card_attributes,
                     }
-                    record["website_dest_url"] = CardsFetch.load(
-                        self.account, card_uris=[tweet["card_uri"]]
-                    ).first
                     yield record

+    def get_tweets(self):
+        """
+        Step 1 of 'ENTITY - CARD' report generation process:
+        Returns details on 'PUBLISHED' tweets, as a generator of dictionnaries
+        Documentation: https://developer.twitter.com/en/docs/ads/creatives/api-reference/tweets
+        """
+
+        resource = f"/{API_VERSION}/accounts/{self.account.id}/tweets"
+        params = {"tweet_type": "PUBLISHED"}
+        request = Request(self.client, "get", resource, params=params)
+
+        yield from Cursor(None, request)
+
+    def get_card_fetch(self, card_uri):
+        """
+        Step 2 of 'ENTITY - CARD' report generation process:
+        Returns the CardsFetch object associated with a specific card_uri
+        Documentation: https://developer.twitter.com/en/docs/ads/creatives/api-reference/cards-fetch
+        """
+
+        return CardsFetch.load(self.account, card_uris=[card_uri]).first
+
     def get_reach_report(self):
         """
         Get 'REACH' report through the 'Reach and Average Frequency' endpoint of Twitter Ads API. 
@@ -531,10 +564,10 @@ def read(self): data = self.get_reach_report() elif self.report_type == "ENTITY": - data = self.get_entity_report() - - elif self.report_type == "CREATIVE": - data = self.get_creative_report() + if self.entity == "CARD": + data = self.get_cards_report() + else: + data = self.get_campaign_management_report() def result_generator(): for record in data: diff --git a/tests/readers/test_twitter_reader.py b/tests/readers/test_twitter_reader.py index 7d3c832d..d17def7a 100644 --- a/tests/readers/test_twitter_reader.py +++ b/tests/readers/test_twitter_reader.py @@ -108,6 +108,15 @@ def test_validate_analytics_metric_groups_if_mobile_conversion(self): with self.assertRaises(ClickException): TwitterReader(**temp_kwargs) + @mock.patch.object(Client, "__init__", lambda *args: None) + @mock.patch.object(Client, "accounts", lambda *args: None) + def test_validate_analytics_entity(self): + temp_kwargs = self.kwargs.copy() + params = {"report_type": "ANALYTICS", "entity": "CARD"} + temp_kwargs.update(params) + with self.assertRaises(ClickException): + TwitterReader(**temp_kwargs) + @mock.patch.object(Client, "__init__", lambda *args: None) @mock.patch.object(Client, "accounts", lambda *args: None) def test_validate_reach_entity(self): From a865a5b24ac95a99cf3baf2fa4c56860b4dc1944 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Wed, 8 Jul 2020 13:41:38 +0200 Subject: [PATCH 12/54] Hotfix --- nck/readers/twitter_reader.py | 35 +++++++++++++++-------------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py index 0112f62c..82ad5a68 100644 --- a/nck/readers/twitter_reader.py +++ b/nck/readers/twitter_reader.py @@ -464,26 +464,21 @@ def get_cards_report(self): Documentation: https://developer.twitter.com/en/docs/ads/creatives/api-reference/ """ - tweet_ids = [ - promoted_tweet.tweet_id for promoted_tweet in self.account.promoted_tweets() - ] - - for chunk_tweet_ids in 
split_list(tweet_ids, 200): - for tweet in self.get_tweets(): - if "card_uri" in tweet: - card_fetch = self.get_card_fetch(card_uri=tweet["card_uri"]) - card_attributes = { - attr: getattr(card_fetch, attr, None) - for attr in self.entity_attributes - } - record = { - "tweet_id": tweet["tweet_id"], - "card_uri": tweet["card_uri"], - **card_attributes, - } - yield record - - def get_tweets(self): + for tweet in self.get_published_tweets(): + if "card_uri" in tweet: + card_fetch = self.get_card_fetch(card_uri=tweet["card_uri"]) + card_attributes = { + attr: getattr(card_fetch, attr, None) + for attr in self.entity_attributes + } + record = { + "tweet_id": tweet["tweet_id"], + "card_uri": tweet["card_uri"], + **card_attributes, + } + yield record + + def get_published_tweets(self): """ Step 1 of 'ENTITY - CARD' report generation process: Returns details on 'PUBLISHED' tweets, as a generator of dictionnaries From e43d0b0425b284d2b138d548fb5bccb4734542d2 Mon Sep 17 00:00:00 2001 From: benoitgoujon Date: Thu, 9 Jul 2020 17:11:00 +0200 Subject: [PATCH 13/54] Feat: add .dockerignore --- .dockerignore | 7 +++++++ 1 file changed, 7 insertions(+) create mode 100644 .dockerignore diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..b7fc1b38 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,7 @@ +**/.github +**/tests +**/.env +**/.flake8 +**/.gitignore +**/CONTRIBUTING.md +**/README.md \ No newline at end of file From eb4ef3bba07dc114f9213a1c1e26294f663a1472 Mon Sep 17 00:00:00 2001 From: benoitgoujon Date: Thu, 9 Jul 2020 17:22:44 +0200 Subject: [PATCH 14/54] Feat: add python specific files to dockerignore and dev env files --- .dockerignore | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/.dockerignore b/.dockerignore index b7fc1b38..c02bed5f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -3,5 +3,12 @@ **/.env **/.flake8 **/.gitignore -**/CONTRIBUTING.md -**/README.md \ No newline at end of file +**/.git +CONTRIBUTING.md 
+README.md +**/__pycache__ +**/*.pyc +**/.settings +**/.vscode +**/Dockerfile* +**/requirements-dev.txt \ No newline at end of file From 2873971ec72819c1b6851ab78bcf627af85ca0da Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Wed, 15 Jul 2020 17:11:31 +0200 Subject: [PATCH 15/54] Skipping inactive clientCustomerIds --- nck/readers/googleads_reader.py | 64 ++++++++++++++++++--------------- 1 file changed, 35 insertions(+), 29 deletions(-) diff --git a/nck/readers/googleads_reader.py b/nck/readers/googleads_reader.py index 5693fe71..83f10b36 100644 --- a/nck/readers/googleads_reader.py +++ b/nck/readers/googleads_reader.py @@ -26,6 +26,7 @@ from click import ClickException from googleads import adwords from googleads.oauth2 import GoogleRefreshTokenClient +from googleads.errors import AdWordsReportBadRequestError from nck.readers.reader import Reader from nck.utils.args import extract_args @@ -172,24 +173,30 @@ def valid_client_customer_id(client_customer_id): def fetch_report_from_gads_client_customer_obj( self, report_definition, client_customer_id ): - if self.valid_client_customer_id(client_customer_id): - adwords_client = self.init_adwords_client(client_customer_id) - report_downloader = adwords_client.GetReportDownloader() - customer_report = report_downloader.DownloadReportAsStream( - report_definition, - client_customer_id=client_customer_id, - include_zero_impressions=self.include_zero_impressions, - skip_report_header=True, - skip_column_header=True, - skip_report_summary=True, - ) - else: + if not self.valid_client_customer_id(client_customer_id): raise ClickException( - "Wrong format: " - + client_customer_id - + ". Client customer ID should be in the form 123-456-7890" + f"Wrong format: {client_customer_id}. Client customer ID should be in the form 123-456-7890." 
) - return customer_report + else: + try: + adwords_client = self.init_adwords_client(client_customer_id) + report_downloader = adwords_client.GetReportDownloader() + customer_report = report_downloader.DownloadReportAsStream( + report_definition, + client_customer_id=client_customer_id, + include_zero_impressions=self.include_zero_impressions, + skip_report_header=True, + skip_column_header=True, + skip_report_summary=True, + ) + return customer_report + except AdWordsReportBadRequestError as e: + if e.type == "AuthorizationError.CUSTOMER_NOT_ACTIVE": + logging.info( + f"Skipping clientCustomerId {client_customer_id} (inactive)." + ) + else: + raise Exception(f"Wrong request. Error type: {e.type}") def get_customer_ids(self, manager_id): """Retrieves all CustomerIds in the account hierarchy. @@ -214,7 +221,7 @@ def get_customer_ids(self, manager_id): selector = { "fields": ["CustomerId"], "predicates": [ - {"field": "CanManageClients", "operator": "EQUALS", "values": [False]} + {"field": "CanManageClients", "operator": "EQUALS", "values": [False]}, ], "paging": {"startIndex": str(offset), "numberResults": str(PAGE_SIZE)}, } @@ -346,19 +353,18 @@ def format_and_yield(self): customer_report = self.fetch_report_from_gads_client_customer_obj( report_definition, googleads_account_id ) - customer_report = stream_reader(customer_report) - - for row in customer_report: - reader = csv.DictReader(StringIO(row), self.fields) - for row in reader: - if self.include_client_customer_id: - row['AccountId'] = googleads_account_id - - if self.filter_on_video_campaigns: - if row['CampaignId'] in video_campaign_ids: + if customer_report: + customer_report = stream_reader(customer_report) + for row in customer_report: + reader = csv.DictReader(StringIO(row), self.fields) + for row in reader: + if self.include_client_customer_id: + row["AccountId"] = googleads_account_id + if self.filter_on_video_campaigns: + if row["CampaignId"] in video_campaign_ids: + yield row + else: yield row - 
else: - yield row def read(self): if self.manager_id: From d37c490d1d6ee5734142a275637168325ddd17f9 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Thu, 16 Jul 2020 11:49:45 +0200 Subject: [PATCH 16/54] Classifying skipped clientCustomerId logging as a warning --- nck/readers/googleads_reader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nck/readers/googleads_reader.py b/nck/readers/googleads_reader.py index 83f10b36..a331f6c5 100644 --- a/nck/readers/googleads_reader.py +++ b/nck/readers/googleads_reader.py @@ -192,7 +192,7 @@ def fetch_report_from_gads_client_customer_obj( return customer_report except AdWordsReportBadRequestError as e: if e.type == "AuthorizationError.CUSTOMER_NOT_ACTIVE": - logging.info( + logging.warning( f"Skipping clientCustomerId {client_customer_id} (inactive)." ) else: From 8fe494cd80b2431a5142384bd78f6daedb0ff3c0 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Thu, 16 Jul 2020 13:17:32 +0200 Subject: [PATCH 17/54] Changing retry behaviour for async jobs --- nck/helpers/twitter_helper.py | 8 -------- nck/readers/twitter_reader.py | 36 ++++++++++++++++++++++++----------- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/nck/helpers/twitter_helper.py b/nck/helpers/twitter_helper.py index d32cb3e7..042752a6 100644 --- a/nck/helpers/twitter_helper.py +++ b/nck/helpers/twitter_helper.py @@ -10,18 +10,10 @@ # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-import logging - from twitter_ads.campaign import FundingInstrument, Campaign, LineItem from twitter_ads.creative import MediaCreative, PromotedTweet, CardsFetch -class JobTimeOutError(Exception): - def __init__(self, message): - super().__init__(message) - logging.error(message) - - REPORT_TYPES = ["ANALYTICS", "REACH", "ENTITY"] ENTITY_OBJECTS = { diff --git a/nck/readers/twitter_reader.py b/nck/readers/twitter_reader.py index 82ad5a68..d82c6014 100644 --- a/nck/readers/twitter_reader.py +++ b/nck/readers/twitter_reader.py @@ -13,9 +13,9 @@ import logging import click from click import ClickException -from time import sleep from itertools import chain from datetime import datetime, timedelta +from tenacity import retry, wait_exponential, stop_after_delay from nck.utils.args import extract_args from nck.commands.command import processor @@ -29,7 +29,6 @@ METRIC_GROUPS, PLACEMENTS, SEGMENTATION_TYPES, - JobTimeOutError, ) from twitter_ads.client import Client @@ -302,17 +301,18 @@ def get_analytics_report(self, job_ids): logging.info(f"Processing job_id: {job_id}") - job_result = self.get_job_result(job_id) - waiting_sec = 2 + # job_result = self.get_job_result(job_id) + # waiting_sec = 2 - while job_result.status == "PROCESSING": - logging.info(f"Waiting {waiting_sec} seconds for job to be completed") - sleep(waiting_sec) - if waiting_sec > MAX_WAITING_SEC: - raise JobTimeOutError("Waited too long for job to be completed") - waiting_sec *= 2 - job_result = self.get_job_result(job_id) + # while job_result.status == "PROCESSING": + # logging.info(f"Waiting {waiting_sec} seconds for job to be completed") + # sleep(waiting_sec) + # if waiting_sec > MAX_WAITING_SEC: + # raise JobTimeOutError("Waited too long for job to be completed") + # waiting_sec *= 2 + # job_result = self.get_job_result(job_id) + job_result = self._waiting_for_job_to_complete(job_id) raw_analytics_response = self.get_raw_analytics_response(job_result) 
all_responses.append(self.parse(raw_analytics_response)) @@ -355,6 +355,20 @@ def get_job_ids(self, entity_ids): for chunk_entity_ids in split_list(entity_ids, MAX_ENTITY_IDS_PER_JOB) ] + @retry( + wait=wait_exponential(multiplier=1, min=60, max=3600), + stop=stop_after_delay(36000), + ) + def _waiting_for_job_to_complete(self, job_id): + """ + Retrying to get job_result until job status is 'COMPLETED'. + """ + job_result = self.get_job_result(job_id) + if job_result.status == "PROCESSING": + raise Exception(f"Job {job_id} is still running.") + else: + return job_result + def get_job_result(self, job_id): """ Step 3 of 'ANALYTICS' report generation process: From 35373c456b5d9217191da7d73bb4335df2ed3ce2 Mon Sep 17 00:00:00 2001 From: Pierre Hay Date: Sat, 15 Aug 2020 13:33:51 +0200 Subject: [PATCH 18/54] DV360: Create sdf tasks and Download sdf files --- nck/helpers/dv360_helper.py | 20 +++ nck/readers/__init__.py | 2 + nck/readers/dv360_reader.py | 192 +++++++++++++++++++++++++++++ nck/utils/file_reader.py | 13 +- tests/readers/test_dv360_reader.py | 30 +++++ 5 files changed, 252 insertions(+), 5 deletions(-) create mode 100644 nck/helpers/dv360_helper.py create mode 100644 nck/readers/dv360_reader.py create mode 100644 tests/readers/test_dv360_reader.py diff --git a/nck/helpers/dv360_helper.py b/nck/helpers/dv360_helper.py new file mode 100644 index 00000000..14b0d608 --- /dev/null +++ b/nck/helpers/dv360_helper.py @@ -0,0 +1,20 @@ +FILE_NAMES = { + "FILE_TYPE_INSERTION_ORDER": "InsertionOrders", + "FILE_TYPE_CAMPAIGN": "Campaigns", + "FILE_TYPE_MEDIA_PRODUCT": "MediaProducts", + "FILE_TYPE_LINE_ITEM": "LineItems", + "FILE_TYPE_AD_GROUP": "AdGroups", + "FILE_TYPE_AD": "AdGroupAds" +} + +FILE_TYPES = FILE_NAMES.keys() + +FILTER_TYPES = [ + "FILTER_TYPE_UNSPECIFIED", + "FILTER_TYPE_NONE", + "FILTER_TYPE_ADVERTISER_ID", + "FILTER_TYPE_CAMPAIGN_ID", + "FILTER_TYPE_MEDIA_PRODUCT_ID", + "FILTER_TYPE_INSERTION_ORDER_ID", + "FILTER_TYPE_LINE_ITEM_ID" +] diff --git 
a/nck/readers/__init__.py b/nck/readers/__init__.py
index 669688d6..4e76a73a 100644
--- a/nck/readers/__init__.py
+++ b/nck/readers/__init__.py
@@ -15,6 +15,7 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with this program; if not, write to the Free Software Foundation,
 # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+from nck.readers.dv360_reader import dv360
 from nck.readers.reader import Reader
 
 from nck.readers.mysql_reader import mysql
@@ -46,6 +47,7 @@
     sa360_reader,
     facebook,
     oracle,
+    dv360,
     dbm,
     dcm,
     ga,
diff --git a/nck/readers/dv360_reader.py b/nck/readers/dv360_reader.py
new file mode 100644
index 00000000..062d778d
--- /dev/null
+++ b/nck/readers/dv360_reader.py
@@ -0,0 +1,192 @@
+
+import click
+import logging
+import io
+import httplib2
+
+from itertools import chain
+from typing import List, Tuple
+
+from googleapiclient import discovery
+from googleapiclient.http import MediaIoBaseDownload
+from oauth2client import client, GOOGLE_REVOKE_URI
+from tenacity import retry, wait_exponential, stop_after_delay
+
+from nck.helpers.dv360_helper import FILE_NAMES, FILE_TYPES, FILTER_TYPES
+from nck.commands.command import processor
+from nck.readers.reader import Reader
+from nck.utils.file_reader import CSVReader, unzip
+from nck.utils.args import extract_args
+from nck.streams.format_date_stream import FormatDateStream
+
+
+@click.command(name="read_dv360")
+@click.option("--dv360-access-token", default=None, required=True)
+@click.option("--dv360-refresh-token", required=True)
+@click.option("--dv360-client-id", required=True)
+@click.option("--dv360-client-secret", required=True)
+@click.option("--dv360-advertiser-id", required=True)
+@click.option("--dv360-file-type", type=click.Choice(FILE_TYPES), multiple=True, required=True)
+@click.option("--dv360-filter-type", type=click.Choice(FILTER_TYPES), required=True)
+@processor("dv360_access_token", "dv360_refresh_token", "dv360_client_secret")
+def 
dv360(**kwargs): + return DV360Reader(**extract_args("dv360_", kwargs)) + + +class DV360Reader(Reader): + + API_NAME = "displayvideo" + API_VERSION = "v1" + SDF_VERSION = "SDF_VERSION_5_2" + + # path where to download the sdf file. + BASE = "/tmp" + + # name of the downloaded archive which may embeds several csv + # if more than one file type where to be provided. + ARCHIVE_NAME = "sdf" + + def __init__( + self, + access_token: str, + refresh_token: str, + client_id: str, + client_secret: str, + **kwargs + ): + + credentials = client.GoogleCredentials( + access_token, + client_id=client_id, + client_secret=client_secret, + refresh_token=refresh_token, + token_expiry=None, + token_uri="https://www.googleapis.com/oauth2/v4/token", + user_agent=None, + revoke_uri=GOOGLE_REVOKE_URI + ) + http = credentials.authorize(httplib2.Http()) + credentials.refresh(http) + + self._client = discovery.build( + self.API_NAME , self.API_VERSION, http=http, cache_discovery=False + ) + + self.kwargs = kwargs + self.file_names = self.get_file_names() + + def _get_file_type(self) -> Tuple[str]: + # file_type : dictates the resource type that populates the sdf file. + # https://developers.google.com/display-video/api/reference/rest/v1/sdfdownloadtasks/create#filetype + # Required: One can provide several file types. + return self.kwargs.get("file_type") + + def _get_filter_type(self) -> str: + # filter_type : specifies the type of resource to filter. + # Required: Only one filter_type allowed. + return self.kwargs.get("filter_type") + + def _get_advertiser_id(self) -> str: + return self.kwargs.get("advertiser_id") + + def get_file_names(self) -> List[str]: + # DV360 api creates one file per file_type. + # map file_type with the name of the generated file. + return [f"SDF-{FILE_NAMES[file_type]}" for file_type in self._get_file_type()] + + # make sure to implement the appropriate retry policy. 
+ @retry( + wait=wait_exponential(multiplier=1, min=60, max=3600), + stop=stop_after_delay(36000), + ) + def _wait_sdf_download_request(self, operation): + """ + Wait for a sdf task to be completed. ie. (file ready for download) + Args: + operation (dict): task metadata + Returns: + operation (dict): task metadata updated with resource location. + """ + logging.info( + f"waiting for SDF operation: {operation['name']} to complete running." + ) + get_request = self._client.sdfdownloadtasks().operations().get(name=operation["name"]) + operation = get_request.execute() + if "done" not in operation: + raise Exception("The operation has exceed the time limit treshold.\n") + if "error" in operation: + raise Exception("The operation finished in error with code %s: %s" % ( + operation["error"]["code"], + operation["error"]["message"])) + return operation + + def create_sdf_task(self, body): + """ + Create a sdf asynchronous task of type googleapiclient.discovery.Resource + Args: + body (dict) : request body to describe the data within the generated sdf file. + Return: + operation (dict) : contains the task metadata. + """ + + operation = self._client.sdfdownloadtasks().create(body=body).execute() + logging.info("Operation %s was created." 
% operation["name"]) + return operation + + def download_sdf(self, operation): + request = self._client.media().download(resourceName=operation["response"]["resourceName"]) + request.uri = request.uri.replace("?alt=json", "?alt=media") + sdf = io.FileIO(f"{self.BASE}/{self.ARCHIVE_NAME}.zip", mode="wb") + downloader = MediaIoBaseDownload(sdf, request) + done = False + while done is False: + status, done = downloader.next_chunk() + logging.info(f"Download {int(status.progress() * 100)}.") + + @staticmethod + def sdf_to_njson_generator(path_to_file): + csv_reader = CSVReader(csv_delimiter=",", csv_fieldnames=None) + with open(path_to_file, "rb") as fd: + dict_reader = csv_reader.read_csv(fd) + for line in dict_reader: + yield line + + def get_sdf_body(self): + # exctract request body from parameters + file_type = self._get_file_type() + filter_type = self._get_filter_type() + advertiser_id = self._get_advertiser_id() + body = { + "parentEntityFilter": { + "fileType": file_type, + "filterType": filter_type + }, + "version": self.SDF_VERSION, + "advertiserId": advertiser_id + } + return body + + def get_sdf_objects(self): + body = self.get_sdf_body() + # create sdf task + init_operation = self.create_sdf_task(body=body) + # wait for the task to be ready or raise Error + created_operation = self._wait_sdf_download_request(init_operation) + # download and unzip the sdf file(s) archive. + self.download_sdf(created_operation) + unzip(f"{self.BASE}/{self.ARCHIVE_NAME}.zip", output_path=self.BASE) + # We chain operation if many file_types were to be provided. 
+ return chain( + *[ + self.sdf_to_njson_generator(f"{self.BASE}/{file_name}.csv") + for file_name in self.file_names + ] + ) + + def read(self): + yield FormatDateStream( + "sdf", + self.get_sdf_objects(), + keys=["Date"], + date_format=self.kwargs.get("date_format"), + ) diff --git a/nck/utils/file_reader.py b/nck/utils/file_reader.py index ce41175a..79f96519 100644 --- a/nck/utils/file_reader.py +++ b/nck/utils/file_reader.py @@ -19,9 +19,15 @@ import csv import codecs import gzip +import zipfile import json +def unzip(input_file, output_path): + with zipfile.ZipFile(input_file, 'r') as zip_ref: + zip_ref.extractall(output_path) + + def format_csv_delimiter(csv_delimiter): _csv_delimiter = csv_delimiter.encode().decode("unicode_escape") if csv_delimiter == "newline": @@ -32,9 +38,7 @@ def format_csv_delimiter(csv_delimiter): def format_csv_fieldnames(csv_fieldnames): - if csv_fieldnames is None: - _csv_fieldnames = csv_fieldnames - elif isinstance(csv_fieldnames, list): + if isinstance(csv_fieldnames, list): _csv_fieldnames = csv_fieldnames elif isinstance(csv_fieldnames, (str, bytes)): _csv_fieldnames = json.loads(csv_fieldnames) @@ -49,8 +53,7 @@ def format_csv_fieldnames(csv_fieldnames): class CSVReader(object): def __init__(self, csv_delimiter, csv_fieldnames, **kwargs): self.csv_delimiter = format_csv_delimiter(csv_delimiter) - self.csv_fieldnames = format_csv_fieldnames(csv_fieldnames) - + self.csv_fieldnames = format_csv_fieldnames(csv_fieldnames) if csv_fieldnames is not None else None self.csv_reader = lambda fd: self.read_csv(fd, **kwargs) def read_csv(self, fd, **kwargs): diff --git a/tests/readers/test_dv360_reader.py b/tests/readers/test_dv360_reader.py new file mode 100644 index 00000000..dc738e4b --- /dev/null +++ b/tests/readers/test_dv360_reader.py @@ -0,0 +1,30 @@ +from nck.readers.dv360_reader import DV360Reader +from unittest import TestCase, mock + + +class TestDV360Reader(TestCase): + + def mock_dv360_reader(self, **kwargs): + for param, 
value in kwargs.items(): + setattr(self, param, value) + + @mock.patch.object(DV360Reader, '__init__', mock_dv360_reader) + def test_get_sdf_body(self): + kwargs = {} + reader = DV360Reader(**kwargs) + reader.kwargs = { + "file_type": ["FILE_TYPE_INSERTION_ORDER", "FILE_TYPE_CAMPAIGN"], + "filter_type": "FILTER_TYPE_ADVERTISER_ID", + "advertiser_id": "4242424" + } + + expected_query_body = { + "parentEntityFilter": { + "fileType": ["FILE_TYPE_INSERTION_ORDER", "FILE_TYPE_CAMPAIGN"], + "filterType": "FILTER_TYPE_ADVERTISER_ID" + }, + "version": "SDF_VERSION_5_2", + "advertiserId": "4242424" + } + + self.assertDictEqual(reader.get_sdf_body(), expected_query_body) From 93d71fe22970fcedcc2c43ae8fea867b1e6440d2 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Mon, 24 Aug 2020 20:26:51 +0200 Subject: [PATCH 19/54] enh: storing report attributes as instance variables --- nck/readers/ttd_reader.py | 50 +++++++++++++++++++-------------------- tests/readers/test_ttd.py | 30 +++++++++++------------ 2 files changed, 38 insertions(+), 42 deletions(-) diff --git a/nck/readers/ttd_reader.py b/nck/readers/ttd_reader.py index bf73f8db..4e36560e 100644 --- a/nck/readers/ttd_reader.py +++ b/nck/readers/ttd_reader.py @@ -103,7 +103,6 @@ def make_api_call(self, method, endpoint, payload={}): ) if response.ok: if response.content: - print(response.json()) return response.json() else: response.raise_for_status() @@ -123,15 +122,14 @@ def get_report_template_id(self): Please specify the exact name of the ReportTemplate you wish to retrieve.""" ) else: - report_template_id = json_response["Result"][0]["ReportTemplateId"] - logging.info(f"Retrieved ReportTemplateId: {report_template_id}") - return report_template_id + self.report_template_id = json_response["Result"][0]["ReportTemplateId"] + logging.info(f"Retrieved ReportTemplateId: {self.report_template_id}") - def create_report_schedule(self, report_template_id): + def create_report_schedule(self): method, endpoint = 
API_ENDPOINTS["create_report_schedule"] payload = { "ReportScheduleName": self.report_schedule_name, - "ReportTemplateId": report_template_id, + "ReportTemplateId": self.report_template_id, "AdvertiserFilters": self.advertiser_ids, "ReportStartDateInclusive": self.start_date.isoformat(), "ReportEndDateExclusive": self.end_date.isoformat(), @@ -139,33 +137,33 @@ def create_report_schedule(self, report_template_id): } logging.info(f"Creating ReportSchedule: {payload}") json_response = self.make_api_call(method, endpoint, payload) - report_schedule_id = json_response["ReportScheduleId"] - return report_schedule_id + self.report_schedule_id = json_response["ReportScheduleId"] @retry( wait=wait_exponential(multiplier=1, min=60, max=3600), stop=stop_after_delay(36000), ) - def _wait_for_download_url(self, report_schedule_id): - report_execution_details = self.get_report_execution_details(report_schedule_id) + def _wait_for_download_url(self): + report_execution_details = self.get_report_execution_details() if report_execution_details["ReportExecutionState"] == "Pending": - raise Exception(f"ReportSchedule '{report_schedule_id}' is still running.") + raise Exception( + f"ReportSchedule '{self.report_schedule_id}' is still running." + ) else: # As the ReportSchedule that we just created runs only once, # the API response will include only one ReportDelivery (so we can get index "[0]") - download_url = report_execution_details["ReportDeliveries"][0][ + self.download_url = report_execution_details["ReportDeliveries"][0][ "DownloadURL" ] logging.info( - f"ReportScheduleId '{report_schedule_id}' is ready. DownloadURL: {download_url}" + f"ReportScheduleId '{self.report_schedule_id}' is ready. 
DownloadURL: {self.download_url}" ) - return download_url - def get_report_execution_details(self, report_schedule_id): + def get_report_execution_details(self): method, endpoint = API_ENDPOINTS["get_report_execution_details"] payload = { "AdvertiserIds": self.advertiser_ids, - "ReportScheduleIds": [report_schedule_id], + "ReportScheduleIds": [self.report_schedule_id], **DEFAULT_PAGING_ARGS, } json_response = self.make_api_call(method, endpoint, payload) @@ -174,20 +172,20 @@ def get_report_execution_details(self, report_schedule_id): report_execution_details = json_response["Result"][0] return report_execution_details - def download_report(self, download_url): - report = requests.get(url=download_url, headers=self.headers, stream=True) + def download_report(self): + report = requests.get(url=self.download_url, headers=self.headers, stream=True) return get_report_generator_from_flat_file(report.iter_lines()) - def delete_report_schedule(self, report_schedule_id): - logging.info(f"Deleting ReportScheduleId '{report_schedule_id}'") + def delete_report_schedule(self): + logging.info(f"Deleting ReportScheduleId '{self.report_schedule_id}'") method, endpoint = API_ENDPOINTS["delete_report_schedule"] - self.make_api_call(method, f"{endpoint}/{report_schedule_id}") + self.make_api_call(method, f"{endpoint}/{self.report_schedule_id}") def read(self): - report_template_id = self.get_report_template_id() - report_schedule_id = self.create_report_schedule(report_template_id) - download_url = self._wait_for_download_url(report_schedule_id) - data = self.download_report(download_url) + self.get_report_template_id() + self.create_report_schedule() + self._wait_for_download_url() + data = self.download_report() def result_generator(): for record in data: @@ -199,4 +197,4 @@ def result_generator(): "results_" + "_".join(self.advertiser_ids), result_generator() ) - self.delete_report_schedule(report_schedule_id) + self.delete_report_schedule() diff --git a/tests/readers/test_ttd.py 
b/tests/readers/test_ttd.py index 37156ed1..94764832 100644 --- a/tests/readers/test_ttd.py +++ b/tests/readers/test_ttd.py @@ -63,9 +63,8 @@ def test_get_report_template_id_if_exactly_1_match( self, mock_build_headers, mock_api_call ): reader = TheTradeDeskReader(**self.kwargs) - output = reader.get_report_template_id() - expected = 1234 - self.assertEqual(output, expected) + reader.get_report_template_id() + self.assertEqual(reader.report_template_id, 1234) @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) @mock.patch( @@ -117,9 +116,9 @@ def test_get_report_template_id_if_no_match( ) def test_create_report_schedule(self, mock_build_headers, mock_api_call): reader = TheTradeDeskReader(**self.kwargs) - output = reader.create_report_schedule(report_template_id=1234) - expected = 5678 - self.assertEqual(output, expected) + reader.report_template_id = 1234 + reader.create_report_schedule() + self.assertEqual(reader.report_schedule_id, 5678) @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) @mock.patch("tenacity.BaseRetrying.wait", side_effect=lambda *args, **kwargs: 0) @@ -154,19 +153,15 @@ def test_create_report_schedule(self, mock_build_headers, mock_api_call): ) def test_wait_for_download_url(self, mock_build_headers, mock_retry, mock_api_call): reader = TheTradeDeskReader(**self.kwargs) - output = reader._wait_for_download_url(report_schedule_id=5678) - expected = "https://download.url" - self.assertEqual(output, expected) + reader.report_schedule_id = 5678 + reader._wait_for_download_url() + self.assertEqual(reader.download_url, "https://download.url") @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) @mock.patch("tenacity.BaseRetrying.wait", side_effect=lambda *args, **kwargs: 0) - @mock.patch.object(TheTradeDeskReader, "get_report_template_id", lambda *args: 1234) - @mock.patch.object(TheTradeDeskReader, "create_report_schedule", lambda *args: 5678) - @mock.patch.object( - TheTradeDeskReader, - 
"_wait_for_download_url", - lambda *args: "https://download.url", - ) + @mock.patch.object(TheTradeDeskReader, "get_report_template_id", lambda *args: None) + @mock.patch.object(TheTradeDeskReader, "create_report_schedule", lambda *args: None) + @mock.patch.object(TheTradeDeskReader, "_wait_for_download_url", lambda *args: None) @mock.patch( "nck.readers.ttd_reader.TheTradeDeskReader.download_report", return_value=iter( @@ -191,6 +186,9 @@ def test_wait_for_download_url(self, mock_build_headers, mock_retry, mock_api_ca ) def test_read(self, mock_build_headers, mock_retry, mock_download_report): reader = TheTradeDeskReader(**self.kwargs) + reader.report_template_id = 1234 + reader.report_schedule_id = 5678 + reader.download_url = "https://download.url" output = next(reader.read()) expected = [ {"Date": "2020-01-01", "Advertiser_ID": "XXXXX", "Impressions": 10}, From 7095f6684c4ba45b4bab70de8e746f39ede5c171 Mon Sep 17 00:00:00 2001 From: Arthur Derennes Date: Tue, 25 Aug 2020 18:59:41 +0200 Subject: [PATCH 20/54] refacto: format docstrings --- nck/readers/dv360_reader.py | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/nck/readers/dv360_reader.py b/nck/readers/dv360_reader.py index 062d778d..9eb1d565 100644 --- a/nck/readers/dv360_reader.py +++ b/nck/readers/dv360_reader.py @@ -76,25 +76,30 @@ def __init__( self.file_names = self.get_file_names() def _get_file_type(self) -> Tuple[str]: - # file_type : dictates the resource type that populates the sdf file. - # https://developers.google.com/display-video/api/reference/rest/v1/sdfdownloadtasks/create#filetype - # Required: One can provide several file types. + """ + file_type : dictates the resource type that populates the sdf file. + https://developers.google.com/display-video/api/reference/rest/v1/sdfdownloadtasks/create#filetype + Required: One can provide several file types. 
+ """ return self.kwargs.get("file_type") def _get_filter_type(self) -> str: - # filter_type : specifies the type of resource to filter. - # Required: Only one filter_type allowed. + """ + filter_type : specifies the type of resource to filter. + Required: Only one filter_type allowed. + """ return self.kwargs.get("filter_type") def _get_advertiser_id(self) -> str: return self.kwargs.get("advertiser_id") def get_file_names(self) -> List[str]: - # DV360 api creates one file per file_type. - # map file_type with the name of the generated file. + """ + DV360 api creates one file per file_type. + map file_type with the name of the generated file. + """ return [f"SDF-{FILE_NAMES[file_type]}" for file_type in self._get_file_type()] - # make sure to implement the appropriate retry policy. @retry( wait=wait_exponential(multiplier=1, min=60, max=3600), stop=stop_after_delay(36000), @@ -168,13 +173,11 @@ def get_sdf_body(self): def get_sdf_objects(self): body = self.get_sdf_body() - # create sdf task init_operation = self.create_sdf_task(body=body) - # wait for the task to be ready or raise Error created_operation = self._wait_sdf_download_request(init_operation) - # download and unzip the sdf file(s) archive. self.download_sdf(created_operation) unzip(f"{self.BASE}/{self.ARCHIVE_NAME}.zip", output_path=self.BASE) + # We chain operation if many file_types were to be provided. 
return chain( *[ From 19de5b232db38b1efdc42d3e5348f5cf355d86bf Mon Sep 17 00:00:00 2001 From: Arthur Derennes Date: Fri, 28 Aug 2020 18:19:56 +0200 Subject: [PATCH 21/54] refacto: take benoit's comments into account --- nck/readers/dv360_reader.py | 37 +++++++------------------------------ 1 file changed, 7 insertions(+), 30 deletions(-) diff --git a/nck/readers/dv360_reader.py b/nck/readers/dv360_reader.py index 9eb1d565..33b63bce 100644 --- a/nck/readers/dv360_reader.py +++ b/nck/readers/dv360_reader.py @@ -5,7 +5,7 @@ import httplib2 from itertools import chain -from typing import List, Tuple +from typing import List from googleapiclient import discovery from googleapiclient.http import MediaIoBaseDownload @@ -75,30 +75,12 @@ def __init__( self.kwargs = kwargs self.file_names = self.get_file_names() - def _get_file_type(self) -> Tuple[str]: - """ - file_type : dictates the resource type that populates the sdf file. - https://developers.google.com/display-video/api/reference/rest/v1/sdfdownloadtasks/create#filetype - Required: One can provide several file types. - """ - return self.kwargs.get("file_type") - - def _get_filter_type(self) -> str: - """ - filter_type : specifies the type of resource to filter. - Required: Only one filter_type allowed. - """ - return self.kwargs.get("filter_type") - - def _get_advertiser_id(self) -> str: - return self.kwargs.get("advertiser_id") - def get_file_names(self) -> List[str]: """ DV360 api creates one file per file_type. map file_type with the name of the generated file. 
""" - return [f"SDF-{FILE_NAMES[file_type]}" for file_type in self._get_file_type()] + return [f"SDF-{FILE_NAMES[file_type]}" for file_type in self.kwargs.get("file_type")] @retry( wait=wait_exponential(multiplier=1, min=60, max=3600), @@ -146,7 +128,7 @@ def download_sdf(self, operation): done = False while done is False: status, done = downloader.next_chunk() - logging.info(f"Download {int(status.progress() * 100)}.") + logging.info(f"Download {int(status.progress() * 100)}%.") @staticmethod def sdf_to_njson_generator(path_to_file): @@ -157,19 +139,14 @@ def sdf_to_njson_generator(path_to_file): yield line def get_sdf_body(self): - # exctract request body from parameters - file_type = self._get_file_type() - filter_type = self._get_filter_type() - advertiser_id = self._get_advertiser_id() - body = { + return { "parentEntityFilter": { - "fileType": file_type, - "filterType": filter_type + "fileType": self.kwargs.get("file_type"), + "filterType": self.kwargs.get("filter_type") }, "version": self.SDF_VERSION, - "advertiserId": advertiser_id + "advertiserId": self.kwargs.get("advertiser_id") } - return body def get_sdf_objects(self): body = self.get_sdf_body() From f007bc51d0e1284300a49df9b3a2d68629d75101 Mon Sep 17 00:00:00 2001 From: Pierre Hay Date: Mon, 31 Aug 2020 16:17:48 +0200 Subject: [PATCH 22/54] refacto : raise more specific errors --- nck/readers/dv360_reader.py | 41 ++++++++++++++++++++++--------------- nck/utils/exceptions.py | 25 ++++++++++++++++++++++ nck/utils/file_reader.py | 8 ++++++++ 3 files changed, 58 insertions(+), 16 deletions(-) create mode 100644 nck/utils/exceptions.py diff --git a/nck/readers/dv360_reader.py b/nck/readers/dv360_reader.py index 33b63bce..960f01c8 100644 --- a/nck/readers/dv360_reader.py +++ b/nck/readers/dv360_reader.py @@ -1,4 +1,20 @@ - +# GNU Lesser General Public License v3.0 only +# Copyright (C) 2020 Artefact +# licence-information@artefact.com +# +# This program is free software; you can redistribute it and/or +# 
modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3 of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. import click import logging import io @@ -13,9 +29,10 @@ from tenacity import retry, wait_exponential, stop_after_delay from nck.helpers.dv360_helper import FILE_NAMES, FILE_TYPES, FILTER_TYPES +from nck.utils.exceptions import RetryTimeoutError, SdfOperationError from nck.commands.command import processor from nck.readers.reader import Reader -from nck.utils.file_reader import CSVReader, unzip +from nck.utils.file_reader import sdf_to_njson_generator, unzip from nck.utils.args import extract_args from nck.streams.format_date_stream import FormatDateStream @@ -100,11 +117,7 @@ def _wait_sdf_download_request(self, operation): get_request = self._client.sdfdownloadtasks().operations().get(name=operation["name"]) operation = get_request.execute() if "done" not in operation: - raise Exception("The operation has exceed the time limit treshold.\n") - if "error" in operation: - raise Exception("The operation finished in error with code %s: %s" % ( - operation["error"]["code"], - operation["error"]["message"])) + raise RetryTimeoutError("The operation has taken more than 10 hours to complete.\n") return operation def create_sdf_task(self, body): @@ -130,14 +143,6 @@ def download_sdf(self, operation): status, done = downloader.next_chunk() logging.info(f"Download {int(status.progress() * 100)}%.") - @staticmethod - 
def sdf_to_njson_generator(path_to_file): - csv_reader = CSVReader(csv_delimiter=",", csv_fieldnames=None) - with open(path_to_file, "rb") as fd: - dict_reader = csv_reader.read_csv(fd) - for line in dict_reader: - yield line - def get_sdf_body(self): return { "parentEntityFilter": { @@ -152,13 +157,17 @@ def get_sdf_objects(self): body = self.get_sdf_body() init_operation = self.create_sdf_task(body=body) created_operation = self._wait_sdf_download_request(init_operation) + if "error" in created_operation: + raise SdfOperationError("The operation finished in error with code %s: %s" % ( + created_operation["error"]["code"], + created_operation["error"]["message"])) self.download_sdf(created_operation) unzip(f"{self.BASE}/{self.ARCHIVE_NAME}.zip", output_path=self.BASE) # We chain operation if many file_types were to be provided. return chain( *[ - self.sdf_to_njson_generator(f"{self.BASE}/{file_name}.csv") + sdf_to_njson_generator(f"{self.BASE}/{file_name}.csv") for file_name in self.file_names ] ) diff --git a/nck/utils/exceptions.py b/nck/utils/exceptions.py new file mode 100644 index 00000000..bd15a654 --- /dev/null +++ b/nck/utils/exceptions.py @@ -0,0 +1,25 @@ +# GNU Lesser General Public License v3.0 only +# Copyright (C) 2020 Artefact +# licence-information@artefact.com +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3 of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +class RetryTimeoutError(Exception): + """Raised when a query exceeds it's time limit treshold.""" + pass + + +class SdfOperationError(Exception): + """Raised when a sdf operation has failed.""" + pass diff --git a/nck/utils/file_reader.py b/nck/utils/file_reader.py index 79f96519..149e9208 100644 --- a/nck/utils/file_reader.py +++ b/nck/utils/file_reader.py @@ -28,6 +28,14 @@ def unzip(input_file, output_path): zip_ref.extractall(output_path) +def sdf_to_njson_generator(path_to_file): + csv_reader = CSVReader(csv_delimiter=",", csv_fieldnames=None) + with open(path_to_file, "rb") as fd: + dict_reader = csv_reader.read_csv(fd) + for line in dict_reader: + yield line + + def format_csv_delimiter(csv_delimiter): _csv_delimiter = csv_delimiter.encode().decode("unicode_escape") if csv_delimiter == "newline": From 7fb41cac9b54090b5536e2f04fea02d82d6be084 Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Tue, 1 Sep 2020 16:22:01 +0200 Subject: [PATCH 23/54] upgrade DBM v1.1 - update get_query and remove sdf requests --- nck/helpers/dbm_helper.py | 18 -------- nck/readers/dbm_reader.py | 86 +++++++++------------------------------ 2 files changed, 20 insertions(+), 84 deletions(-) diff --git a/nck/helpers/dbm_helper.py b/nck/helpers/dbm_helper.py index 6042a8e1..8d321cd2 100644 --- a/nck/helpers/dbm_helper.py +++ b/nck/helpers/dbm_helper.py @@ -21,23 +21,5 @@ "existing_query_report", "custom_query_report", "lineitems_objects", - "sdf_objects", "list_reports", ] - -POSSIBLE_SDF_FILE_TYPES = [ - "INVENTORY_SOURCE", - "AD", - "AD_GROUP", - "CAMPAIGN", - "INSERTION_ORDER", - "LINE_ITEM", -] - -FILE_TYPES_DICT = { - "AD": "ads", - "AD_GROUP": "adGroups", - "CAMPAIGN": "campaigns", - "LINE_ITEM": "lineItems", - "INSERTION_ORDER": "insertionOrders", -} diff 
--git a/nck/readers/dbm_reader.py b/nck/readers/dbm_reader.py index 8fd47359..27196e23 100644 --- a/nck/readers/dbm_reader.py +++ b/nck/readers/dbm_reader.py @@ -22,20 +22,19 @@ import requests import datetime -from itertools import chain - from googleapiclient import discovery from oauth2client import client, GOOGLE_REVOKE_URI from tenacity import retry, wait_exponential, stop_after_delay +from click import ClickException from nck.commands.command import processor from nck.readers.reader import Reader from nck.utils.args import extract_args from nck.streams.format_date_stream import FormatDateStream -from nck.utils.text import get_generator_dict_from_str_csv, add_column_value_to_csv_line_iterator +from nck.utils.text import get_generator_dict_from_str_csv -from nck.helpers.dbm_helper import POSSIBLE_REQUEST_TYPES, FILE_TYPES_DICT +from nck.helpers.dbm_helper import POSSIBLE_REQUEST_TYPES DISCOVERY_URI = "https://analyticsreporting.googleapis.com/$discovery/rest" @@ -64,7 +63,7 @@ help=( "Sometimes the date range on which metrics are computed is missing from the report. " "If this option is set to True, this range will be added." 
- ) + ), ) @click.option("--dbm-filter", type=click.Tuple([str, int]), multiple=True) @click.option("--dbm-file-type", multiple=True) @@ -78,7 +77,9 @@ "--dbm-day-range", required=True, default="LAST_7_DAYS", - type=click.Choice(["PREVIOUS_DAY", "LAST_30_DAYS", "LAST_90_DAYS", "LAST_7_DAYS", "PREVIOUS_MONTH", "PREVIOUS_WEEK"]), + type=click.Choice( + ["PREVIOUS_DAY", "LAST_30_DAYS", "LAST_90_DAYS", "LAST_7_DAYS", "PREVIOUS_MONTH", "PREVIOUS_WEEK"] + ), ) @processor("dbm_access_token", "dbm_refresh_token", "dbm_client_secret") def dbm(**kwargs): @@ -88,7 +89,7 @@ def dbm(**kwargs): class DbmReader(Reader): API_NAME = "doubleclickbidmanager" - API_VERSION = "v1" + API_VERSION = "v1.1" def __init__(self, access_token, refresh_token, client_secret, client_id, **kwargs): credentials = client.GoogleCredentials( @@ -105,29 +106,23 @@ def __init__(self, access_token, refresh_token, client_secret, client_id, **kwar http = credentials.authorize(httplib2.Http()) credentials.refresh(http) - # API_SCOPES = ['https://www.googleapis.com/auth/doubleclickbidmanager'] self._client = discovery.build(self.API_NAME, self.API_VERSION, http=http, cache_discovery=False) self.kwargs = kwargs - def get_query(self, query_id, query_title): - response = self._client.queries().listqueries().execute() - if "queries" in response: - for q in response["queries"]: - if q["queryId"] == query_id or q["metadata"]["title"] == query_title: - return q + def get_query(self, query_id): + if query_id: + return self._client.queries().getquery(queryId=query_id).execute() else: - logging.info("No query found with the id {} or the title {}".format(query_id, query_title)) - return None + raise ClickException(f"Please provide a 'query_id' in order to find your query") def get_existing_query(self): query_id = self.kwargs.get("query_id", None) - query_title = self.kwargs.get("query_title", None) - query = self.get_query(query_id, query_id) + query = self.get_query(query_id) if query: return query else: - raise 
Exception("No query found with the id {} or the title {}".format(query_id, query_title)) + raise ClickException(f"No query found with the id {query_id}") def get_query_body(self): body_q = { @@ -159,15 +154,10 @@ def create_and_get_query(self): query = self._client.queries().createquery(body=body_query).execute() return query - @retry( - wait=wait_exponential(multiplier=1, min=60, max=3600), - stop=stop_after_delay(36000), - ) + @retry(wait=wait_exponential(multiplier=1, min=60, max=3600), stop=stop_after_delay(36000)) def _wait_for_query(self, query_id): - logging.info( - "waiting for query of id : {} to complete running".format(query_id) - ) - query_infos = self.get_query(query_id, None) + logging.info("waiting for query of id : {} to complete running".format(query_id)) + query_infos = self.get_query(query_id) if query_infos["metadata"]["running"]: raise Exception("Query still running.") else: @@ -191,13 +181,12 @@ def get_query_report_url(self, existing_query=True): def get_query_report(self, existing_query=True): url = self.get_query_report_url(existing_query) report = requests.get(url, stream=True) - if self.kwargs["query_param_type"] == "TYPE_REACH_AND_FREQUENCY" \ - and self.kwargs["add_date_to_report"]: + if self.kwargs["query_param_type"] == "TYPE_REACH_AND_FREQUENCY" and self.kwargs["add_date_to_report"]: return get_generator_dict_from_str_csv( report.iter_lines(), add_date=True, day_range=self.kwargs["day_range"], - date_format=self.kwargs.get("date_format") + date_format=self.kwargs.get("date_format"), ) else: return get_generator_dict_from_str_csv(report.iter_lines()) @@ -226,35 +215,7 @@ def get_lineitems_objects(self): lines = lineitems.split("\n") return get_generator_dict_from_str_csv(lines) - def get_sdf_body(self): - filter_types = [filt[0] for filt in self.kwargs.get("filter")] - assert ( - len([filter_types[0] == filt for filt in filter_types if filter_types[0] == filt]) == 1 - ), "sdf accept just one filter type, multiple filter types 
detected" - filter_ids = [str(filt[1]) for filt in self.kwargs.get("filter")] - - file_types = self.kwargs.get("file_type") - body_sdf = {"version": "5.1", "filterIds": filter_ids, "filterType": filter_types, "fileTypes": file_types} - return body_sdf - - def get_sdf_objects(self): - body_sdf = self.get_sdf_body() - file_types = body_sdf["fileTypes"] - response = self._client.sdf().download(body=body_sdf).execute() - - return chain( - *[ - get_generator_dict_from_str_csv( - add_column_value_to_csv_line_iterator( - response[FILE_TYPES_DICT[file_type]].split("\n"), "file_type", file_type - ) - ) - for file_type in file_types - ] - ) - def read(self): - # request existing query request_type = self.kwargs.get("request_type") if request_type == "existing_query": data = [self.get_existing_query()] @@ -268,8 +229,6 @@ def read(self): data = self.list_query_reports() elif request_type == "lineitems_objects": data = self.get_lineitems_objects() - elif request_type == "sdf_objects": - data = self.get_sdf_objects() else: raise Exception("Unknown request type") @@ -278,9 +237,4 @@ def result_generator(): yield record # should replace results later by a good identifier - yield FormatDateStream( - "results", - result_generator(), - keys=["Date"], - date_format=self.kwargs.get("date_format"), - ) + yield FormatDateStream("results", result_generator(), keys=["Date"], date_format=self.kwargs.get("date_format")) From bff5642c59756d68d9650c912cd375958cc243d6 Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Tue, 1 Sep 2020 16:27:13 +0200 Subject: [PATCH 24/54] update README --- nck/readers/README.md | 2 +- nck/readers/dbm_reader.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nck/readers/README.md b/nck/readers/README.md index 773d038f..0ac7d9d9 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -383,7 +383,7 @@ Didn't work? See [troubleshooting](#troubleshooting) section. 
|`--dcm-start-date`|Start date of the period to request (format: YYYY-MM-DD)| |`--dcm-end-date`|End date of the period to request (format: YYYY-MM-DD)| -### Google Display & Video 360 Reader +### Google DoubleClick Manager Reader (DBM) #### Source API diff --git a/nck/readers/dbm_reader.py b/nck/readers/dbm_reader.py index 27196e23..33f28cf6 100644 --- a/nck/readers/dbm_reader.py +++ b/nck/readers/dbm_reader.py @@ -114,7 +114,7 @@ def get_query(self, query_id): if query_id: return self._client.queries().getquery(queryId=query_id).execute() else: - raise ClickException(f"Please provide a 'query_id' in order to find your query") + raise ClickException("Please provide a 'query_id' in order to find your query") def get_existing_query(self): query_id = self.kwargs.get("query_id", None) From 4fdb3ffc1406ff85fffa3d90a0a86d29e9703227 Mon Sep 17 00:00:00 2001 From: benoitgoujon Date: Tue, 1 Sep 2020 18:26:03 +0200 Subject: [PATCH 25/54] fix: remove commented code and unused variables --- setup.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/setup.py b/setup.py index 07d6d7f9..95134467 100644 --- a/setup.py +++ b/setup.py @@ -19,8 +19,6 @@ from setuptools import setup, find_packages -with open('README.md') as readme_file: - readme = readme_file.read() with open('requirements.txt') as requirements_file: requirements = [el.strip() for el in requirements_file.readlines()] @@ -47,9 +45,7 @@ ], }, install_requires=requirements, - # long_description=readme + '\n\n' + history, include_package_data=True, - # keywords='nautilus_connectors', name='nck', packages=find_packages(), setup_requires=setup_requirements, From ec6b3ce4999abc769261264f90e98e96497f0cb8 Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Tue, 1 Sep 2020 18:52:26 +0200 Subject: [PATCH 26/54] wait for url in query_infos (weird API behavior) --- nck/readers/dbm_reader.py | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/nck/readers/dbm_reader.py b/nck/readers/dbm_reader.py 
index 33f28cf6..b8eba21d 100644 --- a/nck/readers/dbm_reader.py +++ b/nck/readers/dbm_reader.py @@ -49,7 +49,7 @@ @click.option("--dbm-client-secret", required=True) @click.option("--dbm-query-metric", multiple=True) @click.option("--dbm-query-dimension", multiple=True) -@click.option("--dbm-request-type", type=click.Choice(POSSIBLE_REQUEST_TYPES)) +@click.option("--dbm-request-type", type=click.Choice(POSSIBLE_REQUEST_TYPES), required=True) @click.option("--dbm-query-id") @click.option("--dbm-query-title") @click.option("--dbm-query-frequency", default="ONE_TIME") @@ -65,7 +65,7 @@ "If this option is set to True, this range will be added." ), ) -@click.option("--dbm-filter", type=click.Tuple([str, int]), multiple=True) +@click.option("--dbm-filter", type=click.Tuple([str, str]), multiple=True) @click.option("--dbm-file-type", multiple=True) @click.option( "--dbm-date-format", @@ -158,8 +158,11 @@ def create_and_get_query(self): def _wait_for_query(self, query_id): logging.info("waiting for query of id : {} to complete running".format(query_id)) query_infos = self.get_query(query_id) - if query_infos["metadata"]["running"]: - raise Exception("Query still running.") + if query_infos["metadata"]["running"] or ( + "googleCloudStoragePathForLatestReport" not in query_infos["metadata"].keys() + and "googleDrivePathForLatestReport" not in query_infos["metadata"].keys() + ): + raise ClickException("Query still running.") else: return query_infos @@ -171,7 +174,7 @@ def get_query_report_url(self, existing_query=True): query_id = query_infos["queryId"] query_infos = self._wait_for_query(query_id) - if query_infos["metadata"]["googleCloudStoragePathForLatestReport"]: + if query_infos["metadata"].get("googleCloudStoragePathForLatestReport", None): url = query_infos["metadata"]["googleCloudStoragePathForLatestReport"] else: url = query_infos["metadata"]["googleDrivePathForLatestReport"] @@ -230,7 +233,7 @@ def read(self): elif request_type == "lineitems_objects": data = 
self.get_lineitems_objects() else: - raise Exception("Unknown request type") + raise ClickException("Unknown request type") def result_generator(): for record in data: From 8554f946a2816a1dc6ba30af69a269cffb575b43 Mon Sep 17 00:00:00 2001 From: Pierre Hay Date: Wed, 2 Sep 2020 12:51:11 +0200 Subject: [PATCH 27/54] S3 write : log the path where the file is written to. --- nck/writers/local_writer.py | 2 +- nck/writers/s3_writer.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/nck/writers/local_writer.py b/nck/writers/local_writer.py index 14959fab..95f64dbb 100644 --- a/nck/writers/local_writer.py +++ b/nck/writers/local_writer.py @@ -36,7 +36,7 @@ def __init__(self, local_directory): def write(self, stream): """ - Write file to console, mainly used for debugging + Write file to disk at location given as parameter. """ path = os.path.join(self._local_directory, stream.name) diff --git a/nck/writers/s3_writer.py b/nck/writers/s3_writer.py index 74e6017f..49fad683 100644 --- a/nck/writers/s3_writer.py +++ b/nck/writers/s3_writer.py @@ -55,7 +55,7 @@ def __init__( @retry def write(self, stream): - logging.info("Writing file to S3") + logging.info("Start writing file to S3 ...") bucket = self._s3_resource.Bucket(self._bucket_name) if bucket not in self._s3_resource.buckets.all(): @@ -86,5 +86,5 @@ def write(self, stream): Params={"Bucket": self._bucket_name, "Key": stream.name}, ExpiresIn=3600, ) - + logging.info(f"file written at location {url_file}") return url_file, bucket From db2aebf8e338023cc4ad8dbe54cc2a20ae69da8c Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Wed, 2 Sep 2020 14:51:54 +0200 Subject: [PATCH 28/54] change exceptions --- nck/readers/dbm_reader.py | 4 +--- nck/utils/exceptions.py | 6 +++++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/nck/readers/dbm_reader.py b/nck/readers/dbm_reader.py index b8eba21d..a1d94cfc 100644 --- a/nck/readers/dbm_reader.py +++ b/nck/readers/dbm_reader.py @@ -162,7 +162,7 @@ def 
_wait_for_query(self, query_id): "googleCloudStoragePathForLatestReport" not in query_infos["metadata"].keys() and "googleDrivePathForLatestReport" not in query_infos["metadata"].keys() ): - raise ClickException("Query still running.") + raise Exception("Query still running.") else: return query_infos @@ -232,8 +232,6 @@ def read(self): data = self.list_query_reports() elif request_type == "lineitems_objects": data = self.get_lineitems_objects() - else: - raise ClickException("Unknown request type") def result_generator(): for record in data: diff --git a/nck/utils/exceptions.py b/nck/utils/exceptions.py index bd15a654..495764d2 100644 --- a/nck/utils/exceptions.py +++ b/nck/utils/exceptions.py @@ -15,11 +15,15 @@ # You should have received a copy of the GNU Lesser General Public License # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + class RetryTimeoutError(Exception): - """Raised when a query exceeds it's time limit treshold.""" + """Raised when a query exceeds it's time limit threshold.""" + pass class SdfOperationError(Exception): """Raised when a sdf operation has failed.""" + pass From 454168124d470299089e5557c148c8a622a5eeeb Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Wed, 2 Sep 2020 15:50:45 +0200 Subject: [PATCH 29/54] fix get report url --- nck/readers/dbm_reader.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/nck/readers/dbm_reader.py b/nck/readers/dbm_reader.py index a1d94cfc..8995bc6c 100644 --- a/nck/readers/dbm_reader.py +++ b/nck/readers/dbm_reader.py @@ -159,8 +159,8 @@ def _wait_for_query(self, query_id): logging.info("waiting for query of id : {} to complete running".format(query_id)) query_infos = self.get_query(query_id) if query_infos["metadata"]["running"] or ( - "googleCloudStoragePathForLatestReport" not in query_infos["metadata"].keys() - and "googleDrivePathForLatestReport" not in 
query_infos["metadata"].keys() + "googleCloudStoragePathForLatestReport" not in query_infos["metadata"] + and "googleDrivePathForLatestReport" not in query_infos["metadata"] ): raise Exception("Query still running.") else: @@ -174,7 +174,10 @@ def get_query_report_url(self, existing_query=True): query_id = query_infos["queryId"] query_infos = self._wait_for_query(query_id) - if query_infos["metadata"].get("googleCloudStoragePathForLatestReport", None): + if ( + "googleCloudStoragePathForLatestReport" in query_infos["metadata"] + and len(query_infos["metadata"]["googleCloudStoragePathForLatestReport"]) > 0 + ): url = query_infos["metadata"]["googleCloudStoragePathForLatestReport"] else: url = query_infos["metadata"]["googleDrivePathForLatestReport"] From 66a3419ad0a664578373a1916d586ec734c2bec2 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Wed, 2 Sep 2020 18:59:00 +0200 Subject: [PATCH 30/54] clean: creating specific exceptions --- nck/helpers/ttd_helper.py | 13 +++++++++++++ nck/readers/ttd_reader.py | 8 +++++--- 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/nck/helpers/ttd_helper.py b/nck/helpers/ttd_helper.py index 4fca054e..ec933719 100644 --- a/nck/helpers/ttd_helper.py +++ b/nck/helpers/ttd_helper.py @@ -12,6 +12,7 @@ import requests from datetime import datetime +import logging API_HOST = "https://api.thetradedesk.com/v3" @@ -44,6 +45,18 @@ BQ_DATEFORMAT = "%Y-%m-%d" +class ReportTemplateNotFoundError(Exception): + def __init__(self, message): + super().__init__(message) + logging.error(message) + + +class ReportScheduleNotReadyError(Exception): + def __init__(self, message): + super().__init__(message) + logging.error(message) + + def build_headers(login, password): access_token = get_access_token(login, password) return {"Content-Type": "application/json", "TTD-Auth": access_token} diff --git a/nck/readers/ttd_reader.py b/nck/readers/ttd_reader.py index 4e36560e..eee75f92 100644 --- a/nck/readers/ttd_reader.py +++ 
b/nck/readers/ttd_reader.py @@ -26,6 +26,8 @@ API_ENDPOINTS, DEFAULT_REPORT_SCHEDULE_ARGS, DEFAULT_PAGING_ARGS, + ReportTemplateNotFoundError, + ReportScheduleNotReadyError, build_headers, format_date, ) @@ -113,11 +115,11 @@ def get_report_template_id(self): payload = {"NameContains": self.report_template_name, **DEFAULT_PAGING_ARGS} json_response = self.make_api_call(method, endpoint, payload) if json_response["ResultCount"] == 0: - raise Exception( + raise ReportTemplateNotFoundError( f"No existing ReportTemplate match '{self.report_template_name}'" ) if json_response["ResultCount"] > 1: - raise Exception( + raise ReportTemplateNotFoundError( f"""'{self.report_template_name}' match more than one ReportTemplate. Please specify the exact name of the ReportTemplate you wish to retrieve.""" ) @@ -146,7 +148,7 @@ def create_report_schedule(self): def _wait_for_download_url(self): report_execution_details = self.get_report_execution_details() if report_execution_details["ReportExecutionState"] == "Pending": - raise Exception( + raise ReportScheduleNotReadyError( f"ReportSchedule '{self.report_schedule_id}' is still running." ) else: From 9c8ba8f34650f99799ad0c840d0df4dbb1a9149b Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Wed, 2 Sep 2020 19:15:15 +0200 Subject: [PATCH 31/54] enh: adding a CLI option to choose between JSONStream and NormalizedJSONStream --- nck/readers/README.md | 1 + nck/readers/ttd_reader.py | 21 ++++++++++++++++++--- 2 files changed, 19 insertions(+), 3 deletions(-) diff --git a/nck/readers/README.md b/nck/readers/README.md index 056a9919..97315e7f 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -557,6 +557,7 @@ Didn't work? See [troubleshooting](#troubleshooting) section. 
|`--ttd-report-schedule-name`|Name of the Report Schedule to create| |`--ttd-start-date`|Start date of the period to request (format: YYYY-MM-DD)| |`--ttd-end-date`|End date of the period to request (format: YYYY-MM-DD)| +|`--ttd-normalize-stream`|If set to True, yields a NormalizedJSONStream (spaces and special characters replaced by '_' in field names, which is useful for BigQuery). Else (*default*), yields a standard JSONStream.| If you need any further information, the documentation of The Trade Desk API can be found [here](https://api.thetradedesk.com/v3/portal/api/doc/ApiOverview). diff --git a/nck/readers/ttd_reader.py b/nck/readers/ttd_reader.py index eee75f92..c24b2ab3 100644 --- a/nck/readers/ttd_reader.py +++ b/nck/readers/ttd_reader.py @@ -20,6 +20,7 @@ from nck.utils.args import extract_args from nck.commands.command import processor from nck.readers.reader import Reader +from nck.streams.json_stream import JSONStream from nck.streams.normalized_json_stream import NormalizedJSONStream from nck.helpers.ttd_helper import ( API_HOST, @@ -66,6 +67,13 @@ type=click.DateTime(), help="End date of the period to request (format: YYYY-MM-DD)", ) +@click.option( + "--ttd-normalize-stream", + type=click.BOOL, + default=False, + help="If set to True, yields a NormalizedJSONStream (spaces and special " + "characters replaced by '_' in field names, which is useful for BigQuery)", +) @processor("ttd_login", "ttd_password") def the_trade_desk(**kwargs): return TheTradeDeskReader(**extract_args("ttd_", kwargs)) @@ -81,6 +89,7 @@ def __init__( report_schedule_name, start_date, end_date, + normalize_stream ): self.headers = build_headers(login, password) self.advertiser_ids = list(advertiser_id) @@ -89,6 +98,7 @@ def __init__( self.start_date = start_date # Report end date is exclusive: to become inclusive, it should be incremented by 1 day self.end_date = end_date + timedelta(days=1) + self.normalize_stream = normalize_stream self.validate_dates() @@ -195,8 +205,13 @@ 
def result_generator(): k: format_date(v) if k == "Date" else v for k, v in record.items() } - yield NormalizedJSONStream( - "results_" + "_".join(self.advertiser_ids), result_generator() - ) + if self.normalize_stream: + yield NormalizedJSONStream( + "results_" + "_".join(self.advertiser_ids), result_generator() + ) + else: + yield JSONStream( + "results_" + "_".join(self.advertiser_ids), result_generator() + ) self.delete_report_schedule() From 923d72096d0537838870ffe262865cb73d8a76da Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Wed, 2 Sep 2020 23:21:53 +0200 Subject: [PATCH 32/54] clean: moving auth logic from helper to reader --- nck/helpers/ttd_helper.py | 23 +---------------------- nck/readers/ttd_reader.py | 25 ++++++++++++++++++++++--- 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/nck/helpers/ttd_helper.py b/nck/helpers/ttd_helper.py index ec933719..0a0dbea2 100644 --- a/nck/helpers/ttd_helper.py +++ b/nck/helpers/ttd_helper.py @@ -10,9 +10,8 @@ # along with this program; if not, write to the Free Software Foundation, # Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
-import requests -from datetime import datetime import logging +from datetime import datetime API_HOST = "https://api.thetradedesk.com/v3" @@ -57,26 +56,6 @@ def __init__(self, message): logging.error(message) -def build_headers(login, password): - access_token = get_access_token(login, password) - return {"Content-Type": "application/json", "TTD-Auth": access_token} - - -def get_access_token(login, password): - url = f"{API_HOST}/authentication" - headers = {"Content-Type": "application/json"} - payload = { - "Login": login, - "Password": password, - "TokenExpirationInMinutes": 1440, - } - response = requests.post(url=url, headers=headers, json=payload) - if response.ok: - return response.json()["Token"] - else: - response.raise_for_status() - - def format_date(date_string): """ Input: "2020-01-01T00:00:00" diff --git a/nck/readers/ttd_reader.py b/nck/readers/ttd_reader.py index c24b2ab3..201b0032 100644 --- a/nck/readers/ttd_reader.py +++ b/nck/readers/ttd_reader.py @@ -29,7 +29,6 @@ DEFAULT_PAGING_ARGS, ReportTemplateNotFoundError, ReportScheduleNotReadyError, - build_headers, format_date, ) from nck.utils.text import get_report_generator_from_flat_file @@ -72,7 +71,8 @@ type=click.BOOL, default=False, help="If set to True, yields a NormalizedJSONStream (spaces and special " - "characters replaced by '_' in field names, which is useful for BigQuery)", + "characters replaced by '_' in field names, which is useful for BigQuery). " + "Else, yields a standard JSONStream.", ) @processor("ttd_login", "ttd_password") def the_trade_desk(**kwargs): @@ -91,7 +91,8 @@ def __init__( end_date, normalize_stream ): - self.headers = build_headers(login, password) + self.login = login + self.password = password self.advertiser_ids = list(advertiser_id) self.report_template_name = report_template_name self.report_schedule_name = report_schedule_name @@ -108,6 +109,23 @@ def validate_dates(self): "Report end date should be equal or ulterior to report start date." 
) + def get_access_token(self): + url = f"{API_HOST}/authentication" + headers = {"Content-Type": "application/json"} + payload = { + "Login": self.login, + "Password": self.password, + "TokenExpirationInMinutes": 1440, + } + response = requests.post(url=url, headers=headers, json=payload) + if response.ok: + return response.json()["Token"] + else: + response.raise_for_status() + + def build_headers(self): + self.headers = {"Content-Type": "application/json", "TTD-Auth": self.get_access_token()} + def make_api_call(self, method, endpoint, payload={}): url = f"{API_HOST}/{endpoint}" response = requests.request( @@ -194,6 +212,7 @@ def delete_report_schedule(self): self.make_api_call(method, f"{endpoint}/{self.report_schedule_id}") def read(self): + self.build_headers() self.get_report_template_id() self.create_report_schedule() self._wait_for_download_url() From f479726c8ddcc223a3af018b626fcfebeb6de4b7 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Wed, 2 Sep 2020 23:27:59 +0200 Subject: [PATCH 33/54] clean: adding '_' before helper methods --- nck/readers/ttd_reader.py | 42 +++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/nck/readers/ttd_reader.py b/nck/readers/ttd_reader.py index 201b0032..6f1de4be 100644 --- a/nck/readers/ttd_reader.py +++ b/nck/readers/ttd_reader.py @@ -101,15 +101,15 @@ def __init__( self.end_date = end_date + timedelta(days=1) self.normalize_stream = normalize_stream - self.validate_dates() + self._validate_dates() - def validate_dates(self): + def _validate_dates(self): if self.end_date - timedelta(days=1) < self.start_date: raise ClickException( "Report end date should be equal or ulterior to report start date." 
) - def get_access_token(self): + def _get_access_token(self): url = f"{API_HOST}/authentication" headers = {"Content-Type": "application/json"} payload = { @@ -123,10 +123,10 @@ def get_access_token(self): else: response.raise_for_status() - def build_headers(self): - self.headers = {"Content-Type": "application/json", "TTD-Auth": self.get_access_token()} + def _build_headers(self): + self.headers = {"Content-Type": "application/json", "TTD-Auth": self._get_access_token()} - def make_api_call(self, method, endpoint, payload={}): + def _make_api_call(self, method, endpoint, payload={}): url = f"{API_HOST}/{endpoint}" response = requests.request( method=method, url=url, headers=self.headers, json=payload @@ -137,11 +137,11 @@ def make_api_call(self, method, endpoint, payload={}): else: response.raise_for_status() - def get_report_template_id(self): + def _get_report_template_id(self): logging.info(f"Collecting ReportTemplateId of '{self.report_template_name}'") method, endpoint = API_ENDPOINTS["get_report_template_id"] payload = {"NameContains": self.report_template_name, **DEFAULT_PAGING_ARGS} - json_response = self.make_api_call(method, endpoint, payload) + json_response = self._make_api_call(method, endpoint, payload) if json_response["ResultCount"] == 0: raise ReportTemplateNotFoundError( f"No existing ReportTemplate match '{self.report_template_name}'" @@ -155,7 +155,7 @@ def get_report_template_id(self): self.report_template_id = json_response["Result"][0]["ReportTemplateId"] logging.info(f"Retrieved ReportTemplateId: {self.report_template_id}") - def create_report_schedule(self): + def _create_report_schedule(self): method, endpoint = API_ENDPOINTS["create_report_schedule"] payload = { "ReportScheduleName": self.report_schedule_name, @@ -166,7 +166,7 @@ def create_report_schedule(self): **DEFAULT_REPORT_SCHEDULE_ARGS, } logging.info(f"Creating ReportSchedule: {payload}") - json_response = self.make_api_call(method, endpoint, payload) + json_response = 
self._make_api_call(method, endpoint, payload) self.report_schedule_id = json_response["ReportScheduleId"] @retry( @@ -174,7 +174,7 @@ def create_report_schedule(self): stop=stop_after_delay(36000), ) def _wait_for_download_url(self): - report_execution_details = self.get_report_execution_details() + report_execution_details = self._get_report_execution_details() if report_execution_details["ReportExecutionState"] == "Pending": raise ReportScheduleNotReadyError( f"ReportSchedule '{self.report_schedule_id}' is still running." @@ -189,34 +189,34 @@ def _wait_for_download_url(self): f"ReportScheduleId '{self.report_schedule_id}' is ready. DownloadURL: {self.download_url}" ) - def get_report_execution_details(self): + def _get_report_execution_details(self): method, endpoint = API_ENDPOINTS["get_report_execution_details"] payload = { "AdvertiserIds": self.advertiser_ids, "ReportScheduleIds": [self.report_schedule_id], **DEFAULT_PAGING_ARGS, } - json_response = self.make_api_call(method, endpoint, payload) + json_response = self._make_api_call(method, endpoint, payload) # As the ReportScheduleId that we provided as a payload is globally unique, # the API response will include only one Result (so we can get index "[0]") report_execution_details = json_response["Result"][0] return report_execution_details - def download_report(self): + def _download_report(self): report = requests.get(url=self.download_url, headers=self.headers, stream=True) return get_report_generator_from_flat_file(report.iter_lines()) - def delete_report_schedule(self): + def _delete_report_schedule(self): logging.info(f"Deleting ReportScheduleId '{self.report_schedule_id}'") method, endpoint = API_ENDPOINTS["delete_report_schedule"] - self.make_api_call(method, f"{endpoint}/{self.report_schedule_id}") + self._make_api_call(method, f"{endpoint}/{self.report_schedule_id}") def read(self): - self.build_headers() - self.get_report_template_id() - self.create_report_schedule() + self._build_headers() + 
self._get_report_template_id() + self._create_report_schedule() self._wait_for_download_url() - data = self.download_report() + data = self._download_report() def result_generator(): for record in data: @@ -233,4 +233,4 @@ def result_generator(): "results_" + "_".join(self.advertiser_ids), result_generator() ) - self.delete_report_schedule() + self._delete_report_schedule() From f7057d95d070ab0543289a231bcd6fd2948bce28 Mon Sep 17 00:00:00 2001 From: gabrielleberanger Date: Thu, 3 Sep 2020 09:23:28 +0200 Subject: [PATCH 34/54] fix: fixing tests + slightly modifying build_headers logic --- nck/readers/ttd_reader.py | 2 +- tests/readers/test_ttd.py | 86 +++++++++++++++++++++++++++++---------- 2 files changed, 66 insertions(+), 22 deletions(-) diff --git a/nck/readers/ttd_reader.py b/nck/readers/ttd_reader.py index 6f1de4be..fad74013 100644 --- a/nck/readers/ttd_reader.py +++ b/nck/readers/ttd_reader.py @@ -93,6 +93,7 @@ def __init__( ): self.login = login self.password = password + self._build_headers() self.advertiser_ids = list(advertiser_id) self.report_template_name = report_template_name self.report_schedule_name = report_schedule_name @@ -212,7 +213,6 @@ def _delete_report_schedule(self): self._make_api_call(method, f"{endpoint}/{self.report_schedule_id}") def read(self): - self._build_headers() self._get_report_template_id() self._create_report_schedule() self._wait_for_download_url() diff --git a/tests/readers/test_ttd.py b/tests/readers/test_ttd.py index 94764832..93892ac3 100644 --- a/tests/readers/test_ttd.py +++ b/tests/readers/test_ttd.py @@ -33,9 +33,10 @@ class TheTradeDeskReaderTest(TestCase): "report_schedule_name": "adgroup_performance_schedule", "start_date": datetime(2020, 1, 1), "end_date": datetime(2020, 3, 1), + "normalize_stream": False } - @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) + @mock.patch("nck.readers.ttd_reader.TheTradeDeskReader._build_headers", return_value={}) def test_validate_dates(self, 
mock_build_headers): temp_kwargs = self.kwargs.copy() params = {"start_date": datetime(2020, 1, 3), "end_date": datetime(2020, 1, 1)} @@ -43,9 +44,9 @@ def test_validate_dates(self, mock_build_headers): with self.assertRaises(ClickException): TheTradeDeskReader(**temp_kwargs) - @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) + @mock.patch("nck.readers.ttd_reader.TheTradeDeskReader._build_headers", return_value={}) @mock.patch( - "nck.readers.ttd_reader.TheTradeDeskReader.make_api_call", + "nck.readers.ttd_reader.TheTradeDeskReader._make_api_call", return_value={ "Result": [ { @@ -63,12 +64,12 @@ def test_get_report_template_id_if_exactly_1_match( self, mock_build_headers, mock_api_call ): reader = TheTradeDeskReader(**self.kwargs) - reader.get_report_template_id() + reader._get_report_template_id() self.assertEqual(reader.report_template_id, 1234) - @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) + @mock.patch("nck.readers.ttd_reader.TheTradeDeskReader._build_headers", return_value={}) @mock.patch( - "nck.readers.ttd_reader.TheTradeDeskReader.make_api_call", + "nck.readers.ttd_reader.TheTradeDeskReader._make_api_call", return_value={ "Result": [ { @@ -93,22 +94,22 @@ def test_get_report_template_id_if_more_than_1_match( self, mock_build_headers, mock_api_call ): with self.assertRaises(Exception): - TheTradeDeskReader(**self.kwargs).get_report_template_id() + TheTradeDeskReader(**self.kwargs)._get_report_template_id() - @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) + @mock.patch("nck.readers.ttd_reader.TheTradeDeskReader._build_headers", return_value={}) @mock.patch( - "nck.readers.ttd_reader.TheTradeDeskReader.make_api_call", + "nck.readers.ttd_reader.TheTradeDeskReader._make_api_call", return_value={"Result": [], "ResultCount": 0}, ) def test_get_report_template_id_if_no_match( self, mock_build_headers, mock_api_call ): with self.assertRaises(Exception): - 
TheTradeDeskReader(**self.kwargs).get_report_template_id() + TheTradeDeskReader(**self.kwargs)._get_report_template_id() - @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) + @mock.patch("nck.readers.ttd_reader.TheTradeDeskReader._build_headers", return_value={}) @mock.patch( - "nck.readers.ttd_reader.TheTradeDeskReader.make_api_call", + "nck.readers.ttd_reader.TheTradeDeskReader._make_api_call", return_value={ "ReportScheduleId": 5678, "ReportScheduleName": "adgroup_performance_schedule", @@ -117,13 +118,13 @@ def test_get_report_template_id_if_no_match( def test_create_report_schedule(self, mock_build_headers, mock_api_call): reader = TheTradeDeskReader(**self.kwargs) reader.report_template_id = 1234 - reader.create_report_schedule() + reader._create_report_schedule() self.assertEqual(reader.report_schedule_id, 5678) - @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) + @mock.patch("nck.readers.ttd_reader.TheTradeDeskReader._build_headers", return_value={}) @mock.patch("tenacity.BaseRetrying.wait", side_effect=lambda *args, **kwargs: 0) @mock.patch( - "nck.readers.ttd_reader.TheTradeDeskReader.make_api_call", + "nck.readers.ttd_reader.TheTradeDeskReader._make_api_call", side_effect=[ { "Result": [ @@ -157,13 +158,54 @@ def test_wait_for_download_url(self, mock_build_headers, mock_retry, mock_api_ca reader._wait_for_download_url() self.assertEqual(reader.download_url, "https://download.url") - @mock.patch("nck.readers.ttd_reader.build_headers", return_value={}) + @mock.patch("nck.readers.ttd_reader.TheTradeDeskReader._build_headers", return_value={}) @mock.patch("tenacity.BaseRetrying.wait", side_effect=lambda *args, **kwargs: 0) - @mock.patch.object(TheTradeDeskReader, "get_report_template_id", lambda *args: None) - @mock.patch.object(TheTradeDeskReader, "create_report_schedule", lambda *args: None) + @mock.patch.object(TheTradeDeskReader, "_get_report_template_id", lambda *args: None) + @mock.patch.object(TheTradeDeskReader, 
"_create_report_schedule", lambda *args: None) @mock.patch.object(TheTradeDeskReader, "_wait_for_download_url", lambda *args: None) @mock.patch( - "nck.readers.ttd_reader.TheTradeDeskReader.download_report", + "nck.readers.ttd_reader.TheTradeDeskReader._download_report", + return_value=iter( + [ + { + "Date": "2020-01-01T00:00:00", + "Advertiser ID": "XXXXX", + "Impressions": 10 + }, + { + "Date": "2020-02-01T00:00:00", + "Advertiser ID": "XXXXX", + "Impressions": 11 + }, + { + "Date": "2020-02-03T00:00:00", + "Advertiser ID": "XXXXX", + "Impressions": 12 + }, + ] + ), + ) + def test_read_if_normalize_stream_is_False(self, mock_build_headers, mock_retry, mock_download_report): + reader = TheTradeDeskReader(**self.kwargs) + reader.report_template_id = 1234 + reader.report_schedule_id = 5678 + reader.download_url = "https://download.url" + output = next(reader.read()) + expected = [ + {"Date": "2020-01-01", "Advertiser ID": "XXXXX", "Impressions": 10}, + {"Date": "2020-02-01", "Advertiser ID": "XXXXX", "Impressions": 11}, + {"Date": "2020-02-03", "Advertiser ID": "XXXXX", "Impressions": 12}, + ] + for output_record, expected_record in zip(output.readlines(), iter(expected)): + self.assertEqual(output_record, expected_record) + + @mock.patch("nck.readers.ttd_reader.TheTradeDeskReader._build_headers", return_value={}) + @mock.patch("tenacity.BaseRetrying.wait", side_effect=lambda *args, **kwargs: 0) + @mock.patch.object(TheTradeDeskReader, "_get_report_template_id", lambda *args: None) + @mock.patch.object(TheTradeDeskReader, "_create_report_schedule", lambda *args: None) + @mock.patch.object(TheTradeDeskReader, "_wait_for_download_url", lambda *args: None) + @mock.patch( + "nck.readers.ttd_reader.TheTradeDeskReader._download_report", return_value=iter( [ { @@ -184,8 +226,10 @@ def test_wait_for_download_url(self, mock_build_headers, mock_retry, mock_api_ca ] ), ) - def test_read(self, mock_build_headers, mock_retry, mock_download_report): - reader = 
TheTradeDeskReader(**self.kwargs) + def test_read_if_normalize_stream_is_True(self, mock_build_headers, mock_retry, mock_download_report): + temp_kwargs = self.kwargs.copy() + temp_kwargs.update({"normalize_stream": True}) + reader = TheTradeDeskReader(**temp_kwargs) reader.report_template_id = 1234 reader.report_schedule_id = 5678 reader.download_url = "https://download.url" From 3af5ed748e186b75dfbd821be0d46b47885530e4 Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Thu, 10 Sep 2020 16:12:59 +0200 Subject: [PATCH 35/54] remove totals --- nck/readers/dbm_reader.py | 72 +++++++++++++-------------------------- 1 file changed, 24 insertions(+), 48 deletions(-) diff --git a/nck/readers/dbm_reader.py b/nck/readers/dbm_reader.py index 8e1b28f3..36129b10 100644 --- a/nck/readers/dbm_reader.py +++ b/nck/readers/dbm_reader.py @@ -32,7 +32,7 @@ from nck.utils.args import extract_args from nck.streams.format_date_stream import FormatDateStream -from nck.utils.text import get_report_generator_from_flat_file +from nck.utils.text import get_report_generator_from_flat_file, skip_last from nck.utils.date_handler import get_date_start_and_date_stop_from_range from nck.helpers.dbm_helper import POSSIBLE_REQUEST_TYPES @@ -127,6 +127,7 @@ def get_existing_query(self): def get_query_body(self): body_q = { + "kind": "doubleclickbidmanager#query", "metadata": { "format": "CSV", "title": self.kwargs.get("query_title", "NO_TITLE_GIVEN"), @@ -134,19 +135,13 @@ def get_query_body(self): }, "params": { "type": self.kwargs.get("query_param_type", "TYPE_TRUEVIEW"), - "groupBys": self.kwargs.get("query_dimension"), - "metrics": self.kwargs.get("query_metric"), - "filters": [ - {"type": filt[0], "value": str(filt[1])} - for filt in self.kwargs.get("filter") - ], + "groupBys": list(self.kwargs.get("query_dimension", [])), + "metrics": list(self.kwargs.get("query_metric", [])), + "filters": [{"type": filt[0], "value": str(filt[1])} for filt in self.kwargs.get("filter")], }, "schedule": 
{"frequency": self.kwargs.get("query_frequency", "ONE_TIME")}, } - if ( - self.kwargs.get("start_date") is not None - and self.kwargs.get("end_date") is not None - ): + if self.kwargs.get("start_date") is not None and self.kwargs.get("end_date") is not None: body_q["metadata"]["dataRange"] = "CUSTOM_DATES" body_q["reportDataStartTimeMs"] = 1000 * int( (self.kwargs.get("start_date") + datetime.timedelta(days=1)).timestamp() @@ -194,34 +189,22 @@ def get_query_report_url(self, existing_query=True): def get_query_report(self, existing_query=True): url = self.get_query_report_url(existing_query) report = requests.get(url, stream=True) - if ( - self.kwargs["query_param_type"] == "TYPE_REACH_AND_FREQUENCY" - and self.kwargs["add_date_to_report"] - ): - start, stop = get_date_start_and_date_stop_from_range( - self.kwargs["day_range"] - ) + if self.kwargs["query_param_type"] == "TYPE_REACH_AND_FREQUENCY" and self.kwargs["add_date_to_report"]: + start, stop = get_date_start_and_date_stop_from_range(self.kwargs["day_range"]) column_dict = { "date_start": start.strftime(self.kwargs.get("date_format")), "date_stop": stop.strftime(self.kwargs.get("date_format")), } - return get_report_generator_from_flat_file( - report.iter_lines(), - skip_n_last=1, - add_column=True, - column_dict=column_dict, + report_gen = get_report_generator_from_flat_file( + report.iter_lines(), add_column=True, column_dict=column_dict ) + return skip_last(report_gen, 1) else: - return get_report_generator_from_flat_file( - report.iter_lines(), skip_n_last=1 - ) + report_gen = get_report_generator_from_flat_file(report.iter_lines()) + return skip_last(report_gen, 1) def list_query_reports(self): - reports_infos = ( - self._client.reports() - .listreports(queryId=self.kwargs.get("query_id")) - .execute() - ) + reports_infos = self._client.reports().listreports(queryId=self.kwargs.get("query_id")).execute() for report in reports_infos["reports"]: yield report @@ -229,35 +212,28 @@ def 
get_lineitems_body(self): if len(self.kwargs.get("filter")) > 0: filter_types = [filt[0] for filt in self.kwargs.get("filter")] assert ( - len( - [ - filter_types[0] == filt - for filt in filter_types - if filter_types[0] == filt - ] - ) - == 1 + len([filter_types[0] == filt for filt in filter_types if filter_types[0] == filt]) == 1 ), "Lineitems accept just one filter type, multiple filter types detected" filter_ids = [str(filt[1]) for filt in self.kwargs.get("filter")] - return { - "filterType": filter_types[0], - "filterIds": filter_ids, - "format": "CSV", - "fileSpec": "EWF", - } + return {"filterType": filter_types[0], "filterIds": filter_ids, "format": "CSV", "fileSpec": "EWF"} else: return {} def get_lineitems_objects(self): body_lineitems = self.get_lineitems_body() - response = ( - self._client.lineitems().downloadlineitems(body=body_lineitems).execute() - ) + response = self._client.lineitems().downloadlineitems(body=body_lineitems).execute() lineitems = response["lineItems"] lines = lineitems.split("\n") return get_report_generator_from_flat_file(lines, skip_n_last=1) + @staticmethod + def _remove_totals_record(generator): + current = next(generator) + for i in generator: + yield current + current = i + def read(self): request_type = self.kwargs.get("request_type") if request_type == "existing_query": From 74e9420423d2131522df79a15ba96757b25213e8 Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Thu, 10 Sep 2020 16:20:29 +0200 Subject: [PATCH 36/54] change tests --- tests/readers/test_dbm_reader.py | 75 ++++++++++---------------------- 1 file changed, 24 insertions(+), 51 deletions(-) diff --git a/tests/readers/test_dbm_reader.py b/tests/readers/test_dbm_reader.py index 279d93c8..eb676bc9 100644 --- a/tests/readers/test_dbm_reader.py +++ b/tests/readers/test_dbm_reader.py @@ -23,78 +23,51 @@ class TestDbmReader(unittest.TestCase): - def mock_dbm_reader(self, **kwargs): for param, value in kwargs.items(): setattr(self, param, value) - 
@mock.patch.object(DbmReader, '__init__', mock_dbm_reader) + @mock.patch.object(DbmReader, "__init__", mock_dbm_reader) def test_get_query_body(self): kwargs = {} reader = DbmReader(**kwargs) - reader.kwargs = { - 'filter': [('FILTER_ADVERTISER', 1)] - } + reader.kwargs = {"filter": [("FILTER_ADVERTISER", 1)]} expected_query_body = { - 'metadata': { - 'format': 'CSV', - 'title': 'NO_TITLE_GIVEN', - 'dataRange': 'LAST_7_DAYS' + "kind": "doubleclickbidmanager#query", + "metadata": {"format": "CSV", "title": "NO_TITLE_GIVEN", "dataRange": "LAST_7_DAYS"}, + "params": { + "type": "TYPE_TRUEVIEW", + "groupBys": [], + "metrics": [], + "filters": [{"type": "FILTER_ADVERTISER", "value": "1"}], }, - 'params': { - 'type': 'TYPE_TRUEVIEW', - 'groupBys': None, - 'metrics': None, - 'filters': [ - { - 'type': 'FILTER_ADVERTISER', - 'value': '1' - } - ] - }, - 'schedule': { - 'frequency': 'ONE_TIME' - } + "schedule": {"frequency": "ONE_TIME"}, } self.assertDictEqual(reader.get_query_body(), expected_query_body) - @mock.patch.object(DbmReader, '__init__', mock_dbm_reader) + @mock.patch.object(DbmReader, "__init__", mock_dbm_reader) def test_get_query_body_ms_conversion(self): kwargs = {} reader = DbmReader(**kwargs) reader.kwargs = { - 'filter': [('FILTER_ADVERTISER', 1)], - 'start_date': datetime.datetime( - 2020, 1, 15, tzinfo=datetime.timezone.utc - ), - 'end_date': datetime.datetime( - 2020, 1, 18, tzinfo=datetime.timezone.utc - ) + "filter": [("FILTER_ADVERTISER", 1)], + "start_date": datetime.datetime(2020, 1, 15, tzinfo=datetime.timezone.utc), + "end_date": datetime.datetime(2020, 1, 18, tzinfo=datetime.timezone.utc), } expected_query_body = { - 'metadata': { - 'format': 'CSV', - 'title': 'NO_TITLE_GIVEN', - 'dataRange': 'CUSTOM_DATES' - }, - 'params': { - 'type': 'TYPE_TRUEVIEW', - 'groupBys': None, - 'metrics': None, - 'filters': [ - { - 'type': 'FILTER_ADVERTISER', - 'value': '1' - } - ] - }, - 'schedule': { - 'frequency': 'ONE_TIME' + "kind": 
"doubleclickbidmanager#query", + "metadata": {"format": "CSV", "title": "NO_TITLE_GIVEN", "dataRange": "CUSTOM_DATES"}, + "params": { + "type": "TYPE_TRUEVIEW", + "groupBys": [], + "metrics": [], + "filters": [{"type": "FILTER_ADVERTISER", "value": "1"}], }, - 'reportDataStartTimeMs': 1579132800000, - 'reportDataEndTimeMs': 1579392000000 + "schedule": {"frequency": "ONE_TIME"}, + "reportDataStartTimeMs": 1579132800000, + "reportDataEndTimeMs": 1579392000000, } self.assertDictEqual(reader.get_query_body(), expected_query_body) From 8f215d55cd6c1c8fede02dec3bf8d4cf5d241df9 Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Thu, 10 Sep 2020 18:08:15 +0200 Subject: [PATCH 37/54] remove useless func --- nck/readers/dbm_reader.py | 7 ------- 1 file changed, 7 deletions(-) diff --git a/nck/readers/dbm_reader.py b/nck/readers/dbm_reader.py index 36129b10..cb7aa80a 100644 --- a/nck/readers/dbm_reader.py +++ b/nck/readers/dbm_reader.py @@ -227,13 +227,6 @@ def get_lineitems_objects(self): lines = lineitems.split("\n") return get_report_generator_from_flat_file(lines, skip_n_last=1) - @staticmethod - def _remove_totals_record(generator): - current = next(generator) - for i in generator: - yield current - current = i - def read(self): request_type = self.kwargs.get("request_type") if request_type == "existing_query": From a062715e55cf5540b1683bd4e1b27f73577efe7a Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Mon, 14 Sep 2020 17:29:52 +0200 Subject: [PATCH 38/54] Readability of the gs file is done correctly but there is an error line 64 of the file entrypoint.py when executed : TypeError: 'NoneType' object is not iterable --- nck/readers/gs_reader.py | 126 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 126 insertions(+) create mode 100644 nck/readers/gs_reader.py diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py new file mode 100644 index 00000000..ed23c6fb --- /dev/null +++ b/nck/readers/gs_reader.py @@ -0,0 +1,126 @@ +import gspread +#from 
oauth2client.service_account import ServiceAccountCredentials +from google.oauth2 import service_account +from nck.utils.args import extract_args +from nck.commands.command import processor +from nck.readers.reader import Reader +from google.auth.transport.requests import AuthorizedSession +import click +import json + + +@click.command(name="read_gs") +@click.option("--gs-project_id", default=None, required=True) +@click.option("--gs-private_key_id", required=True)# +@click.option("--gs-private_key_path", required=True)# +@click.option("--gs-client_email", required=True) +@click.option("--gs-client_id",required=True)# +#@click.option("--gs-auth_uri",required=True) +#@click.option("--gs-token-uri", required=True) +#@click.option("--gs-auth_provider", required=True) +@click.option("--gs-client_cert", required=True)# +@click.option("--gs-sheet_name", default=None, required=True) + +@processor("gs_private_key_id", "gs_private_key_path", "gs_client_id","gs_client_cert") +def google_sheets(**kwargs): + return GSheetsReader(**extract_args("gs_", kwargs)) + +class GSheetsReader(Reader): + _scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly',"https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"] + + def __init__( + self, + project_id: str, + private_key_id: str, + private_key_path:str, + client_email: str, + client_id:str, + client_cert:str, + sheet_name:str): + self._sheet_name=sheet_name + private_key_txt=open(private_key_path,'r').read().replace("\\n","\n") + self._keyfile_dict = { + 'type': 'service_account', + 'project_id': project_id, + 'private_key_id': private_key_id, + 'private_key':private_key_txt, #open(private_key_path,'r').read().replace("\\n","\n"), + 'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs', + 'auth_uri': 'https://accounts.google.com/o/oauth2/auth', + 'client_email': client_email, + 'client_id': 
client_id, + 'client_x509_cert_url': client_cert, + 'token_uri': 'https://accounts.google.com/o/oauth2/token', + } + #self._credentials = ServiceAccountCredentials.from_json_keyfile_dict(keyfile_dict=self._keyfile_dict, scopes=self._scopes) + self._credentials= service_account.Credentials.from_service_account_info(info=self._keyfile_dict) + self._scoped_credentials = self._credentials.with_scopes( + ['https://spreadsheets.google.com/feeds', + 'https://www.googleapis.com/auth/drive'] + ) + gc = gspread.Client(auth=self._scoped_credentials) + gc.session = AuthorizedSession(self._scoped_credentials) + + def read(self): + #client = gspread.authorize(self._credentials) + gc = gspread.Client(auth=self._scoped_credentials) + gc.session = AuthorizedSession(self._scoped_credentials) + sheet = gc.open(self._sheet_name).sheet1 + list_of_hashes = sheet.get_all_records() + print(list_of_hashes) + + +# import google.oauth2.credentials +# import gspread +# from nck.utils.args import extract_args +# from nck.commands.command import processor +# from nck.readers.reader import Reader +# import click + +# @click.command(name="read_gs") +# @click.option("--gs-client_id",required=True)# +# @click.option("--gs-client_secret",required=True) +# @click.option("--gs-refresh_token",required=True) +# @click.option("--gs-sheet_name", default=None, required=True) + +# @processor("gs_private_key_id", "gs_private_key_path", "gs_client_id","gs_client_cert") + +# def google_sheets(**kwargs): +# return GSheetsReader(**extract_args("gs_", kwargs)) + +# class GSheetsReader(Reader): +# _scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly',"https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"] + +# def __init__( +# self, +# client_id:str, +# client_secret:str, +# refresh_token:str, +# sheet_name:str): +# self._sheet_name=sheet_name +# 
#private_key_txt=open(private_key_path,'r').read().replace("\\n","\n") +# # self._keyfile_dict = { +# # 'id_token': project_id, +# # 'private_key_id': private_key_id, +# # 'private_key':private_key_txt, +# # 'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs', +# # 'auth_uri': 'https://accounts.google.com/o/oauth2/auth', +# # 'client_email': client_email, +# # 'client_id': client_id, +# # 'client_x509_cert_url': client_cert, +# # 'token_uri': 'https://accounts.google.com/o/oauth2/token', +# # } +# self._keyfile_dict= { +# 'client_id': client_id, +# 'client_secret': client_secret, +# 'refresh_token':refresh_token, +# 'token_uri': 'https://accounts.google.com/o/oauth2/token' +# } +# self._credentials = google.oauth2.credentials.Credentials(token=None,scopes=self._scopes,**self._keyfile_dict) + +# def read(self): +# client = gspread.authorize(self._credentials) +# sheet = client.open(self._sheet_name).sheet1 +# list_of_hashes = sheet.get_all_records() +# print(list_of_hashes) + + From 7b38301ac75bceff25f97ef4e92e0d5e4765cb80 Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Tue, 15 Sep 2020 13:40:48 +0200 Subject: [PATCH 39/54] =?UTF-8?q?Bug=20concernant=20les=20librairies=20cas?= =?UTF-8?q?s=C3=A9es=20corrig=C3=A9,=20bug=20concernant=20le=20reader=20gs?= =?UTF-8?q?=20corrig=C3=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- nck/entrypoint.py | 1 - nck/readers/__init__.py | 2 ++ nck/readers/gs_reader.py | 8 +++++++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/nck/entrypoint.py b/nck/entrypoint.py index 865ebdd9..cb3f6a5a 100644 --- a/nck/entrypoint.py +++ b/nck/entrypoint.py @@ -59,7 +59,6 @@ def run(processors, state_service_name, state_service_host, state_service_port, raise click.BadParameter("You must specify at least one writer") reader = _readers[0] - # A stream should represent a full file! 
for stream in reader.read(): for writer in _writers: diff --git a/nck/readers/__init__.py b/nck/readers/__init__.py index 1cc0b5ea..b177167b 100644 --- a/nck/readers/__init__.py +++ b/nck/readers/__init__.py @@ -38,6 +38,7 @@ from nck.readers.radarly_reader import radarly from nck.readers.yandex_campaign_reader import yandex_campaigns from nck.readers.yandex_statistics_reader import yandex_statistics +from nck.readers.gs_reader import google_sheets readers = [ mysql, @@ -61,6 +62,7 @@ radarly, yandex_campaigns, yandex_statistics, + google_sheets ] diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index ed23c6fb..e5995b98 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -7,6 +7,8 @@ from google.auth.transport.requests import AuthorizedSession import click import json +from nck.streams.normalized_json_stream import NormalizedJSONStream + @click.command(name="read_gs") @@ -66,7 +68,11 @@ def read(self): gc.session = AuthorizedSession(self._scoped_credentials) sheet = gc.open(self._sheet_name).sheet1 list_of_hashes = sheet.get_all_records() - print(list_of_hashes) + def result_generator(): + for record in list_of_hashes: + yield record + + yield NormalizedJSONStream(sheet, result_generator()) # import google.oauth2.credentials From 63ef9727f2488c9332e4bf064082ac88ea1221f2 Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Tue, 15 Sep 2020 14:44:18 +0200 Subject: [PATCH 40/54] Modfis mineurs --- nck/readers/gs_reader.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index e5995b98..915d838f 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -12,16 +12,16 @@ @click.command(name="read_gs") -@click.option("--gs-project_id", default=None, required=True) -@click.option("--gs-private_key_id", required=True)# -@click.option("--gs-private_key_path", required=True)# -@click.option("--gs-client_email", required=True) 
-@click.option("--gs-client_id",required=True)# +@click.option("--gs-project-id", default=None, required=True) +@click.option("--gs-private-key-id", required=True)# +@click.option("--gs-private-key-path", required=True)# +@click.option("--gs-client-email", required=True) +@click.option("--gs-client-id",required=True)# #@click.option("--gs-auth_uri",required=True) #@click.option("--gs-token-uri", required=True) #@click.option("--gs-auth_provider", required=True) -@click.option("--gs-client_cert", required=True)# -@click.option("--gs-sheet_name", default=None, required=True) +@click.option("--gs-client-cert", required=True)# +@click.option("--gs-sheet-name", default=None, required=True) @processor("gs_private_key_id", "gs_private_key_path", "gs_client_id","gs_client_cert") def google_sheets(**kwargs): From e8b8009105cda000bb467f88d8b9046843bdc697 Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Tue, 15 Sep 2020 16:16:12 +0200 Subject: [PATCH 41/54] Got rid of all commented code that is not useful anymore --- nck/readers/gs_reader.py | 75 ++++------------------------------------ 1 file changed, 6 insertions(+), 69 deletions(-) diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index 915d838f..b98a8e5d 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -1,5 +1,4 @@ import gspread -#from oauth2client.service_account import ServiceAccountCredentials from google.oauth2 import service_account from nck.utils.args import extract_args from nck.commands.command import processor @@ -13,14 +12,11 @@ @click.command(name="read_gs") @click.option("--gs-project-id", default=None, required=True) -@click.option("--gs-private-key-id", required=True)# -@click.option("--gs-private-key-path", required=True)# +@click.option("--gs-private-key-id", required=True) +@click.option("--gs-private-key-path", required=True) @click.option("--gs-client-email", required=True) -@click.option("--gs-client-id",required=True)# 
-#@click.option("--gs-auth_uri",required=True) -#@click.option("--gs-token-uri", required=True) -#@click.option("--gs-auth_provider", required=True) -@click.option("--gs-client-cert", required=True)# +@click.option("--gs-client-id",required=True) +@click.option("--gs-client-cert", required=True) @click.option("--gs-sheet-name", default=None, required=True) @processor("gs_private_key_id", "gs_private_key_path", "gs_client_id","gs_client_cert") @@ -45,7 +41,7 @@ def __init__( 'type': 'service_account', 'project_id': project_id, 'private_key_id': private_key_id, - 'private_key':private_key_txt, #open(private_key_path,'r').read().replace("\\n","\n"), + 'private_key':private_key_txt, 'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs', 'auth_uri': 'https://accounts.google.com/o/oauth2/auth', 'client_email': client_email, @@ -53,7 +49,6 @@ def __init__( 'client_x509_cert_url': client_cert, 'token_uri': 'https://accounts.google.com/o/oauth2/token', } - #self._credentials = ServiceAccountCredentials.from_json_keyfile_dict(keyfile_dict=self._keyfile_dict, scopes=self._scopes) self._credentials= service_account.Credentials.from_service_account_info(info=self._keyfile_dict) self._scoped_credentials = self._credentials.with_scopes( ['https://spreadsheets.google.com/feeds', @@ -63,7 +58,6 @@ def __init__( gc.session = AuthorizedSession(self._scoped_credentials) def read(self): - #client = gspread.authorize(self._credentials) gc = gspread.Client(auth=self._scoped_credentials) gc.session = AuthorizedSession(self._scoped_credentials) sheet = gc.open(self._sheet_name).sheet1 @@ -72,61 +66,4 @@ def result_generator(): for record in list_of_hashes: yield record - yield NormalizedJSONStream(sheet, result_generator()) - - -# import google.oauth2.credentials -# import gspread -# from nck.utils.args import extract_args -# from nck.commands.command import processor -# from nck.readers.reader import Reader -# import click - -# @click.command(name="read_gs") -# 
@click.option("--gs-client_id",required=True)# -# @click.option("--gs-client_secret",required=True) -# @click.option("--gs-refresh_token",required=True) -# @click.option("--gs-sheet_name", default=None, required=True) - -# @processor("gs_private_key_id", "gs_private_key_path", "gs_client_id","gs_client_cert") - -# def google_sheets(**kwargs): -# return GSheetsReader(**extract_args("gs_", kwargs)) - -# class GSheetsReader(Reader): -# _scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly',"https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"] - -# def __init__( -# self, -# client_id:str, -# client_secret:str, -# refresh_token:str, -# sheet_name:str): -# self._sheet_name=sheet_name -# #private_key_txt=open(private_key_path,'r').read().replace("\\n","\n") -# # self._keyfile_dict = { -# # 'id_token': project_id, -# # 'private_key_id': private_key_id, -# # 'private_key':private_key_txt, -# # 'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs', -# # 'auth_uri': 'https://accounts.google.com/o/oauth2/auth', -# # 'client_email': client_email, -# # 'client_id': client_id, -# # 'client_x509_cert_url': client_cert, -# # 'token_uri': 'https://accounts.google.com/o/oauth2/token', -# # } -# self._keyfile_dict= { -# 'client_id': client_id, -# 'client_secret': client_secret, -# 'refresh_token':refresh_token, -# 'token_uri': 'https://accounts.google.com/o/oauth2/token' -# } -# self._credentials = google.oauth2.credentials.Credentials(token=None,scopes=self._scopes,**self._keyfile_dict) - -# def read(self): -# client = gspread.authorize(self._credentials) -# sheet = client.open(self._sheet_name).sheet1 -# list_of_hashes = sheet.get_all_records() -# print(list_of_hashes) - - + yield NormalizedJSONStream(sheet, result_generator()) \ No newline at end of file From 435874979ec3e418ac5dfb24d44cc3f02cd0e39d Mon Sep 17 00:00:00 2001 From: 
Ali BELLAMLIH MAMOU Date: Tue, 15 Sep 2020 16:59:45 +0200 Subject: [PATCH 42/54] Fixing build --- nck/readers/gs_reader.py | 70 ++++++++++++++++++++-------------------- 1 file changed, 35 insertions(+), 35 deletions(-) diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index b98a8e5d..e92196c9 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -5,65 +5,65 @@ from nck.readers.reader import Reader from google.auth.transport.requests import AuthorizedSession import click -import json from nck.streams.normalized_json_stream import NormalizedJSONStream - @click.command(name="read_gs") @click.option("--gs-project-id", default=None, required=True) @click.option("--gs-private-key-id", required=True) @click.option("--gs-private-key-path", required=True) @click.option("--gs-client-email", required=True) -@click.option("--gs-client-id",required=True) +@click.option("--gs-client-id", required=True) @click.option("--gs-client-cert", required=True) @click.option("--gs-sheet-name", default=None, required=True) - -@processor("gs_private_key_id", "gs_private_key_path", "gs_client_id","gs_client_cert") +@processor("gs_private_key_id", "gs_private_key_path", "gs_client_id", "gs_client_cert") def google_sheets(**kwargs): return GSheetsReader(**extract_args("gs_", kwargs)) + class GSheetsReader(Reader): - _scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly',"https://spreadsheets.google.com/feeds",'https://www.googleapis.com/auth/spreadsheets',"https://www.googleapis.com/auth/drive.file","https://www.googleapis.com/auth/drive"] - + _scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly', 'https://www.googleapis.com/auth/spreadsheets', + "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"] + def __init__( - self, - project_id: str, - private_key_id: str, - private_key_path:str, - client_email: str, - client_id:str, - client_cert:str, - sheet_name:str): - self._sheet_name=sheet_name - 
private_key_txt=open(private_key_path,'r').read().replace("\\n","\n") + self, + project_id: str, + private_key_id: str, + private_key_path: str, + client_email: str, + client_id: str, + client_cert: str, + sheet_name: str): + self._sheet_name = sheet_name + private_key_txt = open(private_key_path, 'r').read().replace("\\n", "\n") self._keyfile_dict = { - 'type': 'service_account', - 'project_id': project_id, - 'private_key_id': private_key_id, - 'private_key':private_key_txt, - 'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs', - 'auth_uri': 'https://accounts.google.com/o/oauth2/auth', - 'client_email': client_email, - 'client_id': client_id, - 'client_x509_cert_url': client_cert, - 'token_uri': 'https://accounts.google.com/o/oauth2/token', + 'type': 'service_account', + 'project_id': project_id, + 'private_key_id': private_key_id, + 'private_key': private_key_txt, + 'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs', + 'auth_uri': 'https://accounts.google.com/o/oauth2/auth', + 'client_email': client_email, + 'client_id': client_id, + 'client_x509_cert_url': client_cert, + 'token_uri': 'https://accounts.google.com/o/oauth2/token', } - self._credentials= service_account.Credentials.from_service_account_info(info=self._keyfile_dict) + self._credentials = service_account.Credentials.from_service_account_info(info=self._keyfile_dict) self._scoped_credentials = self._credentials.with_scopes( - ['https://spreadsheets.google.com/feeds', - 'https://www.googleapis.com/auth/drive'] + ['https://spreadsheets.google.com/feeds', + 'https://www.googleapis.com/auth/drive'] ) gc = gspread.Client(auth=self._scoped_credentials) gc.session = AuthorizedSession(self._scoped_credentials) - + def read(self): gc = gspread.Client(auth=self._scoped_credentials) - gc.session = AuthorizedSession(self._scoped_credentials) + gc.session = AuthorizedSession(self._scoped_credentials) sheet = gc.open(self._sheet_name).sheet1 list_of_hashes = 
sheet.get_all_records() + def result_generator(): - for record in list_of_hashes: - yield record + for record in list_of_hashes: + yield record - yield NormalizedJSONStream(sheet, result_generator()) \ No newline at end of file + yield NormalizedJSONStream(sheet, result_generator()) From b93cebdc43b2d2ba29ea87a3211a43adbb79641a Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Tue, 15 Sep 2020 17:20:36 +0200 Subject: [PATCH 43/54] test --- nck/readers/gs_reader.py | 50 ++++++++++++++++++++++------------------ 1 file changed, 27 insertions(+), 23 deletions(-) diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index e92196c9..7f3c8d3b 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -22,36 +22,40 @@ def google_sheets(**kwargs): class GSheetsReader(Reader): - _scopes = ['https://www.googleapis.com/auth/spreadsheets.readonly', 'https://www.googleapis.com/auth/spreadsheets', - "https://www.googleapis.com/auth/drive.file", "https://www.googleapis.com/auth/drive"] + _scopes = [ + "https://www.googleapis.com/auth/spreadsheets.readonly", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/drive.file", + "https://www.googleapis.com/auth/drive", + ] def __init__( - self, - project_id: str, - private_key_id: str, - private_key_path: str, - client_email: str, - client_id: str, - client_cert: str, - sheet_name: str): + self, + project_id: str, + private_key_id: str, + private_key_path: str, + client_email: str, + client_id: str, + client_cert: str, + sheet_name: str, + ): self._sheet_name = sheet_name - private_key_txt = open(private_key_path, 'r').read().replace("\\n", "\n") + private_key_txt = open(private_key_path, "r").read().replace("\\n", "\n") self._keyfile_dict = { - 'type': 'service_account', - 'project_id': project_id, - 'private_key_id': private_key_id, - 'private_key': private_key_txt, - 'auth_provider_x509_cert_url': 'https://www.googleapis.com/oauth2/v1/certs', - 'auth_uri': 
'https://accounts.google.com/o/oauth2/auth', - 'client_email': client_email, - 'client_id': client_id, - 'client_x509_cert_url': client_cert, - 'token_uri': 'https://accounts.google.com/o/oauth2/token', + "type": "service_account", + "project_id": project_id, + "private_key_id": private_key_id, + "private_key": private_key_txt, + "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", + "auth_uri": "https://accounts.google.com/o/oauth2/auth", + "client_email": client_email, + "client_id": client_id, + "client_x509_cert_url": client_cert, + "token_uri": "https://accounts.google.com/o/oauth2/token", } self._credentials = service_account.Credentials.from_service_account_info(info=self._keyfile_dict) self._scoped_credentials = self._credentials.with_scopes( - ['https://spreadsheets.google.com/feeds', - 'https://www.googleapis.com/auth/drive'] + ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"] ) gc = gspread.Client(auth=self._scoped_credentials) gc.session = AuthorizedSession(self._scoped_credentials) From 6f17cbc490074240a23593a18210cff3817118dc Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Wed, 16 Sep 2020 10:56:00 +0200 Subject: [PATCH 44/54] Modifying the code to answer the different remarks on the pull request + adding an additional click option to get a specific worksheet page --- .pre-commit-config.yaml | 11 +++++ nck/readers/gs_reader.py | 91 ++++++++++++++++++++++++++++++---------- pyproject.toml | 2 + 3 files changed, 82 insertions(+), 22 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100644 pyproject.toml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..19a3fd3d --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,11 @@ +repos: + - repo: https://github.com/ambv/black + rev: stable + hooks: + - id: black + entry: black + language_version: python3.7 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v1.2.3 + 
hooks: + - id: flake8 diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index 7f3c8d3b..5937a2a1 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -1,21 +1,70 @@ +# GNU Lesser General Public License v3.0 only +# Copyright (C) 2020 Artefact +# licence-information@artefact.com +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3 of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+import click import gspread +from google.auth.transport.requests import AuthorizedSession from google.oauth2 import service_account -from nck.utils.args import extract_args + from nck.commands.command import processor from nck.readers.reader import Reader -from google.auth.transport.requests import AuthorizedSession -import click -from nck.streams.normalized_json_stream import NormalizedJSONStream +from nck.utils.args import extract_args +from nck.streams.json_stream import JSONStream @click.command(name="read_gs") -@click.option("--gs-project-id", default=None, required=True) -@click.option("--gs-private-key-id", required=True) -@click.option("--gs-private-key-path", required=True) -@click.option("--gs-client-email", required=True) -@click.option("--gs-client-id", required=True) -@click.option("--gs-client-cert", required=True) -@click.option("--gs-sheet-name", default=None, required=True) +@click.option( + "--gs-project-id", + required=True, + help="Project ID that is given by Google services once you have \ + created your project in the google cloud console. You can retrieve it in the JSON credential file", +) +@click.option( + "--gs-private-key-id", + required=True, + help="Private key ID given by Google services once you have added credentials \ + to the project. You can retrieve it in the JSON credential file", +) +@click.option( + "--gs-private-key-path", + required=True, + help="The path to the private key that is stored in a txt file. \ + You can retrieve it first in the JSON credential file", +) +@click.option( + "--gs-client-email", + required=True, + help="Client e-mail given by Google services once you have added credentials \ + to the project. You can retrieve it in the JSON credential file", +) +@click.option( + "--gs-client-id", + required=True, + help="Client ID given by Google services once you have added credentials \ + to the project. 
You can retrieve it in the JSON credential file", +) +@click.option( + "--gs-client-cert", + required=True, + help="Client certificate given by Google services once you have added credentials \ + to the project. You can retrieve it in the JSON credential file", +) +@click.option("--gs-sheet-name", required=True, help="The name you have given to your google sheet") +@click.option("--gs-page-number", default=0, type=click.INT, help="The page number you want to access") @processor("gs_private_key_id", "gs_private_key_path", "gs_client_id", "gs_client_cert") def google_sheets(**kwargs): return GSheetsReader(**extract_args("gs_", kwargs)) @@ -38,10 +87,12 @@ def __init__( client_id: str, client_cert: str, sheet_name: str, + page_number: int, ): self._sheet_name = sheet_name + self._page_number = page_number private_key_txt = open(private_key_path, "r").read().replace("\\n", "\n") - self._keyfile_dict = { + keyfile_dict = { "type": "service_account", "project_id": project_id, "private_key_id": private_key_id, @@ -53,21 +104,17 @@ def __init__( "client_x509_cert_url": client_cert, "token_uri": "https://accounts.google.com/o/oauth2/token", } - self._credentials = service_account.Credentials.from_service_account_info(info=self._keyfile_dict) - self._scoped_credentials = self._credentials.with_scopes( - ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"] - ) - gc = gspread.Client(auth=self._scoped_credentials) - gc.session = AuthorizedSession(self._scoped_credentials) + credentials = service_account.Credentials.from_service_account_info(info=keyfile_dict) + scoped_credentials = credentials.with_scopes(self._scopes) + self._gc = gspread.Client(auth=scoped_credentials) + self._gc.session = AuthorizedSession(scoped_credentials) def read(self): - gc = gspread.Client(auth=self._scoped_credentials) - gc.session = AuthorizedSession(self._scoped_credentials) - sheet = gc.open(self._sheet_name).sheet1 + sheet = 
self._gc.open(self._sheet_name).get_worksheet(self._page_number) list_of_hashes = sheet.get_all_records() def result_generator(): for record in list_of_hashes: yield record - yield NormalizedJSONStream(sheet, result_generator()) + yield JSONStream(sheet, result_generator()) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..e34796ec --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[tool.black] +line-length = 120 \ No newline at end of file From a7c57b8125f75714b07afe2daa4a9e203c9a87ab Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Wed, 16 Sep 2020 10:56:00 +0200 Subject: [PATCH 45/54] Modifying the code to answer the different remarks on the pull request + adding an additional click option to get a specific worksheet page --- .pre-commit-config.yaml | 11 +++++ Untitled-1 | 5 +++ nck/readers/gs_reader.py | 91 ++++++++++++++++++++++++++++++---------- pyproject.toml | 2 + 4 files changed, 87 insertions(+), 22 deletions(-) create mode 100644 .pre-commit-config.yaml create mode 100644 Untitled-1 create mode 100644 pyproject.toml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..19a3fd3d --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,11 @@ +repos: + - repo: https://github.com/ambv/black + rev: stable + hooks: + - id: black + entry: black + language_version: python3.7 + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v1.2.3 + hooks: + - id: flake8 diff --git a/Untitled-1 b/Untitled-1 new file mode 100644 index 00000000..429e3cb6 --- /dev/null +++ b/Untitled-1 @@ -0,0 +1,5 @@ +python3 nck/entrypoint.py read_gs --gs-project_id "testsheet-289008" --gs-private_key_id "2b4735a95a2326a2cebe590fb43b8e9974dbf0a8" --gs-private_key_path "/Users/ali.bellamlih-mamou/Desktop/private_key.txt" --gs-client_email "ali-893@testsheet-289008.iam.gserviceaccount.com" --gs-client_id "110148593831883229758" --gs-client_cert 
"https://www.googleapis.com/robot/v1/metadata/x509/ali-893%40testsheet-289008.iam.gserviceaccount.com" --gs-sheet_name "Test/API_Test" + +python3 nck/entrypoint.py read_gs --gs-project-id "testsheet-289008" --gs-private-key-id "2b4735a95a2326a2cebe590fb43b8e9974dbf0a8" --gs-private-key-path "/Users/ali.bellamlih-mamou/Desktop/private_key.txt" --gs-client-email "ali-893@testsheet-289008.iam.gserviceaccount.com" --gs-client-id "110148593831883229758" --gs-client-cert "https://www.googleapis.com/robot/v1/metadata/x509/ali-893%40testsheet-289008.iam.gserviceaccount.com" --gs-sheet-name "API_Test" write_console + +python3 nck/entrypoint.py read_gs --gs-client-id "959629532880-8tkrubhpfsvjvj7fu0abhs8qsg9lbao7.apps.googleusercontent.com" --gs-client-secret "9F-t5k_NrzgVuD77mbf6GPW4" --gs-refresh-token "1//03LZW5YDzro96CgYIARAAGAMSNwF-L9Irfb7AuOMCiuE1Ay6jQW6vT20a91Mg48hgB_5_Fhj3BOl9KiF70GRGsmCl7SJUv459nns" --gs-sheet-name "API_Test" write_console \ No newline at end of file diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index 7f3c8d3b..5937a2a1 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -1,21 +1,70 @@ +# GNU Lesser General Public License v3.0 only +# Copyright (C) 2020 Artefact +# licence-information@artefact.com +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU Lesser General Public +# License as published by the Free Software Foundation; either +# version 3 of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# Lesser General Public License for more details. 
+# +# You should have received a copy of the GNU Lesser General Public License +# along with this program; if not, write to the Free Software Foundation, +# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +import click import gspread +from google.auth.transport.requests import AuthorizedSession from google.oauth2 import service_account -from nck.utils.args import extract_args + from nck.commands.command import processor from nck.readers.reader import Reader -from google.auth.transport.requests import AuthorizedSession -import click -from nck.streams.normalized_json_stream import NormalizedJSONStream +from nck.utils.args import extract_args +from nck.streams.json_stream import JSONStream @click.command(name="read_gs") -@click.option("--gs-project-id", default=None, required=True) -@click.option("--gs-private-key-id", required=True) -@click.option("--gs-private-key-path", required=True) -@click.option("--gs-client-email", required=True) -@click.option("--gs-client-id", required=True) -@click.option("--gs-client-cert", required=True) -@click.option("--gs-sheet-name", default=None, required=True) +@click.option( + "--gs-project-id", + required=True, + help="Project ID that is given by Google services once you have \ + created your project in the google cloud console. You can retrieve it in the JSON credential file", +) +@click.option( + "--gs-private-key-id", + required=True, + help="Private key ID given by Google services once you have added credentials \ + to the project. You can retrieve it in the JSON credential file", +) +@click.option( + "--gs-private-key-path", + required=True, + help="The path to the private key that is stored in a txt file. \ + You can retrieve it first in the JSON credential file", +) +@click.option( + "--gs-client-email", + required=True, + help="Client e-mail given by Google services once you have added credentials \ + to the project. 
You can retrieve it in the JSON credential file", +) +@click.option( + "--gs-client-id", + required=True, + help="Client ID given by Google services once you have added credentials \ + to the project. You can retrieve it in the JSON credential file", +) +@click.option( + "--gs-client-cert", + required=True, + help="Client certificate given by Google services once you have added credentials \ + to the project. You can retrieve it in the JSON credential file", +) +@click.option("--gs-sheet-name", required=True, help="The name you have given to your google sheet") +@click.option("--gs-page-number", default=0, type=click.INT, help="The page number you want to access") @processor("gs_private_key_id", "gs_private_key_path", "gs_client_id", "gs_client_cert") def google_sheets(**kwargs): return GSheetsReader(**extract_args("gs_", kwargs)) @@ -38,10 +87,12 @@ def __init__( client_id: str, client_cert: str, sheet_name: str, + page_number: int, ): self._sheet_name = sheet_name + self._page_number = page_number private_key_txt = open(private_key_path, "r").read().replace("\\n", "\n") - self._keyfile_dict = { + keyfile_dict = { "type": "service_account", "project_id": project_id, "private_key_id": private_key_id, @@ -53,21 +104,17 @@ def __init__( "client_x509_cert_url": client_cert, "token_uri": "https://accounts.google.com/o/oauth2/token", } - self._credentials = service_account.Credentials.from_service_account_info(info=self._keyfile_dict) - self._scoped_credentials = self._credentials.with_scopes( - ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"] - ) - gc = gspread.Client(auth=self._scoped_credentials) - gc.session = AuthorizedSession(self._scoped_credentials) + credentials = service_account.Credentials.from_service_account_info(info=keyfile_dict) + scoped_credentials = credentials.with_scopes(self._scopes) + self._gc = gspread.Client(auth=scoped_credentials) + self._gc.session = AuthorizedSession(scoped_credentials) def read(self): - gc = 
gspread.Client(auth=self._scoped_credentials) - gc.session = AuthorizedSession(self._scoped_credentials) - sheet = gc.open(self._sheet_name).sheet1 + sheet = self._gc.open(self._sheet_name).get_worksheet(self._page_number) list_of_hashes = sheet.get_all_records() def result_generator(): for record in list_of_hashes: yield record - yield NormalizedJSONStream(sheet, result_generator()) + yield JSONStream(sheet, result_generator()) diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..e34796ec --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,2 @@ +[tool.black] +line-length = 120 \ No newline at end of file From 840200bf749809f57b4f9a40c91df88bc2fc0c53 Mon Sep 17 00:00:00 2001 From: Arthur Derennes Date: Wed, 16 Sep 2020 11:20:28 +0200 Subject: [PATCH 46/54] fix: remove creds file From 2bbed4853f5ccfd8a4b0545d52f07840ac0656eb Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Thu, 17 Sep 2020 15:08:12 +0200 Subject: [PATCH 47/54] Added an additional option to select the page of interest and modified some variable names + adding a documentations images folder that can be useful to clarify some steps in the doc --- Untitled-1 | 5 ----- documentation_images/credentials_gs.png | Bin 0 -> 82311 bytes nck/readers/gs_reader.py | 16 +++++++++++----- 3 files changed, 11 insertions(+), 10 deletions(-) delete mode 100644 Untitled-1 create mode 100644 documentation_images/credentials_gs.png diff --git a/Untitled-1 b/Untitled-1 deleted file mode 100644 index 429e3cb6..00000000 --- a/Untitled-1 +++ /dev/null @@ -1,5 +0,0 @@ -python3 nck/entrypoint.py read_gs --gs-project_id "testsheet-289008" --gs-private_key_id "2b4735a95a2326a2cebe590fb43b8e9974dbf0a8" --gs-private_key_path "/Users/ali.bellamlih-mamou/Desktop/private_key.txt" --gs-client_email "ali-893@testsheet-289008.iam.gserviceaccount.com" --gs-client_id "110148593831883229758" --gs-client_cert "https://www.googleapis.com/robot/v1/metadata/x509/ali-893%40testsheet-289008.iam.gserviceaccount.com" 
--gs-sheet_name "Test/API_Test" - -python3 nck/entrypoint.py read_gs --gs-project-id "testsheet-289008" --gs-private-key-id "2b4735a95a2326a2cebe590fb43b8e9974dbf0a8" --gs-private-key-path "/Users/ali.bellamlih-mamou/Desktop/private_key.txt" --gs-client-email "ali-893@testsheet-289008.iam.gserviceaccount.com" --gs-client-id "110148593831883229758" --gs-client-cert "https://www.googleapis.com/robot/v1/metadata/x509/ali-893%40testsheet-289008.iam.gserviceaccount.com" --gs-sheet-name "API_Test" write_console - -python3 nck/entrypoint.py read_gs --gs-client-id "959629532880-8tkrubhpfsvjvj7fu0abhs8qsg9lbao7.apps.googleusercontent.com" --gs-client-secret "9F-t5k_NrzgVuD77mbf6GPW4" --gs-refresh-token "1//03LZW5YDzro96CgYIARAAGAMSNwF-L9Irfb7AuOMCiuE1Ay6jQW6vT20a91Mg48hgB_5_Fhj3BOl9KiF70GRGsmCl7SJUv459nns" --gs-sheet-name "API_Test" write_console \ No newline at end of file diff --git a/documentation_images/credentials_gs.png b/documentation_images/credentials_gs.png new file mode 100644 index 0000000000000000000000000000000000000000..34373a1e1a3201d1d578fe333affa47f254e26df GIT binary patch literal 82311 zcmbTcWmH~E(=Cd-y9Ny|5AH63;1US#?(Pr>?gV#-5Zv7f?hxGF-Sw{QcfaTR&b|LG zV?2XZy;`b!b$8XARbh(qpO6s<5Wv8|kfo)>mBGNk3&6l2%;8{w658lQGcYhbT?;WW zMQJfHQbk8QQwwVoFtD#di5{@3SaLJA6MH&e9ml1{dA{#tIH4teG_xQ28Tw615gk4+ zhlVtO22YxtR0J!I5+aWj0gi?ahILQ~>UHh?^{n}RMX=Z`tGuG3;#UO<2O^jkKP@{u znh2z@qCNLwzhcP1&|(lQA3O*Q7ETx+P5OgD?vk3CDHxOA+IuSl3WWLbtFocYoL}R+ z9%6a1dI~I9EOz9oKrerQzBdV&YQQHN9wM+vR@was1k}}@3XmyjFG3HuGvf2fMgikV z$M@?7j}vQW18+<)qK{$Xci`WC;S^nqdp>kI{tVvaY|&Tf)K76i>Gu-wmd*mJis^OL=M;9-v-q%0`uj`A?I zCk3(}rrRIKwAI{Hk)hEetXI5Cz1y?ekHO__ z+sj4?s0FL@VUoLB);EfG^|H!Lib!}cQusJu6dX`d0E^hyZ{>$5sE!I{l@-59J~#%B zCB40X)b0>CXBXpQoNuWJ7f!B1V1iz&aZ^u@&8({1@QnZCw|iHs9oHDJ3-o@G zE-a{?PKpLE=y&2VaM>QL&Ub=U$T<$Q3N4#=xtU;E)7FGgAxDp&@Aif_ES`82k>HY0 zzk`<|l*LlJ6&Ra_^AeQk{OVp&g`Ox{P+!4c5d@-reI}XxUzl}9ySW+G_2&)m{WK#n z0!`?0oD~VGoWHV$!~ce%RJ^+2UZRn6H-=96)mnO=bjtq@%lEE%ftr1OwOM!x$IqsV 
z$oz7irJART;8$qvag+VFIQ8eXQr+df;l;~))$d^u(>MJs9WyncE#yR4P*PBglhg_f zwFX`z;zU$%smIUA+>tN%Ez-2MTmg(>;JmY+;xxK>cL;QdaHw&3yUK}nhE3d2-{mYO zYBcC8*s;mKbTNMZ+lc6djCP!Yo{=88YmTR;R_?WKQILeRDC>Pv>VhYi0g>4v^L2F)xU20@OEq*$kaj!F~0SYYDnQPAs&jE#v<<^HH!!;g0R4e{V{T#4~U%6TM!b( z+;VsiSZ{T>5bMgR97rI+^ai5$&~%qh;cv(AMDheR2bT1(8$E$AQ=l1eM#8as zE%h0zDXqCV@x8#kFqZ>Q2jmTs9k94hw(u+CoR@j_}tAip7fa#9jP~esyq)^3Jg13VgqNSm^Q~as;RpmjcPi04G zrO5sBWtL&kk7(rt<>cu^)_^OdU6*0g;rRriF!4N6rHBJjBO!Dldwdyk)=YVMy5ifvZkD|BD0pWI>B(rL)ITNE;k@H5vvZX(WsTE)-km*OE)spS2w!XS1{@v z5$y5bK-;a_piZ!^k(zew$E3d%2g>&fzm3p&;n6Bc{{jub+`5vx`i7~rz zzrfA_KXEYT9EJ^gd)*u{eQ8ChNu`Sd{iuydw20Vh#WKa9NiBoCs-g?BP4Z1}^vKW} zsc(ZH<8+k*e+K-lH19Bv!4s8L8dW&?@im)UQ(J=nYnzm}RDiOm7P)es=vF&rd_k&l zfN|80I;}4)Zi#FOO3C^(!nFVN$7ws`17lfZ4C5mc-@))<&%wPBltJ`eq@C!YKf^xB zxbgQtIvIq@ge&zc?3HsR48)YA|UM#|`(xYYuX{az9Mt_Fx$W z85PQv%PnlsZTwEtm-Ji;Tn2ejos*ouxWpWB9+TZ7-A>=M+#cSX-ZCG>;GeKuu>1t2 zaH)d$trx5VIaxV|*$G(p*zZ70Tv=R7tUQ*}rk^b=tvsi0zNh}gUo2ZAGO#twGI^U5 z?IK^u>&N&?pk=BNtqHF|T;pjTRozfC-uPxtVdT_T)SI++mvf4|EWtz{ z$A~hEQh~CIA`|Tyy-zfdS&$LUE8u48bmvsz(%r({l+eO(GP9msYd8F*n`6<%zH{>J z;B^9}iWrXn;(+v4WV+kS)|~58BcbgvrHnGVMfd7Ldza6e*!*_efSCg0g!P05GjgSq z?#vp&+J2jvH@kO@x2X?@kCV^-i}Neflg~ZZ74wZALKRZehbsI7ysy}Sh>nQA5Q`8| z;A{~$5yDUo5wQ{FP>6*JJZbhP`4NZp!c9tldpX`iyDp5j&=-f z!IdKu@$%(7H<5@*zsyox;{RL_9s?n!kKFGEMiIL4F3%8 zOyP{v%)Ej}#t}cG6Yu%W9{xGoDt?i}@CoiVE74akwH5X0=SIxW1 zjPE=Xj=N5X6PV+lGDs%aMoQbe{5DU5ZX-Hy^>&YU)UoZc3UC;7uG(h(G>#oj$jl5p zH;=G8>3h|YE7fq}advTx!YLwzlDHF`wN*OQbcfZ_bXyz*9-iD&d?-CE?Pbw;~h+Qw|(xdHzAz0UF{ zZ;@w5f1_#JTysO^ctJ&>Zuwic*igtkp*!If35(#cIVC$4hb`xCPDb_+&Iy4#5B@2~ zs$IEKm1E&_7cI9|i{}|?7zItfI=-|lOMV;oqNlP7mFC*L=B?`^rl1N~W-ML)1GSTi zGo_=F(=UhR=PKt`C-WzMFUse;@!;G3`;`p3x5!JVD5%XSQK()h6KyEouRfHH#;0;G zS~tseN@-IoEYa0@Zsg~``mqN{+ULF2Z@ce$)8S*nCgKg^k5!~e!bqHon~O)xjLis4 zkxjjXPM_Itd2X(kw6AcNn4$H8I@jKdZp*FKEPRjn9-sGORD?AAbl*R|mzs228d$TK zJJ`_Lq`EX)OL!C-{c&*dWV9r1&1xj8QF>p#xnCa385>i?NUih17Vg2`JVow`IR-<^ z1kX7SI>&l;y=q$x2xKOKdpHG?7R^St85B=o)I_8RH0iD0Xoe@i!bt${t5tQGb*jT~ 
z*1h?SgejXh%EBOLJ$n*O^f5!I`H#mBcfE_Lt7W(h9B(Eh@g4E@-{TQ(pBbd#Wwb{6 zM$`t|m4#`BX-?JURZ7%+D&=Pt*N%%)f4O{mE@zTgP+ITZC^IOZN`W^Xwlg(9vx_tE zShrm8Tud60A77g$8@#MjDZF+{zJK1+NxrKjhO&jiC~XZ4X(*1G7qMd|`B7P(-6Es_ zh6VbtfaUy=o28Fy_9&J$l^O~6j?o*G$5O*UN6*Vd1)6)Tx#MUr-pJ@*V0K-{b{4bt+;r?d+5QzmCWgKMRZQ|&XVbf-ze(=x;Ns(;{d`H z(-&P^I&WW-jU%Zk_e<-cO647we%2|+Ou`CmC9Yb#)$)_lC*P2}iWPz7X9Lu6wdDEP zdBF;+ay#FLs?F~FU;S4_TYEAwPMXu2F?ElP%YsTL`iEaVc;Cv3uZOPZDh8MdT1q{y zJbRyw{#E&5-MmvEr7`XO;dKyok3_-O z^aVkel+m--fxUUDgRHr^S>WA!(k4(y8HeY4%lGI3ZUYW`40o~swBH{-TjbFta@51h z5!sOGP`olwAjh+^g?$XV6t- zM_7AFh(?u1okWI7-XHj5<8-WL_EwcU3l!;S7Ws~GS0>MAb{*uqR-#belx^4a*{!w1 zsk6tf(yr&KnEc8*lEU>2KRaO`eGf%ndr2u#n#Wjr4f;qvYsQx^edU8YH23DOc!yu# zdNV$KjbZJdBI+g5ihQ?kW80KBlvkb&dmW;pzEvJombMs}sm%0r^gJ1zj|{4nNk!cE zH61ha8%iB{_4Iv&7?VhR*$9}HSxu%!Fsqc+@ln5NX{y{gtnZ%=KF&1Jv%OF|>C~`4 zcERqoiQuEXIIF)Bu34(3>TKxz@T%Ccxsi_#Tu_&q*`j|?aOS#ueViaUM8g|vyKWskfNHH^ilVNpfp#i0mF{jJ;kxK(&TA1EI# zI!u#?oXEg|eM>%OwPf1j_{0#LA1NryIZD7lUKUw)Y|VD^N)(lmsQr88k-xcZ(Tnxr z$0G%_RKSxczvxJh4tY?XfuZD98wOmM#t>Oa@$@Y#m+|$^*6u2nDi%w)z-OLedAc&X z$}h|%9=}E`_1XLH)T~U+A1#j$;f|&cz}vH*W?;StqeeMKwaY@u_RC$5GK^L4mE(jZ z^{X+cWtS1!)L8qR&Ypzb8KE>1#c-!`v+;I1(JtdcWWUotlMsgr{X7nKI;OyGf_4At zrR*8)u5&NAlYdfsw!DLTUWp9`kp>w9dkB*suq-AYXc@vp@{5$O*UZ(U$>gCjv`y-E zu=?kN`S`&n7TapsYKjp0Xvz;R;h51{{Bs1^E-Ta`%u4KGyHFCWQg1Gf4;@acRTv0Pu>n1U(Hs?8h$(8!` zwJGqPz>4T#f3of^ZtjMKcHFOdF)wKjvSIOb^^|3 zE)p;A=ECY*o#j2uHI&t2!3lYnH4njs`YFcg+RA(H^#z$l|C!1Tltq;JsEdvv{6x=B zw}O_eK2&=Vzhi!5AD2I_Kdv=?0fmDZcY74|S~QJA#A-n9r=o)EQ-SGAKy}GOlgg_> z1R8w&1|k07hVeMiaa}W&)ixnFOlPr8Rq65Q=##e(}e)`L2i8K*U-Myd_5r0ya=XlQM!#MyGmlOQY`pl`Tk9q3%$#vzOU#*% zn|_#(NQ9^zfvxPr(xJs~W>WIqEppy{$Sl1_omHIu!g*qcVYO+?<*qp^u}HDf$Q{c= z`O*lJV&U}Brry;?|8-OH@_bUR@bndO>1|IKcEO)C z7CNnqs40LH4VF~|{wJh>K7zHdY%G>Bgojv|BKi&9(>GzVKr~Z0)-Ccp>`UY!e|Cuv zB5GvRO4c-ls=U95k#jjFL#9xtj`8GV$@lP+V2&f(gJ1n;`b{YkiF)45(|8?aduz9Zelm?NuCNu2!}MY6oHzLb8M2Yr81AYem<3c*!C1oDI1h z>Y(I*d=8gSQ-h(dCFi5yqFgVz_d{K{Yzuk81I3;UkrvG$5BkNv+PylhlIo+^b-qkI;>E~ 
zQI)Xq@I%LYvJ$({?IL+eTA7L_sYIW7)FWCxS|+O^NL1v7XC;Uy)#&-Dp6PbGJJr>^ zx=ySH!Lo9EI?Ccw45n3o#VpC-qYRCc<;2*ZPKd;su%0M zZp!*BW4f^}yHLiTe$`6)xSX)lx6*n-2txO@?i-@ixAraq_wV_nxGOL^8<>G1eWXsI zNV_6qlhja{qNwQ6G!l`d7IFQ9l*Vk1kcGkF5`XgNrj`!aZ_p&6oQT-MS0fmEHtorp zz}pbsFu+AFsN3=;^WCLNL?2VkC0?Wwg~w!8Ww$@qN_i#82b@9sfFR>?Bb7of8C8(j zaGkhD@YUQniCvXYV4^w3l`1sPocKlBOuY&ozdo?M&_k?4cf!A7t)UfSY~#-Vu#b;S zZlTnnzgE60=1&i(HVjZMMJW7Lx@3dQ;H_>myOtkcI8dNxabPHGHErfPMcy~OCOPNY zxAr@CWN7elSI};L7c$Hijdu1mvG|9&KkC|jlMq=B_aHx1;9RU&Pcyk@KDi-3s$Ud+ zSPYL~N>x@Mx4%pg4{b6=5^t=npXHtGu*T5nyap|gW%K&=F!Ki*QtcMqpU%5Vai$K} z9PQKNtfg1}2(9Vui7;PW+yc+DI%B-->pa^z>vb;-&hrDQe(xry+S$_@nM!p--zFr%po&w0p9xLvQ}{VCu-DO|r*G-Rk2loWKi~Gr-B^hXCTen#B(72`xd+)2lBtq5axd?il~T5O9TH^j2ul&Y@N*QoVlEs zb%3tsEL6Wbf0dKvHL|l|GBCC?G+}bLvH#lyjNhFXc(gHbHXwDkv9@*Mbr&H2rv@+Z z{I{5yob;b6&Q=2CU*!}@#q1nSNI96;nLy-%2&AN>{Eo(^yvpK||279C0djL^XM0{| zW;ZuCCO0-FJ4Z8S79JiRW)LegD=Q;VgVD*u*4efS$=2!LX#o>t{`-cRg$cy`fBFWR^8YR6RkUz7 zvHmJUO){AZc}DFxP95P_fh|9mq+1Qf`bRWLANFlliSRd?`{ zOz0*Y3ETl#N^x{(acHG#!r!*lty(8%D1Gvg6K7|_))LT=)mq@#glOUtN>##2_k5k- zYsXhxpZD5Vo;&U{_M7ilR}bGuv)J}WPcls7V%b=skif;d&_GZGc>a6H+8x+-aCi{G z-^ldgXdtL!tx5kC*b$&)Njbu8t3Ll*kRn{eqW_<&lQd``|5=)rZaRa1+d>TW|Lp;& zDx5Fe1*M#S08<+HZ$Tzpf#2KXfLsRm)a%sLNY;T=M#xffzN!6$wZ*0aja zw_j($oSSoh!h~CnD_U1hYA4Qk-OkDaO5nE_DbrBIzFL+Pu1w1?pJsVq4N*;*S{e6J z*zd%BYDrYn9AeR6`y~%`Bu={Ox@;3ZoMo+-sqt%i)x+E?Cs)?&Nvb6@`F zYHuvPRl_t@f+IcEEUQ_~-N-AplrgHjLM_`GYN_6;*uQp+Q_Ho$6KT;b+vg_J{qi^N zadkpbrb~ol`ulHOJ#OBgF2{3cF+y+ljccBzWBS9;_2>ofZ_nrTdepKhilv-z1)IGg zt=WEWi!LV%)&KVh<9z4*Zhh#Vnwt00==qcdg4RLwWvJMX=F&NQ#Yge z&L%}&*L~dPOVyd%C1r*`jdI?t`}sAFvW_$*s_QNiI}VcB-W*Kv#LH#!{?T#Y46==v z<8+8%YWcbBd0f6a#Tsi_Pfw>$94~p_?@8eUkEpz z)QlW?jx)8M{IF#5IV?=BTl0Co`;t-7dd6G6P9DLqHK*%kGf)=K>Cy6XHNx=*EOl8- z$J0@1)2W{CV`0o5NbegwTR&wKiO^c6(>TwP5F&G1m6lEYpOtP{k`u`&g2c$Qd*6jB z1KSCKduRHxssM`0D9)w9*K@yjr+8|G@$`nthFJ94wc2rBnP-=7Xk2GP+ooK;%RbE2 zg=(YgB(Xpe-v_dOTC3UtgAn2(hb;H4FanPNvImCFmn&YB##L7>;{BAa_`Z}|qquu! 
z>B^3$U%@0!qbwW&C_*pxK5v^LBuCtf*0p1G4E=gcSBD*sdxZBd*ArHH&gW~ZYVHF&)8TTRMgDvZi|F-MYMUa*^!vkhOk9~o zpGBWI-cSW^yPjF5%hrC)a;0QTS-OJYBjwR-iAsjN>GNg1xn~^cg}Ve)$SoTv=Pf4? zlsa&^BN5ii>wJLL=+KXE76~V#41D&1bh{4xdYfgnku-~<)V!)QuaoLel<6p2d1oEZ zwo%kblo*o5^W~cL;m6X1oa^4#+#RwEhT8Dks2Pv(#PSYpJo_0cy?xq*Lfc!r$$HUy zz**(?TRgzfQ&HELC_&f5GuD=j0&8vtHt2Rss3gu9?{S%fZci7vf$I<@#~Gr{h}*gk zZrHcz>nx*ao-M2XLEwH;qgXFX{~+LL-lC!)P_Kq%;OLMQDWFiSY(6x(?efsL246{N`0kv3{^lGw zFS-<|Bu+96)#8+qf)(d!Ru%XI`De@4wQ@xL&plqES&<5}S@Cne?OWleOyKq>l0f`vZEExL>@Obaw8cS}}A-6RMQ z!#6vrkTyXcD^3#v;pmTD+#ch4%2gMlSNuZ@-W?VYVS@hEvC(fBQR?}q#Js9{x>;_= zbBz6wT@3z|R`KD47RFQrVKDixEc#vF%yWZqrdjnXf)pznLOF>ZSrEVH>XA-`yI3m< zBH{HX#JHn|c0O!H%!ku0r3Hr-;fApIi-C2oh_l7)#6u%L1BWg@OTf$0JH?Z`nO>b# zNeIf{oRGKL;AEeiIaVqpHb)^fFpLW45p;xyFtC$CL5zt$<+`0Gju~E!!PXfaw(j>X zfDuLOn|hMu)F4=@93dH;=sd|kshiP6>SN|^b_<;mks|azG#FANOxgYC%x1S?x7gx-tIxnRFUT?R6}Ah_dsm#)2Jor zCWo7i9`6MqEpyzNdhpDyX;Z|lRAp_`%I;@>^!vS281qx?hdE+{-CXg4L=xqk&5u4_ z*kgHX8503I={)Hp3Kp9_k-`nq$i+wg>5kuEUP}|2+8AD`^`%k#72mYck~#2c)}JyZ zhp!c6i(8(fZRSo|kiyQB#Jy%eZEC9GQMYB;bv)iAaUi6Y1dBx9eirQOFPez%bd-`c zO?zqDj+PlKRmc`3V5+sJHqFYA7!$Z(K|vx-l^Af4Ky>Iy!M1yXGhB_XmpID4LvJ#l zjph|`d8CE5N-NawzZa6u6Y}vAAEf-g`a_24(0EJZxOu=Eut25VeBR=zPzPTPkZM09 zMtt_Te@SD?X76(P(bqzE?&pX^y}jtON}+hwVE29+w?4R0N<{@{;%KPpkmq>ZQ-!+y z{c|U1u*W=?^3E-4xW}kjoOsRe-DfN~3f7{}(3Rz;pU}?SIWTn{39aZp0C_K5akCPO z=qrO6SyD0_*_hss2g&a$SPQ~n-$y^Ow$9QR;tb=~Z`h}t_M!@vwK7UrP0p|x^X}@8 zqRl^sMxoJHH080{_-croUZx{6dP9D6ee>4XK`tiYcQTw2Ct!g$%o8CY$J5A|kZ40S zeEQWZa{_<^ZTRFk+5%Wk zV`?hW0jN+R7RVBPQ7>WhM6pO{zm+dZillxi#1LT*k5E5S?wsf6inD!xEs@95Xk5=) z)?iM@XJhTAu`4nKa)yFnNBSjwU!L6RVxPK&aDLX{1s1C?&XH~(Om}T2 ziiIFP!c%sZ6ZQu@QbxGbDowO6eei7zc!ZSv0_VVho-kpoETwC1pQSJ9a`~rG{kd_) zaiom|IzJA?cCU(r(vk7FpS`Z*bNfdyVs^b{l1hbb{8Dl5`)uI%F8=!!r`rVBbvH^Q zD`q4ZkuPv$-jm#mM=^)Nr_(Rp8`+*mwAAUmE4p5%U!>GZ9C}(j#@HCi3CsedS@U1#e%FKFzxI(7?oP1e$TtP$VX%CJVE&T8HlKr|X_FRk*|e$YWu&G^%yk z#2Oq$snyR>6o|{b7Q#}2X$~cikIpq+C!TjFC4O+!iwSuU_@`kkUw?kPetgt>%ED(` 
zAJ&iF=9M<527Q{tMBren<=leCuws_w1@U>1iQ{|2!d!mHVKlOk`oZ<*(&xvyRn;MgC$XFZY=coJ zkQ3ff6xZ6%TJ;q1s~A)kG90&FjhsfpXH?60x$lcqL(9`Vd7-%6(pSXNS4!hvy7q8I zag=p}x)6BcO`yz3+_h)U_KP0GROVDYq02+>G(-Ch2}1!$$v}MiDDKmY+whh=(NU`o z2H7OVB|{KNR?iCJN1Tz=sta^ODbOBZ_jO68&VPdv>lz-AS!(!L8K^q$CRqMdPvxNf zP=F;$<31)~h!yiSicnC{b36j`86uD+c&vG}U%@P5VE*>H2S(CwxAcq|oa`wl5FZFX zXV)}nHjFgmIm*)QIpcyiUPJ+f(=*>$YT)jEx}LAR)?Rm?-NpiY{dTQnPf!?@U*~>T z1_cGRk~io_Pbo`g#>*ljVJ&3(TwpHkpgom#lWHz=IbbF@X{SXc*L0e0uVFmvY@LnW zzZWl_XY+g6NLqcc!A_7N5gd02iRXtu(Y{3HV?&7dLzbkjC^F`Ali+sqyBekZ&3VxK zMr_RK0tz99)*ssAx*7QhlH~ASY<2Bim(6V>11Sez?LmSNUq}i~&9>7eyY58XR!MC? z3g+xR>ek8^idSr>QbDuUrqK(lK!^B-(`CQ;K2Gga?mi1+9OB0q$?jO2<*b?pA3e{* zLd_0y?6|U!$Ac&OcoO7%A_H@do@rah`XIaBg{)CGLxc9g5Sw>z(2&|$+4<4*@HYN> zf@@7v~+9%Pbb7h>`*2&e8o{mXTY!943V=PZ#+ zozGG=E}DEexui=9b=%Q={p-D#bd~-UKj8WDsF38TLK! zn$e?s6pK$p#P(g_Pg7JLp*R=zNp)wUc%BOw6yAC_fis3Par)1rLRJhm>Q=R&nS2ZyMg~9!}U#G!IEY9jQ z$N#V=K3eGMGc8U1`*s9FDo;t$svHTm(P4Q+kn;iW8V+$BjFQygyWwF^@BmR80D^vE{NnZs2B?^ zyY1m~=J-BpZUEI^>|ZNhB2l;jMzQ@9>rw5~u^f*%D@a8U0-2V?Y4BsL z)UiV$lA!nP4s^`B`+^XzN`yRC=mbd*RCnOUCEMr)#SDYLxbJAeE@7tBcv8f&bQT=0 zJONEGu#3ITVKp;wWfLe`B`6m8m1S8twmeVCvt6OZk~vZcY{X|!E>YO_des(Bav^%Q zM;9dYodVz{jx-NQ3kTek zMrBvy4`HxIiKqp&{WzMWm_mB$RI-vT5yEUW^xf0y)Y3F*zh&@upncf35l!4stYA;{ zKWkN9Tq3-hG$KD%FVG>COd9iSUuMR6Lsvx1ukB?g=BN@4#?;fgGW;=F?e(mA8we1x zBDanoo5#sc^7QvlvAJy}dvBZ7b9IU%IsQz(BQsnS+fb#}bEe4IshG4bt7z?S{YKU< z`=QP+@NjnwViYf=471RtO3JiLlk80I023vUvA$x+yVr{odGg&`ovN7s;ED3St+uX` zDb%%UjS5q20Cq~_yv}@1e)K6C)ojiF;X56Ft^CCW(>? 
zw-;_}AVIF^Wu*{@pMaT(J9N%TM@7;wceU%MgH8y2be?bjckR22)lQ=EK*aUruBb zwDCgQ)bEO(^X%$?0`_By1KRCbuMv?%`nZAY>eM4;3*6}mE;I9Q= z=2p5tW!jnLnS$j#J42ifvL0}r>{x;U8~~r^c---4(bOuDOpzJxqCC<1n}c`PC|RpC zwa2Zpxq#Vl3)wn?!LT*W)*!>2$5+0c)`}KT38$)7*~$tW`$19g%9mP2v79Tk4b>23 zE*(`}t73&H^icDR-3l`K>?KXwT~d9B5~3T1^8b>XWcpT8E--~A#{XeL6;S|{heal) z3I4;W&f){|5z^<8L4a`uh}Rqj>49~$;28zBF|Pe_XUw?iJd6k$480U4`* z`!fUNUu-mt6#sCxNdArh^{R^EnKburf8&2;Geu3we<)l$h+R^}8x2@34z~IPd*2XVf-rt6#yLxltw20?UVAaj!@|z|D}q5TK(0LBQGiY zUmd0Y>Ily%^WQno|JBh-oKfgs9e@1Q3Bx++zjLlY2juxsP2YcY{742YBIN?Zf9I@2 z0mv2T^)LT2)1O5F3R${JgZ$tAS|kDa|DFJR!4Lgf@qhbsi4u-SlTs;`XT)zD_~11v z@N&`nu{iq!e74V>hTHwB+q{etz^yj(?PoZ#UNgZi8}tS@%P_aIvz>c``@O$560)1e z@@<7uthAnYq%(VF1}AIXdneLj&7XHXk7v5fX`J1!`%NU9{0W(N%T^Wm7M0^aqeB|? zc&Fgj`zq9Q8bhREHl}9~guPN$JFa^`aNGWS2B{8~}T>pbr4s+g731>6^OQ4DlCcxjpVC?}z_j z1GrEoC$IDN#&ny8#T8k$u>>|-9oQtT`zk?9YI$dXhBSEjnZm;7`{Ir%^-ivy?1SMJ ztRt%q74ABzE`KWD{6cuY^xWA3-O}K??e|ieQD#|?$?s9acQYxr3Jy!8xcx}vG`gwT zV10-t3(zrcQC0o+GmUn7_pkTsth6Zv}{)=x~0KpzEr$&8= z9ymbc6v=nDV028`89WTdcn-|U&75x%;4|jT2du>mjCVPap^8d{mz>L8D<=G%S`0`O zT&cDBE9B8llUB;L8Z|xM08-I*XE5#`q8wls^2N3_jN>F$0L-+b>#>X72+&5Y0Ci6* zAAxCr~(Bh9>D^(E8O=x@d|$#@`+W4 z0b<*}Q1r88Jzoc|Rpaxr7k3UV2P)m_55D{tOAUn zZE>dSl{C@PD5e7cCeBxV@Wvsk^x46fDzhvP$!h=Z;(a}~-|=!KGhZl!`$zQJK*qN&q6SCT-Qa#g=)C|NUFaF0 zTUUWCxwGbZ%ox@AM@`G(HT9c*-pkF5!YU9|*gI=EW@=ovZT)@t0{z(exR<^(af9qP zmd4@a_j;S$3UKKw0O5X~1wgAtfaPlaOJ&snq~S!a!#KyhwrvQp>m-FZs=7>LYo8? zikE}@n3XPn$W>r39?xkz$^DMx=V$4+5VD~X3JpVl9dJ)GNz%A^r7_WdW__`%Xn5w1 z>;!=7k^xJO!cRA;-5cqY7#(#m2qnl#tMgZa^5*4Y7qu%483y#fqm(>`>F}f(1mlx? 
zY_fX;d{}f65OtACQKS|DM z#sr^FmZEjrJtcbV$qg!|Qdo@Ytb4>k#fCIUT9>43=x5=pK$L<&iAh{@>c8t0=zpprQM)jHs0)@7^d`P@p)ea3E`)IY* zjkS3V^HWM|5iB9gsb#z0^C?&P0H7YpkTGS;?VJ|J8xV3Lb~%_7VbKN%&^&Ak8KCBpOOAlGR0K9TC-p5&`?Rxy2L8|x2v9ZFGtIy3F( zm%rZ_bN_Gfheg<@3x&uyZ#VuE@Rc*dbX1WbD#$D~Ngym{UIzj zXRsK>^!Ujy$irds9Zb|pBE#8ZM+*|GDVnC?Olb&$`YE(ctJAXdXp)7=Ut+~jrjlci zDTi935xA$i*x|aNd(p15)+ln?a&`(s;K@LwDQV{FQ(Wub`S<&oZn~73DZ1ARqv>%e zhg;7dleJk}{)R=K0bolxwWBI*3q)AZdwNuqNlgssmr@XV0KTb*I!d1RkP^G#^81G9 zXsQxGUK{lT`R$-AFT)yAQ$9bf?RiAMDeYSxpD@K(WV$$9DO^2ohm1XdiB}sHZ=E!g zMbm!)G;m6&Jlhai0fLcC<90w$kiN;!91>zwh1;}*3*zyQy54O!uP%P0A|CE0tyKwn zJT}~B6qwg`6xWU5i`y8-{Lk3yo^ba_ik~YE#7XgX)~w*hSNn%Oh|IGN<32n-Ti3Pg z!=m~vX(vZ227NDq8Y6(s%X~2@*F*nosYV8N4OYbF%Fzx4I4spP0Px5WWv*sP*V7kI z=@FL+g4ao){y<3(g_)a6#a}U!3lPGpa)x5TNMU1IWy$(#-3jdO7aIgt)P1-%3K4zc zp%zELU9UJ35AqF3MZ}D3+mScz;;0%{SHPdP9PVxl^$2rmW*F{C3i5Of2osKouTc{f zn1NFv-b;5qU9tdCX^h4uFcTVeYIPI9ft#Y&G-2NG5p%O`eQQJP-|L9*(0J?BL*Io`5V)GkFYrU=HtXJ0oN zXyT6PENjVU7t}9iari?0k|ebOZch18{f`tVGbRF|^--d5;52MD)O$SyQvk`Xg1dtc z;;Z@^|E)U%IF**?`}oJm&7qtktWNWpUw1;PuxsJc94ZPpde8>pyxtaciCM1*(AjMH z8vui*MwX6V-ft9Gl!9)>54oHx7Bc6a zYlqzRqWXc}isiBh4yS0UMxPXI(;T}H143NZiB7mLVG{_QeK3)-?lmoKEpx+4s9)I3 zOp#xS`}CpY@;86+l8Q*!M~qr$)9KM16XfCc>v2AmU^OH%S$$SG_M@yA{aEvmO-jfz zgR~&DC1e8V$O$Wh8|0ppws>)GPNk|vs9+L|H2I2_+AyfGfEFfzS2UQeshkQ#W721` zQcNS^e&7s(?%MbrxZj6W=I5Fcg;TqF?SYqec*4-p6BBX#- zZ=a`|DLcajf0$~*@S9QIRPd)5VISWbsyx1kY1x1RGh8gb#)oT7_|}t2cjK{08fbJ) z628QgsGfy~J0NA7`Y7($i^lXlCFcPQGq-(S*UQ9k>st1$N0ucw4wmm)KL9AQF%`5H z`$WpJ{3$=Kp6C>HQ88|m_x1zK$T;WXNuEvNE2yK))v!nOU=Rj^1GoAbu|Ik)zVZxC z`b{bVOr(nJ#>cN%4dgy(%TL8*zABw(m$CpN%b8d(jlsda%T^G3wV?R=$nx7bK&Q|m z@Da=U@xcqY0i*L1_$sy|ue3>3AEP5LBjM9@;!=z~`uUXQvv+8h0tjgVcRwIHPQv+5 zth-1G?QrzZug~y#zmwrA0E?m!1i6GL3klwrWDtr^^O!?C7~t8~zsxJyVf$gYYfs;m1&MhSzyrtF_9h{N z4;}5bg*>hR{%Z?IWu}LaNA2Ih?$HE(@DWZ| z;5%ifjyT}hdL-Ps24}Z&ww@@O=n7GQxcZaXk;}ktJZGQscpZg>nIQ@{^C1m8Ak15I z>$_c@(S<27kUd5RV__u~sV4~CRTgmN|Be zuVF?^oi2Pc10I-bJ*PA}au_c<+`t`$>Tk-5N$bs$d-&a 
z1n8co*sD+lj~kP;MNYldsbLz}8U1Q;0d&gEe#C58Zcud1)5O4aXI(B@EgdFShl|W} zQ&Wb+>9NuQvq|np)70F*svYZJ5_|y1ltNyIRf81MZ=oQ~hWg1F%K4kATQqRqxv~Iz zVFzun-En$&O6`L6bgaFgW)ilB*5u;W6m`r%hvoiZ{hs%AguI~Yt3Cu$Nw75uE5=yF?jGk z2v^tM-2nLzK&4LKVNqR4d~TE;xI5keNHhEWaM`Z2{O}SYjq>VkD}s4zpoxcs#V~Mf zTO+(%J?25`^MEd_M{($)9kNHfaov0dL!PB z>l<@_PUrVW0#0e(W8k}YR#U`%4~qIx ziXytAIX(ac+o+^V^-^%>H>A=*r9I9uf=}iqErF|mD|8l4Uoq(oRPm1Lb<#EhvYU3# zew9`zgQv7nj{_)`*vR#)d7ti%zxe&4Dg1&g21o~!S{C`BzdHc!AV6lUz<(*8S;Y_F z^XM1S{$prd?VKe6Ot32I$e}SH-KuPRE+!rb(U0C9&npD|k>gt3sU1%Zerp1}<}U{Z z)27~qx6IpNuEMhJct&uap(xygMb~ggQ^d`cCn%!1^MWsK98TuZXrTOmUEoC|1;+Xt zP~;@(`Ffa;w%O+j2e4ECd2hC*F!cO1VKQ$OG-jgx*K6=2@&b;?oI*e z1_9~r{f^)F{jX~u?|rZj_L<{cz*@7`toM1I`*+{(vMGm|Nsx$205SLpyjOV9>-<^m`j^Y^-p zKm9H1spqkotu5AR*eN7qnb--M!aZN87!lbOaN5c76r+JSOc8IT*ot&vZ$@l8gG=l} zr1L3e&OAG!k}V!B*L+-`a*@s&7af!WMY+;9G6V0yI76*B=HvaXqqa$Clu zE0mfj9JBl-S|uSt44{7V6fq?6Nt<5?#B>qNKc1pD778r^81gBGf_o#2uKQ}h+Or@U zLP8{1k?+N2G9!$}B4=)M|vrI{ddp|f1efJ(z zOVo9<$oR$D@V8t*b%+_LDTU@8fFGSz|Qa#900xZ zR_0YwU2rWWRTS${#SYOq1u3)Z=d2=JEy6g$mdN1fp1%RrzN6n2{9}F`raOfAsXyj0 z9bFyWcU=TBy(c!32q5In1g1rI;CEHG3anKOLNdD>;xMK(CPF<3`$dKII5!IFo99wt zI&><)96?0wHUAddMl38FOVj^LXJg2D?`7_gZrqs#>=E^sD1jZOG>C(u9{h0w|E$nV zPrxvyV8-I%?m~kcj3!99HIK$Q3Ibg1dFSmqe*5=S7*0pZ#mTG>4K!_~y9lVPBdqdc77VMt*joAP($9_9##_3#vk*M6oLX=LdrtfBTzifW?cOMd?$sMXO)%l zZR>QuN}r?JydXz1m@VWoNHr|xAUM%A!1@Cg9_v-?lWz(PQp*F$)NvHuaYVib9~yX& zk7=TXh5HlB@d8G`;PG0bb$$yJ|3xH-Apv>{;*DImBhZyW0Lsjd1jkUn6#$ay-z7Xuk9p6W7kxm6z~qTkn-Qs8-88B@8w$0H&oqF3H%wjz{bt%ay_qO?^Y{+v|LmfoLm=MWW+{FMEf3r#U zL0hnNPj$I~2|tE{CK3wfs6!iR1WCJfh^Y@sbku0`*e7}Ahw+SMg{H(04)S|c$KQ$| ztX#y%vZT75*^z~NV>Oj26yaV%xcedJFTGmR9kw0)1&FhXE~_NFOMet{i3siSYCOC} zG09dL^RA33dy4NQwBk=tITX&wrM6>f42ky@duJ{OA)iu#b$ym#f>xZ7k-9d6N97;G zeK_%1;RN$CKR&W}ef1q1T)`Z#Z2vo|?kX@M>}-b)#wDgab{`CYb&e~LuLoTrG3)^8 z%+35Y^wFReM6ci990QXOCFo1w7(E&TCJHP8w$hc9L5-F3VjLxXGgz_%M{8fwPrZMSI-p!F4jo$;?nV?eolv#@ev$ zmo%{zlhBB4I9+pup(@I@+**~Lg0Hoy>*<&*H`&x?>q5!)7_WbjzjJ>w^_`S1oE0k; 
zH_*Zk@sfE|n%>PP(?BnJjl^;(6M6nNVaX!#E5jh~=d`~|&_=V7^m5m;(13OtJM>;#UK5(9#?_%_o9;Q=hdlfNwqz5P#^L+n`HeuRshICS9E z+HT5uN`d$DrIMEK-#0XiIg-A1r;{PdBU>tG$8qEzibSYPns83Q)V1z4+aV{TfL+ek zZo2qw6s09^AYl<699&224O5A2yDDX|cBp!;kyN*{GcefR78KN?d4{p(%BglasApC+ z7qoRf)c)=FR=kRNN*jC8a2(y%huM#3n^QHw# zXXyZSdn8$}q>=m9#bvKa^v{yw9b@f<$hD4$=w-=9jR;rU-Qk4ykG7j=MV!m_2+BmS z{JJRB>6Vd}qcFk0sb$G~7`FF2y52447IaQ3n}^Db4L@|RH7l;K8-8+#+@)Xlc?#^{ zT%{MWy?V|bm5(b&Zo9g`cj&}#7QZ6JzEo$iA+GVkvRYAdt_zM7K!33>M8Fh+H$v+5fjmTLRYIJ`@=n6pGt7<4&+$vD6fxfy_*K|K?qUQ zYpi?Yb*hi05Yaz#RJrv%da#_!FC8kLJ@w7J9&^hQKVKln%4X2L<3phe^s`Mkd{52A z)Es@$8T4gQ4{HhK$5p>U;EMT(sPH~EsxzSB=Y9j?PL2e-0l@ulG3A2G(6oeH)i*VJaLOR7e^yk5*gW{ zd@8NpuN50cuUiwimgA&5S9&RD(`42|Zm@SEpsRjb!4CXoUqVo3jkMgb?{XI8siZtc zL)fX>Z{LgI+OGyYP=Ccy<2OY2*<7RF?K#P5q4Kt4cXic!mxIJ>hkj>(m$>%HoQluH zK-q0p-fear4U?}J;vr9^g%m8QsVVZqgR;`6-r>nR*91x@LuWo4?P%*D&&So3^+$CUEv73A2{WfL8g&$MS%pA8U7P^M$+P9N5epo>I zYAr5|nq!Vm7QJWDwYt{)7=KnvdvFy(Z>N_YLP)%{yo4mP$xi!6_Wef2?_``(ivp~c z_FGEq8SbuYIv=D@(yphu3GGWlX_CT~xR ztLYc$>Y6IkfQAyAw`lU4`^+yeb_=&xX;xFba903)8oVT|8r9GNy1 zVFNPA52el{sTuMop@;p)U;XuRqs8Bf!aQR2o#h`yI>qzO_vSP_Q4J{1OnGka?ILU` z{0;xKFb$?4ylRFD%v*j@i3PPT&mK;syzg`O458FB*%|Y;$`#PCM2_T~d~Ri$@W8bn ze;hu4^FSk_Fio{3DKIaH8&tNeBYNp^XPewY)lVnvImj)Q@%^gfMJFoZ_BN5-vibAw ze5wrnFZ@JQA5O`7&^|lTFUIoYqZ#;R(6g)cveVn4;vfr%wBeTGjvRIPzX_NzuJQ?y zNVd)4A}_y{4LE!5q7Xww`o?OE>3p#|+g*<ypk@} z<2oaqFp^vgX7=X0g83+WJ+z63rJ}Wkg{X%-15Y-J9} z{7gDBvzWW2`jJn_=QgjVXD!}hZfaFBI@QGSR}>*t0T$K+m67z;?W+t}d|(X4P#g&MZgOK@!id3Hwj z$FI$5kw0)OFGZ5-R1G9XEBpIpMGy4Ps~`Bui|l#GoXomM5Xzy}{YMH_@Ac#%BFK)( z-c(ADmp%9nP2M;JoBwHh(iWoLuj#g&A~X=L$-kGS%%_drMub1S$*2FsAkG;P!3F^X zlB+;cIYY2jNJ9+!9v~9TCyH}xkejNLIKSRb-1_wK5%n$NGf$RGVQOUk)zhkri?xvm zItow3jpLxNBj6(Qe@#8X8Oz~kJOtXZS|emlLHNKABNIvT$G(2wOyF`G3`6-DBSq)n z*Xl*rwTOHYhkNokN-vsW&eLg4n{P`K9(2#_dylD}o)eiQ26$7RdJFe~0dV&Tgp(rmO@=($qeN+JvNeb&Zym!Bk`zqVK_3l~pF*jju zZ(*Dy9Ufs2xkKXea&?Y!Oi@(pZf5WacCRdO6{MQnW zFGj@k0V?%!tnjFulIJiUGGS<$FZM^3MerOGhhgI5Nh~BAU`E+g~ 
zgH*Is6C$g%{ifQKlq#G16a10-b8X&>R92mlueoT6_O#!;@Fr%eF1!v34rJsPmmV$? z=Uj)JEjMSa_nh;;r@8fhuuom@Vx;>~d*=cZ_VUPt@cntsQTBZ-C_~wKt=lLtCAUi9 zevRrP*Lv!U!0@%6psDRr`yF*|tHZXHh1m^=LX!}oT{O8r1)p+(#Z1^|V- z3WKTqP5c=psfA7fr6CXsWQvMUk;`Ve{VbF+rQ5kFfhuPL1B@y~!6Z1an2LR$JIYM- zjgznOTzAhZHseiK-(2MAx2_|ftVx+c=YX^y*XOv{0p(!i_-0TG;r-l4_FQ4%pjh0GuKsg@gqNQ%O6`3i+mj0L~it9n88S_DVA0|OO#AW2TV6b&U zF1o^gF-D|1kB_0`h*@_V%UKtP4XY;HUVCyJhT=E#cM(DK=M;@1e(DC-gYf3)OHay* zs+6{(IuC7^7<1J^le71R3_+UEu=?T2{<~Z4j}?Zn>bfV5dngrklwryfaD)XuR;PQI6urcO<+|vLi@m z;M2^IVD>vI&!ZgP_oTqh`c*(IvDP6a{jkzchBD&zLC*Z8=Dimafems07S6zpP}ZJ> zNNFq5Ac-S{NWnp#!00`D@$-zMz%Qu03bkP54~rkmxedfc#webMi!Z$%F0Z4}bZ)+>GG^WQOIKgpV4U`+W8_C|ovH11{ovT! z8Y7p4B_#^2;N0UBOWh)EZbz$E9@Xi#rL$ZS=tBwIP4s6Ueio52wHxpJ>djoSaqmpT zE_xoOPUyHYaB#aR5X|G>&7(Focz@4$vG|vVHm9$FxIO3UcDJQBH}Lt#n~2=c9j3>` z$s2v_L{ZOk8$Z~m#0MhdniH9`!>p)HKWT!8&(+Ns=KWnz!g^EmORHzBoL%&7r{x~_ z&(=&AI>hjBGeg)COX!h_891kW^j#qdT>Zxf6+p(>y+*?L6Ca`(R`qg61#-4C`C3uf z8V4l>m|cse!qd$zEetnP1IfY;AfE>j7lTya^Ds~|4WX(Kvp04V;pM7M{eTrNZ^S#5 zN8^q|@3_20!)Yps{U3?=F_EQC+!}10_ypjtip>IqMq@gm_AL4jbTRdtp-EMnl^QMEhUvX9O>x)&x%6$ei&w$IAc~$XTA4yE>N>x zamsQFTj-A2rE@)3dZ6Cc_o}!cj(@xTv#x!k< zaKZgrNyey^b#SrJ%2^NInyM>?N>v2=qkXT|tW5m1^^#NBQ+L0k@c4Lx*|bxRu$GQv z_gCl(&OJHPy{hz+Z==q^+hv8PKSMSQp6`8~@tD_ZPpv4A%rKi2j*NCd(9Md-@xQ%b z!q6C-w_H)A_(P{m3#GMJ%d5GX7305sDjENEa?3!Zlle)8F`0)*R>1WDX^SS`ui$tG z)ju!=`B~~Hud6-V?YU&>0rlfQr_2xTL?)cC=*j#oWzhG%GHz?aYnH{+2l`a+Bkxm_ znYffmcQjx0bqe55IMNej{bDI!4EWV6En@wa6n`$`NZ;-DsOQJ4BfAy;@aXwxM1fx& z)86S83u(UZX60R!DBEUuo~V~BG3P^=C=-yY{Cc3`?JU{myI`Em!up^q`VtQQjck?= zW7e^|ymdR#C0OyQEF5!eIar+o?`lft3thcVl!+5}4ebxN2PjlUGPcj=oO6>4$S;JB z;w%--iPBx!Ic!bvOsfpdWybVFzZ(mO$wzIcI878*%@(?3!d{lI@;tvFuqNrQOxS!W zgz>HWJoP~K{bBd%dp18?Yn#^vIJ>lk4W)5qZ_!kV8qVzaZPj!jb}VC-!lurzZ>?Sw z6i+EWOmRK;;ksL?!v80CL6Om=zq#s^j;6Av{|9yaLh}Y0=v1MP2D1JKbW*`#NgVBg zng6!n5TKZCJSq_Jj9X`#^8MGjo5up#tbc58u_#wi=U{$z}b!uvmjCKo;8JDTW2 z#b`x>UE2Wr@Cr35MoHBG&$7g-(NH1@4CnoCkQxF7StqR)c*0|Ji=-rzwV{SK??mq 
zVqXSrcqDGi^QgYEy@ROBg+0e(^B(&{8X73JsC%KiLofZAcKIAy_WYKfqM!>a^U@N- z!x}c9O}P7;oxq2CqPFR&NyuSGaN`0&$KuVOkVaRlANO@pGIkOu}JqOEji1|-h&EP68N(%F+&pVCfX zix2i+Bm^Yl@h{8kQzFw%HUc67*Bqg|_QbF2_3XF%RHIfkOdWf;j*t8OFq?~tqLsnRx&<;g!8~e?mv_lc&qMv!e${;JRdU0S z`*8knVRKy9%?A;MaW^E@-;);}C0lQ+o;)1O_aB>-J6t49hS9bA(CyCfK@AKQ89(TS zxX3$v_-4g?aiJIUk>zmTzx-I*%3aRgWKZm)pBc@Y_lO+Og+0&%`U+L%2hm#Y^tj#ZY*g5h6E9+L&xQAj^hOmq-$~I-3iCM4OJe@euJx#D~kA(JsQp%y8tg!o%cQ7!bQmFnHcW|H!NH{`on01d4 z5EI-ok3RdNiGt7Qg+JaW-1Z1FfSZMp2|5=>kv=qoI(dIgmMY=^Zy87UGD z03lbfQb;f0AR+Bqd$&ske2{FGlVWmD62i4%MS%BbYdm|+}LV>g${^= zWaj~lAqIM9-tLd40@TSY&+{2Ze1vS)-I)rrU+`>}1Wm_ZWkx-D0C|Xk&*dQ8KqV8ks<^QYU>t(dkeQUWC1Fhp&sPTWUCixNrRjM zPjUeeQXqp$ZYSo!KBzX(uo}vo#hC6YQ4)krK^k-e+r?1I(qs8#7k-HmUdEgF{80R!<0XptHAh~xy;D{q*xW>t$)A9u%035-WFtGsu z=V=fH#`ZCr$}t&(c+zQeRybE+$pi@190H%MydR7tW?BryrMZDJ$G|QvIF&PcO9`AB zRPVe2%P&|&q!kPOG#Tu?_}T0Ne5LcwX?<0yo=iEh)ve>BCT!r>emjCYAot69C{?kn z$8+R=&b&3dMjP4##*sHRQRm-cgn?;dRwT*w*Q+p7=Vc$%{GV$6_ChHzzdMfE=C9r&uRWra%kviC#X3kbMs0p}(Vzyx0(l(T2)bNzY z4#-sT?85f?o3L4c3GW|A_)uwFExy%JQ%0=j9JSkTL~gCl?t8n z1n7``<%?tvW6I*Wp-Rlg@GL^|&QoHt9^50DxMkO%#$NtU(hsDo3DeWX7>iKDHTVX|{ z=4kV->w%W+NP$=O5`O^B5GZCsGn!SNpQ5XcjGv5Bu?Mb__H;BnA&4=T7j;bsSXIUe za)r{|_v^kUb+FJ7&UVdjcHVx)7rcwnSA#z*OdG#15dU(y-~|y{eOi7*r_l&U zX(&Fg$Lxccp-*roM2U7Z0X-9dB>A~A3%Cjvm2fA`I}IIf*4}jZvD3+Zv*e;56gQ{y zG96)Y1BpcWX|2>4&rH{?qAEwR-mj%TH+71usfeIQ(l;VW8O4c zJNnD`PJY+7+c#W`X%2dq7I|}xzJA_-B%R0j47TJR}58+N8VfXXGvNC$sl0%YN z+b}}bc}=sl@3}%e*7RtqRKEYrZ7*83}QzEp(ka}(yQ1YoG@$G(S^?t!zp{{HXe~dSB7=!D21yfSM5wvrMSL%+fjX?4_vj%7NS6Moy&fIEn9DjGUehH(t? 
zdGcc-(6UMGen-%}QGvh$)l?SVxqLNI47FK_C zdPOSeY4z)A^${)V+ie2=>-96HdGj=dw4Bsz z{Ri6ofAp>o!}Dmq`am2O$<-V?LFwmGIrk++R6D3fWCLe@1shrqaMsfI=ZaLMs1oIJ zyhS7oNShNYKS#;G(xuB!hLG7GR&*d_dz@u#?#yj*HhJ-jlV!-#1lZ)Hk&lPI##GKZ z+V8om=sp~1D@?t`dZ#5C??)BXzxuXGZPovDfW4*h&w7Ln3(>+CY6P}@d)4tLOdiTR zSxtQ|tgOoOyLPUhjIx2oJgwuBmfjE;$FFOex?>Mz>J-metfRb0HI5*t@w&UV-#vu& z8>H1r!GPoIRMYdWCo&&eze)dl)T4dy_i-RS-0orIku`%Pzj%+JZj*e#(#ZSjSFpYMnInT-q z7q50BtR!#Zpt zURimFshopsBCQT&E%`e!Dh`7-ssu(Z%I*NhYcy|V&AWb0_gXD)u)3<=Jo0`ADA~+N zR4zl)Dov?*0PyRry%fpxK|-}8Q7{HopBUl<<4MxT0ph* zi-b1Sfn)( zgY$kkdu?VUcSwPU{-e{HQ#VuzBiw}O43EN}DxVS<`MdPBC@+6O>i?1MmM_Dvd4Tr$ zdz0BIiKoV{bhtcfZMg(s;z7hup! zat4yrws9kMp#=Nea0*hnv{8tU_f+lq-52@OLCz7iTqlsg_&l%@0VUFwMOlGZUrzJx z)!a6cf-P{*7Ag}yyj0?|pjHNnOvzf`8p^p3TAfc67q#A?%)C3J6z?NGmUv<15naN#_^casFvfZ0BT)xP8 z9o}rl!lCl*ULi8%fKlm?WxvCP)#~=r?NX2YQ@f+xhHo{uK|3=j?gyjf zwJcz?6z{k`-y=Ipl%W-(vu`9qBM7S{WOR)gwB*jW%^s|*#C{3^2tFpEj0V@vqh!ZXk+BB~eNteS|nKF8FOOj{W^#569ud4dvJs-5^O+1v0u zeErwnWjY4l@fgU3BI$dbIbvcb3-8i&HC~DKz9#DXtFg;6$iA;(U>O5N@*EZ|1ufUf znaE5@_)`NfQigWX9nivd!Z1Tb2sWRJ;z#h}BV=8y26OBq;UORKMWcyTcH+P8yn8;# zMk?fLmnzGbK(ZFbTK!%qL3%g1Q?jJ{=@6S1D8FdO!cWs{e`%U-y0KfukAcpLD#SGV zBAf<~-Ytz=gzIq0yHPglb7xd%BYD(U7Hi_yFIPe)AQfT$2t60~v;`iE24$3&<}nK0 zv`KHan;^cI6zdDIPD8=>{=^00ns|RziZAr&9&T(wHy$~0B1isKhL)c8T8-G5{0!I# zLpoCnF5;Qf^QsOUniw??NIa`g$6w24IhlKftBQI_!I*K@iO`p-3XhPKzt~C~%6EU= za(_7!DwSsedqBlOWE_c;V<*^>V<1&gC#l={d!l%+{#}$_&4DY(P=}xqv{yTqN+_xQ zA5CrY($WuS(K$}U-ZN@NTdlk9Y-wyhc#Zx-fGZuB6Eto9lmt3=M_s}!B$kn zY3E!0op+zNmL}-pJZKnT(P?mx$h!nH7#*}T8U9;MOb(0SEa+aBxdvue$qE#*Uk;)v zIYqx7zlWgn zUl{XEszF}ICraOLg4_fV&+E=`L;_59RW{J`HYE$W;^FGUl#ILAH`CqsN&Aeu)z=o6 zdaZSBCL5PuYEOsTwsrcCD$?@-VwM(QX$S{c|NBFHhvP+~#) zSX{U1024XEi>sCBR#Ly}J1?J-$ap%jDA7aF?t^9_vYOWRmkM3$n%E~{a;oqmyqP%w zokXEzyKfajq)aAn(pIMPRKF^0Dwp?h!ho8b_`-S}o@z1|j@qMUccc4Mqu+Cix>!hM0RF$ezoy0wbdVWZJfl|ML(IS6ulixEGW%xZyPB^sW z!*m_Ld2xIMOE`l3k8f|YO#E|mBnr?8TEI9YK8BObWRrRqsVep))ixd14T6V^XDKq` zI0j}_KR7?~yw&$T>ikHr;0SvqVR-a(D5U!7D;8ZkIjEk#!^#HQkk`6j(kf_#)w$D% 
z9%rHxe8Dw^x5OWn&7AvIg=)*?!s}2~sVklhlI!E*^Fap;D|*0`Y5$V4XN#kg{176pu^7H20Lq6s_{3-PbCTuFe|P0=<)}I6Iz5eae8(6-l)6<=B~??KPj` z6`Ovi`ss_`m@tIZ^`)qLx|zEtR)!;WbCNxF)y0MvV#+YmcJc~N-_#@SSA$J; za#OOO8mR#|Yw@h8q=#1_@tufg2k}Nzk5CZqMcBT<^@r(zNWsO|nhmPr$mxxajYje` z=v0OTh-<+Yk5Pshf<7F`Pq`-yd{Xd&%`6l2jl3%FRyH5X^n%EHjV-&j;N8mmmgw^@ z6Fj*_DN1u;D8M{qNf4fR6HYAVT*gr zDTJbyOdWt0v?-@YG(`g&;}jgBos~;(WZHNud5UD?KVE8dzBjGjUj1~gcE_)Y?1j-nB#d5I%z9oTjkqkqDd6;aYS}LCUKr0m^%Zt{MtMNu zO|#*qjQ)9~y|K1?r%f>BT*NFQ0~#e!Hqf3M{j&XdyjYR?d#2$CrjMHp^%J?X5Ai0p z5MYhpP&Q!B{i&`Z`R$1MOGu}8>4K}J2{#8aDIvl{G%2As^TL1B#brI@e(e=5;{P^l zm_7pVXcP(ri<$qXa>oY&lil%Q#jpPk*`55wx-fF;&!4@Ot)1!(jMCMM?vUB_BbB`2 z@AI*Z+KIbt_9u9B$<|AC^EP8+W1t~bWJ6y)G8!#pEsrm>|KXXk4nxF4FSml(bf6eF z4cukL@d@Pz#PRwg;nBjY*&1d8kLH=>4q2~V-yMOL`aIQV>nqAW!y3~P*IcYP&6CSW zzYoc!R_%`;oB5TUL`HqRdS4A-@v=FH63sFnBc0^8WIUi0bjEO#_oSzOK8_)uMe8}r zq4+u6W9;#6-?L+M#v@>GAmcPdikK9wr_?S7yED^|oUAkE@1P}3^Y2ameKbDGK*##a z_C?k4#8=761goZ#X*gCB9@tm66~}838l`~cC>)H6;28ngk}uk>UfOJWJnPa;e277Q zQo?O=ak4~s37xGy7fuS-V@E4>URR}2QHuOi&#p`aZG2Z4JvGKg7QL~YzEd?t(e!62 zhU3iB`fr5hs32zRA&w%Q2L59R{z7BAUWnA}6Ue9+=V1GI!cK>}t_8!*6cZ|F;$U$cz z5hCE&h&H^{ieN2da#en`k~(Myas>V)84IlilSL}YdvRw3A{;qhjnN2EaiH*p6EX~y z378k-BGL`Gy!+FF65cv|Lo7k;SkLb|R|_3)`IlQ&!bFr9vUG$_@>!+a^4cOn6&nU zKWgpRl`xGS-Wz0?u1F+DOD)U%lIB{H<8nLW@8qUu5ICFYC^5H=QLv=#PEl~=CEA(~tFsJH#w6H$TYmz7az=JHt)w_jHq&{H`!R;N)B+Q8|bzm;Ps zs^9c8;ygGB$At8fh}x%u7fD9N#X z)a}&~YwQNW<4yk{b!SO?se5d}zu&AV)k4Z+lO<%{O;SiDvp@AFg(g3ae|PbrS*`tL z+*RgKnV0x|Ewk8eW_a`{*Vv0rh3^t+XZV2Sm-$94v5S9_P+XUjcA2)JoC`l_-BU8s zbiH-i2Hcwc9ayxERls#Ix+mrpxA?6rn2m=(TE64%YC>fE8QPOY{qZDzyOXkUEwN;X z{VMw1h|W5(N`g3n5phV(`$}nN z_wPv&?Oc6Urnlyil{69M&{nD^WYVEW*=r{~YCVA{gf~aR9aQ&r=8_!E1+gN5qlcqv z_6U(iGR`YsCA>H#VT5tiAG4FBt~j0qn(2Uh0#Q&Q&Vvr5o2k(vzH_R5!Q+Z7Jx&G5 z#cLku+vs2{Iw`;PTCU<`Eh)UT_?-RokTp#2V-H9zdfr$42X(Q8sCBV5HcT(i3cX1 zlfMh582&T)#PtbXIv+Af^=Q=GxuALa+f3aAykWiyIow8}DzC9!YR-=a(yu>swfBA< zA^qLt>nY-YKZ%HZiJO7bR-j1zuCWh$sMlWw`gU&|96>%Mf~zjKRsayE4A+l1a{d>8 
zW&#vaQvAc41D)HF1AOm{+p0qg2)+H$MPSql5~7z7y|;}ASLD&kO=hxJfdututiFe9 z?Bk0*J&cS%F?=F#+e=~*>g68E>nm;9#~XD0k~d%cJA0K%8(jdzLkOeCE#k$``FH( z)KL5f^cE_-OME#e{wI7&(Scf3mfsmw9j~iIanHeY%lVRLmCO1+0d@V-K*-3+(WGhY z`HIMf$45aT`_YwNcq3l*NduG;hjcZ4F9p5_hBinmnullhICV8JJINQ|G)FcD%~u{8 z&iN=)ey~`srW-r}VMflYO4_>t-#R0Zntgsdy!w}<)&#<@B2an^=WLG1OdHAeoq`&g zJ>H#`fK%?3iu0qGM!1X;N2&vXY4goD`vs9ZxNpHMs>%7;_SjpHR!(^K%x?fS1IGaF z_ss=~N;PX+9}cme=t>z3*7kb={g`|H;pD8HhB7D;zK_OENuMZ=w|&@`xl>@YRo?Zj z5NI5ixcr=jZO#HW!GCn@5ydG+=_PPM!@a^H<&p~^y$FbZE#$uf43gPSokam9LRr6QLDzECk;Ha|o&h zeD4Ogw63O4e`>L17Jv*KbfHE*49{FnMzO)DSW0tQ{AtQcUZex;;G~52*|%_^F5szz zJvb>GKh=_>$Nri}Kph@C1S8iSVN>p%2pLD<6VI2&DMC3Y8O@Q8tK83Ful(RoLTPP3 z33lDfNHm2V%;7E8fu+K7Tt|LK8tDs6O<`^=rVQ=YAk zx39Mohf%3KN_VGAGJstD!;+#fJ9-~q;Gp$Xz4m7dm%7=zPZ?eMEGF~ zxL-oDC?TPc2zv{N3%a%Zo?izBU-E^nn;6xo8p67fq(f2iT?p7j_ZC$KXc~s zTaC29c&-$?N)*ILf#_#oCO-X2e;MS;01q~H1?9&I8Xo+@miy~`ZUct3I#D#kF=?X7 zl5C)Z=jU?pP+cA~1y$bba>jYbw|$`-!Q%NXHe>}`@DO^~UAuATA|K8_z9?J~yI*|x z2$zYVz@#~0(Ea$z0mzh1sT+&naO$5UOwYe&tIJ77KDyjFd4Q)keE1A*oj)jnVAmHx z{|21tc3%*`c04UBFLH4~D1wC$C3`%5_*2PaSu*qR=px{}g^FnMSo|;S&OWq#491|8n%C~TUh4zgxa5P3#BRuxe-tU$quj)NUm>q<8O#h z5{7syLr~>G;P2qpV&O_w6Euz^nnJK+3-?2>*T0qy#s(xXj1wG@%T`v9U&!&^oHCHfv;LG}Ot zE-~W&E1!n56YU{TSZje+P$58OR`e42m>NtAwm~ShlLmOG%n!oDCjkl9a|dXe>%V~N za%ytFaL(|${;6&8{p&m!t&;TE^oAX+^)!HmYzD-lzA6|iGm>?g(lnc_Pp5SH01x8; zgouD}Xz0Md{3+l_F1T1Z15r_1Y>kKQe7ZWiw;*+;ShK=(e@feyRd5b0boFt{A~OKb z(&IiB078aSBgZCN?K0v8AicU)Af)Ln0NXj@^cMfEu5GhHjCwZdwHN}F-EIoc)3JT9 zBbFV=Q$6jPE1?0~oPh%G6qF(LaY_PxcK~Wb{{s$n8)!DWxC*1uG?MWgm0-uB1G0e@ zrqKALD>vQ*H||fu`iJridP-@a|A5v2&?7NU+ENR70BpsxmF{)DO}YZl1KC|_X(8ix z$Z-ej(7!xQ+)FrL2N#0$?k!~ z(Pbt6jVJKP+h5Yv2|+Y(%;18$mQe)vJdoEmWx|OwzhP1e{T_mLU`FZ%C$?zUd@KeF zh1*CAIE0~p!7?D8Gc?2g2+&ClDIe?mIBw418ny``gpU{t+0n-b9&q{*>`5kKd3ytB zmRcZybzVS8xwwPfL_nO(N3em@ClFNbmjZC2Ro6N;cz)>x5XQPy@tQU}~X+7@+ z9k6ihgHX%hwQw@tVGuJjl1U;Kzc8^UFHxX0fb5Yz z_zrQA_h2_J##Ja+07z!&gogri?R8>kpZ5Zre?*Z4iBQ>X;;;snfoKT> 
z#pNq71%mH0Cl5BVqo=ZN2^8y~qvkvdWx$hB8AxEII;A_c^sNGUOCo7u{c^{%N>5c?!(x!Cx*vK^gS3Iqy98k-i4blnmSaZM;( zTf?(&JG-e^!A4^$AO$iD#5r=|GvIpy>78`?fnbxM%6v>>E%eQ(0DS8+(ngRSD-AG8 zdoJtWrE+ef_*hCAE`LU81VChjmClV&0FBixNQT{|OLCG;u|D=bA4bxR%D=oRpmj!r zjMGX<1hq0#qadTvW(G6qpDJTeAbhWRV~FUB`_tDSJ7!TT#}AUHFODZIj)$v;{5*@6 zur$@B*~nD*!Whtz^T+EVBWYJ2`wO1VBk9M2W@f{K{DKwoxPebPF-fE#K>&q%HxWD| zHxazNdlEkQ#}6VlP%aeu2m-!y-nb}ad8ihJiu@n?gEpj)-Cd^}7M~J+uxl#?HNGV! z?f=ExTZKj0e{G-w0}N6#fOMxQF-S;viG!ek(kTtnNOww$!q6b4NJn7)Y&yKUNGDIBpG0r?Sj_-&e-8WGNQcCtD zH4Jv66)Ahb=rv#01US~1&3UgAxBuCFs%sw2(GF0L@inDciDkCutnDIrLQEd<2OPw# z`ae=^y{#N8aiPt5L!bX5M1Z@fjhP(QCC~Zn)gEZmmoFQIwX8LO^5c$j>;(9oaSx{j z4H~ddhox5!LIo6Z1OhVYgBL?&r|eY^u_@6dBg}}QZ!vmMh1=j#WXuv1?C*STj=3Tz z5+o0!>>i=11sY6Qm(oB1Z6}SE4Tq-|v2B(v0f*kPKWS#47{e#`V@&?M^ZNZ|zD&@u zKW7<_&OvjZNS{#94enjpBWs7Y38y1)t6q zn0PbpCRDYif49nTEb?#Hjot)L?TJ&2Nl-X4qUP`YS(|*(2RUvRNiyJvlh^@?NFVKo zufHLy0E4kaS$WQ;(#G4=;W%2u@7)cy$pz}vSe&_NFf+9vDbt0#v(u3rN>5K_Lm0v~ ze1ai~PrYaD`O&DITxw1Qt*^BGr|lHz(;3RV)Zo&MUYo;6&!6 z5GZtyv+C!S<7+ZOvK?WjaW~{zmpmlc^&{?aP0mj0+w=N=y~URzVMBBVl0aRNp6?FqY5%T6iKrlJG<>&cRHaNK(8^VjNAId4I?-pH z1}OnyRDb!2|$ zuv$eIe87o~ch!qApB&UDPLC_oVWDu0LwnMT@FuKd9F=$@txKGvkI2RnZW?^)K7!&4 ziEZ`kXV?>G*4ookB8?)S9TJ|t?Zm(Fgj}H8u`g1yqNcNs zZeh7)tozmq3dJvVo=$XCh+%VMOXah)y-BM|FAADLPTIQT^%EMYpeqHM&8|Zl4wtG` zjcXUdX?f~MnIlkEDc43FXo!bRKpc}MP^~RQB_fX@ExUZG8ytbo9j1Jyc=^L6@}3W) z)g;l83T*5eM42h6OQFk0Mr|Z$i5x8sBvepj@YfElvK>htd=uE|3+;R&LfVx30(9SK zl@AK}{U3x=RQp=(IUr?4NxMnJ>y%se{P*r6q(*vd*u-9Yps;!C-dar-n82}-nkds@ zl55lbPnD50a z#}(8@;e^SCn7d^ezd!5Q(W30sf#sh6)D5r zh8QtDtAl*92t<3N+@FONDWGl=-IPat!Xf5`EcVcna7vAiMu3{8BC+c# zCC8iDe{A589MGj0vDW58tS=f^x_yx0!Op3qpoA0}o}SUc*tSzi?A9ck|9IhD8u-b$ zPdpjiyFXxFaZRQZjaSYeZ6mirJ;SWV)jqZpbvJ6gZFq6}V`CSk!TS`73MNg)pHT#a z?cc?Oj{A2Gq2X3 zw$zoR{c;Sn0G8DQ(><(!p!lF_&8a3P5F*m}~%Gzb!d0j&KKsasn2zWnhmHwSB|HQ(L z7W9D72gfrnV1Hmk__D+Yu`NJFvCCi^(dxuyw41*fuvA*%&X5QhY`(;ZKqhbEe;-i8 zq4lz~@Si(z?htw`71J}e1#H{1-$_%$;Jfs}@ZccA0{FS 
zeh~?|WZSvW95$^ByuIB3sPlHMt!=GAn!m|(bVL6(10%-Jtt)J&ZBe}L8NRAgSE1=W zqw>r}*Q@XI*Pbj%$uS!KTUs53YPd<*`$4atkTZ%5UvCoxhw)cy1RR2q%L_m|;Uf`U z2x|B>MHE>Cxm<=W5(#>Ur+e1_{S|W~wu!Hu$E(hjObvzCWH6*|_hcxq)I^KO`Sy=a zEXTktwO*p85pq0^6LF2dnmvoE>Q(&{QmZ(&;*uxJNiBRr$P4t3i=PCHljPXZBv}F$ z$^!lu?!+|eP$%pU&4+z=X}?5?N`^%*khUKQ@^=Gfd%oOCY3PBr`Ual)V1X5GKr-oL z*=UL9?x6klu`#a4R^DUTg18!Tz5shmtr{lN`n^`^Xhp9a?io_JTqrSUO=sde6b1{<8cM6kBtxIOwRjDpzlb3{j%JAt_yo$zEy4)|y zBD1{T#enuY0JVX*Vwm>UK#py!sXF}~R;=Zd^h%~GssjrAcSrYdEUi@Lloud4%tU@j z8(dR)uM2wk?`Uyq0UB0$e(ed8@}84YTK79201^HjO3myB-G=)WA zs-5K}WQWIu7C?rPrPC=P?{{Y|N%RKCU{K3JoYRKeDaq{78CyZ%MltovZmxrz(SsSg zA4&Wk96myOPsK&rB438Q4AM2y{FT*f_Dl!=9d*wY?jw+<@t59DmN4@wq28*At>&%g zlLSy@Yn>Cuhq8E4`{DAK54^t4M`o{I31!|+wf=|QWzB3vD=fJ=O*)-?Dq`*dy@2_-lCZ z>UEF;Fx*C~61K`%6gw1fT3ymdmVg&Q0MGUVtAu@htFXp{FaAL$fk+S zO&{}xhF-w65&kZ!7)_OJ+DdpsXj?iN&RdjB(|FD~+Pd$a=?$+|fbWW9KwqV78mL-M z9tXHRl7g3_ADQjzK&m+oxjl;gPy&!OlyU2gH7&~*Hux{MNJBXSYt*O(IT?-bT=Ud3 z>W-yCZzBpd99%WSafU{XM_EgmQBosM+TL8(^p-anik5SrSorIeA|@i=1s2@L`exbP zkFg`=E}3nMtIyV14i|bV4OH-oHh<9BTIM0LKe$ofFs~U&~(wCcLxI(=_P%?eV-u}GbVPbgE^(E`~T0^a@x$bqr0gRN)^a-r(bO9O za#p`G@!+3U8_;zjHtaq>ucKQh#QPnz`3cxa?;%2=pP2tjln0( zp~^XMWyY{?1}UrXA7L)3EnV)E?87mwM(pkvPi=D9!WO>7G#Z^AnKP9ja(F;np35)b4Y?=M8Po z?4S2!4gQ0TqWRXzs;O)wu_QeZN0pY9vK-pP&6KGHf?4yAvyF-eS=o=R%N@D#8mijP zOwsZO4K@`1Tb#!0>l;q+J~>w0)V(NZGIKDEF#q*8pXZ6f*Li6&T z8T2?Q2{8@~B^ipzBtOy1#HIm#*xe}Ddtg*iUQPlyM9MI^g1ua%UCm8yrb?d~7a$$P z+F_pM!;%k$l3fuS-xC;dyB-+=NWsRNKfuY7b-4H^SvJQ$Ul+tADB&A~t@pVg!ZVKR zLFcA3fC+YoKd9rpbxGD6uVc1fOY8-{sn8dgYRG_OTi|gKm&Q`7sYhknbrO+#q^99_ zmSX8p2r@oa!YCV?A0F(<5B04 zdT%}qrbMu1+(fy2>Q#k2yEb1E+i7HVIKH>oI6%o8s%D*n;{O0}JPr9S#}u z)w@7S|I~-}+txTYC08oD-Uy$$O{iiCWwkQ*4Un`1SXm%K*VPtNM4d`p#wCWF154HvDXCkMaNW z0&x7()fM7R|LNzJ-c;S0_n?~v8DK6z(|@r(fzE50*hwawaxtyqO87)W7}xuM6Y&)Fk8RHjXX+a<1JIveY*3@@x;s zhetLq9fm)n1k%DPU-Wh=SoL=SC~s^v;A=z9$Y)O0PNZ{R;4ae(FL^qHIEl$^L~S7? 
zG=fD?uk#IF)ga5$Bq7_~y+-j$cG;N)W?Cz_#)h5Xzz?KA;n&!fVAH-%(j$5+Gjz{B z1t%S90i`HZNI$5aGgP>JAIxo2|2bFiRgJ#a8`&r*(SwI3q$h880Rzsd z3$vCSA=^><)0bheYPo8hWKu!d$$Zo|mRz_pIzX4}qki$Le&kSEUf$zYQ+)fY98ENf7|%=3Pu? z8i64K_IHo*m!ohu@5onvUlO);R4*uJ&^a%d0xe7_-ke&BzRxF(LJTL*v%b~1e|LB? zRY#3;X#dioGR1q^EOPUgD5nPWNZaf1dY^H|SAk)Kq7tu$)$>$%a~-^gqKPtSU=jN_ z1gdU2&qIKBGhMam)KF)L;)@}}hM++4W+TydVBJ%#9{iHNn2J2ds*#}I z!5$(a7P%9|VXd-A??&(NXwjzR)*N!hOS?!{(a*^$IoY%8!WaV#h)s`3GIJDmpY`Kfd8ubtieYZ=iq3nHLnaX>GMuVz2VY?^kA;I615@2Sk58Vt%in zc8o>z>RK9vlj(fP2iJOx0CS6H4;~!_g5U|KKSc0#ZM?LOf*XqOiy=*D?6%%L?&lWJ zDFZkymA*^Ud*c&ivP+~lOj`CYv!oXl^d+f)@^rwX`F?RNmTgX6T&q_`kwo5TE*S~OTS?IvKynNFm;QPhHKa!5-Pk+2} ztzVlRxg+ySi>sSqmC1crF5DsU@uK#LgOQY*<3oYo+Po6U^2eMOAKre09%+0eP}5f) zYbs+b>o)pXznB#}Wir~Ref6j+|7+P6Y#m%;OYS=BI5b*1=frEZ;+dCPk2DAhu5T<1 ziDGwPVNF`2YV&z>v3~N-f6JYDnwAx*;NMIV->L~9pzUqV6G?{P0Or;Uxcy^?Ppx6E z$3`!cYGsz7?zjK|HonVfdi@U~9!q1FJ)`$!!g0#`2N>g95wfkgqlgViMacg?kbPV) zd-xawCgb3yJ=Pzc25GI=2pEDP!P(@hez&Q8VXCv=CpEqrZL(K=e6^RGd)8#jkY1Nn z@y&&`?=us|zWaR9X3RChzR{3A^?E$ju~~HCdA}pWL7eK2LoJ`<=lPBUuK9{ie*?>9fOMOV^Res`YsP)?xdsnGo`swvyttBFSj z%dX2u;GRTJp4~~O!VPp+_M1SeV#ko1PaQyDJqK-nWO>-!_!Yzt*%?gs6uh`w<4B}) zd9oj-CvYOK1-^)wJpK^KTfXd%l>k$}{fzNMB}2Ya+nZALs9TD}L~j`v``MqcqJ?~^ zBThL+9RbJI+^n;^XhmD57cs73l&o`aE<4Ne_-v8XVI1?Tuy@!@W&%kvSE)C(XXSE) zqfL|oHbv!`yQ+d)11p!F1zYN5mel-g5b4}4o?Czl%(aHFSuV`IIvDb8y@Zjsxa?V! 
zNA4KwNNhf=T(@|ZnUp@MJJl0wKlfm6K!{R@Hleq#&Z1oXM{!x~OH}bh=(c(#EnC1Z zhu3;ksjvRTmUQ3#?Yg??`l@ajj!mULE*u}zU9Cgy_H(O8qSdQ7p;cyjmj`nh}RkAG-34x@!sg;3Ym z6NYRAi58J%VF^~m;CH$qt^mL=H1U!4THy%UuZ-zu_RCw0kD?az(0*oYL{wZNdHyvV zEBfV&C>pkaO*A?!#x``phBmP94xfg8s-fg9>6>vD?ZclN&UC`U@MXfljIDPJ>PE-; zzz?=G*yUHcx&$v=j%wl&88+>%=<8xt`>rnG>R2h_`n)tf5%2Dg<^2;$3rZ@H`q%9} zEJY$Q1WNPEPH`?>)WUm<4W3JdH=aF1$Q%aIeBQF)G_8pi*K0uV&C&ho?K+5BIvuYi zSc|eREU8DTSF*-+4fVx^*7G$8sHGcv*B`Vm$r`w*qU%F=`1DCEMs=@R^QE70*6C}r&C>bxk5r%!fNk;t4LIEDB0~Nv)r*HMkT-3;r!(sI%;P_VS0Ueg;1;eb~J(pit=p;x=8h$ijx~%mL`Zv-2f}65 zd^S;I=kHyrz;HbszBL#NW$f0{2h)e2KiNpE6$=I2dc@N3=#s?`o2(y@RPmAxBo010 zH0fR~Et_^#unrmWu>LCCEw{(nG=S&biKT8qJFu`4Eb=c+8@!HO z#i|A~1J+Vf_1jdb1ZCASx-$~Q2@6u44onH5Kd4uYKHMETEVI+niglCZ85Ddb!@4us z+|%F!Umx&}-!t89%M%8V7Pgv7na!gHmz1KIz~2S3ujr_A1fxopKD7QZb)L3=TxQVV zHMB~e8m?m0r1lJ6{^#I!{Om*TGUHxDZz5IW0#B#}e|y-e`8A4}QxcDI%Y!yB{N#hD z#jNJD>k@O4u9HsVIW<^vcGG@AR(C~+i*rKw_;?zhysc<=#XBi=3%IZ&&gSz^4<_RX zipxqLBe2>Y3X_v{D~*X7sb7kg!;nlzqR9CRLM**o3Ad+;3=NBlbh}P$U9;)(akwkE zyKz3un>O{l(U5s|@bH%zXU{jGJZtJe)7MX4WEO5N%^S5uC|cmLW#7zWeFS1gJWl5N zB6ayV)rffP@1buxj_7KynuErQ&ZV@7|CobrZ}*ysO2grBjdPzZ9hpN`SQCu1^l@S5 z4yBpdHc7!^X8nVeNDYI$%A7qvkIhvYdxdpijyg9ZyPS|HW z=Cy#S6J%0{s?R&JK6ebxv5e=a6i>g61iUOwvkv(Q*kck*VKy5 z&=!2wa{aZ(CGc@T{9Y@+ZhlDd!3(q9;8E~2(2KvHq@n>WXp1VAPZSMey;i;3*QFSp z)A5gQo4^%C8B!-L`ZE|z>+-+zRVyJK) zFPMMIZK&AAVXJS!_^0T*%(ho1UB9Y}TuSt7v4~oegljx+PxYvOf1sy+AzCJ5wRY^U zx@|O7)F`diuXeOMwAUywTO&D({$jN!FG2me#0@;q{S8yCXN&0PzpWaB2UNr)jcK}xBD>94j}OF>jy=(L;b|NhG+ z1hm*WZ+p}J>#)-Yv#8|Gn14%r)PpR?eDEv@ z4!rv{-?TU-`Cp~RhK1!herQC`MEIM`qghs-niot{XB=Ca$MxnY|1DiMh&2BEw0H{g zWGZ7P-LGRwVftU2-&j6q@0ZWWo$(V|ve4Lsjm&>5c7`<41(#1H2F-tEI9v{7pcf~J z4{f1p9-?YAIM$x}$_06Cs~Yg9qG&@@l%(mGG>MNdy>|8SudlP@*dM-pG2O6F7*jvW zx_?~gO%NJ<?4wR$L zIs40H@^J}@J82hdM8kQR%JyWLKDc9itFoGJ#?BjTr)>3~mw)!7}oU=*NYDKVS3l6XHe{-(CH3e@ly%u*P?KPpI0J$ z$t}HS1VKpD`#Rh_mCE`!ky%x?x>^64CtLUh|{9?i9cI!s!%y&;plJ~`<&AhC?@sg*aM>UV80?0H4T%RtCoEX_Zi&IwA?zd2aW8+!t%4tD0%VQR~G)34^=#_B3!P{I)7$B&TEw?J)O)&y>Y 
zdr6tFkkDL5MLmvsU3zKBN7m~oTzJHSn&P<2a`#)>+wHHQxnaGWh@Z6frFjDGUZ=nl zXDpHF7%P~lt@HMj@s}Z&HFO#-E&rPiW2d@u)JJYVYY$P&h2z97;!;Us(nu3`fb6# z%=#*4+a`ZYhN5!`Kh$4R@y3zZ&hoL&x_JpPE2QsYVj|UdlJ7#TrjXfe??kVg>?Ad+ zNKR;znf9?4Xjz?PLznl{hWwO=_yO^6AK1ffH`r{-wMT6?K$n^IpDx&(_<_8x5S!6e zr!T*HE`B#>>?f)A$Z=ZuU)N6-5I1o(D<8!|xMT6cCPOn=&|Iix5MVuk%^Nx+HhUv1 zdNrCWThooPrD{Fn!vkuW)Hl5eG|p-{*T=;k@C&{)VnH{n2%;o@LDLL5K?%+PDSJFc z*{6hv$8WTyx1j^(1M>Js#dw_#nX;5N@xKv1{i1^5<^x{*5N{c4tNvXY4 znx6aX4r#T~4WVZy7r3u3UujO(VTEo~n08TjI5Z53Z@U&u_QiE(Tu@HV;xa(-bG<;R{b^s4`(~Z zDq%|00;BKF<0wj% z>O!OHrrrCtuFq|ktPyF~fFy9|hxSd#1zF7gw+*Y>qw#LSks|tsd$vk<~+An(R=Us+G^3EGx}sc`w%2a$>FP`lcQfs zrv1F1c`olu@6A!3w5&I%ri!y`baheTEh<#Dhzf7dt!+k^}VY z^Tw18B?!AcA;K+t{TdKnbEL(4oJ8#QV;`6M?{q6IzE{RAxqFs@Gmm3l zbf=TO_^jH7l+h*-NdcQh7)%a35Pv9dKDKB)iWOg}#CSgTpOilJSPdxus-9gdWHW3N zSjr@ajOhGPY!R%^4LBpdghF>FCt4}qX^g-|0^)Rk7qn#gFa;--)98^VK!d}qf};86 zjD)Sw)Dz+k3OGTP15X`3+o>4<%ArU84MA<_0G1$mwFxS&RBz+tET@$B7RE#%eMYx3 zTfidX9u4Kr`M?X&>v)ic=E1-uLzk0JV|LSAg-X$A=DD1--`((;BqN_PDCfWn`Fz7m zSc(;iqP-Y!M$uVdEroY-RxZ1zSbt<&lNCwk0oIF+d01xEfKIh#;qW_ zTHJUV!k@wQq|a~%e z0oG&qU&L4->1o~>DZ88W!7r3^w-r!FqqCG7ZCg;Q4jOTlDhZRY>09Vol6)k^hMOYVN;kQLd)}$5jZYB{Ev#MXuvp zgfb)Yn(nB5w=cT8^)Kb)(xEJVU2DfyCS(ZmLWf3Y%Jrm7E#r%_(T9XG2|e+#MlGKD zgoZa?m&80RfO)3;@`+e~M?(CKejKZV&09hju919>Zv{TIm3$TGDe*Jo)0tZSwCohT zv~9!W*^FqJrql6F{-vGZ-%O&qaDH%a^k8~yxygaW{bwH8oez2==9TVQ!4DCzR2aEO9UJaE15~2WP`nn4Ql% zHix(Cnm?>925emQF^&{%eePy6%8thsOhYSW~!8`spn z1JCcXiU`LR$Za_SnhTant^s_5AUW&%^cRma@8*A0mNO=7qY9QPt#5vTR~32zYnWnL zc8~SO(8t-rnO)vrKE%`GznXjl3Yw_m*ws`^n6(744c`O|oif)F8KwxDR^sQ+55cL6-8mG9k?z$|P5>J9;mV+fuh z{0~0ZgaeH56ngIe-`4!USw7Ce<2PAa{s9EAHE^&v4~P|i=YOla1-4e7W##{NPaFd| z;#+sZ|84sMv_>9m?cK0HU;o=06&P3?9Muf_|H;nFP7xMhEZ9VWmutVL^nNQ&*J31- z@gLw*I%(hX_i8T&Jhv%Y{;cxH04hy281^vq)r9(g0;a4~AYygU)cU(-g=5GG#6|xme+-dUe88C@^A{8?MyZ37EU23$U_u|E$aW-F%7r7u0?fpd8ez znh&_V1)e((55UW-dfxjyG+q`E`%kbppS4`*xPX`fK^Kip6&(w{?e5S4$V?R;Gg`Wsz7K3*uiJD+~qER0biXz0Bq&^HDD;K 
zp*wnb2FO!&;2ll)s>GL6=8v-rAS&OQKOcOl7VHhId?_)+BIC>J-z^pAf3Efdr(9u0 zm>k)(&smZnu&N8Z=JfvTIt7kBl>7}`N4Cx}^wHnISZvjNO#MFZ+|{p_-z{o;0GN7` zW6@0l{@F4%A9yjAb9|q66{rSGet^uN!bFUj@#Q!U+EydJ_YwrC-YLyWi5Ji380Nev zf3<}$Ba{ER(Sv#~L3=dy^|W>g97SFy7TNb5nCb2#?H$gm`}UKiWEx+t0m0Mu>K7=K zZpj;(xQ{5sV4`1prZA~|zc=|Wb`9n)o-)auB@z{Q{0B~LD`+1 z0X`3&f%f*#POua4Hi6SaN@eV2b_O)hFyo{2sW89_5`(%;da9!kEDm3G>wK=~et*5+ zw9K#vZPyJ!y=l*wJ1$~IBKLLygW3#eGe|LWPUFArR9|6X+>tads892TsW6)$mC%R5rg8@;F{u@M!vMb6 zblSSpE(Fcr)ryC*JEo$Rk1v4Cf=c>0k94yE+O%KyxtTX=lZ&Ua6?^f0e*EaR!HWh< zU4RVNMW+Z4HNxK2J%m&nlL;D73)^XX0f)>Ggm1G-b1Nm189wN9HPv!mdwmVAunNoV zgpC{&vNjHLC)UMCH*A%B6zan0GEgo~AaZrIE@llb#b76Je)_qk?5Y|=s5haVYd`Dw zGIvs539c%#tIG&|LOWe zhB+>gzrpefWna@cC(blVv)SY7Q_A0vm&I7N`tYTziGuni)aLd@h9od@E?qxp0jQCe zSqSzT`Xr3j@)tmYa#p+PH4MR>I3OIF6v}4r4ih?6eZ8sVL6TRl@-m=p}W)}7~ zTnBg`)ZAvNGa9Rnki`ast7bFnmtNNedZ2n~bJrqaID(wi$M?y}I4KZ5QD2o8#)^@* z6)h&W0jpeN+M#;j*bi9cYMFJC>M`BKXK+u9PqQ5>`cV1&5A*mbpo;GUsm;@y1sF%h zBTO1g$Gv`@{B>jy><#qgR+Oe1E}48{)dfi2-V(eX0xuSMR56vdlJ_|wWv0MH^qOWb zQa6L6y{Ofp(?7p3MZW}Q^&zl*JO(o;@Y=OEF~IPi?TpE~pO|(FXr)L!^Pq_P0K<9$ zWgGvF>i43Y1%vTd;NGl;5r1s$WDhMf4aHsU>AZM@e&b;zhiaKr3A4>q912gOddEP~ z9yB2o`7Q2H0RLun2Ah8};dh7XxbhaW?2b-$eY1mQroX!C58G>m3C(uwel-D&L=CU{ zAqTO&X9S`DVZ)IBqk$CEprXa?BeG!aAAUyzxZ8HhVo2SNHhQl1CRo|S?eKHb*976G z;OY3s>WVmJ_(O4aNe>W4Y{ALtICat1$HEzgiiz*fcLa0Jku0=%*%o$Pid-T2uz?S{ zfLVWXew;I5z=^=Ml8g#q;qRu(kg(xD5ysGvW59#D{H%>MKr^cHQ-6RvL|GCW)AuH2 zbvAvEz=jZpJg{mBM<>3eDFtr2wG0FCk|@DWK4Fmu%(D6;z#6hTBqq!1-rcE_!is>C zkLfx(*`-`p^M9{$Ch73H2J1`Qq3oy~@E+9iJI-g)QT{eNf$<6-sw=UdxWbTMyJW5p z66A$$SjIvh3kcdh8Ton)UAAISM0?+;C-PTB%N;6)L zB6?0q!cc34pd=gW#yZZmD(NZiWMPhMIcjF++oV`sjbOGaBM-s)4d{8zvS%SZ7*$3x zbke&u_(3><=-to1>H%w)s#I<@>H#jDlf%-RhwD=en1~a@Ms3{!(%QV2R4y=Sd!ZM?_M`u3 ze{7;hI4aPlOjY?@o@HD54yyB5uQCZcxN{{;141of{#6U>hoVKD$gm8Oe%NM6e4A&& zXO~f>TPg68XRv38Avjw>mfA@p5!K%AJ9m`*iC5-oQ^R97=!AxC^48J`4P~RAb$|V0pCNu7`9rTx0ZVaalNM&LEKoIfh@nP~1pvm+a4j584?Jt*-h- zQgo#qhI`H0XLB5d;MKY1EeG@&B-3wN(KZQQpeqcl` 
zGb!URo6FxDDST%#GPgvX*dZuLahYHof605F#RS1hI++(st>Um@>OX*i4cC#a_M*FD z+~0@r@+<{1lnGS{93yJ8ym(tjD?Ri)?PrFhRjhtk;l()Z6QyI>13M+RnNk?nX0KQ} ztVn%QXS8m_YEtG27~`n-IH9d<5ud$8X5c*bF||kg%Zy0J6btD3Hy}pF2MDU_ZYNfN&G$|n`%|i z^9`8tzXAIT)z<&OOCsw6Hn&n|;D6u)eG7bj;MR1r=6~O(0APfh_4@n&z=UZzhF?ctM|sNe}W(%w489yn6_`RDoU7f;Kl4i<9l|*S<92c58gkVD*K&oI$Yuy z`jfw`&)y?vpQH1b_BJt|2u@kY51;IMeK!e;X2|}y4VEH%qUxYyWK^2Lbm8i0m~4BO z%p+MB?TeYAfI67dPfJPK%);(?5e~_hm_q(-9=Ws6gWESyypIr(uc6aDCih_>41o^H zOEk?3PV8fv=7~#EZ$wGEMCsKWBFJKl`{`6##Z-oV4o^<0Q*A}OTHA_>U^m0kR%lOK z1rpl|7rz(vi+CsHvDS7_Gd8nc;cJrX?;AowtRa;w=reUm_CvOKvbK|bMRkO5)a@Lv z^%Un7TK*_E$>)NCZxHyj29fR>YK9!|y2%ttSU!Vd$qvbGzKlOz`Fo3MPx5e9v`I1TB5GDoi)@W8t_Jhtx)I z?Vu^k)86bb;Z$`a5}V#u<|_KPQ;qq4F|UXi%dcBl4ADyu4c|Kf>_9s}h1`6$#3M)6 z{uNUh=U!1;47*Dvsu;C6x7>6p*CO3gb>NL8HBXTS6g1XL-Mc<7!XF z*6W$AOdCm>Hmy!G+YcPJY%?@`bL#fg-ybRx1@kmxilQQYMxQU~nS6Hy{GZWqHC1MC5tuWvak zL_;88>8quo#Y7P4JrWzO*sQn7bdgOsrue^_RQ791Uj8_=b=~nH=5}PyB@eg+TJ;Gk zGUb;5yV&1D-Qq_u+(^b~5w%gRhylaM#+Vdb3;FuC-dqe)1^R?)ar6pDp&&w|4XMgs zH6HCMGt~Pr4ZWyBp>e87 zfVZvBic-N(NLh!UGZ3?+D6N1K{#IP=2g|Lnafa8_^5R}%J(710^4(#W&=*@juiFqA!KT z7sJSJMnBLIBQY~1TUVob-8prqyngAXf9yWfaYCol=mBR{i~CaRKuw^z!@e^~{z?M0 z8K`{9*HY;secVoE<`gcXW#1liSwwxqdE=`B`-E&V9j@^OFl+4C<0=s7CN_QhFJr!C z2SmC~odCsTP0Won#HB^NSdoU%BE88C{>lQ4)>kjzM)SP!<@_CRNYoE&@`ZlJbOF#Y zlE6^F>Gg&L`Wz$Az*4UA(w(2JXP@RECwa$ZA*J@=1^pq(4uI6N&zjuc2%3_`<$jE2 zwuS_;IcFq68Hf=oxk4kpnpX?WseAdivD`wr=%ehqu%>S1AHpI$^htri@*(O#24bS%+E2)Y*vjUhs%=I07=`m z*_WnqmkTpEk0ScY%t>{{OT-P@7~<*1R*f1 za;TVV%5K*>r!3^5P*X;o0Ta;6oJMK# z^GBc3v3Dw;8z5`3GFz#d!p%0NE*?)lJ~l*ICsKX%Qx21ZRcu^Hbmwk*z28p#1QUxs zz#}C%GLFCB-J@?`HenrmuRqh?3?0y=-H>+M+)j2GMRPdU)w^-51`JzP`ec)P#@W$^$p}u%r9Az6w7HM({^2qD$hLbw`Ih55CgvK7 z=s^SN_y_dFOVbt!6ZQg=mhWb`VhOC)3uB0O|(X!wLX|CSLeI#J-=D8(*jv}Tue~N zcf1Iawg_=xi$K;T;noOTn`l=_G#^hGy7cE!vC3x6@>8h2;asVtojo8$zMhe-#YJtdHXePE77@JLT#}70fE%LY&F-Mn zN2yvTi>Fsr=~@#lZk-epODbeH)tGiQl2r!^Nm>QQ-ikVl2- zoyAYrzbmg`)|M$8fl5DS*)6E>Dt>?8UqNQPna!MU>Lq^J>)$cr?}S$`nB=|!tCel9 
z+(1tC^n$aM@*c;9Olw_V0m7RZ%(;y&t^Sky%x8q@g8o0YX#APad(L#-282DNqFyp= zhm@&sJgAo8rVD8E5%I1h;4Ca!M)<#AZt+!CSXvD}Fg+DtNmOw61ko}G4az{X+@khZ z&HnKYD2~Gca`^;Yz_YiKE}t3!ZU?!ukkCln6BL3e*QGK%3#+{-s$_-~h_n;a{=R>l zukBoQHbBCJCT-#&db*_{t@$gh{?G9bq&1PDAnEG}@o$YNJ6ovIRvTW@arm@L55oj| z$hSz|Hbfj~oi=C)Aky(dVCSmFM#a;ievW zQDeUqCl-E68=t}{HTl28|KPw%()7@r>h^iyFSnerpYaER9%mz-(oQI5E0Ixwk0P&n z%u1zZ{Ut(h&P|uvdAbI9zdKmimc%2KVa%}%Y@P-pn+ie{oMKRut${GoBMR`=oqof- zZ6TgAOi%55d^b?%O2gneFA^7S^&Dx7@sX~p+G?a^yz2OaHkYvoJBTUjp#DIIFG+G_ zZGpuJ%7Ns@r$dTer(%{4t;Ln{!#xL=b6jR?__HGNGy%c3)X7Z#DNmAJK$kjko>4#~ zbPkN;#@9Ip*|m}*W)j_O!4!)eeTMSaemv%ymet0imDhDr60D@xU|q6h1}0@mN>=Kl zeSQjQ(1J$y(#^yo0&uO4-#Suu=qTnH+Zi4=HQyP%XIK2_lt^X2gcj5)uces?Nv(EQ zo~d|v4E(egCRsjgCEaDaZ?#;bKA3ULa@94fdTX`&`HRxki=3tzkF@xh#%&k>tqUOr z{GBbupehTI--Z`R!>1Es^03T{@BD9iR)_-HAnGC<6v=!_P&&@$==l zZRvNpKQUvJnR=A9wC?I9T=w$9kDOEU+0>8br^|>JIrfQudVEi+i1@seEsAfz%jwiIx@ zz5BKQ$1lIH50(sYrIDrikGjnlCAg&NJ^vBmOmD+B2!cC%ti0wJh|Eo7<_VoM4LYZ` z{QTomv5u;Q6B~~|tB1nxTO$ssRli4eRL2g8){Q&APo8RnQ^)@shlDO{2cYgt_r*F7 z1%L6O_C-1zbbY2Z(f@zQ`>VJpzyFIH7O6py76zmdkPrq2>24Gd1f-DxC8Q;!bLdpU zp+iKZq(wx!85-$Ex)G%D-sAW8e@>p$`|LiPm%f;pYp&U|_xh~$UejT3-5LG$+why) ziB1lL6}M3nA;c?h@k>1?8VB4FJ zHumVL!IUbk#JxP^cW6&L4O?POga9*l8sB;mk}p=fJE~D68cSdEc}-M@TTu_+d;FE( zxIez_E($+iUdAI>vd}ohAD>vcy{mre*AmV**GE=hvaFI7Ppo%A<-jt|*yfZ=Cl|lK z9_dPYg~pF2-FZQ$?|sHZmgSB7yKsgY9CYSxJq=#R?_jq`ymrM*C4gYa7w@r%Fx&nt z{AI*v*7VLiuBG;!$6X7|j26F8`DQXArfq8!$iMKJKl}WK{!3~4qI8`2?|Et^WNWht zm=jOEXxRzjZNV~|HF)!{zUlMmI!6u*9en3qjs7{bkE7imgRywtx#xVDVC?rzqKhz) ztn&$T7fTRHVAz}8La4+ysPr>oaiJJpWLbMNdl2y(;aY!|SSp3t{Z70<8eSc{n+Z`V ziVPy_d5#Y{h)+}aM1IeHN;#JXw*!g-3*lk%q;02MpU;h7Cbi9iOg8%p^bz+mkYD2rA_0a4*ta35w~J{Y)Ah6Rb5*h zd9$I^qwRZ3aM`x^ZwaC^qmn;b$RR&=Y>_d(NaC-2UG54^bGGB`8~rN^9G7&Eqr5F9 zL1>9SE@;Kl9Ia;qtNxiMZ~t^>ihX+_t>n zKibOR>Pn2<(;(Y;&2LpZ9$C1Qb;KcUJf3}r{GD77`zwfbi_F)^=8-}_z%vUQjC3h4 zddibli$D-9HdS(2K1e`%#<^RE#1CZTU~PTNr6No1=A+ zPVMzZiivC^Uqv09C8^;>syrE@0!M^EyU}DBOvO%C4cj9~y+y0W{fa!UJzb;YtXYJu 
zzNYl7?K%^Wd>&^f;8Y;7HArlV?ATTjyvt(8UrYb@k#u;zh?e)7_Zwf)dyl`nXOP8Q zQM|+ct1I-T(VGa1eHg#lEdC>lOMR1J3zK@9%Qsp=X=2t^^&cg9xnu8uTSu4rF+R~! z3_oO{FyOV#?A2FR*zmvgVLP?`g?Z;F*@6B-L7p}UEXg=2vnk|S?4 zmDXevQd8rZs?2y_Z)WD~lK>QVjcU#YwiY*O_!eryxY;i2!D=Gzhf2+9=enbGeIA%? zW~=y@c5~Tv{dAU1f{{uQMVmBK_lLJ+E=ps0=WXc`y{rzzEzHCu^z&bT*IjNN-k@+| z3QbCxSb|C{M25a#uC$^4{*&|`GTvc{K1CorQ0FJ3S@-HK_L~YKF%kD`F+-3J>UucXV1JyXjB^xU$AXKw-oR;dQ=i_L>K!|D};tK(g!v`j-q9jTGiAHP-XhU zu+^?&dT+`6OyCJDMggPL13W*4l%gkZ8>BOyIX*nJ1`iQl;(Jtqoaex|IcJFRbsQn@I9iQNdg=-$|BiF(hJ? zoHA7Mw18+#oVCBdYUPX{Clws2{11>qj8o^QjSaUK{2+!l8w}-}?tH=M@JsSW0K6y8*y&s7-H&cqMxQjIxL@1w( z*)a>78;K%V^}m>JeK}KXWyLm1c82z(K2tkIT3qlmWkZI6 zH2EhzvPLZjYZ%xZd-s6vuFDEL+ z{NAJU8LCex*@l%N+uRVGv+KNizO6^H4I2c9TCYfVxVxy#e{f7`k|J`~Djamrr??r@ zeOc!DaGgc-8a{sSc)x`G``}TRRAD<^S7&qa8^rl?7as9{+n73BjGpb2 z5JXedO-r63H&wT%e+povG81oDx9cr>McVYvkRO=ZZX7rPL#TR83=(GE3)@mwD)AY? zAStDPwQoN={t@MI`RvIJfW)p%#S)&jgZI`Akj32`C3|vbn<+0=<3H*AE@md!)|t6; zipy$$9%wyd0}jkui^1P%+?Sct^||FJf(GSCDMhf_b&2E%`$G6G4Mv(XNW~XUp@~d> zZf*D}wKwFxlf@V=6HU^Ro8H>ziD{A#$PJv_l{r)j&=Q8M z6tq!Qs!gf8;m9bw4fn+WU#duF0iVz`EJ&rtYHU{Spx z;!fW-=3s$_*+7L;wfv3oZ?Ed9z$*0^v&Q6V>kA_!!`z@3U5UrkW6|bog+E3E z{}-C@!VSzMSNSo_IY<28Zk7dnHlGpq+*7|!V)n#=rV3z2!}q7~>wktdUd%&kcUs+! 
z&)oBpx7o==?|dpzk8l&4H=Uy_`7=E9wS0N*PbP!uXBzG3lKjzK5xdkcpYC1*OIXhl z8JcP>UqpyLw(arb=zKBr1ZDA?p;f^D<7|@byGvHM40OOv7kilB{(0MI-0^nq)B^r8)&0+0Fv?w<2m zs+A68NAP#nnttsyU^K3Q0_t_mKoxV3(pFPDIoxpRp?bQAR^=kYz~WY%WAo{5REoec z#+xx3g_9bW0^u=eSUO?@SEh;hOS_u{{En3H>+Xcxn-RDwgzY@ek*YVM1W6IA+ zeIZ)J$4phXz-Ar$h?=*ocwR$f5h*nh#o^VSB@s@o#MsFg8%({7t=#(OHuG})b#~(t zzjg@jUZ%%}-V?U9bQg_?W>BGS@~1pTUi`3MAXN0(imKQgXDhaH*Jwt~a_hT2x|FJ* zhkMLqncpm-uo{nnGLd*<5+1YnTM4J4IPjf-&VVlmPmb-w2`F;YZw`ZJD5Kd_115Fc4no461#EnA z4qBLqfSSpuE)8JN58lfJ0t%cqKj5M+aY&!3uFz7VBkK-{h?>p$h8$4DmTD+mIbL>C z9dj{6&lIp|{T$-9d?KZm@5^|#u1vTpbvpHdXzI4hJeZ9O`cbq@T%Iy#lX_iwj%_04 z4N)hu`1CqV*;c^yS6nmoDw510g_M-Dx+ToH`5v=-|94R9c;AAZ4;6u zOxE{vFOqHTQDw5^WXvL9$e)frKo^mFZ~W|&#}V*(Wa`VJI%LS z6=lev{Q1vLne5OZ6rh%jIQEox22_hzH*R=Cnfu-O*Z9l35%MO6h=A%vYYANbN;Rq&va@{v7AvpTE$xrhdERgvRm#K`mN1=Zec0e z0|OFWnyLiVRL>S=Z*x0}+>cyO&SCAr4dw#DLT%zQj3ex2X2qRtzB7I~iw6w+G{J@E z3XbS&{LDm)F*2O{6to@C<&YX#YxR2N(VX{P5A*Vb5s45|=pGh6T{hfT$J$g5M?qrC ze6ZPPWXre`tNZOk{A(2FTTEa?YrFn;KUX#immk7x=>}7`%(7NR_n6C_@^e2X=m>c8 z8kt4VD|ow+pD=rKnKJo(<(-2H{;0L~rJ1R~RY-6oL4t*`u&FW1^ijQzS(6&W>J;(u zi9ylb0r@K2d^Jh)S0MvX{)2#rYBawIx9%mdt3R#0b*3+x%+V5+;_WqCW)p-p0beD$ z07*a1-Wq|;Sl#7f)#uMvBDy1-V^{l65$XK$%G#@Mv@4|SQZrrupk>NbdM${U7S7W+ z+$s)Ubk-8Fn`06L8kFVwFsz`$^Lu{0!q1ij)9MAZp+9u@; z&aF(*s&+%(yhBBN(9im@8v5qvy8{Qg@QW8z?^#jM9rg!7%nna>X}-@L8Q|R9`6P{4 zuVT#2M8CFB8pdI?9Jlrm2K7=AMLR^S&tb5U+v4p_(7uOZ>Iv>;RLK22|K`jFm2#`s zi0`r%bVVHo2@NldQIm1*H2l|t3*}VV8!)M+Bvrjfp}t;{C*M`i+4X-Nu8D~V$d;A& z<-N-GTk*M<0?5c;WK>qME()^$H9p5KNJ)@a+L~23Qd}NQ?0p9cld*Utzb)|?(&)O zFcoH&+p1@^*JGIifVcla_Y=Pgv+@k^9cnVvQ-h7Mp9U~e5NCd3IAS~~XPP2vMG!GV znr|wlwT4<5B4m7}|CGmV7~;O<7kawA;ZM~}LtS20NNbfiS;)6RIHojMXAu+#=eMwv zSJT`!=2Q4&TBtB>X;RXB=}v8dgKBxjI~5l%=W_1EBxKuRd}# zS-T@~KULG7M*V;}1Cr5GKP>P->yD86v@Ant(h4jC0_fsNOmwx>EHRd zVwgJPrk1XM6Yr5I-RrZ}P3@!x;;6!-gKA?@>~46!wA8>#N>Q-nG?GMr%2cfG>z?Um zAFpi`nvSsMyZL#G&m{5fUz6@VG@DgFhGTO&Y_iWPmAcsPjvb|k@&P+ksN=jx-+#v# zEyyYFce0$>6S?5cTJ0}hg`R|OtFxcKO@zE=AjBW~7%C;JdKZU%FJ39?+ 
zrV|{ltoZR%T5RQmh!m6En*$$(IAX{8e|k5)LN~4~X6O?!1KsyBAbf!8x2f9dkEZle&)1k>Siae{Yyl}twBJk{Q%kw)8Kb^)C#-b{m z+**947(xoD`TZOkuiV{w<)zBQEO9tb?mw4cT!&wNaHv&dRw8t^vDS-)kjdg`ob}W{ zA;%HrH{2I+a0qh}dgM7uFmIxy>-NbND=BtpcpPRU&m-^_@zS37lQw@h-XIbsVe)CcENy#QO89gMT3wyVEe|RQ<3FPgv zL0Z`sO(Eee521FMiFjiP`m{O6EY`~} zG&v93?lNxfPQu-*n(SA<6FUxl%5YDeQwnF&hiJueIB&k~P_FRVR)OAHT7e$WA%^)0 z<*HgCvI(Kk1z|10p<*UhfxIulcOaiHgzM1gZT3$a*}5Q<5RhT8%>pN+b0-dcO5?q*o{w6niCC=ef>y z4d-|=(G?K#{ zt&D?D%Ip^tcvb=>do_SCmuOFzhR68dRFR_u(x`w{i{UC5tJ3s^?4Ulk zY*};dm-@p;Kh5mA^zU0#-5yIDdFSBD;|G& zrG0bg@lg1LzUH?IjdA(iYaGk~$HOlcjP))$tmCEJSdDidWXCE3zba&!AC z_}@2xNzqJWHzOaq+DM=hx&c`%oB2MkpDt?eTE4qUj#NjI`lq|Zc;_+O)3x!~Go%iJ zu=>cFKq~f@sx`L~a73l;I#_T_|LRWIsc;Z7BNSYHpr&Ve%|)TWVK;dV(k7Z^V^tuH zLFIFTbjK>6O_y=PIXBM;-wWKHW*dte)U0Ks!0OcJFmJ_LVU@A!Pp9Fq@$~!kR(TT- zawwP0OiHF7X_#*V44DtC5t5N!@H zsxkbflE7KFZ5erw?CR_iXiiS{+if6QyUbJy>MBnQntjJz7PJ>nQ>@DGkXvOV%>wiBmbb+!!{~2Zq+A#PhFfBli7{NgFs?(uV zD&hq9a_Tnw!2>dfgjDh^*P1&&HsF~K9;^$#V)YlF#%Avb zjjboc=606IETrRorXGxsyb%V!i2_$)U$GjB%4_9QUbLPIFk+Fs+;7NvCE;8!cm3xp ziO2;;4H})dOzE8}E zcD<3jHNZUi)^y;?nJuaY?z!*J_=2xeVG(l!))K1;!zdE7{w3MZrw)T-+ zuR3+gU$;GL5x?1QIOMS3MNi=n=fvj<^406UhizwEHfNgeMU-nVa_FS&*d8?eak>E3 zw=HWAh=lOMpRl+HPGNS-P7$A%TM}*^)bq8@%T>NYF8Ov8=`~*or1aV8@EI?H?xr$k_7Id0?N*L4$T}SvK zd&y?hQu*Tr^xl`KAITzR5NygCz(Xx!^6gWd3Jm!q5m<^pnz1na+fi-_hfEYY0CxjK zwGclBr0c6cxn)0zD0=93>7o7xB#8=17sO#5_@!x1y10HGp^d?%>ny*oxQDO`5~-h;M}X%D&Mq^V1aQMZ!nRfWx4sp;7^vI&G!kgW#?er z4KFeNn9-b9n;yrjX^zjVN&rTAXZadZc}tGWQfYR-Ee6&E7w$Ltq?VF<9A>ZsLQU;e zHY15=&q`#Q>kGICnjmCeq&us3dC(|GiD0p)8W9NHyXE+V~*CEQR*CZ(NxA6 z?B06sWXhKB_Qyb&P@^Y|9ck*H`Lm~|5yOnbZ2{s+-c7HLXqxs{MJ4&r<^phvZ*D@A z9$_cooKrT(5>Tww9o?yI@?mCYjV5Eg#D5pN1<^Yn!uj&?b0PLss~BAiR9GjS^*$3! 
zap5gTP*QmaH7U~K(P)MyODFxCbT6AV;kLY+#u3fcZRC#=E#f6OqrAxf-cqL}VdQ6) zfZbl}ki6{9I~`@ep)9P3U==Q(fs0`t7?5x=F_5|%6s z;U^E{GS#@O<1doGvDkRqGB5z)-lbd%VX_>9Iaqp%ScV|@m)CH^hWO>#!Wpn`bdv<@nqh@}CN3^Xwb!j52=xPoe%2ANm&k{i z>_YQCE-(MMXR+R-i^1C_gHX*!x-Wi@TXPB}Bra!){9Wd9j%SZu(o|4Noc9TA{IkOo zBLZ^cctR;{Ai5+B(K(~vcpZwUfH?hgX5&k(BTbt1j`g_*yt#xDXEmDbl8WWDko|sM zZ;Ur`z~F-XvWKcb+S!^SLOE@x5j zng)lpxs#zK?!A~Ys>MvDsJJ3hV8K}Id+eZMLJobpk@z4TfiaYVy|dQAni*1-NLp`p zLmSy#BY)*Pw%PHU)LynsGY*CF$xdp!*_~PK3j3quQtj=g-217J7zd#s1Mi3QhbPO@ zKaGCi-`ecwqHxhJ7}_LUX#s2_6!GlwOBZHA*+sdoKGU^Uc4b&02hDi}y8dGb()UI` z8%`UtPI$pO{H`H{ANKv_)e0zYe6}~uc)5W86B+s-<9%cb1Ja;bjSX@KN`Rzxu=GGz zkiCG_5m2xf>@U4!e{Pu%xsO9S@Q&C8uhJm5pT(F|pTD^l7A1=|jDe*wg~KMB=wS4+ zFd9NKtl!xhn2eH}Jx)UI$4Va&fmXH$l`d*YX!H=usjqE2-97s%Z|d$8rQ5P7)To|; zV$E?4qBx~)h$BD4@HjzS$d_tS_G04+@^fF^XAX8T3gTzPSM8GLLu1y~)LX2i-$|Qe zj&BQcp}k0h#+}9p@BH1|3{VQ_h>(`=z|B__e0lzzlOIpN*Wxl;vfbC#==V^|`r|Z_ z>}uBZ!Hz*9&zi{B%M1y;hLZVe9rDJd2*x$NFxm9}0cGBslFPjgWp$*#>2BU*_shZn zmM{fAvYU|EzTVELKC#=%Xt7Q@muLYvREV7v{k*ukY!$m%<*fB6ct5b4b)l@WHCkW6 zsxR2ZbU#tn?6oLpsZEmYs5Gihx2 z^)tZec;5Ot-Km9ON)P5s2xokcwR=Rv&ciy@j1%M>IGE!LGeY7ekd2L!jvCu4BJsd# zblf~+x%ex^IQxJ}FpWIT2n89MkIn#Jpn zZkf57aU;`MS^g&YGH8-RcW3#TZpO0JHxgoX6~R8UmGD90CsJoG5;_IdN5H}Tceb_v zq(H$}2#U#YiOy!&heuXh;(=smomkt(lqB_zYpk>T(U3C!Q1p?P1m4T9hy{`gDeI@E zx>Iy`qUifp1u*MjyeDuUGHc0_yUi_ir5nX>^V*0Q;RcT^zd!i`higPseLG67DJ0rm zDeZ*M8`Jhq$ATASs++Ns!hZ7L!4#e$eVWX#6F0(Pex;QS@FMzR%8IqU{L=<@PFG)BFx^d>Gb9Q9`=JU*XK$PZ=3(^P-(!E zy$m?${olh3OrFf@;Lq^CSL%c#fSEepSbqQSI15N=QQ)=oFYSE%-`@2FAVDNzb@2Uv z@QtH5Tz+u#{ab)+fg@u{JL_CWZ_FQbUU6NYc1lx$!}wbE_+K!O<-*w7m*VA#X^!f{ zj6$mpdbq#v_<_0ffL8VO(mn-CJwz#UBb`(L0H7zk{R(3)(Z4#rPs&>vJK zzTyX#8S|N1XL;SkHSxQFnM3ja=x6i3CqJ7n0Y#!&Tts%^`QLNB=6N%+=##tz4cg*u zy!0m{=SS4|&=D{M(xY>qKmdpANY-@~3iR9@0H_-^h7)n?5`gdvYA+9aAI_S*nbl4A z+WhRBX>-0T{r4>Hj{zjd18j>U+1?vh)PgWhn$#S}ESShvXR1x%B`p1s21JUGtLC-~ z`KlzT+#Q#M25{V?!Z3=e8EuvU(XGL}Cy~>o@{!>mna*ZoN13QwV2ob}I(Xfbz^>-^ z9SpH!avK~?o>}$6qCjimC$s$C?7tXxMI)ofLZCoNh6EUxo}-^%A5#A~`aXZ9;r|z~ 
zqlV`(7!kJ`&*d0lD$IXBo*H=~U19@{NR1nNS${9r+N;M+i`EXL@lF66;dlw~b9i|H zPi+aDZlfGeM8mrI<*)nSG-x+u4@mruP7b~S%6OxqFgTtm28K%@UO$}yQhPhdZgZC~ z9t(Pbz7XwV5KB7$4Gb>#{VtaosWHg9NKiz22LQ@~*hW}AVO#h`7|dVx75@8R%7Jl%67=Wz%QEJ>f*C2bo{3vhay4kiyaRUl>IsRBH2!%n(`6SF zv!H>Z<5VWDL*}4`POtCJ|B*u|F>^uz-^WHjmA+dkpT9i`K7anYn{A~HC!ZugdKR;{ z@gjLZTXC`q4DOtO*74;IX7C}}&oh6ja?#y;jp3TvYdfT00h|aQCLl)N2&mG`ZF8p` z!i7SI0A5j2aCa;53iy!ZJLzbDfg000dY>dd#YDSoT3IBejY;jp8_Y4eRv zj7W?L3AbefQ!7Wh*@6A~Oh9OzbyMl0;jGW`RZab;`aOM_Y_V;ZdZblRhiuV_JmGe- z|KaN+obFyK1^HWmCaFi7uU(^xvW;(m5b~7Ckis94GE=nWQP1s%3L_tm>9f}Fx_u#R zq3bzH?_&CCg%sU9W{nkL0gVb=UA;8O<_$@$`BrfK&&#^m)2wM}?t7V_70xeyXZ`hw zfB$G;*&kScJUn`h=E#wgNVa5smhcn~h{f1F448v7As#@LA&0L+0Z2@Q2k_}+(K23F z7(C+ZUm+w_Dd$mVN_&_JibVig@%HExpCiN6QK~C@-S0QD^fSC>iQ>ss?(qcGoeyV0 zfAr@8%-|Gpv_9l@o%F>=ac{B_kZxGeAc82XCM|0^%EV`LgQN$uf>qCfy1=h&d~q!W z!S%mZ91lq!Jod+U4FiRIf3Y$SHN1VV_eC1uX#TNG00qQ{v`@bG?fY-&;Djovh5dZt zAF9s!ppTbw8CX2jvm%^H*-Ly6%Hw=_!aX^G0LANjSk2f{-y-oFO@ZX+ECth?#@Smf zM0Z#bEGyOB2^?DjrpPEI-&fR5lv&~QG>V5hlyFt1S23f`dq-fANRB1;m>FCc7$RBWg$JELOXRX z>aT9~{O)*6rQ4W+@>T_y##=Z3j*WcDs;S%`@;0olyx*J0usoJvvvXqkM@)n>#^tgu zUI9QSuNus6jouq;K=Dmf&I54G!x9MMJLQX;GzyQ2qeJR^cCM&gKExVKv9Oy;TKT|v zJ;yi81`gg8IuHK3x_@^Fmjmrpm)SN}gslP+eA~NTyl}Yv``Z#J<(q_jm0myi@+p4| zJO=#Fw7$UR(}U%t2$$eyBG#mo1c4<$SkW2jwR`@XyzkyGJgroI>}OTA0So4uECDZe zR1(_J^?DLLl~4tWs6;=TL_ zzBmXU5N_Q%Ym1<01rzrp<1y;Q@}4@yRJW{iO@Abc|2}jK5dU%p+C(s=oLC4Peg{&9 zBh^P>I9st4*(9=xvSCP-Ad$3za_S|>!zLDCss}G7W8{8ETfA%hB4O${d97`L>2{!r#QEp}!8Zo}ea?9#!%j(chgyQ!tu zjll{kEM)xmkVJ~3YR~V;w`sccS*Y>oVO9bpU=Bxxm9K*xb*)xNjaQJtCq@I6J5)!9ANPC*7q&}OFFf&lkB(M;bpnY z40W+(Ri(>@`=(p-Eae*<@}Wek)+Y-}7cXx%dYY2 zkM_g&f_Y3tYQc}f2n*OXgGc3fJfd7F6C2)1P#ZShqYT*g%zK8t2 zABO0|VPVj@t0bUJ#PV@{;7FXXH3^2i(?O1;v8F5F(DdB~iVm#~G}Y9gn2!ZUFXdCa z#9O(|^fb26L+wiY=)s=KbEqw8xBSmYfnXgsg1zv&$9LS=(D%BsKN0rTbF=732|%q$ zsX8ll#PT9iwV+<{&HR`KAVZD~51Z$Sdw~CQi^UU*wsCjX~5&T+I<(atrN5$uP>G2=mj`vqgiS69PW}o7+Z|yPA(072alB-St zJ?W@iO{!{Flm6~xty$x@AZ`UoE3JZaCPhBnj(FlO>I~` 
z*kRo85bNEm`&tn!ThNDZcI7j+PK=Qz#5CJ|%ZG%qzqb+H5uzRA2$c-e{81 z|6-95dI+6NC37SBEs$&KKI^7;@%aq5@s&+mI2kSFmN=d_>Ole%JAr;m^_|JoA)un( z^j9k8jG=*d_-!UV)E>J@BQl~O9&*UDrhxm|)^ua9!Y_)lR1H;43)OxcUUTo=i)^qf zFi{(2^-XlOjuXoH&}dnrIQ11~55XpXTYto0+XD1+4UW5t2d@@J8(*Ezm z%`DsbDZS(>w%KrYe@5$b4MGgfg-}9Yqk|%4ks(si#KXvLaC?E2C7vaRkFv$Gff|fi z%W?hYimCU`NBA?#PP98f{W4;bfcc+!&dE98nzZIqvKTH<60PSr1IV0 zi#LrwRj|+9<9zP@u2_I%=!_lax4$uwE(uIRw`CVAdWoo?g-@?#})6Ea4-%w>*@%MovMtcRzEsc zbu>(NZ0hE*-Oghj53ZNL`m&eze($DygI6kxbvj`MQw+;+g{tO9G*d^T@=%LvVrpZK zhfgl5>s>k6!qjr6?b9PvH@fe_+wNSi$Y{ zjkS^7A^AXwo=DL_H3#@2{{$YUgIe*(X|RpXMN@r9V38}#d1BEOfVlZIykMi+zjvze z)k8ya$7iVq6Vx$m8}6F+f4d%ihf5WChbnZM``_>{5Iz0S+$^!AVYTpqLFQwN8eI}G z+y$yl#cvU!IW9|t)K}(@VrSraQ#P1`)!c-k^3Q^X)He%Ff#>RD(w6!Kw>7bw+n~r1 zS8yKMV0t13(}N^wc~b=ABx@1H3vuWF{upt^qaW-n36i^2L?4vnh%5f&zf!|%5zO&Y zU{kott_hfwbA65HY0x`pMO24+DnCjR9gTJK^UW;aPf2(1^f18 z^HH=JsmlDeY|d_p?@r_2>sM^LMTKKa)!f+9(Ge9x#F}ooQgn&TSOTJmtW@X@lE>~r z|7lMi-b0=`yeGiT6eY*WBB(=29Q_e``5hJM^Zk0=YQUu`XReWkDdq=f@+%Nw{;XBlK3k z+Nu$Squ39g?Wm4-uHpqX{&^!iRgEw6YX{#MB(GpGC6z>7ZiE=OSQsQL3c%4`#(o{S z98~V4$#O{rPkoXaz8wD`x))Kb%9cB&#_w)e-rB?Bnj z$~`%rCf8@tT-=XYm`(3RpUis?-a^CjEeIQ;d~ zBwa&thHsSUb3qVkuhw`Q;ke>rk#AW4d$)rAM+I;#UGvF~xIETBq0>|FRDNncoRpgvR(0^XF@e`HRU^5|AvKHJu>!$jcN6x@y+E& z=*piN;XLH*=6)s4;Zov&o#-+;td_0)+^BZ#t>SNm(SQ|KuCun*r|M(5Zv;=ocYK-jPx9*cHEMWe&Yn)14@JfBQ>QhWOKWs*-<9d|p~G zUvNzf1SqsI&2s)|#ca8a7 zIQ6KLLBs4mBJ6@mNMb3D7~aG4+kxdpFkSTRc_&g{m5Cp>C;74Mm|nEDl{L)uw*WsGn*Ft9A&;IP08Dzwiw1zerxI^={t5AM-VkI&eB5yQ@jr zzOwhf3nFI?4Q(45UR93bwpCEPXH`>>>hP=U?dE-ofl*Jc@5LVaaa@NHotDzWq$ex} zSr_pCDRq=L-5U9YbTbS6Bl+%2vf}G9-ZgLzja;*r;2eGALDT;(|xy5Z5VMz|FWq%Hk8bWC)mtyxe?%=V+lcG25^2 z(o6joK~ODq-C8Po^XdV+y~#D8$I9cM)4(m)Wz>zrQ;5*kpsx~wiMh0vAzE92E&wH# zJkspLBiaB8qSoFX7;`-;Ym8;+YP^`;!y`^@`@$0Ic{1%N-RBs6=hI#I{f|E;T_O)0 z!R32BS3(3c{$z(^I8D7Va%9g+j2A54*m)D00zNn0d>p9QqQ-Zc^I^ zkOnY6U3bB|)o-+P`4J>C1df39i7vBhKdzIYdh1JLLw|oawdc|d+mdCxMH!6c`RR!F z->Z|>{l(T+3^wpf*EGnhk)pa6Q_^XC(OoE;WlHzTKA{mT^in1F{K?L=FormbD5)O1 
zg+_%B&~iSd7188vVIhd`#1MdK+&$zzEx#KJCu8lgX-7GU?JC{1&wIznapX|9k^d6! zdPtcu1jsNV`{}iuYrwf_Hp%n+0Hu-1W{5io5VyEb^ou$SJAQnOr-%2O6I`R@L6!C-^+<;e5(>3_uCs#oW4(D6i1 zSR$rfwTvQuD`)wx=Y$E6Ft9lcbHrh2nx-km*;v_*QUF^#MG`^My$RmJkd&CmzL|RN zQ(rQOYK9}TldRF`T$PG9$Dr21UkuJy*79Wb&3LT+h5##rq*qm@2~GhB>=Y3<)pstR z05;1Pv)@==~iY!8wd>Yp6jOcC-%?KaYPuHhm(xLivCdE~R}HFbnPldfsop$mwfo z2A}&L>&CEjAXBc$l<9J!QzylpWnGeemL(V;kC@?=I;i8CFzb~^aeE5p{upF1t0*e1du92@6rR0z|K@Mcf{WR)!f%&7Xe-#_|F157-{+G9TiYKN_bfC$|Fo>&z>Y+aBj z9i3kRu~`Op$G8DDm&831ug9`IP4LtRH&gOkpR; z6}LGXB${x3Siuw@835H5JLU+$x5sD*U*=yLD0qT{N?WmBUnYNo;>!@77Mady5;w9o z+766(e?mkh_$x;~Y;*5)9>aAd3XHuWFZ=EVlev;oSdjkwhspwczIhOHy6>M!u9Cy_ z{e@;Gf|m%G@40J6iNMG`Mj{r>)H*co@6h6Bf50P&^CDgZV+beUf_< zdd~XGk3Y9CO)FQU+!SO}_b4FYTfo`pR-tV>S*UBl+V}SPl)zPtk37+WHIW*dE2cm` zjbDNQ`-9oAqIX_K5Vr4&?TPaGvD3qIK5YK05RVOSHS}S)P#ws@C2ysxvTeOpYdIVa zL*(31wNRr?nsR++6mg(%P9yL_B_$B1B}*H%oX?Pwm;24E7)y8x@|4S$k%f4yXv5oV zAK*_5BnXCX;Qn2Iqp}-++#a-Ge^|&2C9mYrQ37$}{Gf`N*S$6t0>?3*{XHVYHt?lS zWoha?G6LQZJp&8s zpCa2Y=YDt#!v)Gbnws(MWCn85_ln7;he7c#7<8e9vO3%D$Q|NSpdIVM}R^n znsqEm6HC^>mJen-bt5|AGzGUa*q92hiUSXVB10NJ*m_~!W0t}Ana~F0b+W_nMMcKD z_b&w3;VNB#iG_5wAKa(wV$c7tw|bbr5Hq)Q{9V*>H%FGSA0>R&u-DQ)pU$BF_B*Ma zr0zMeloi?F4CE(P1r@K3`~j|kcL|oEM{Ghm9*PR}>;#k{w!Lo(`j)W7`E~_~ ziNp+unD@NL8qJh|e;|*Tt008w?nD!XO*j@=zBzJ=O!q!qIUI|bfs!ZPMC(ig1(Rh{ z>ggb=CU3q$1K}du3F#(^PRBX|a-Zh5+B;?SRwgU8$W!Z1mEQactjgTW-+fF5&&@2e z@{AlOz*J7J}>#x2`KsocrKxXpW(PiPN9D%eIqx;X=+vk}T%R?z2^Qf2+(OnLe z8pKr!^&f+jW5jjr@@H3|4o~CjSi^+&^M5eYgYAX0VHMv!p8bWmYXG;qz0Jw`4&OJX zL6&~HMWS{%-(n+u&))a{>F%rFqWZqJQFH(qTDqjWyStI@ZX^_t77&o`9#V#uZcqV1 zW*EApONJq(yXzf3-{-G*=eKj6xz3!~d!4iPUh58kYjoxj&!)otEWO5{B6$ht3A&k9 z-{gm^AB6^`oUs#q{m~fmL@ljvC%UO;U+kIP;xw0R@wubMK@u55wF3is4F-R|O@jaZX|LqCww!AZf7j5Q7%-SIE3 zqC?!KEI|e}jGC3`VOTm8FzmV>N&Z2L5HI27CM=#pn+zkeThM^CK_fIDlr2sx0vuT# z8bU=dW5%ObB%d@`R^q2%S2AYtB;ds_{Nh1AFl3bKH2PD-ge8TsO{cJ}LlM{?nC+$#Cjjv*S^3Ber*81_4FNM(aB41)^5iyMDP#W%Md^befr5N$XRkArY 
zOEi!b9uyQtcNKXVJsNRfri!t45bW6MZ=4=XNcm|0Ef!L%v!f8GL5&k_l>8@@`(XQ^sVVh&H|vvLzwMJ6K5$fd(cQt@O`&n8Io>!2E8t#9wxHX5 z!F-|aA?SQ(jn~F)N9&+i7Iv`0i%zYXIbA)@I6fSL061v&jz!M2Y(iyqu*#^>@Qm|o z5CGKij4O^*KNcLBdVsRc-}IT-d!_nFd}zqh;%Y`*N#FYl5mnU_W`d8A5pgA6S&DtK zy5eu~_XxY9pk;e%FkbolM1c$mU6z4B{1D{(dQOy#+91(4%c;)BEIjzc16#{RkzWj5 zop{S?me6nFQgB~<>0wUENC(WG$-b0rOPu@2m-6@gIp# z>xn?eU~SX0gX{mZlId40u=|pJKMMXo^=9Gcl#wBPCD>sL1pn!9{l;UK{Y|QaC;Bk@ zKX_6B6xTg6{h7dK$!0Len)gW3TyVF>Ev?O#=;jusitACr-?a78gzb*&sUgg?%e-Gk z{tnCj@aA*RVcX+DtIfKagf5dMlib8$xDPrzdr;dxt|>Qtc$pn$VTbwZKMiU)e$izRkUM8pMn#fyYc)c98A&YQekA8{>B}@S5 zkHEQ9z-8PzmZV!wK{JD{;~h5)Eib*ckAP|}{kmMP;?$fodME$0`s^C3M zLIYF3&*w$oV?v7GsAdy(+o^)NlnIR!_Bg0z=f*$ge9c@c*hYPxc@7U$pr%QBpvec3 zBZ8oJs|bYWdITz6*X7*?NL6{b@w3h6)#DZvx8A(=bNoT66 zGZObH4fHcAq+W07R@|F3_}sNgH;xO$@MSZ0e2w6I@tv3(fLuONIrAVTwH<_U0FXnX zgQ%mpr~n^wpkL!AH1QYM|~2n!IbH~k}&|+n?C8-09sKY`=(RdRc@fhkGbwY zcBbHwuvpA4Cu7Hk5X#!b#!=2e(XE%6{67)~APk(EE#9yW)T}JfDaZgc`Q5So)%wF5+84C(3CkVtmG33Uw`hY_6hI+&>6*0ueY|suu=c zCd_=B+&G1cp!WkyYgGX2@=Jf{c~56Q_NxowITT;6*0JEJ6qreRFs1f~ly%vIK+$l% zUzu3BF zYo}*2_SQ7kaetqD``9c)yBl<4+96B7;zAD;!|TmPUNHJlG>9Fom0`7=1J9QWc~+CD z$D01(VNiI1uhwHZMlLXc0g}#-?ljn=bQrh8kSK>LbWec6k@7Y#Mj5)0`14y(tXsQ0 zBjZLvvkGbXxvz!sQ0RtY39!a3h4!3BI9jSxz62~Uo*`7FbPUUiHc^GfRDG4d`_A$K z8FqDbJ^uJkTn%^8GqiZrA9FM)%J6Qz%p|+UlnM^a(Fz;8uM2Aato%!sqRXq09Y*nJ zPYSm|x-z@9DU&q3{<2HDgi07BN8y8wb*bR^@_4yC=TyZmWLR9PcXeeU-|@77Ha3A0 zh%=!;%O=`hJ|V)@lcK~?QDsD!L+o}Vaue*y748-PY9R0D1Xc88Vy$wpOBaT1t$Y7> z|B6~Yu8W{Rg_;;!!%>gb4ywfIO`Pfx5q5;f9Vs}IurI|si9c?af+9**cuhgy)y^B{ z<}R(n$5g01puD4uIBV$OD`2lJ>V#-hD)Jte`+YE?yKrLbvW!Dky(7bCH#_)ZSBskJ zN2sGt=cOS5qf&!n0VV%(g%3;)PDhzy*80Vy7_q}tl}eg@L~@yMa3@$!=An~x7Cg%`rGRH)rTkfEhQGwm>E3pQI)=OKVwc1a4|?v888&EhlTmH zjM{b6?8bj}9@5owG|4Ipp3Wnmi4GF&{e5_fSAHu{QiQx6o3&Odu+K1xh+v2@hhr6Y zSPx7L@M8EC>E3X1!PBjsN$|O^suEzr{I}A2tWQUiVxZ0IcR9{uo6nLunv6_5N7bk} zJ%0Y+f&`T)W025Uv7_`-h&qHH^3G(vq5nFE{u@$KH=Q>C6`J1sRc;Zxy7B{criq!H zM9%DVbf~hdx}-1Vl|zTj;#&EaIV%~0)L?(dJw4O>BGD%PAG9$WU&$R1WDS~pfBHp9 
z{tWKT;y?EuOU~k0AXNYwD63KU<}o^?=d~aQQwRYhs;)v7TJr;TX#C5SjuaE7=uI?& zkgqAxhbtk+Xzt`5C52O}>G-dj5MyqB9WgNd1Crc>IWNq(QO)=?^w4?n@)+SpV7DL^* z;~Qnc7mgE%EBqL1qd?-QFPqDF)Te4BK>OEgm`rvz(Roe<=h|tba32Q7K;rkh)@q9A zsAs6>uz~8lcj{v51de-F^O4QUCd!KM@GChqwZFogJU)=zE-0hAXx2N_nW!hsecK+R z^vKtWx0P5Hiucq93?pM|f)JH1dRAdig71SvhH?7kTLYtf9?TI@!w6r8$oIPz;zLCp z$d_@w@~f6l{ETkEHyLK9#E5P&vspt{xjaK;bg4vb_);B!gapMQc{XLyCE5w5_rel8 zFWBQQ*rro2J3g_*h7I6a{8?*ERyqOsS{St0zwC1+;r{`tpHQy?AzNz3@xB7rA4(+O z6Nuh?)s=jX1x;qA{Yn{w+Cq*+Wn=Z_y1pRyz>e(q`L-{NqIKa1L5Qiou6` z2bnpQU?jtA#l zuiutRyhJEtgwmMrj)bd~*Kyq)(!9OjG3;W!g{k#L#fX0Jia(0!?UgOA7H^P(2`a2k zVzV*b)xsnnuks_j0t-?kuYZpj6LnX6kb5Grt2>Okn7Xfptd9zNJ6|uxJHcUwfkO@1 zX;MT(3DM*N>=yGd&o2_3mJKg@X7l~TnA_bOGfRd%cm7;(Sd%5ttBcscz0|Fk4fk9| zSC6ZZ(DP=Fr?KL;`uDfpce)%`(9M=11u2e_i|PP1qG(7V+GXmrU4iju$)roDXNjAd zUG+i@Ni@WGzS_?Q354VDU7*P*QFzEp|9wFq@_2WOMAq#UFm7{`@I7kusfZ$xr~21C z!^4TA{X+y{Jn;Zooy8}8WH-u0!G;swxHos()*m%)TxaNUoSxH7Ib7|Asb~?YM-~?S zP{CyH#$4h=&acFq;S?mFB!`qyQ<4Ec3 z}@P8VCKJwB4a2q3O2SN6RfjIzj@|Gc0f+l9~ zT-lDm@=F~LI`|`meh@r3dLch!dv;kM6NaC?y({r~-R=#0lp4I>Dl6KV`(7Atn*(nP zbVCvBWTW#n+N-P!7Fxc^aG&xcRYft0%KS5Xsu^vfDl+xvhd-mgIz+Np953%cErKIq z$mrxD(6CG1^!Ngon5QnjJeIl}9Owj}6eInC8S!RiZ^XDhSm0+~UlNq1S0fAT8^U{v z7pe?bwS4%V{~D8IRukdIB()uB0$nF`JL)7UtpxLzMGyOaESpm?t{{z`{e;L23`N>u z&BF>ho)DFAmTfod=mn~?s3%5msEhvnQ?ua{&1GlF~N66YEc1(!W=cZ>w zvZ;^&UVQA&zkalayVjvwFuXCXR~m_2s3xiu)NH8ZnUHxK@eM$aZ|VXO<#EAzxg$$Y zN#|iyRjIV-l0hi6OqeV?hXPDgwos;V>$-1MD4wnUk>VcT??RQWntJK_3Z@m&Cbu!@ zU=uAMQ^FoE-WV<6WI%27?n~TLKeL(oxwqTJh$Nk!({we>Gdk<4dH$chWG>+!{vYr)(iX*NyYh&d=VQ(FYNz^J)%Z*`*rqTz`5x2h9PpBfui|IuO zJA{f8zR!B8v>c|nXUOeipk^GssQ3OZlIAr`EsNcz1}6!{sDbCF^oa6b*`64v=2M5= z=m>r4S9nj^ter^C6iB*ZVSN&1t((zmzkpYFw({P3S7Pwm=AE&lnubP*&}BWS2lH z>3F=NOrRL59idWmfdydfiRyb5FOi&B9_EaWfuo28EyOlTl3VSL6Dqs!u%*Q4NS=6; zRZO{V^p=dO7rB31APQ~EOg{Sk`wPD!T1E*yQH?Q*kbqH*RwK%23QCrft*piZdrq2Q z+zmxQH#>C&23(&T9lOnzrc-Cxc_c2vF_g*)8xVsdnpMi|>v?Ji*KebZvARd|>>dy0 zb*$J^CSOA{M%6hdLI|(aL`S17B{SQPAnlqEo*a}?oL*LpS6wVV#ZL>co 
z816X@eC7}Y<(mj8{FbdvK~?cmz=Z)Ga4fV$mtB~$1E9KW>NWDO5+b}*F@66$Dm#C?3y}*12_lJvP)^s zb5YXz+McZPhsU4$<)@8$Xq(a#hT&3DoWsluie?R_1Yw)HE%5=({XJ^(3?C)f#IJfB z5x&7WWEv8#Kach~w!T+a-kO{}|L<4FQPBCr2eBQzCjU}Lfs@EddgFJ9BeAxr+Bjzv z9)E0+9-_<5(c0jQAzcSWz-UeyCw8G-3_Y9kFa001)lSULuoSWVr23}XYcTp6&p)qN znA)x$i#*4~e99VHlQg7X17sh$UEME_CepJJ?NR|ut^y{3qXX2J;%k)F=tDyOFXSML zLLYo>GR3cCbA>%@mK`I;h|jK-?641K?^<3q6uzzlC?O>(e0Az z*|xd?4G@eYK?E@jq>xYt2lEGBxqIKmEK)3CTQ??>Q+5DXI+@;5%!X{CL7EUmhQfPw zyf$6>qL!;HW;oC3ft(?V@g&yW-j76miphfET%w~dA&%7H+d^9a1ZHB8il2K34Wcmg zR`r^Ob2J^oRFJ*0=HJcyRj9*Kdh-Mgt*${zX84-=^Of~^8OnjMw!J3@8fC1bkN2xM zTN5*2is0CBTL{yCQS)!<>1}g%=_olXKxfr?JeritLl;H`fQdz)GsIJ|{z3NL%lbf` zS*AJe`suzJnxd{ZzAj@@kPPgHwcbQZ^8BC+WAANSw5g4aIKIzTdfajy`aS$eK|JqF z+p&EAO;U-+73toDqa zTaRZeKJ(YR`U6Gcv7&yjAXz`NLApPl-5^bKE8_XTe#k?6!t{iWhDswVy^o)Leda@+ zsW2z)QTAdxgk}Xo+C45 zjpUl}giRQh;P^Ab7KH!SS0PJrVfu}5UG$h�t?L>{8p8S3u}VsLsiFn4)gw{K6Pig*esC{|or(dA9Efz_Dpi%ga%?xYW3S&NnbIcXmVcv&+t+d`H*EMFA(rhc-SN-OR_Xih)A@o z59kH8)@YkZtOaP6W??5U-}yvKEeo`c2@Rj~qti(8rc3mYp7cd$ zwNJI+vkRNeKI^VH>!{erCkf;mt^o}ZF+h^A1C)439R`bBrqGDy^+GaznhZrSO`LHvv*QpdbPR{oj7N(Lb3 zht6wep;?$S0Q zv{w<2q1Yoa=BGr0TxD_e@N5DHv%=-r|Iz8!8+6~`Or(9C^gJ10%AA#VRw?Fy5N%+d zlkaMfVD^N+?$4*~_*85GZ5to-fAVTgBzH^z&?O>mmW#_K^+gWR!%(r;P^tCa8JDg4 z`^TLQNNj0akB7cWk(!BFEgqaiAVtMj(5Hxz$)=)Puy?_tq?$E^^xo>O9kF75+nW77 z@=QcR8Hbyb{Zz}BgIRTJ=%m|U!PQ|z=cl#C2$?z&^o?YeN=;vsClTJ(s}xQhTviy; z#m_WBh+Wf%V=y5itT@Cq7fH~Dw9VHj_TXH;cu?sD&kc0{q)q2D26_Itc&gHQ0Tnt zi}v+n6qi9pO*!TZvRInwm#g_I$fKuZp#EZKxtyj+?_vDu5Qz;tKo)WJeDya;|Kq3@ z@Zh;ePGz0%l>S%q{r~c;QT4&cyuqXW%b?ezohSQ8*YRAsZg=14#(B`sj6VKxzsMql zS}bx*@?d;oO_UC!)w)#5s%shYv>t%9N7R~|&$)~XE^NmMIF1@x_g2p%A_8W%&-~AU zqR+pQhPF||QsPyInVlf>bqsGPq7osY7hCwB9H}1OM2+(yFfW#*6OX?-zg}a zX(F+CZ^9#!ll8b$vprnuhAz$7fff~}RclV~C1X1+U8WDL2M+X1cZW>VH7FkIyZ_47 zQN=ZrWOlQ0P=WT_-^F#+34&{|X4?X9+@E~vYZMA-NOZpBC^GMg`V5D-Zw6m-b-~?7 zs1U8ka!`EUa?>mFU|#0EpZgPtGde@`%;4aS6VCw4Z6v*sENBF-A1kzb-?9dX7!lml z=EM;)Mx5kx(F!b=s$tZ1qqau`)L96y&(_g1Z^2IEPF1hgf7PAcRW`6i>0+A~mIW?L 
zKDZ!qcUwF1{5QCeTqMsvykF3_#6axzwdB8Qn@bskDoao5*W({wkbR-ah=n&7)a|ey zsCBh_@8h>F|1iA1lw9PZ1hCHv?A2fE+{}mA6Ju6k?NF6zfK6i+AIEMbvu>(bIwubO z54!~2kB|CCpb%WrB{lFuyUq;~TYViQlF1SGh_J-Q?29+iuu*|Hf`bVyf&&b)j$wv3 z?LnvT^kcpI^$f76iN|$q>PXqg%@z2?p`4Mu^!KCtVzh8BhtGr-L7om*qmo0BSTkmg zdLc`9LK$ORPq%IJg3JcX=|N`TNEy--97xuBx2a>Dy&M;jB$-VOkmNKve9@;RKDfIuY26b{nTfHh{F5rs z=F^YMLQTjL*Q7dKxSY=}v7pH}$E#o*4Gx41F6FQsNy{X!^(59Or=a+*ZHZr_Y$S*r ze0IwrA;h+?k-C+H7_V_fBIxon= zXq>i58H^O(A!*!FWdluwP{CYwLaNkf&4x(|=WPQw66aM33LN2H{FiEga@?C&YYXpl zOQFG%`Ni>8+9}A5t()_Y`$m1aF$Q7`u8tE0Vb?l)C?ok9aLw(L6#Gp0n^}c6e$C*& z))EkXTeOI6a8O0q0hSaMexlf=cglEML`@VIVe4GEVb;ke`@GUL!_vn;TIS?8-+@_5 zujJ^#nee;MKQ5NKt@W*~(%@O7wRea&BXo~#u}8`GBLUW1DM9JoH1}slHYwZ>)%p+( zcs9L1M@?fvtEv14H1my7%f z?b0$+$3*&&eCKxVbwWlMFWzH*->;%C1EZ##lp5FDK^Ol9uFS;mIPs1 z_>KDmeU7-e)tX@t_Okk{N(``%Y|-6m0LwcwYi*E835fGtj?R* z0FwDk)`}wnjE#;oK;*`)_w<}TiO!T7eE!nohAl?bZQ`}9kIL0ygD(9H{a&NJ<$Ys` zv#MT8Zy4OXty0plK3p-H{`Y=R672>9V$JDv=4WSvfy#N-L*!LqQrYMqwp6&8wP5*O zPTEHM*;&fBvIwDN{&NkM`pDAc=IUE{Zs@ zJz6FJ<_IvcWrf}lN0X88aq821QY)`c=caV)bb08j zc%%DWKNNL}4egM+m6)H~!(u9}8>em>s!Bg^Q zWoHUkZOWcX2fy;FyN9vCaW9uD8Vx2bX9Bn@#}C)xVCrm>ht2cv4ue8}z4uwDC^$af zD?c2QqlOd857Om1WNw{&uQVbX#S{43JhoYBf~RkI3G=rdeiJRD|~Omzn(nm;n^K;8@O7rTfn`*Tx9pS;=V9`0TfX2DS;XMDDBS?a=KnWH&UKJ9X5 zW}6XwB8ijq&GL#rki~vEpuMxOXNBM`SfHyT&_wd#XtrX)LNdE9@S^j2ns?0*x%fwR zQLws`T@`bE@Aa~J9gB)=rR~NUmf-n0wfF#5fm>``@68dZ(uw%T{Y%G}CDqzumzdwp zTR&yh>>^3mA%E@5q?kG`5qU>zX@&-ILXu5a2mVL4+H51QwkU+O;%N;G*8qBEf=%Du zlO7Mf8m?}Y(T>p=t4TF*o%Vts4s{c@cHkXMx!&zjVs%F57aQqQnt_j3d|*)5&&S@n zAt6-AW4~yo_UXs;6Zua`689CVezW)Q-ic5f;UzxWJ8|qK4jJMU!xnHV-orL>)#q8# z?tWeE#JfMPfy1E}W}M%_aF8cGw4YkvCT+Tue zfk(-Qx1x_uCPT49Pze_g>cLB6zYNHUl-PaIrGg(jye_wv8Z?8x1(V_ek4%~(?XS90 z73gi_6|Vgci@BibXfxnUfv%_Oeh%I{?nph)>w8qCg15+B;S?kHCwfeg=U9b}Y}17! 
zQ};W5A<|0U!|-<=;K4GP5fGc>F#4esIVk+m-`!XnU6~8` zY(HlJHukEz#(H7yUV+ZAJ~V=-Sd@6WXm0Qk^E19jPg|V_suvX0E;!pRLkmjVY>!W* eCJu#_N7M-NZCszT{Z Date: Thu, 17 Sep 2020 16:03:39 +0200 Subject: [PATCH 48/54] Creation of a local function init_credentials and modified requested code --- nck/readers/gs_reader.py | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index 8b003797..3a948cab 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -71,8 +71,6 @@ help="The page number you want to access.\ The number pages starts at 0", ) -@click.option("--gs-sheet-name", required=True, help="The name you have given to your google sheet") -@click.option("--gs-page-number", default=0, type=click.INT, help="The page number you want to access") @processor("gs_private_key_id", "gs_private_key_path", "gs_client_id", "gs_client_cert") def google_sheets(**kwargs): return GSheetsReader(**extract_args("gs_", kwargs)) @@ -98,14 +96,23 @@ def __init__( page_number: int, ): self._file_name = file_name - self._page_number = page_number - private_key_txt = open(private_key_path, "r").read().replace("\\n", "\n") + credentials = self.__init_credentials( + project_id, private_key_id, private_key_path, client_email, client_id, client_cert + ) + scoped_credentials = credentials.with_scopes(self._scopes) + self._gc = gspread.Client(auth=scoped_credentials) + self._gc.session = AuthorizedSession(scoped_credentials) + + def __init_credentials(self, project_id, private_key_id, private_key_path, client_email, client_id, client_cert): + + with open(private_key_path, "r") as f: + private_key = f.read().replace("\\n", "\n") keyfile_dict = { "type": "service_account", "project_id": project_id, "private_key_id": private_key_id, - "private_key": private_key_txt, + "private_key": private_key, "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "auth_uri": 
"https://accounts.google.com/o/oauth2/auth", "client_email": client_email, @@ -113,10 +120,7 @@ def __init__( "client_x509_cert_url": client_cert, "token_uri": "https://accounts.google.com/o/oauth2/token", } - credentials = service_account.Credentials.from_service_account_info(info=keyfile_dict) - scoped_credentials = credentials.with_scopes(self._scopes) - self._gc = gspread.Client(auth=scoped_credentials) - self._gc.session = AuthorizedSession(scoped_credentials) + return service_account.Credentials.from_service_account_info(info=keyfile_dict) def read(self): sheet = self._gc.open(self._file_name).get_worksheet(self._page_number) From 8f1cb537ed7751254f38527e9d20b83ddad2a1ca Mon Sep 17 00:00:00 2001 From: ali-artefact <66793911+ali-artefact@users.noreply.github.com> Date: Thu, 17 Sep 2020 16:17:02 +0200 Subject: [PATCH 49/54] Update README.md Adding the google sheets documentation --- nck/readers/README.md | 37 ++++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/nck/readers/README.md b/nck/readers/README.md index 226f4727..535b6198 100644 --- a/nck/readers/README.md +++ b/nck/readers/README.md @@ -505,7 +505,42 @@ See documentation [here](https://developers.google.com/search-ads/v2/how-tos/rep ### Google Sheets Reader -*Not documented yet.* + +#### Source API + +[Google Sheets API](https://developers.google.com/sheets/api) + +#### Quickstart + +This command allows you to retrieve the desired information from the google sheet row by row in a dict format. For example, given 3 columns a, b, c and 2 rows with respectively the values d,e,f and g, h, i, we would obtain such a dict : + +``` +{"a": "d", "b": "e", "c": "f"} +{"a": "g", "b": "h", "c": "i"} +``` + +#### Parameters + +|CLI option|Documentation| +|--| -| +|`--gs-project-id`|Project ID that is given by Google services once you have created your project in the google cloud console. 
You can retrieve it in the JSON credential file| +|`--gs-private-key-id`|Private key ID given by Google services once you have added credentials to the project. You can retrieve it in the JSON credential file| +|`--gs-private-key-path`|The path to the private key that is stored in a txt file. You can retrieve it first in the JSON credential file| +|`--gs-client-email`|Client e-mail given by Google services once you have added credentials to the project. You can retrieve it in the JSON credential file| +|`--gs-client-id`|Client ID given by Google services once you have added credentials to the project. You can retrieve it in the JSON credential file| +|`--gs-client-cert`|Client certificate given by Google services once you have added credentials to the project. You can retrieve it in the JSON credential file| +|`--gs-file-name`|The name you have given to your google sheet file| +|`--gs-page-number`|The page number you want to access.The number pages starts at 0| + +#### How to obtain credentials + +To use the nck google_sheets you must first retrieve your credentials. In order to do so head to console.cloud.google.com. In the header, chose your project or create a new one. Next step is to enable some APIs, namely google drive and google sheets api in the API Library. You’ll find it in the « APIs & Services » tab. Now that your google drive API is enabled, click on the « create credentials » button on the upper right corner and enter these informations : + +![alt text](https://github.com/artefactory/nautilus-connectors-kit/blob/upgrade-gs/documentation_images/credentials_gs.png) + +Click on "what credentials do I need" and complete the form. +You will find the credentials you need in the JSON file that will start downloading automatically right after. 
+ ## Oracle Reader From 969c2e5b7cbb7f9a5221df68a509463b11178589 Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Tue, 22 Sep 2020 18:24:21 +0200 Subject: [PATCH 50/54] Modified the way private key is read in gs_reader --- .../images}/credentials_gs.png | Bin nck/readers/gs_reader.py | 18 ++++++++---------- 2 files changed, 8 insertions(+), 10 deletions(-) rename {documentation_images => documentation/images}/credentials_gs.png (100%) diff --git a/documentation_images/credentials_gs.png b/documentation/images/credentials_gs.png similarity index 100% rename from documentation_images/credentials_gs.png rename to documentation/images/credentials_gs.png diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index 3a948cab..5faa52c2 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -40,9 +40,10 @@ to the project. You can retrieve it in the JSON credential file", ) @click.option( - "--gs-private-key-path", + "--gs-private-key", required=True, - help="The path to the private key that is stored in a txt file. \ + help="The private key given by Google services once you have added credentials \ + to the project. 
\ You can retrieve it first in the JSON credential file", ) @click.option( @@ -71,7 +72,7 @@ help="The page number you want to access.\ The number pages starts at 0", ) -@processor("gs_private_key_id", "gs_private_key_path", "gs_client_id", "gs_client_cert") +@processor("gs_private_key_id", "gs_private_key", "gs_client_id", "gs_client_cert") def google_sheets(**kwargs): return GSheetsReader(**extract_args("gs_", kwargs)) @@ -88,7 +89,7 @@ def __init__( self, project_id: str, private_key_id: str, - private_key_path: str, + private_key: str, client_email: str, client_id: str, client_cert: str, @@ -98,21 +99,18 @@ def __init__( self._file_name = file_name self._page_number = page_number credentials = self.__init_credentials( - project_id, private_key_id, private_key_path, client_email, client_id, client_cert + project_id, private_key_id, private_key, client_email, client_id, client_cert ) scoped_credentials = credentials.with_scopes(self._scopes) self._gc = gspread.Client(auth=scoped_credentials) self._gc.session = AuthorizedSession(scoped_credentials) - def __init_credentials(self, project_id, private_key_id, private_key_path, client_email, client_id, client_cert): - - with open(private_key_path, "r") as f: - private_key = f.read().replace("\\n", "\n") + def __init_credentials(self, project_id, private_key_id, private_key, client_email, client_id, client_cert): keyfile_dict = { "type": "service_account", "project_id": project_id, "private_key_id": private_key_id, - "private_key": private_key, + "private_key": private_key.replace("\\n", "\n"), "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs", "auth_uri": "https://accounts.google.com/o/oauth2/auth", "client_email": client_email, From eb631f74270df44e21d016b81c06e05c10d78ebc Mon Sep 17 00:00:00 2001 From: Ali BELLAMLIH MAMOU Date: Tue, 22 Sep 2020 18:30:33 +0200 Subject: [PATCH 51/54] Adding pre-commit in the requirements-dev file --- requirements-dev.txt | 3 ++- 1 file changed, 2 
insertions(+), 1 deletion(-) diff --git a/requirements-dev.txt b/requirements-dev.txt index 186f7889..c8adfde3 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -4,4 +4,5 @@ pytest flake8 nose parameterized==0.7.1 -freezegun==0.3.15 \ No newline at end of file +freezegun==0.3.15 +pre-commit==2.7.1 \ No newline at end of file From 97834d91e6a825cb9446db587d2813de132e22e5 Mon Sep 17 00:00:00 2001 From: benoitgoujon Date: Thu, 24 Sep 2020 19:58:13 +0200 Subject: [PATCH 52/54] fix: get spreadsheet thank to key rather than name --- nck/readers/gs_reader.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index 5faa52c2..34afabd4 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -64,7 +64,7 @@ help="Client certificate given by Google services once you have added credentials \ to the project. You can retrieve it in the JSON credential file", ) -@click.option("--gs-file-name", required=True, help="The name you have given to your google sheet file") +@click.option("--gs-sheet-key", required=True, help="Google spreadsheet key that is availbale in the url") @click.option( "--gs-page-number", default=0, @@ -93,10 +93,10 @@ def __init__( client_email: str, client_id: str, client_cert: str, - file_name: str, + sheet_key: str, page_number: int, ): - self._file_name = file_name + self._sheet_key = sheet_key self._page_number = page_number credentials = self.__init_credentials( project_id, private_key_id, private_key, client_email, client_id, client_cert @@ -121,7 +121,7 @@ def __init_credentials(self, project_id, private_key_id, private_key, client_ema return service_account.Credentials.from_service_account_info(info=keyfile_dict) def read(self): - sheet = self._gc.open(self._file_name).get_worksheet(self._page_number) + sheet = self._gc.open_by_key(self._sheet_key).get_worksheet(self._page_number) list_of_hashes = sheet.get_all_records() def result_generator(): From 
2da464ca99421b4b965eb13156808790bd2c813c Mon Sep 17 00:00:00 2001 From: benoitgoujon Date: Fri, 25 Sep 2020 14:14:42 +0200 Subject: [PATCH 53/54] fix: better output file name with gsheet reader --- nck/readers/gs_reader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nck/readers/gs_reader.py b/nck/readers/gs_reader.py index 34afabd4..9b553814 100644 --- a/nck/readers/gs_reader.py +++ b/nck/readers/gs_reader.py @@ -128,4 +128,4 @@ def result_generator(): for record in list_of_hashes: yield record - yield JSONStream(sheet, result_generator()) + yield JSONStream("gsheet", result_generator()) From 7ec5949794142f3254545f176e46b64af1328e43 Mon Sep 17 00:00:00 2001 From: Vivien MORLET Date: Wed, 30 Sep 2020 15:00:56 +0200 Subject: [PATCH 54/54] upgrade facebook-business to v8.0.4 --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index f0e897f9..643c47c7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -10,7 +10,7 @@ curlify==2.2.1 cx-Oracle==7.3.0 docopt==0.6.2 docutils==0.15.2 -facebook-business==5.0.2 +facebook-business==8.0.4 google-api-core==1.14.3 google-api-python-client==1.4.2 google-auth==1.7.2