diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_monitor_tickets.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_monitor_tickets.py index 1ea3a26c..a903351a 100644 --- a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_monitor_tickets.py +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/aws_monitor_tickets.py @@ -1,6 +1,6 @@ import json import tempfile -from datetime import datetime +from datetime import datetime, UTC import typeguard @@ -215,7 +215,7 @@ def update_cluster_cost(self): self.__es_operations.update_elasticsearch_index(index=self.es_cro_index, id=ticket_id, metadata={ 'cluster_cost': cluster_cost, - 'timestamp': datetime.utcnow() + 'timestamp': datetime.now(UTC) }) def __prepare_athena_query_for_cluster_cost(self, names: list): diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/collect_cro_reports.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/collect_cro_reports.py index 586ca438..3399bf3d 100644 --- a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/collect_cro_reports.py +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/collect_cro_reports.py @@ -1,5 +1,5 @@ import logging -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC import typeguard @@ -45,7 +45,7 @@ def get_account_budget_from_payer_ce_report(self): "must": [ {"term": {"CloudName.keyword": self.__public_cloud_name}}, {"term": {"AccountId.keyword": self.__account_id}}, - {"term": {"Month": str(datetime.utcnow().year)}}, + {"term": {"Month": str(datetime.now(UTC).year)}}, ] } }, @@ -61,7 +61,7 @@ def get_total_account_usage_cost(self): This method returns the total account budget till date for this year :return: """ - current_date = datetime.utcnow().date() + current_date = datetime.now(UTC).date() start_date = datetime(current_date.year, 1, 1).date() cost_explorer_operations = 
self.__cost_over_usage.get_cost_explorer_operations() response = cost_explorer_operations.get_cost_and_usage_from_aws(start_date=str(start_date), end_date=str(current_date+timedelta(days=1)), granularity='MONTHLY') @@ -92,7 +92,7 @@ def get_user_cost_data(self, group_by_tag_name: str, group_by_tag_value: str, re return_key = 'Forecast' else: response = self.__cost_over_usage.get_monthly_user_es_cost_data(start_date=start_date, - end_date=datetime.utcnow().replace(microsecond=self.ZERO) + timedelta(days=1), + end_date=datetime.now(UTC).replace(microsecond=self.ZERO) + timedelta(days=1), extra_matches=extra_filter_matches, extra_operation=self.AND, tag_name=group_by_tag_name) return_key = 'Cost' if response: @@ -173,7 +173,7 @@ def __prepare_update_es_data(self, source: dict, instance_data: list, user_cost: source['user_cro'] = instance_data[self.ZERO].get('user_cro') if instance_data[self.ZERO].get('user') and source.get('user') != instance_data[self.ZERO].get('user'): source['user'] = instance_data[self.ZERO].get('user') - source['timestamp'] = datetime.utcnow() + source['timestamp'] = datetime.now(UTC) if source.get('ticket_id_state') != 'in-progress': source['ticket_id_state'] = 'in-progress' source['approved_manager'] = instance_data[self.ZERO].get('approved_manager') @@ -213,7 +213,7 @@ def __upload_cro_report_to_es(self, monitor_data: dict): user_cost = self.get_user_cost_data(group_by_tag_name=group_by_tag_name, group_by_tag_value=ticket_id, requested_date=ticket_opened_date) duration = int(instance_data[self.ZERO].get('duration', 0)) - user_forecast = self.get_user_cost_data(group_by_tag_name=group_by_tag_name, group_by_tag_value=ticket_id, requested_date=datetime.utcnow(), extra_filter_key_values={'Project': user_project}, forecast=True, duration=duration) + user_forecast = self.get_user_cost_data(group_by_tag_name=group_by_tag_name, group_by_tag_value=ticket_id, requested_date=datetime.now(UTC), extra_filter_key_values={'Project': 
user_project}, forecast=True, duration=duration) cost_estimation = float(instance_data[self.ZERO].get('estimated_cost', self.ZERO)) if self.__cost_over_usage.es_operations.verify_elastic_index_doc_id(index=self.__cost_over_usage.es_index_cro, doc_id=ticket_id): es_data = self.__cost_over_usage.es_operations.get_es_data_by_id(id=ticket_id,index=self.__cost_over_usage.es_index_cro) @@ -270,10 +270,10 @@ def update_in_progress_ticket_cost(self): user_name=user_name) user_daily_cost.update(ce_user_daily_report) user_forecast = self.get_user_cost_data(group_by_tag_name=group_by_tag_name, - group_by_tag_value=ticket_id, requested_date=datetime.utcnow(), + group_by_tag_value=ticket_id, requested_date=datetime.now(UTC), forecast=True, duration=duration) - update_data = {'actual_cost': user_cost, 'forecast': user_forecast, 'timestamp': datetime.utcnow(), - f'TotalCurrentUsage-{datetime.utcnow().year}': total_account_cost, + update_data = {'actual_cost': user_cost, 'forecast': user_forecast, 'timestamp': datetime.now(UTC), + f'TotalCurrentUsage-{datetime.now(UTC).year}': total_account_cost, 'user_daily_cost': str(user_daily_cost)} if not source_data.get(self.ALLOCATED_BUDGET): update_data[self.ALLOCATED_BUDGET] = self.get_account_budget_from_payer_ce_report() @@ -302,7 +302,7 @@ def __get_user_usage_by_granularity(self, result_back_data: dict, tag_name: str, :param tag_value: :return: """ - end_date = datetime.utcnow().date() + end_date = datetime.now(UTC).date() start_date = end_date - timedelta(days=days) cost_explorer_object = self.__cost_over_usage.get_cost_explorer_operations() ce_daily_usage = cost_explorer_object.get_cost_by_tags(tag=tag_name, diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/cost_over_usage.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/cost_over_usage.py index 385022bc..85483a14 100644 --- a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/cost_over_usage.py +++ 
b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/cost_over_usage.py @@ -1,7 +1,6 @@ -import json import logging from ast import literal_eval -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC import typeguard @@ -45,7 +44,7 @@ def __init__(self): self.__cro_admins = self.__environment_variables_dict.get('CRO_DEFAULT_ADMINS', []) self.es_index_cro = self.__environment_variables_dict.get('CRO_ES_INDEX', '') self.__cro_duration_days = self.__environment_variables_dict.get('CRO_DURATION_DAYS') - self.current_end_date = datetime.utcnow() + self.current_end_date = datetime.now(UTC) self.current_start_date = self.current_end_date - timedelta(days=self.__cro_duration_days) self.__public_cloud_name = self.__environment_variables_dict.get('PUBLIC_CLOUD_NAME') self.__ce_operations = CostExplorerOperations() @@ -293,7 +292,7 @@ def get_last_mail_alert_status(self, user: str): last_alert = response[0] last_send_date = last_alert.get('_source').get('timestamp') alert_number = last_alert.get('_source').get('Alert', 0) - current_date = datetime.utcnow().date() + current_date = datetime.now(UTC).date() last_send_date = datetime.strptime(last_send_date, self.TIMESTAMP_DATE_FORMAT).date() days = (current_date - last_send_date).days if days % self.SEND_ALERT_DAY == 0 and last_send_date != current_date: diff --git a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/run_cro.py b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/run_cro.py index 45e610c7..209f68fe 100644 --- a/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/run_cro.py +++ b/cloud_governance/cloud_resource_orchestration/clouds/aws/ec2/run_cro.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, UTC import boto3 @@ -14,7 +14,7 @@ class RunCRO: - PERSISTENT_RUN_DOC_ID = f'cro_run_persistence-{datetime.utcnow().date()}' + PERSISTENT_RUN_DOC_ID = f'cro_run_persistence-{datetime.now(UTC).date()}' 
PERSISTENT_RUN_INDEX = 'cloud_resource_orchestration_persistence_run' def __init__(self): @@ -39,7 +39,7 @@ def send_cro_alerts(self): last_run_time = source.get(f'last_run_{self.account.lower()}') if last_run_time: last_updated_time = datetime.strptime(last_run_time, "%Y-%m-%dT%H:%M:%S.%f").date() - if last_updated_time == datetime.utcnow().date(): + if last_updated_time == datetime.now(UTC).date(): first_run = False self.__environment_variables_dict.update({'CRO_FIRST_RUN': first_run}) if first_run: @@ -57,9 +57,9 @@ def save_current_timestamp(self): :return: """ if not self.cro_cost_over_usage.es_operations.verify_elastic_index_doc_id(index=self.PERSISTENT_RUN_INDEX, doc_id=self.PERSISTENT_RUN_DOC_ID): - self.cro_cost_over_usage.es_operations.upload_to_elasticsearch(index=self.PERSISTENT_RUN_INDEX, data={f'last_run_{self.account}': datetime.utcnow()}, id=self.PERSISTENT_RUN_DOC_ID) + self.cro_cost_over_usage.es_operations.upload_to_elasticsearch(index=self.PERSISTENT_RUN_INDEX, data={f'last_run_{self.account}': datetime.now(UTC)}, id=self.PERSISTENT_RUN_DOC_ID) else: - self.cro_cost_over_usage.es_operations.update_elasticsearch_index(index=self.PERSISTENT_RUN_INDEX, metadata={f'last_run_{self.account}': datetime.utcnow()}, id=self.PERSISTENT_RUN_DOC_ID) + self.cro_cost_over_usage.es_operations.update_elasticsearch_index(index=self.PERSISTENT_RUN_INDEX, metadata={f'last_run_{self.account}': datetime.now(UTC)}, id=self.PERSISTENT_RUN_DOC_ID) @logger_time_stamp def run_cloud_resources(self): diff --git a/cloud_governance/cloud_resource_orchestration/clouds/azure/resource_groups/collect_cro_reports.py b/cloud_governance/cloud_resource_orchestration/clouds/azure/resource_groups/collect_cro_reports.py index a44cb132..5014d9be 100644 --- a/cloud_governance/cloud_resource_orchestration/clouds/azure/resource_groups/collect_cro_reports.py +++ b/cloud_governance/cloud_resource_orchestration/clouds/azure/resource_groups/collect_cro_reports.py @@ -1,5 +1,4 @@ 
-import logging -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC import typeguard @@ -7,7 +6,6 @@ from cloud_governance.cloud_resource_orchestration.clouds.common.abstract_collect_cro_reports import \ AbstractCollectCROReports from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp -from cloud_governance.main.environment_variables import environment_variables class CollectCROReports(AbstractCollectCROReports): @@ -33,7 +31,7 @@ def _get_account_budget_from_payer_ce_report(self): "must": [ {"term": {"CloudName.keyword": self._public_cloud_name}}, {"term": {"AccountId.keyword": self._account_id}}, - {"term": {"Month": str(datetime.utcnow().year)}}, + {"term": {"Month": str(datetime.now(UTC).year)}}, ] } }, @@ -69,7 +67,7 @@ def get_user_cost_data(self, group_by_tag_name: str, group_by_tag_value: str, re resource_type = 'Forecast' pass else: - end_date = datetime.utcnow().replace(microsecond=self.ZERO) + timedelta(days=1) + end_date = datetime.now(UTC).replace(microsecond=self.ZERO) + timedelta(days=1) response = self.__cost_over_usage.get_monthly_user_es_cost_data(start_date=start_date, end_date=end_date, extra_matches=extra_filter_matches, extra_operation=self.AND, @@ -124,7 +122,7 @@ def _get_total_account_usage_cost(self): This method returns the total account budget till date for this year :return: """ - current_date = datetime.utcnow() + current_date = datetime.now(UTC) start_date = datetime(current_date.year, 1, 1, 0, 0, 0) end_date = current_date + timedelta(days=1) cost_explorer_operations = self.__cost_over_usage.get_cost_management_object() @@ -163,8 +161,8 @@ def update_in_progress_ticket_cost(self): group_by_tag_name=group_by_tag_name, user_name=user_name) user_daily_cost.update(ce_user_daily_report) - update_data = {'actual_cost': user_cost, 'timestamp': datetime.utcnow(), - f'TotalCurrentUsage-{datetime.utcnow().year}': total_account_cost, + update_data = {'actual_cost': 
user_cost, 'timestamp': datetime.now(UTC), + f'TotalCurrentUsage-{datetime.now(UTC).year}': total_account_cost, 'user_daily_cost': str(user_daily_cost)} if not source_data.get(self.ALLOCATED_BUDGET): update_data[self.ALLOCATED_BUDGET] = self._get_account_budget_from_payer_ce_report() @@ -192,7 +190,7 @@ def _get_user_usage_by_granularity(self, result_back_data: dict, tag_name: str, :param tag_value: :return: """ - end_date = datetime.utcnow() + end_date = datetime.now(UTC) start_date = end_date - timedelta(days=days) cost_explorer_object = self.__cost_over_usage.get_cost_management_object() ce_daily_usage = cost_explorer_object.get_usage(scope=self.__scope, grouping=[tag_name], granularity='Daily', diff --git a/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_collect_cro_reports.py b/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_collect_cro_reports.py index e3be48f1..b42288c4 100644 --- a/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_collect_cro_reports.py +++ b/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_collect_cro_reports.py @@ -1,14 +1,10 @@ -import logging from abc import ABC -from datetime import datetime, timedelta +from datetime import datetime, UTC import typeguard -from cloud_governance.cloud_resource_orchestration.clouds.aws.ec2.cost_over_usage import CostOverUsage -from cloud_governance.common.clouds.aws.iam.iam_operations import IAMOperations from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations from cloud_governance.common.jira.jira_operations import JiraOperations -from cloud_governance.common.logger.init_logger import handler from cloud_governance.common.logger.logger_time_stamp import logger_time_stamp from cloud_governance.main.environment_variables import environment_variables @@ -116,7 +112,7 @@ def _prepare_update_es_data(self, source: dict, instance_data: list, user_cost: source['user_cro'] = 
instance_data[self.ZERO].get('user_cro') if instance_data[self.ZERO].get('user') and source.get('user') != instance_data[self.ZERO].get('user'): source['user'] = instance_data[self.ZERO].get('user') - source['timestamp'] = datetime.utcnow() + source['timestamp'] = datetime.now(UTC) if source.get('ticket_id_state') != 'in-progress': source['ticket_id_state'] = 'in-progress' source['approved_manager'] = instance_data[self.ZERO].get('approved_manager') diff --git a/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_cost_over_usage.py b/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_cost_over_usage.py index 1bb240d7..c729dae4 100644 --- a/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_cost_over_usage.py +++ b/cloud_governance/cloud_resource_orchestration/clouds/common/abstract_cost_over_usage.py @@ -1,7 +1,7 @@ import logging from abc import ABC, abstractmethod from ast import literal_eval -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC import typeguard @@ -35,7 +35,7 @@ def __init__(self): self.es_index_cro = self._environment_variables_dict.get('CRO_ES_INDEX', '') self._cro_duration_days = self._environment_variables_dict.get('CRO_DURATION_DAYS') self._over_usage_threshold = OVER_USAGE_THRESHOLD * self._over_usage_amount - self.current_end_date = datetime.utcnow() + self.current_end_date = datetime.now(UTC) self.current_start_date = self.current_end_date - timedelta(days=self._cro_duration_days) self.es_operations = ElasticSearchOperations(es_host=self._es_host, es_port=self._es_port) self._elastic_search_queries = ElasticSearchQueries(cro_duration_days=self._cro_duration_days) diff --git a/cloud_governance/cloud_resource_orchestration/common/abstract_monitor_tickets.py b/cloud_governance/cloud_resource_orchestration/common/abstract_monitor_tickets.py index b51fa108..22082a6f 100644 --- 
a/cloud_governance/cloud_resource_orchestration/common/abstract_monitor_tickets.py +++ b/cloud_governance/cloud_resource_orchestration/common/abstract_monitor_tickets.py @@ -1,12 +1,12 @@ from abc import abstractmethod, ABC -from datetime import datetime +from datetime import datetime, UTC import typeguard from cloud_governance.cloud_resource_orchestration.utils.common_operations import string_equal_ignore_case from cloud_governance.cloud_resource_orchestration.utils.elastic_search_queries import ElasticSearchQueries from cloud_governance.cloud_resource_orchestration.utils.constant_variables import FIRST_CRO_ALERT, SECOND_CRO_ALERT, \ - CLOSE_JIRA_TICKET, JIRA_ISSUE_NEW_STATE, DATE_FORMAT + CLOSE_JIRA_TICKET, DATE_FORMAT from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations from cloud_governance.common.jira.jira_operations import JiraOperations from cloud_governance.common.logger.init_logger import logger @@ -147,7 +147,7 @@ def __close_and_update_ticket_data_in_es(self, ticket_id: str): This method close the ticket and update in ElasticSearch :return: """ - data = {'timestamp': datetime.utcnow(), 'ticket_id_state': 'closed'} + data = {'timestamp': datetime.now(UTC), 'ticket_id_state': 'closed'} if self.__es_operations.check_elastic_search_connection(): self.__es_operations.update_elasticsearch_index(index=self.__es_index_cro, id=ticket_id, metadata=data) self.__jira_operations.move_issue_state(ticket_id, state='CLOSED') @@ -238,7 +238,7 @@ def _monitor_in_progress_tickets(self): duration = int(source_data.get('duration', 0)) used_budget = int(source_data.get('actual_cost', 0)) ticket_start_date = datetime.strptime(source_data.get('ticket_opened_date'), DATE_FORMAT).date() - completed_duration = (datetime.utcnow().date() - ticket_start_date).days + completed_duration = (datetime.now(UTC).date() - ticket_start_date).days self._monitor_ticket_budget(ticket_id=ticket_id, region_name=region_name, budget=budget, 
used_budget=used_budget, user_cro=source_data.get('user_cro'), diff --git a/cloud_governance/cloud_resource_orchestration/common/run_cro.py b/cloud_governance/cloud_resource_orchestration/common/run_cro.py index 1eb8a603..2f018579 100644 --- a/cloud_governance/cloud_resource_orchestration/common/run_cro.py +++ b/cloud_governance/cloud_resource_orchestration/common/run_cro.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, UTC from cloud_governance.cloud_resource_orchestration.common.cro_object import CroObject from cloud_governance.cloud_resource_orchestration.utils.common_operations import string_equal_ignore_case @@ -11,7 +11,7 @@ class RunCRO: """This class monitors cro activities""" - PERSISTENT_RUN_DOC_ID = f'cro_run_persistence-{datetime.utcnow().date()}' + PERSISTENT_RUN_DOC_ID = f'cro_run_persistence-{datetime.now(UTC).date()}' PERSISTENT_RUN_INDEX = 'cloud_resource_orchestration_persistence_run' def __init__(self): @@ -34,10 +34,10 @@ def save_current_timestamp(self): if not self.__es_operations.verify_elastic_index_doc_id(index=self.PERSISTENT_RUN_INDEX, doc_id=self.PERSISTENT_RUN_DOC_ID): self.__es_operations.upload_to_elasticsearch(index=self.PERSISTENT_RUN_INDEX, data={ - f'last_run_{self.__account.lower()}': datetime.utcnow()}, id=self.PERSISTENT_RUN_DOC_ID) + f'last_run_{self.__account.lower()}': datetime.now(UTC)}, id=self.PERSISTENT_RUN_DOC_ID) else: self.__es_operations.update_elasticsearch_index(index=self.PERSISTENT_RUN_INDEX, - metadata={f'last_run_{self.__account.lower()}': datetime.utcnow()}, + metadata={f'last_run_{self.__account.lower()}': datetime.now(UTC)}, id=self.PERSISTENT_RUN_DOC_ID) @logger_time_stamp @@ -54,7 +54,7 @@ def __send_cro_alerts(self): last_run_time = source.get(f'last_run_{self.__account.lower()}') if last_run_time: last_updated_time = datetime.strptime(last_run_time, "%Y-%m-%dT%H:%M:%S.%f").date() - if last_updated_time == datetime.utcnow().date(): + if last_updated_time == 
datetime.now(UTC).date(): first_run = False self.__environment_variables_dict.update({'CRO_FIRST_RUN': first_run}) if first_run: diff --git a/cloud_governance/cloud_resource_orchestration/utils/elastic_search_queries.py b/cloud_governance/cloud_resource_orchestration/utils/elastic_search_queries.py index b93af381..de1c0e24 100644 --- a/cloud_governance/cloud_resource_orchestration/utils/elastic_search_queries.py +++ b/cloud_governance/cloud_resource_orchestration/utils/elastic_search_queries.py @@ -1,5 +1,5 @@ -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC class ElasticSearchQueries: @@ -8,7 +8,7 @@ class ElasticSearchQueries: """ def __init__(self, cro_duration_days: int = 30): - self.current_end_date = datetime.utcnow() + self.current_end_date = datetime.now(UTC) self.current_start_date = self.current_end_date - timedelta(days=cro_duration_days) def get_all_in_progress_tickets(self, match_conditions: list = None, fields: list = None, **kwargs): diff --git a/cloud_governance/common/clouds/aws/athena/abstract_athena_operations.py b/cloud_governance/common/clouds/aws/athena/abstract_athena_operations.py index c8e41666..4de2c603 100644 --- a/cloud_governance/common/clouds/aws/athena/abstract_athena_operations.py +++ b/cloud_governance/common/clouds/aws/athena/abstract_athena_operations.py @@ -1,12 +1,12 @@ from abc import ABC, abstractmethod -from datetime import datetime +from datetime import datetime, UTC from cloud_governance.main.environment_variables import environment_variables class AbstractAthenaOperations(ABC): - CURRENT_DATE = str(datetime.utcnow().date()).replace("-", "") + CURRENT_DATE = str(datetime.now(UTC).date()).replace("-", "") def __init__(self): self.__environment_variables_dict = environment_variables.environment_variables_dict diff --git a/cloud_governance/common/clouds/azure/monitor/monitor_management_operations.py 
b/cloud_governance/common/clouds/azure/monitor/monitor_management_operations.py index 7a39e0cd..078ef54d 100644 --- a/cloud_governance/common/clouds/azure/monitor/monitor_management_operations.py +++ b/cloud_governance/common/clouds/azure/monitor/monitor_management_operations.py @@ -1,5 +1,4 @@ -import json -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC from azure.core.exceptions import HttpResponseError from azure.mgmt.monitor import MonitorManagementClient @@ -22,7 +21,7 @@ def __get_end_date(self): :return: :rtype: """ - return datetime.utcnow() + return datetime.now(UTC) def __get_start_date(self): """ @@ -79,7 +78,7 @@ def get_resource_metrics(self, resource_id: str, metricnames: str, aggregation: :rtype: """ if not timespan: - end_date = datetime.utcnow() + end_date = datetime.now(UTC) start_date = end_date - timedelta(days=UNUSED_DAYS) timespan = f'{start_date}/{end_date}' response = self.__monitor_client.metrics.list(resource_uri=resource_id, timespan=timespan, diff --git a/cloud_governance/common/elasticsearch/elasticsearch_operations.py b/cloud_governance/common/elasticsearch/elasticsearch_operations.py index 20116574..f6f16f86 100644 --- a/cloud_governance/common/elasticsearch/elasticsearch_operations.py +++ b/cloud_governance/common/elasticsearch/elasticsearch_operations.py @@ -1,5 +1,4 @@ -import os -from datetime import datetime, timedelta +from datetime import datetime, UTC import time import pandas as pd from elasticsearch.helpers import bulk @@ -138,7 +137,7 @@ def upload_to_elasticsearch(self, index: str, data: dict, doc_type: str = '_doc' # utcnow - solve timestamp issue if not data.get('timestamp'): - data['timestamp'] = datetime.utcnow() # datetime.now() + data['timestamp'] = datetime.now(UTC) # datetime.now() if 'policy' not in data: data['policy'] = self.__environment_variables_dict.get('policy') # Upload data to elastic search server @@ -322,7 +321,7 @@ def upload_data_in_bulk(self, 
data_items: list, index: str, **kwargs): if 'CurrentDate' in item: item['timestamp'] = datetime.strptime(item.get('CurrentDate'), "%Y-%m-%d") else: - item['timestamp'] = datetime.utcnow() + item['timestamp'] = datetime.now(UTC) item['_index'] = index if item.get('AccountId'): item['AccountId'] = str(item.get('AccountId')) diff --git a/cloud_governance/common/utils/utils.py b/cloud_governance/common/utils/utils.py index c9a586d9..f0587930 100644 --- a/cloud_governance/common/utils/utils.py +++ b/cloud_governance/common/utils/utils.py @@ -1,6 +1,6 @@ import os -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC from typing import Union @@ -127,6 +127,6 @@ def get_start_and_end_datetime(days: int) -> [datetime, datetime]: :rtype: """ days = 1 if days == 0 else days - end_date = datetime.utcnow() + end_date = datetime.now(UTC) start_date = end_date - timedelta(days=days) return start_date, end_date diff --git a/cloud_governance/main/es_uploader.py b/cloud_governance/main/es_uploader.py index d1d25e1a..13a1fcdf 100644 --- a/cloud_governance/main/es_uploader.py +++ b/cloud_governance/main/es_uploader.py @@ -1,5 +1,5 @@ import json -from datetime import datetime +from datetime import datetime, UTC import pandas as pd from cloud_governance.common.elasticsearch.elasticsearch_operations import ElasticSearchOperations @@ -190,7 +190,7 @@ def upload_last_policy_to_elasticsearch(self, policy: str, index: str, doc_type: data[key] = value # utcnow - solve timestamp issue - data['timestamp'] = datetime.utcnow() # datetime.now() + data['timestamp'] = datetime.now(UTC) # datetime.now() # Upload data to elastic search server try: diff --git a/cloud_governance/policy/aws/cleanup/instance_run.py b/cloud_governance/policy/aws/cleanup/instance_run.py index 4ff7af3e..84d59cc9 100644 --- a/cloud_governance/policy/aws/cleanup/instance_run.py +++ b/cloud_governance/policy/aws/cleanup/instance_run.py @@ -1,4 +1,4 @@ -from datetime import 
datetime +from datetime import datetime, UTC from cloud_governance.policy.helpers.aws.aws_policy_operations import AWSPolicyOperations @@ -19,7 +19,7 @@ def _upload_instance_type_count_to_elastic_search(self): """ instance_types = self._update_instance_type_count() account = self.account - current_day = datetime.utcnow() + current_day = datetime.now(UTC) es_instance_types_data = [] for region, instance_types in instance_types.items(): for instance_type, instance_type_count in instance_types.items(): diff --git a/cloud_governance/policy/aws/cleanup/unused_nat_gateway.py b/cloud_governance/policy/aws/cleanup/unused_nat_gateway.py index f51bbcb8..b4d6baa5 100644 --- a/cloud_governance/policy/aws/cleanup/unused_nat_gateway.py +++ b/cloud_governance/policy/aws/cleanup/unused_nat_gateway.py @@ -27,7 +27,7 @@ def __check_cloud_watch_logs(self, resource_id: str, days: int = UNUSED_DAYS): """ if days == 0: days = 1 - end_time = datetime.datetime.utcnow() + end_time = datetime.datetime.now(datetime.UTC) start_time = end_time - datetime.timedelta(days=days) response = self._cloudwatch.get_metric_data(start_time=start_time, end_time=end_time, resource_id=resource_id, resource_type='NatGatewayId', namespace=self.NAMESPACE, diff --git a/cloud_governance/policy/aws/cost_explorer_payer_billings.py b/cloud_governance/policy/aws/cost_explorer_payer_billings.py index 9ec0a32e..c71e0521 100644 --- a/cloud_governance/policy/aws/cost_explorer_payer_billings.py +++ b/cloud_governance/policy/aws/cost_explorer_payer_billings.py @@ -238,7 +238,7 @@ def filter_cost_details_for_sp(self, total_cost: list): def get_monthly_cost_details(self, start_date: datetime = None, end_date: datetime = None): """This method list the savings plan details""" - current_date = datetime.datetime.utcnow() + current_date = datetime.datetime.now(datetime.UTC) if not start_date and not end_date: end_date = (current_date.replace(day=1) - datetime.timedelta(days=1)).date() start_date = 
end_date.replace(day=1) diff --git a/cloud_governance/policy/aws/monitor/cluster_run.py b/cloud_governance/policy/aws/monitor/cluster_run.py index 48e334fc..d258a0a2 100644 --- a/cloud_governance/policy/aws/monitor/cluster_run.py +++ b/cloud_governance/policy/aws/monitor/cluster_run.py @@ -57,6 +57,6 @@ def run_policy_operations(self): 'InstanceCount': 1, 'Stopped': stopped_instances, 'Running': running_instances, - 'index-id': f'{datetime.datetime.utcnow().date()}-{self._cloud_name.lower()}-{self.account.lower()}-{self._region.lower()}-{cluster_tag}' + 'index-id': f'{datetime.datetime.now(datetime.UTC).date()}-{self._cloud_name.lower()}-{self.account.lower()}-{self._region.lower()}-{cluster_tag}' } return list(cluster_data.values()) diff --git a/cloud_governance/policy/aws/spot_savings_analysis.py b/cloud_governance/policy/aws/spot_savings_analysis.py index 289d2f1e..99100e53 100644 --- a/cloud_governance/policy/aws/spot_savings_analysis.py +++ b/cloud_governance/policy/aws/spot_savings_analysis.py @@ -1,5 +1,5 @@ -from datetime import datetime +from datetime import datetime, UTC import typeguard @@ -32,7 +32,7 @@ def __get_prepared_query(self): This method prepare the query :return: """ - current_date = datetime.utcnow() + current_date = datetime.now(UTC) year = current_date.year current_month = current_date.month previous_month = current_month - 1 if current_month - 1 != 0 else 12 diff --git a/cloud_governance/policy/azure/cleanup/instance_run.py b/cloud_governance/policy/azure/cleanup/instance_run.py index af5bbb54..26e28935 100644 --- a/cloud_governance/policy/azure/cleanup/instance_run.py +++ b/cloud_governance/policy/azure/cleanup/instance_run.py @@ -1,4 +1,4 @@ -from datetime import datetime +from datetime import datetime, UTC from cloud_governance.policy.helpers.azure.azure_policy_operations import AzurePolicyOperations @@ -19,7 +19,7 @@ def _upload_instance_type_count_to_elastic_search(self): """ instance_types = self._update_instance_type_count() 
account = self.account - current_day = datetime.utcnow() + current_day = datetime.now(UTC) es_instance_types_data = [] for region, instance_types in instance_types.items(): for instance_type, instance_type_count in instance_types.items(): diff --git a/cloud_governance/policy/common_policies/cloudability_cost_reports.py b/cloud_governance/policy/common_policies/cloudability_cost_reports.py index 006ef5d1..92dfe557 100644 --- a/cloud_governance/policy/common_policies/cloudability_cost_reports.py +++ b/cloud_governance/policy/common_policies/cloudability_cost_reports.py @@ -55,7 +55,7 @@ def __get_start_date(self): return (self.__get_end_date() - datetime.timedelta(days=LOOK_BACK_DAYS)).replace(day=1) def __get_end_date(self): - return datetime.datetime.utcnow().date() + return datetime.datetime.now(datetime.UTC).date() def __get_cost_reports(self, start_date: str = None, end_date: str = None, custom_filter: str = ''): """ @@ -141,8 +141,8 @@ def __next_twelve_months(self): This method returns the next 12 months, year :return: """ - year = datetime.datetime.utcnow().year - next_month = datetime.datetime.utcnow().month + 1 + year = datetime.datetime.now(datetime.UTC).year + next_month = datetime.datetime.now(datetime.UTC).month + 1 month_year = [] for idx in range(MONTHS): month = str((idx + next_month) % MONTHS) @@ -163,12 +163,12 @@ def __forecast_for_next_months(self, cost_data: list): """ forecast_cost_data = [] month_years = self.__next_twelve_months() - month = (datetime.datetime.utcnow().month - 1) % 12 + month = (datetime.datetime.now(datetime.UTC).month - 1) % 12 if month == 0: month = 12 if len(str(month)) == 1: month = f'0{month}' - year = datetime.datetime.utcnow().year + year = datetime.datetime.now(datetime.UTC).year cache_start_date = f'{year}-{str(month)}-01' for data in cost_data: if cache_start_date == data.get('start_date') and data.get('CostCenter') > 0: diff --git a/cloud_governance/policy/common_policies/send_aggregated_alerts.py 
b/cloud_governance/policy/common_policies/send_aggregated_alerts.py index 3b67b70a..2c68d689 100644 --- a/cloud_governance/policy/common_policies/send_aggregated_alerts.py +++ b/cloud_governance/policy/common_policies/send_aggregated_alerts.py @@ -30,7 +30,7 @@ def __get_es_data(self): :return: :rtype: """ - current_date = (datetime.utcnow().date()).__str__() + current_date = (datetime.now(UTC).date()).__str__() policy_es_index = self.__environment_variables.get('es_index') account_name = (self.__environment_variables.get('account', '').upper() .replace('OPENSHIFT-', '') @@ -150,14 +150,14 @@ def __update_delete_days(self, policy_es_data: list): if record.get('SkipPolicy') != 'NA': delete_date = 'skip_delete' if days_to_take_action - 5 == days: - delete_date = (datetime.utcnow() + timedelta(days=5)).date() + delete_date = (datetime.now(UTC) + timedelta(days=5)).date() alert_user = True elif days == days_to_take_action - 3: - delete_date = (datetime.utcnow() + timedelta(days=3)).date() + delete_date = (datetime.now(UTC) + timedelta(days=3)).date() alert_user = True else: if days >= days_to_take_action: - delete_date = datetime.utcnow().date().__str__() + delete_date = datetime.now(UTC).date().__str__() alert_user = True if record.get('policy') in ['empty_roles', 's3_inactive']: record['RegionName'] = 'us-east-1' diff --git a/cloud_governance/policy/helpers/abstract_policy_operations.py b/cloud_governance/policy/helpers/abstract_policy_operations.py index 5a69df12..4610a7e6 100644 --- a/cloud_governance/policy/helpers/abstract_policy_operations.py +++ b/cloud_governance/policy/helpers/abstract_policy_operations.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from datetime import datetime +from datetime import datetime, UTC from typing import Union from cloud_governance.common.elasticsearch.elastic_upload import ElasticUpload @@ -14,7 +14,7 @@ class AbstractPolicyOperations(ABC): DAYS_TO_NOTIFY_ADMINS = 2 DAYS_TO_TRIGGER_RESOURCE_MAIL = 4 
def __current_savings_year(self, unit_price: float):
    """
    Project the savings for the remainder of the current calendar year.

    :param unit_price: hourly unit price of the resource
    :return: remaining-days-in-year (inclusive of today) * 24 hours * unit_price
    """
    # Fix: datetime.UTC is itself the tzinfo constant — pass it directly
    # to now(); ``UTC.utc`` raises AttributeError.
    # Read "today" once so the two date uses cannot straddle midnight.
    today = datetime.now(UTC).date()
    year_end_date = today.replace(month=12, day=31)
    total_days = (year_end_date - today).days + 1  # +1 counts today itself
    return total_days * 24 * unit_price
def _calculate_days(self, create_date: datetime):
    """
    Return the number of whole days between create_date and today (UTC).

    :param create_date: resource creation datetime (naive or aware; only
        its .date() is used)
    :return: int day count
    """
    # Fix: use the aware-now idiom correctly — datetime.UTC is the tzinfo
    # constant itself, so it is passed directly; ``datetime.UTC.utc``
    # raises AttributeError.
    today = datetime.datetime.now(datetime.UTC).date()
    return (today - create_date.date()).days
is not pingable, Please check ') diff --git a/cloud_governance/policy/policy_runners/elasticsearch/upload_elastic_search.py b/cloud_governance/policy/policy_runners/elasticsearch/upload_elastic_search.py index dfd56e72..66cc63f8 100644 --- a/cloud_governance/policy/policy_runners/elasticsearch/upload_elastic_search.py +++ b/cloud_governance/policy/policy_runners/elasticsearch/upload_elastic_search.py @@ -37,6 +37,6 @@ def upload(self, data: Union[list, dict]): self._es_operations.upload_to_elasticsearch(data=policy_dict.copy(), index=self._es_index) logger.info(f'Uploaded the policy results to elasticsearch index: {self._es_index}') else: - logger.error(f'No data to upload on @{self._account} at {datetime.utcnow()}') + logger.error(f'No data to upload on @{self._account} at {datetime.now(UTC.utc)}') else: logger.error('ElasticSearch host is not pingable, Please check your connection') diff --git a/tests/integration/cloud_governance/aws/zombie_cluster/test_iam_zombie_delete.py b/tests/integration/cloud_governance/aws/zombie_cluster/test_iam_zombie_delete.py index a23826a3..7e1f880d 100644 --- a/tests/integration/cloud_governance/aws/zombie_cluster/test_iam_zombie_delete.py +++ b/tests/integration/cloud_governance/aws/zombie_cluster/test_iam_zombie_delete.py @@ -1,5 +1,5 @@ import uuid -from datetime import datetime +from datetime import datetime, UTC import boto3 @@ -13,7 +13,7 @@ def test_iam_zombie_user_create_and_delete(): :return: """ short_random_id = uuid.uuid1() - time_ms = str(datetime.utcnow().strftime('%f')) + time_ms = str(datetime.now(UTC.utc).strftime('%f')) USER_NAME = f'integration-ocp-{short_random_id}-{time_ms}' iam_resource = boto3.client('iam') tags = [ diff --git a/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py b/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py index 46c804fa..33028c2f 100644 --- 
a/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py +++ b/tests/integration/cloud_governance/common/clouds/azure/cost_management/test_cost_management_operations.py @@ -1,6 +1,5 @@ import datetime -import pytest from cloud_governance.common.clouds.azure.cost_management.cost_management_operations import CostManagementOperations @@ -11,7 +10,7 @@ def test_get_usage(): @return: """ cost_management_operations = CostManagementOperations() - end_date = datetime.datetime.utcnow() - datetime.timedelta(days=2) + end_date = datetime.datetime.now(datetime.UTC.utc) - datetime.timedelta(days=2) start_date = end_date - datetime.timedelta(days=1) granularity = 'Daily' scope = cost_management_operations.azure_operations.get_billing_profiles_list()[0] @@ -31,7 +30,7 @@ def test_get_forecast(): @return: """ cost_management_operations = CostManagementOperations() - end_date = datetime.datetime.utcnow() + datetime.timedelta(days=1) + end_date = datetime.datetime.now(datetime.UTC.utc) + datetime.timedelta(days=1) start_date = end_date - datetime.timedelta(days=1) granularity = 'Daily' cost_forecast_data = cost_management_operations.get_forecast(scope=cost_management_operations.azure_operations.scope, diff --git a/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py b/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py index 11032229..30dcae86 100644 --- a/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py +++ b/tests/unittest/cloud_governance/cloud_resource_orchestration/mocks/mock_jira.py @@ -1,4 +1,4 @@ -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC from functools import wraps from unittest.mock import patch @@ -11,7 +11,7 @@ def get_ticket_response(): This method return the ticket data :return: """ - created = datetime.strftime(datetime.utcnow() - timedelta(days=2), "%Y-%m-%dT%H:%M:%S") + created = 
datetime.strftime(datetime.now(UTC.utc) - timedelta(days=2), "%Y-%m-%dT%H:%M:%S") response = { 'key': 'MOCK-1', 'fields': { @@ -65,7 +65,7 @@ async def mock_get_all_issues(*args, **kwargs): 'key': 'MOCK-1', 'fields': { 'status': {'name': 'Refinement'}, - 'created': datetime.utcnow() - timedelta(days=2), + 'created': datetime.now(UTC.utc) - timedelta(days=2), 'description': "First Name: Test\n" "Last Name: Mock\nEmail Address: mock@gmail.com\n" "Manager Approval Address: manager@gmail.com\nCC-Users: \nDays: 5\n" diff --git a/tests/unittest/cloud_governance/policy/aws/cleanup/test_database_idle.py b/tests/unittest/cloud_governance/policy/aws/cleanup/test_database_idle.py index 20f50676..869f2d20 100644 --- a/tests/unittest/cloud_governance/policy/aws/cleanup/test_database_idle.py +++ b/tests/unittest/cloud_governance/policy/aws/cleanup/test_database_idle.py @@ -1,4 +1,4 @@ -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC from freezegun import freeze_time from moto import mock_rds, mock_cloudwatch @@ -9,7 +9,7 @@ from tests.unittest.configs import DB_INSTANCE_CLASS, AWS_DEFAULT_REGION, TEST_USER_NAME, DB_ENGINE, \ CLOUD_WATCH_METRICS_DAYS, PROJECT_NAME -current_date = datetime.utcnow() +current_date = datetime.now(UTC.utc) start_date = current_date - timedelta(days=CLOUD_WATCH_METRICS_DAYS + 1) diff --git a/tests/unittest/cloud_governance/policy/aws/cleanup/test_instance_idle.py b/tests/unittest/cloud_governance/policy/aws/cleanup/test_instance_idle.py index 0244115b..42a54b3d 100644 --- a/tests/unittest/cloud_governance/policy/aws/cleanup/test_instance_idle.py +++ b/tests/unittest/cloud_governance/policy/aws/cleanup/test_instance_idle.py @@ -1,5 +1,5 @@ -from datetime import datetime, timedelta +from datetime import datetime, timedelta, UTC from typing import Union from unittest.mock import patch @@ -40,7 +40,7 @@ def mock_describe_instances(*args, **kwargs): { 'InstanceId': 'i-1234567890abcdef0', 'State': {'Name': 'running'}, 
- 'LaunchTime': kwargs.get('LaunchTime', datetime.utcnow()), + 'LaunchTime': kwargs.get('LaunchTime', datetime.now(UTC.utc)), 'Tags': kwargs.get('Tags', []) # Change the launch time here } @@ -75,7 +75,7 @@ def test_instance_idle__check_not_idle(): environment_variables.environment_variables_dict['dry_run'] = 'yes' environment_variables.environment_variables_dict['policy'] = 'instance_idle' with patch('boto3.client') as mock_client: - mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(LaunchTime=datetime.utcnow() - timedelta(days=8))] + mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(LaunchTime=datetime.now(UTC.utc) - timedelta(days=8))] mock_client.return_value.get_metric_data.side_effect = [MockCloudWatchMetric(metrics=[5, 4, 8, 10]).create_metric(), MockCloudWatchMetric(metrics=[5000, 2000, 4000, 8000]).create_metric(), MockCloudWatchMetric(metrics=[1000, 200, 500]).create_metric() @@ -95,7 +95,7 @@ def test_instance_idle__skip_cluster(): environment_variables.environment_variables_dict['dry_run'] = DRY_RUN_YES environment_variables.environment_variables_dict['policy'] = 'instance_idle' with patch('boto3.client') as mock_client: - mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(Tags=tags, LaunchTime=datetime.utcnow() - timedelta(days=8))] + mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(Tags=tags, LaunchTime=datetime.now(UTC.utc) - timedelta(days=8))] instance_idle = InstanceIdle() response = instance_idle.run() assert len(response) == 0 @@ -111,7 +111,7 @@ def test_instance_idle__dryrun_no_active_instance(): environment_variables.environment_variables_dict['dry_run'] = DRY_RUN_NO environment_variables.environment_variables_dict['policy'] = 'instance_idle' with patch('boto3.client') as mock_client: - mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(Tags=tags, LaunchTime=datetime.utcnow() - 
timedelta(days=8))] + mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(Tags=tags, LaunchTime=datetime.now(UTC.utc) - timedelta(days=8))] mock_client.return_value.get_metric_data.side_effect = [ MockCloudWatchMetric(metrics=[5, 4, 8, 10]).create_metric(), MockCloudWatchMetric(metrics=[5000, 2000, 4000, 8000]).create_metric(), @@ -133,7 +133,7 @@ def test_instance_idle__dryrun_no_delete(): environment_variables.environment_variables_dict['policy'] = 'instance_idle' with patch('boto3.client') as mock_client: mock_client.return_value.describe_instances.side_effect = [ - mock_describe_instances(Tags=tags, LaunchTime=datetime.utcnow() - timedelta(days=8))] + mock_describe_instances(Tags=tags, LaunchTime=datetime.now(UTC.utc) - timedelta(days=8))] mock_client.return_value.get_metric_data.side_effect = [ MockCloudWatchMetric(metrics=[0, 1, 0, 0.1]).create_metric(), MockCloudWatchMetric(metrics=[50, 20, 5, 10]).create_metric(), @@ -157,7 +157,7 @@ def test_instance_idle__skips_delete(): environment_variables.environment_variables_dict['dry_run'] = DRY_RUN_NO environment_variables.environment_variables_dict['policy'] = 'instance_idle' with patch('boto3.client') as mock_client: - mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(Tags=tags, LaunchTime=datetime.utcnow() - timedelta(days=8))] + mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(Tags=tags, LaunchTime=datetime.now(UTC.utc) - timedelta(days=8))] mock_client.return_value.get_metric_data.side_effect = [ MockCloudWatchMetric(metrics=[0, 1, 0, 0.1]).create_metric(), MockCloudWatchMetric(metrics=[50, 20, 5, 10]).create_metric(), @@ -180,7 +180,7 @@ def test_instance_idle__set_counter_zero(): environment_variables.environment_variables_dict['dry_run'] = DRY_RUN_YES environment_variables.environment_variables_dict['policy'] = 'instance_idle' with patch('boto3.client') as mock_client: - 
mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(Tags=tags, LaunchTime=datetime.utcnow() - timedelta(days=8))] + mock_client.return_value.describe_instances.side_effect = [mock_describe_instances(Tags=tags, LaunchTime=datetime.now(UTC.utc) - timedelta(days=8))] mock_client.return_value.get_metric_data.side_effect = [ MockCloudWatchMetric(metrics=[0, 1, 0, 0.1]).create_metric(), MockCloudWatchMetric(metrics=[50, 20, 5, 10]).create_metric(), diff --git a/tests/unittest/cloud_governance/policy/aws/cleanup/test_unused_nat_gateway.py b/tests/unittest/cloud_governance/policy/aws/cleanup/test_unused_nat_gateway.py index 4c88019b..c76fa7c3 100644 --- a/tests/unittest/cloud_governance/policy/aws/cleanup/test_unused_nat_gateway.py +++ b/tests/unittest/cloud_governance/policy/aws/cleanup/test_unused_nat_gateway.py @@ -1,4 +1,5 @@ import datetime +from datetime import UTC import boto3 from moto import mock_ec2, mock_cloudwatch @@ -53,7 +54,7 @@ def test_unused_nat_gateway_dry_run_yes_collect_none(): 'Value': nat_gateway.get('NatGatewayId') }, ], - 'Timestamp': datetime.datetime.utcnow(), + 'Timestamp': datetime.datetime.now(UTC.utc), 'Value': 123.0, 'Values': [123.0], 'Unit': 'Count', @@ -96,7 +97,7 @@ def test_unused_nat_gateway___dry_run_no_7_days_action_delete(): environment_variables.environment_variables_dict['dry_run'] = 'no' ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) subnet_id = ec2_client.describe_subnets()['Subnets'][0].get('SubnetId') - tags = [{'Key': 'DaysCount', 'Value': f'{datetime.datetime.utcnow().date()}@7'}] + tags = [{'Key': 'DaysCount', 'Value': f'{datetime.datetime.now(UTC.utc).date()}@7'}] ec2_client.create_nat_gateway(SubnetId=subnet_id, TagSpecifications=[{'ResourceType': 'nat-gateway', 'Tags': tags}]) unused_nat_gateway = UnUsedNatGateway() response = unused_nat_gateway.run() @@ -118,7 +119,7 @@ def test_unused_nat_gateway___dry_run_no_skips_delete(): 
environment_variables.environment_variables_dict['dry_run'] = 'no' ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) subnet_id = ec2_client.describe_subnets()['Subnets'][0].get('SubnetId') - tags = [{'Key': 'DaysCount', 'Value': f'{datetime.datetime.utcnow().date()}@7'}, + tags = [{'Key': 'DaysCount', 'Value': f'{datetime.datetime.now(UTC.utc).date()}@7'}, {'Key': 'policy', 'Value': 'not-delete'}] ec2_client.create_nat_gateway(SubnetId=subnet_id, TagSpecifications=[{'ResourceType': 'nat-gateway', 'Tags': tags}]) unused_nat_gateway = UnUsedNatGateway() diff --git a/tests/unittest/cloud_governance/policy/aws/test_zombie_snapshots.py b/tests/unittest/cloud_governance/policy/aws/test_zombie_snapshots.py index f87a1ef3..f1639acc 100644 --- a/tests/unittest/cloud_governance/policy/aws/test_zombie_snapshots.py +++ b/tests/unittest/cloud_governance/policy/aws/test_zombie_snapshots.py @@ -1,5 +1,5 @@ import os -from datetime import datetime +from datetime import datetime, UTC import boto3 from moto import mock_ec2 @@ -68,7 +68,7 @@ def test_zombie_snapshots_delete(): environment_variables.environment_variables_dict['AWS_DEFAULT_REGION'] = AWS_DEFAULT_REGION environment_variables.environment_variables_dict['policy'] = 'zombie_snapshots' tags = [{'Key': 'User', 'Value': TEST_USER_NAME}, - {'Key': 'DaysCount', 'Value': f'{datetime.utcnow().date()}@7'}] + {'Key': 'DaysCount', 'Value': f'{datetime.now(UTC.utc).date()}@7'}] ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) # delete default snapshots and images @@ -109,7 +109,7 @@ def test_zombie_snapshots_skip(): environment_variables.environment_variables_dict['AWS_DEFAULT_REGION'] = AWS_DEFAULT_REGION environment_variables.environment_variables_dict['policy'] = 'zombie_snapshots' tags = [{'Key': 'User', 'Value': TEST_USER_NAME}, {'Key': 'policy', 'Value': 'not-delete'}, - {'Key': 'DaysCount', 'Value': f'{datetime.utcnow().date()}@7'}] + {'Key': 'DaysCount', 'Value': 
f'{datetime.now(UTC.utc).date()}@7'}] ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) # delete default snapshots and images @@ -150,7 +150,7 @@ def test_zombie_snapshots_contains_cluster_tag(): environment_variables.environment_variables_dict['AWS_DEFAULT_REGION'] = AWS_DEFAULT_REGION environment_variables.environment_variables_dict['policy'] = 'zombie_snapshots' tags = [{'Key': 'User', 'Value': TEST_USER_NAME}, {'Key': 'policy', 'Value': 'not-delete'}, - {'Key': 'DaysCount', 'Value': f'{datetime.utcnow().date()}@7'}, + {'Key': 'DaysCount', 'Value': f'{datetime.now(UTC.utc).date()}@7'}, {'Key': 'kubernetes.io/cluster/test-zombie-cluster', 'Value': f'owned'}] ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) @@ -191,7 +191,7 @@ def test_zombie_snapshots_no_zombies(): environment_variables.environment_variables_dict['AWS_DEFAULT_REGION'] = AWS_DEFAULT_REGION environment_variables.environment_variables_dict['policy'] = 'zombie_snapshots' tags = [{'Key': 'User', 'Value': TEST_USER_NAME}, {'Key': 'policy', 'Value': 'not-delete'}, - {'Key': 'DaysCount', 'Value': f'{datetime.utcnow().date()}@7'}, + {'Key': 'DaysCount', 'Value': f'{datetime.now(UTC.utc).date()}@7'}, {'Key': 'kubernetes.io/cluster/test-zombie-cluster', 'Value': f'owned'}] ec2_client = boto3.client('ec2', region_name=AWS_DEFAULT_REGION) diff --git a/tests/unittest/cloud_governance/policy/azure/test_instance_idle.py b/tests/unittest/cloud_governance/policy/azure/test_instance_idle.py index 1d1db6ae..2bfcfc12 100644 --- a/tests/unittest/cloud_governance/policy/azure/test_instance_idle.py +++ b/tests/unittest/cloud_governance/policy/azure/test_instance_idle.py @@ -1,17 +1,14 @@ import datetime -from unittest.mock import patch, Mock +from datetime import UTC from azure.mgmt.compute import ComputeManagementClient from azure.mgmt.monitor import MonitorManagementClient from azure.mgmt.monitor.v2021_05_01.models import TimeSeriesElement, MetricValue -from azure.mgmt.resource import 
ResourceManagementClient from cloud_governance.main.environment_variables import environment_variables from cloud_governance.policy.azure.cleanup.instance_idle import InstanceIdle -from cloud_governance.policy.azure.cleanup.instance_run import InstanceRun from tests.unittest.configs import SUBSCRIPTION_ID, CURRENT_DATE from tests.unittest.mocks.azure.mock_compute.mock_compute import mock_compute -from tests.unittest.mocks.azure.mock_computes import MockVirtualMachine, MockAzure from tests.unittest.mocks.azure.mock_identity.mock_default_credential import MockDefaultAzureCredential from tests.unittest.mocks.azure.mock_monitor.mock_monitor import mock_monitor from tests.unittest.mocks.azure.mock_network.mock_network import mock_network @@ -34,17 +31,17 @@ def test_instance_idle(): monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-cpu-metric', unit='Percentage CPU', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-in-metric', unit='Network In Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-out-metric', unit='Network Out Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) instance_idle = InstanceIdle() response = instance_idle.run() @@ -68,17 +65,17 @@ def test_instance_idle__check_not_idle(): monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-cpu-metric', unit='Percentage CPU', 
timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=3) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=3) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-in-metric', unit='Network In Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-out-metric', unit='Network Out Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) instance_idle = InstanceIdle() response = instance_idle.run() @@ -122,17 +119,17 @@ def test_instance_idle__dryrun_no(): monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-cpu-metric', unit='Percentage CPU', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=3) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=3) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-in-metric', unit='Network In Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=10000) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=10000) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-out-metric', unit='Network Out Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=10000) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=10000) ])]) instance_idle = InstanceIdle() response = instance_idle.run() @@ -157,17 +154,17 @@ def 
test_instance_idle__dryrun_no_delete(): monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-cpu-metric', unit='Percentage CPU', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-in-metric', unit='Network In Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-out-metric', unit='Network Out Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) instance_idle = InstanceIdle() response = instance_idle.run() @@ -195,17 +192,17 @@ def test_instance_idle__skips_delete(): monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-cpu-metric', unit='Percentage CPU', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-in-metric', unit='Network In Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-out-metric', unit='Network Out Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + 
MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) instance_idle = InstanceIdle() response = instance_idle.run() @@ -233,17 +230,17 @@ def test_instance_idle__set_counter_zero(): monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-cpu-metric', unit='Percentage CPU', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-in-metric', unit='Network In Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) monitor_client.metrics.create_metric(resource_id=instance.id, type='VirtualMachine', name='test-network-out-metric', unit='Network Out Total', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) instance_idle = InstanceIdle() response = instance_idle.run() diff --git a/tests/unittest/cloud_governance/policy/azure/test_instance_run.py b/tests/unittest/cloud_governance/policy/azure/test_instance_run.py index c7cb02d6..132e9188 100644 --- a/tests/unittest/cloud_governance/policy/azure/test_instance_run.py +++ b/tests/unittest/cloud_governance/policy/azure/test_instance_run.py @@ -1,4 +1,5 @@ import datetime +from datetime import UTC from unittest.mock import patch, Mock from azure.mgmt.compute import ComputeManagementClient @@ -114,7 +115,7 @@ def test_instance_run_stopped_test_days(): environment_variables.environment_variables_dict['SHUTDOWN_PERIOD'] = True environment_variables.environment_variables_dict['policy'] = 'instance_run' environment_variables.environment_variables_dict['dry_run'] = 'no' - date = (datetime.datetime.utcnow() - 
datetime.timedelta(days=1)).date() + date = (datetime.datetime.now(UTC.utc) - datetime.timedelta(days=1)).date() mock_virtual_machines = Mock() vm1 = MockVirtualMachine(tags={'User': 'mock', 'Policy': 'notdelete', 'DaysCount': f'{date}@1'}) @@ -144,7 +145,7 @@ def test_instance_run_stopped_test_current_day(): environment_variables.environment_variables_dict['SHUTDOWN_PERIOD'] = True environment_variables.environment_variables_dict['policy'] = 'instance_run' environment_variables.environment_variables_dict['dry_run'] = 'no' - date = (datetime.datetime.utcnow()).date() + date = (datetime.datetime.now(UTC.utc)).date() mock_virtual_machines = Mock() vm1 = MockVirtualMachine(tags={'User': 'mock', 'Policy': 'notdelete', 'DaysCount': f'{date}@1'}) @@ -174,7 +175,7 @@ def test_instance_run_vm_already_stopped(): environment_variables.environment_variables_dict['policy'] = 'instance_run' environment_variables.environment_variables_dict['SHUTDOWN_PERIOD'] = True environment_variables.environment_variables_dict['dry_run'] = 'no' - date = (datetime.datetime.utcnow()).date() + date = (datetime.datetime.now(UTC.utc)).date() mock_virtual_machines = Mock() vm1 = MockVirtualMachine(tags={'User': 'mock', 'Policy': 'notdelete', 'DaysCount': f'{date}@1'}) diff --git a/tests/unittest/cloud_governance/policy/azure/test_unused_nat_gateway.py b/tests/unittest/cloud_governance/policy/azure/test_unused_nat_gateway.py index f9bc7548..76845ed8 100644 --- a/tests/unittest/cloud_governance/policy/azure/test_unused_nat_gateway.py +++ b/tests/unittest/cloud_governance/policy/azure/test_unused_nat_gateway.py @@ -1,4 +1,5 @@ import datetime +from datetime import UTC from azure.mgmt.compute import ComputeManagementClient from azure.mgmt.monitor import MonitorManagementClient @@ -52,7 +53,7 @@ def test_unused_nat_gateway__check_used(): monitor_client.metrics.create_metric(resource_id=nat_gateway.id, type='NatGateway', name='test-metric', unit='SNATConnectionCount', timeseries=[TimeSeriesElement(data=[ 
- MetricValue(time_stamp=datetime.datetime.utcnow(), average=100) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=100) ])]) unused_nat_gateway = UnUsedNatGateway() response = unused_nat_gateway.run() @@ -79,7 +80,7 @@ def test_unused_nat_gateway__skip_live_cluster_id(): monitor_client.metrics.create_metric(resource_id=nat_gateway.id, type='NatGateway', name='test-metric', unit='SNATConnectionCount', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) unused_nat_gateway = UnUsedNatGateway() response = unused_nat_gateway.run() @@ -104,7 +105,7 @@ def test_unused_nat_gateway__collect_not_live_cluster_id(): monitor_client.metrics.create_metric(resource_id=nat_gateway.id, type='NatGateway', name='test-metric', unit='SNATConnectionCount', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) unused_nat_gateway = UnUsedNatGateway() response = unused_nat_gateway.run() @@ -129,7 +130,7 @@ def test_unused_nat_gateway__dryrun_no(): monitor_client.metrics.create_metric(resource_id=nat_gateway.id, type='NatGateway', name='test-metric', unit='SNATConnectionCount', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) unused_nat_gateway = UnUsedNatGateway() response = unused_nat_gateway.run() @@ -154,7 +155,7 @@ def test_unused_nat_gateway__dryrun_no_delete(): monitor_client.metrics.create_metric(resource_id=nat_gateway.id, type='NatGateway', name='test-metric', unit='SNATConnectionCount', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) unused_nat_gateway = UnUsedNatGateway() response 
= unused_nat_gateway.run() @@ -181,7 +182,7 @@ def test_unused_nat_gateway__skips_delete(): monitor_client.metrics.create_metric(resource_id=nat_gateway.id, type='NatGateway', name='test-metric', unit='SNATConnectionCount', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) unused_nat_gateway = UnUsedNatGateway() response = unused_nat_gateway.run() @@ -208,7 +209,7 @@ def test_unused_nat_gateway__set_counter_zero(): monitor_client.metrics.create_metric(resource_id=nat_gateway.id, type='NatGateway', name='test-metric', unit='SNATConnectionCount', timeseries=[TimeSeriesElement(data=[ - MetricValue(time_stamp=datetime.datetime.utcnow(), average=0) + MetricValue(time_stamp=datetime.datetime.now(UTC.utc), average=0) ])]) unused_nat_gateway = UnUsedNatGateway() response = unused_nat_gateway.run() diff --git a/tests/unittest/cloud_governance/policy/helpers/aws/test_aws_policy_operations.py b/tests/unittest/cloud_governance/policy/helpers/aws/test_aws_policy_operations.py index 79a7aeee..a87a70a7 100644 --- a/tests/unittest/cloud_governance/policy/helpers/aws/test_aws_policy_operations.py +++ b/tests/unittest/cloud_governance/policy/helpers/aws/test_aws_policy_operations.py @@ -1,4 +1,5 @@ import datetime +from datetime import UTC import boto3 from moto import mock_ec2, mock_s3, mock_iam @@ -49,7 +50,7 @@ def test_get_clean_up_days_count_already_exists(): """ environment_variables.environment_variables_dict['dry_run'] = 'yes' aws_cleanup_operations = AWSPolicyOperations() - mock_date = (datetime.datetime.utcnow() - datetime.timedelta(days=1)).date() + mock_date = (datetime.datetime.now(UTC.utc) - datetime.timedelta(days=1)).date() tags = [{'Key': "Name", "Value": "Unittest"}, {'Key': "DaysCount", "Value": f'{mock_date}@1'}] days_count = aws_cleanup_operations.get_clean_up_days_count(tags=tags) assert days_count == 0 @@ -66,7 +67,7 @@ def 
test_get_clean_up_days_count_already_updated_today(): """ environment_variables.environment_variables_dict['dry_run'] = 'yes' aws_cleanup_operations = AWSPolicyOperations() - mock_date = str(datetime.datetime.utcnow().date()) + mock_date = str(datetime.datetime.now(UTC).date()) tags = [{'Key': "Name", "Value": "Unittest"}, {'Key': "DaysCount", "Value": f'{mock_date}@1'}] days_count = aws_cleanup_operations.get_clean_up_days_count(tags=tags) assert days_count == 0 @@ -151,7 +152,7 @@ def test_update_resource_day_count_tag(): aws_cleanup_operations.update_resource_day_count_tag(resource_id=resource_id, cleanup_days=cleanup_days, tags=tags) instances = ec2_client.describe_instances()['Reservations'] tag_value = aws_cleanup_operations.get_tag_name_from_tags(instances[0]['Instances'][0].get('Tags'), tag_name='DaysCount') - assert tag_value == str(datetime.datetime.utcnow().date()) + "@0" + assert tag_value == str(datetime.datetime.now(UTC).date()) + "@0" @mock_ec2 @@ -168,7 +169,7 @@ def test_update_resource_day_count_tag_exists_tag(): environment_variables.environment_variables_dict['dry_run'] = 'no' ec2_client = boto3.client('ec2', region_name='ap-south-1') default_ami_id = 'ami-03cf127a' - mock_date = (datetime.datetime.utcnow() - datetime.timedelta(days=1)).date() + mock_date = (datetime.datetime.now(UTC) - datetime.timedelta(days=1)).date() tags = [{'Key': 'User', 'Value': 'cloud-governance'}, {'Key': "Name", "Value": "Unittest"}, {'Key': "DaysCount", "Value": f'{mock_date}@1'}] resource = ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, @@ -181,7 +182,7 @@ def test_update_resource_day_count_tag_exists_tag(): aws_cleanup_operations.update_resource_day_count_tag(resource_id=resource_id, cleanup_days=cleanup_days, tags=tags) instances = ec2_client.describe_instances()['Reservations'] tag_value = aws_cleanup_operations.get_tag_name_from_tags(instances[0]['Instances'][0].get('Tags'), tag_name='DaysCount') - 
assert tag_value == str(datetime.datetime.utcnow().date()) + "@2" + assert tag_value == str(datetime.datetime.now(UTC).date()) + "@2" @mock_ec2 @@ -198,7 +199,7 @@ def test_update_resource_day_count_tag_updated_tag_today(): environment_variables.environment_variables_dict['dry_run'] = 'no' ec2_client = boto3.client('ec2', region_name='ap-south-1') default_ami_id = 'ami-03cf127a' - mock_date = datetime.datetime.utcnow().date() + mock_date = datetime.datetime.now(UTC).date() tags = [{'Key': 'User', 'Value': 'cloud-governance'}, {'Key': "Name", "Value": "Unittest"}, {'Key': "DryRunYesDays", "Value": f'{mock_date}@1'}] resource = ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1, MinCount=1, @@ -211,4 +212,4 @@ def test_update_resource_day_count_tag_updated_tag_today(): aws_cleanup_operations.update_resource_day_count_tag(resource_id=resource_id, cleanup_days=cleanup_days, tags=tags) instances = ec2_client.describe_instances()['Reservations'] tag_value = aws_cleanup_operations.get_tag_name_from_tags(instances[0]['Instances'][0].get('Tags'), tag_name='DaysCount') - assert tag_value == str(datetime.datetime.utcnow().date()) + "@1" + assert tag_value == str(datetime.datetime.now(UTC).date()) + "@1" diff --git a/tests/unittest/configs.py b/tests/unittest/configs.py index 26a8eea6..59dad9fc 100644 --- a/tests/unittest/configs.py +++ b/tests/unittest/configs.py @@ -6,8 +6,8 @@ DRY_RUN_YES = 'yes' DRY_RUN_NO = 'no' -CURRENT_DATE = datetime.datetime.utcnow().date() -CURRENT_DATE_TIME = datetime.datetime.utcnow() +CURRENT_DATE = datetime.datetime.now(datetime.UTC).date() +CURRENT_DATE_TIME = datetime.datetime.now(datetime.UTC) TEST_USER_NAME = 'unit-test' # AWS diff --git a/tests/unittest/mocks/azure/mock_computes.py b/tests/unittest/mocks/azure/mock_computes.py index 6b6bdda5..f98e117b 100644 --- a/tests/unittest/mocks/azure/mock_computes.py +++ b/tests/unittest/mocks/azure/mock_computes.py @@ -1,5 +1,5 @@ import uuid 
-from datetime import datetime +from datetime import datetime, UTC from azure.mgmt.compute.v2023_01_02.models import Disk, DiskSku from azure.mgmt.compute.v2023_03_01.models import VirtualMachine, HardwareProfile, VirtualMachineInstanceView, \ @@ -14,7 +14,7 @@ def __init__(self, tags: dict = None): super().__init__(location='mock') self.tags = tags if tags else {} self.name = 'mock_machine' - self.time_created = datetime.utcnow() + self.time_created = datetime.now(UTC) self.hardware_profile = HardwareProfile(vm_size='Standard_D2s_v3') self.id = f'/subscriptions/{uuid.uuid1()}/resourceGroups/mock/providers/Microsoft.Compute/virtualMachines/mock-machine'