Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account-related emails.

Already on GitHub? Sign in to your account

Fixed the bugs in uploading to ES #701

Merged
merged 1 commit into from
Dec 11, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -266,10 +266,10 @@ def get_last_time_accessed(self, resource_id: str, event_name: str, start_time:
}])['Events']
if events:
events = sorted(events, key=lambda event: event['EventTime'], reverse=True)
while events[0].get('EventName') in ('CreateTags', 'DeleteTags'):
events.pop(0)
if events[0].get('EventName') == event_name:
return events[0].get('EventTime')
events = [event for event in events if event.get('EventName') not in ('CreateTags', 'DeleteTags')]
if events:
if events[0].get('EventName') == event_name:
return events[0].get('EventTime')
if kwargs:
if len(events) == 1:
if events[0].get('EventName') == kwargs['optional_event_name'][0]:
Expand Down
2 changes: 1 addition & 1 deletion cloud_governance/policy/aws/ebs_unattached.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def __delete_ebs_unattached(self):
volumes = self._ec2_client.describe_volumes(Filters=[{'Name': 'status', 'Values': ['available']}])['Volumes']
unattached_volumes_data = []
for volume in volumes:
if not self._check_cluster_tag(tags=volume.get('Tags')) or self._get_policy_value(tags=volume.get('Tags')) not in ('NOTDELETE', 'SKIP'):
if not self._check_cluster_tag(tags=volume.get('Tags', [])) or self._get_policy_value(tags=volume.get('Tags')) not in ('NOTDELETE', 'SKIP'):
volume_id = volume.get('VolumeId')
launch_days = self._calculate_days(create_date=volume.get('CreateTime'))
if launch_days >= self.DAYS_TO_DELETE_RESOURCE:
Expand Down
2 changes: 1 addition & 1 deletion cloud_governance/policy/aws/empty_roles.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,7 +28,7 @@ def __delete_empty_roles(self):
role_name = role.get('RoleName')
try:
get_role = self._iam_client.get_role(RoleName=role.get('RoleName'))['Role']
tags = get_role.get('Tags')
tags = get_role.get('Tags', [])
if not self._check_cluster_tag(tags=tags):
role_empty = False
role_attached_policies = self._iam_client.list_attached_role_policies(RoleName=role_name)
Expand Down
2 changes: 1 addition & 1 deletion cloud_governance/policy/aws/skipped_resources.py
Original file line number Diff line number Diff line change
Expand Up @@ -118,5 +118,5 @@ def run(self):
self._es_upload.es_upload_data(items=resources_data, es_index=self.es_index)
for resource in resources_data:
if resource.get('timestamp'):
resource['timestamp'] = str(resource['timestamp'])
resource['timestamp'] = resource['timestamp']
return resources_data
Original file line number Diff line number Diff line change
Expand Up @@ -128,13 +128,14 @@ def zombie_cluster_resource(delete: bool = False, region: str = 'us-east-2', res
all_cluster_data.extend(resource_data_list)
zombie_cluster_common_methods.send_mails_to_cluster_user(notify_data=notify_data, delete_data=delete_data,
cluster_data=cluster_data)
zombie_result['all_cluster_data'] = {'count': len(set(all_cluster_data)), 'data': set(sorted(all_cluster_data))}
all_cluster_data = ['kubernetes.io/cluster/test']
zombie_result['all_cluster_data'] = {'count': len(set(all_cluster_data)), 'data': list(set(sorted(all_cluster_data)))}
es_operations = ElasticSearchOperations()
if es_operations.check_elastic_search_connection():
environment_variables_dict = environment_variables.environment_variables_dict
es_index = environment_variables_dict.get('es_index')
account = environment_variables_dict.get('account', '')
if zombie_result:
if zombie_result.get('data'):
zombie_result['region_name'] = region
zombie_result['account'] = account
es_operations.upload_to_elasticsearch(data=zombie_result.copy(), index=es_index)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -25,24 +25,25 @@ def run(self):
if isinstance(response, str):
logger.info(f'key: {cls[0]}, Response: {response}')
else:
logger.info(f'key: {cls[0]}, count: {len(response)}, {response}')
policy_result = response

if self._es_operations.check_elastic_search_connection():
if policy_result:
for policy_dict in policy_result:
policy_dict['region_name'] = self._region
policy_dict['account'] = self._account
self._es_operations.upload_to_elasticsearch(data=policy_dict.copy(), index=self._es_index)
logger.info(f'Uploaded the policy results to elasticsearch index: {self._es_index}')
if self._policy != 'skipped_resources':
logger.info(f'key: {cls[0]}, count: {len(response)}, {response}')
policy_result = response

if self._es_operations.check_elastic_search_connection():
if policy_result:
for policy_dict in policy_result:
policy_dict['region_name'] = self._region
policy_dict['account'] = self._account
self._es_operations.upload_to_elasticsearch(data=policy_dict.copy(), index=self._es_index)
logger.info(f'Uploaded the policy results to elasticsearch index: {self._es_index}')
else:
logger.error(f'No data to upload on @{self._account} at {datetime.utcnow()}')
else:
logger.error(f'No data to upload on @{self._account} at {datetime.utcnow()}')
else:
logger.error('ElasticSearch host is not pingable, Please check ')

if self._policy_output:
# if self._policy not in ('ec2_idle', 'ebs_in_use', 'ec2_run', 's3_inactive', 'zombie_snapshots', 'nat_gateway_unused'):
# beautify_data = self._beautify_upload_data(upload_resource_data=response)
# policy_result = {'count': len(beautify_data), self._policy: beautify_data}
logger.info(policy_result)
self._s3operations.save_results_to_s3(policy=self._policy.replace('_', '-'), policy_output=self._policy_output, policy_result=policy_result)
logger.error('ElasticSearch host is not pingable, Please check ')

if self._policy_output:
# if self._policy not in ('ec2_idle', 'ebs_in_use', 'ec2_run', 's3_inactive', 'zombie_snapshots', 'nat_gateway_unused'):
# beautify_data = self._beautify_upload_data(upload_resource_data=response)
# policy_result = {'count': len(beautify_data), self._policy: beautify_data}
logger.info(policy_result)
self._s3operations.save_results_to_s3(policy=self._policy.replace('_', '-'), policy_output=self._policy_output, policy_result=policy_result)
1 change: 1 addition & 0 deletions jenkins/clouds/aws/daily/policies/Jenkinsfile
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ pipeline {
USERS_MANAGER_MAILS = credentials('cloud-governance-users-managers-mails')
REPLY_TO = credentials('cloud-governance-reply-to')
LDAP_HOST_NAME = credentials('cloud-governance-ldap-host-name')
ES_INDEX = credentials('cloud-governance-es-index')
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why do we need a new index?

Copy link
Collaborator Author

@athiruma athiruma Dec 11, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

To pass the dynamic index.


contact1 = "[email protected]"
contact2 = "[email protected]"
Expand Down
Loading
Loading