diff --git a/cloud_governance/main/main_oerations/main_operations.py b/cloud_governance/main/main_oerations/main_operations.py
index 53a92c25b..a964d666b 100644
--- a/cloud_governance/main/main_oerations/main_operations.py
+++ b/cloud_governance/main/main_oerations/main_operations.py
@@ -38,7 +38,7 @@ def run(self):
         policy_runner = self.get_policy_runner()
         for policy_type, policies in policies_list.items():
             # @Todo support for all the aws policies, currently supports ec2_run as urgent requirement
-            if self._policy in policies and self._policy in ["instance_run", "unattached_volume"]:
+            if self._policy in policies and self._policy in ["instance_run", "unattached_volume", "cluster_instances"]:
                 policy_runner.run(source=policy_type)
                 return True
         return False
diff --git a/cloud_governance/policy/aws/cleanup/unattached_volume.py b/cloud_governance/policy/aws/cleanup/unattached_volume.py
index 6a0b13f1d..f4cf71fe5 100644
--- a/cloud_governance/policy/aws/cleanup/unattached_volume.py
+++ b/cloud_governance/policy/aws/cleanup/unattached_volume.py
@@ -19,13 +19,11 @@ def run_policy_operations(self):
         """
         unattached_volumes = []
         available_volumes = self._get_all_volumes()
-        active_cluster_ids = self._get_active_cluster_ids()
         for volume in available_volumes:
             tags = volume.get('Tags', [])
             resource_id = volume.get('VolumeId')
             cleanup_result = False
-            cluster_tag = self._get_cluster_tag(tags=volume.get('Tags'))
-            if Utils.equal_ignore_case(volume.get('State'), 'available') and cluster_tag not in active_cluster_ids:
+            if Utils.equal_ignore_case(volume.get('State'), 'available'):
                 cleanup_days = self.get_clean_up_days_count(tags=tags)
                 cleanup_result = self.verify_and_delete_resource(resource_id=resource_id, tags=tags,
                                                                  clean_up_days=cleanup_days)
diff --git a/cloud_governance/policy/aws/monitor/__init__.py b/cloud_governance/policy/aws/monitor/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/cloud_governance/policy/aws/monitor/cluster_instances.py b/cloud_governance/policy/aws/monitor/cluster_instances.py
new file mode 100644
index 000000000..cf4cbade6
--- /dev/null
+++ b/cloud_governance/policy/aws/monitor/cluster_instances.py
@@ -0,0 +1,83 @@
+import datetime
+import json
+import re
+
+from cloud_governance.cloud_resource_orchestration.utils.common_operations import string_equal_ignore_case
+from cloud_governance.policy.helpers.aws.aws_policy_operations import AWSPolicyOperations
+
+
+class ClusterInstances(AWSPolicyOperations):
+    """
+    This class performs the operations on running cluster resources
+    """
+
+    def __init__(self):
+        super().__init__()
+
+    def __get_instance_status(self, resource_id: str, vm_name: str):
+        """
+        This method returns the VM status of the Virtual Machine
+        :param resource_id:
+        :type resource_id:
+        :param vm_name:
+        :type vm_name:
+        :return:
+        :rtype:
+        """
+        instance_statuses = self.compute_operations.get_instance_statuses(resource_id=resource_id, vm_name=vm_name)
+        statuses = instance_statuses.get('statuses', {})
+        if len(statuses) >= 2:
+            status = statuses[1].get('display_status', '').lower()
+        elif len(statuses) == 1:
+            status = statuses[0].get('display_status', '').lower()
+        else:
+            status = 'Unknown Status'
+        return status
+
+    def run_policy_operations(self):
+        """
+        This method returns the running cluster instances in the AWS cloud, grouped by cluster tag
+        :return:
+        :rtype:
+        """
+        instances = self._get_all_instances()
+        cluster_data = {}
+        for instance in instances:
+            tags = instance.get('Tags', [])
+            cluster_tag = self._get_cluster_tag(tags=tags).strip()
+            instance_state = instance.get('State', {}).get('Name')
+            if cluster_tag and not string_equal_ignore_case(instance_state, 'terminated'):
+                launch_time = instance.get('LaunchTime')
+                running_instances = stopped_instances = 0
+                running_days = self.calculate_days(instance.get('LaunchTime'))
+                if string_equal_ignore_case(instance_state, 'stopped'):
+                    stopped_instances = 1
+                    state_transition_reason = instance.get('StateTransitionReason')
+                    if state_transition_reason:
+                        extract_data = re.search(r'\((\d{4}-\d{2}-\d{2})', state_transition_reason)
+                        if extract_data:
+                            running_days = self.calculate_days(extract_data.group(1).split()[0], start_date=launch_time)
+                            instance_state += f"@{extract_data.group(1)}"
+                else:
+                    running_instances = 1
+                instance_data = f"{instance.get('InstanceId')}, {self.get_tag_name_from_tags(tags=tags, tag_name='Name')}, {instance.get('InstanceType')}, {instance_state}, {running_days}, {launch_time}"
+                if cluster_tag in cluster_data:
+                    cluster_data[cluster_tag]['Instances'].append(instance_data)
+                    cluster_data[cluster_tag]['InstanceCount'] = len(cluster_data[cluster_tag]['Instances'])
+                    cluster_data[cluster_tag]['Stopped'] = int(cluster_data[cluster_tag]['Stopped']) + stopped_instances
+                    cluster_data[cluster_tag]['Running'] = int(cluster_data[cluster_tag]['Running']) + running_instances
+                else:
+                    cluster_data[cluster_tag] = {
+                        'ResourceId': cluster_tag,
+                        'ClusterTag': cluster_tag,
+                        'User': self.get_tag_name_from_tags(tags=tags, tag_name='User'),
+                        'RunningDays': running_days,
+                        'RegionName': self._region,
+                        'PublicCloud': self._cloud_name,
+                        'Instances': [instance_data],
+                        'InstanceCount': 1,
+                        'Stopped': stopped_instances,
+                        'Running': running_instances,
+                        'index-id': f'{datetime.datetime.utcnow().date()}-{self._cloud_name.lower()}-{self.account.lower()}-{self._region.lower()}-{cluster_tag}'
+                    }
+        return list(cluster_data.values())
diff --git a/cloud_governance/policy/azure/cleanup/unattached_volume.py b/cloud_governance/policy/azure/cleanup/unattached_volume.py
index c8f14ed3a..7867f5774 100644
--- a/cloud_governance/policy/azure/cleanup/unattached_volume.py
+++ b/cloud_governance/policy/azure/cleanup/unattached_volume.py
@@ -18,12 +18,10 @@ def run_policy_operations(self):
         """
         unattached_volumes = []
         available_volumes = self._get_all_volumes()
-        active_cluster_ids = self._get_active_cluster_ids()
         for volume in available_volumes:
             tags = volume.get('tags')
             cleanup_result = False
-            cluster_tag = self._get_cluster_tag(tags=tags)
-            if Utils.equal_ignore_case(volume.get('disk_state'), 'Unattached') and cluster_tag not in active_cluster_ids:
+            if Utils.equal_ignore_case(volume.get('disk_state'), 'Unattached'):
                 cleanup_days = self.get_clean_up_days_count(tags=tags)
                 cleanup_result = self.verify_and_delete_resource(
                     resource_id=volume.get('id'), tags=tags,
diff --git a/cloud_governance/policy/helpers/abstract_policy_operations.py b/cloud_governance/policy/helpers/abstract_policy_operations.py
index a374f0288..a79e7fb21 100644
--- a/cloud_governance/policy/helpers/abstract_policy_operations.py
+++ b/cloud_governance/policy/helpers/abstract_policy_operations.py
@@ -23,9 +23,11 @@ def __init__(self):
         self._resource_id = self._environment_variables_dict.get('RESOURCE_ID')
         self._es_upload = ElasticUpload()
 
-    def calculate_days(self, create_date: Union[datetime, str]):
+    def calculate_days(self, create_date: Union[datetime, str], start_date: Union[datetime, str] = datetime.utcnow()):
         """
         This method returns the days
+        :param start_date:
+        :type start_date:
         :param create_date:
         :type create_date:
         :return:
@@ -33,8 +35,10 @@ def calculate_days(self, create_date: Union[datetime, str]):
         """
         if isinstance(create_date, str):
             create_date = datetime.strptime(create_date, "%Y-%M-%d")
-        today = datetime.utcnow().date()
-        days = today - create_date.date()
+        if isinstance(start_date, str):
+            start_date = datetime.strptime(start_date, "%Y-%m-%d")
+        start_date = start_date.date()
+        days = start_date - create_date.date()
         return days.days
 
     def get_clean_up_days_count(self, tags: Union[list, dict]):
diff --git a/cloud_governance/policy/helpers/azure/azure_policy_operations.py b/cloud_governance/policy/helpers/azure/azure_policy_operations.py
index de09f15da..665ad2820 100644
--- a/cloud_governance/policy/helpers/azure/azure_policy_operations.py
+++ b/cloud_governance/policy/helpers/azure/azure_policy_operations.py
@@ -105,31 +105,3 @@ def _get_all_volumes(self) -> list:
         """
         volumes = self.compute_operations.get_all_disks()
         return volumes
-
-    def _get_active_cluster_ids(self):
-        """
-        This method returns the active cluster id's
-        :return:
-        :rtype:
-        """
-        active_instances = self._get_all_instances()
-        cluster_ids = []
-        for vm in active_instances:
-            tags = vm.tags if vm.tags else {}
-            for key, value in tags.items():
-                if key.startswith('kubernetes.io/cluster'):
-                    cluster_ids.append(key)
-                    break
-        return cluster_ids
-
-    def _get_cluster_tag(self, tags: dict):
-        """
-        This method returns the cluster_tag
-        :return:
-        :rtype:
-        """
-        if tags:
-            for key, value in tags.items():
-                if key.startswith('kubernetes.io/cluster'):
-                    return key
-        return ''
diff --git a/tests/unittest/cloud_governance/policy/aws/cleanup/test_unattached_volume.py b/tests/unittest/cloud_governance/policy/aws/cleanup/test_unattached_volume.py
index ee3af06c2..03af071eb 100644
--- a/tests/unittest/cloud_governance/policy/aws/cleanup/test_unattached_volume.py
+++ b/tests/unittest/cloud_governance/policy/aws/cleanup/test_unattached_volume.py
@@ -1,9 +1,11 @@
+
 import boto3
 from moto import mock_ec2
 
 from cloud_governance.main.environment_variables import environment_variables
 from cloud_governance.policy.aws.cleanup.unattached_volume import UnattachedVolume
 
+
 region_name = 'us-east-2'
 
 
@@ -89,27 +91,3 @@ def test_unattached_volume_dry_run_no_skip():
     response = response[0]
     assert response.get('ResourceDelete') == 'False'
     assert response.get('SkipPolicy') == 'NOTDELETE'
-
-
-@mock_ec2
-def test_check_exists_cluster():
-    """
-    This tests verify skip the existing cluster volume
-    :return:
-    :rtype:
-    """
-    tags = [{'Key': 'kubernetes.io/cluster/test', 'Value': 'owned'}]
-    environment_variables.environment_variables_dict['AWS_DEFAULT_REGION'] = region_name
-    environment_variables.environment_variables_dict['policy'] = 'unattached_volume'
-    environment_variables.environment_variables_dict['dry_run'] = 'no'
-    environment_variables.environment_variables_dict['DAYS_TO_TAKE_ACTION'] = 1
-    ec2_client = boto3.client('ec2', region_name=region_name)
-    default_ami_id = 'ami-03cf127a'
-    ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=1,
-                             MinCount=1, TagSpecifications=[{ 'ResourceType': 'instance','Tags': tags}])
-    ec2_client.create_volume(AvailabilityZone=f'{region_name}a', Size=10, TagSpecifications=[{
-        'ResourceType': 'volume',
-        'Tags': tags
-    }])
-    volume_run = UnattachedVolume().run()
-    assert len(volume_run) == 0
diff --git a/tests/unittest/cloud_governance/policy/azure/test_unattached_volume.py b/tests/unittest/cloud_governance/policy/azure/test_unattached_volume.py
index b1e75f0b1..842d9c0a8 100644
--- a/tests/unittest/cloud_governance/policy/azure/test_unattached_volume.py
+++ b/tests/unittest/cloud_governance/policy/azure/test_unattached_volume.py
@@ -4,7 +4,7 @@
 
 from cloud_governance.main.environment_variables import environment_variables
 from cloud_governance.policy.azure.cleanup.unattached_volume import UnattachedVolume
-from tests.unittest.mocks.azure.mock_compute import MockDisk, MockAzure, MockVirtualMachine
+from tests.unittest.mocks.azure.mock_compute import MockDisk, MockAzure
 
 
 def test_unattached_volume_dry_run_yes_0_unattached():
@@ -14,9 +14,7 @@
     mock_azure = MockAzure(disks=[mock_disk1])
     mock_virtual_machines = Mock()
     mock_virtual_machines.list.side_effect = mock_azure.mock_list_disks
-    mock_virtual_machines.list_all.side_effect = mock_azure.mock_list_all
-    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines), \
-            patch.object(ComputeManagementClient, 'virtual_machines', mock_virtual_machines):
+    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines):
         volume_run = UnattachedVolume()
         response = volume_run.run()
         assert len(response) == 0
@@ -29,9 +27,7 @@ def test_unattached_volume_dry_run_yes():
     mock_azure = MockAzure(disks=[mock_disk1])
     mock_virtual_machines = Mock()
     mock_virtual_machines.list.side_effect = mock_azure.mock_list_disks
-    mock_virtual_machines.list_all.side_effect = mock_azure.mock_list_all
-    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines), \
-            patch.object(ComputeManagementClient, 'virtual_machines', mock_virtual_machines):
+    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines):
         volume_run = UnattachedVolume()
         response = volume_run.run()
         assert len(response) > 0
@@ -48,9 +44,7 @@ def test_unattached_volume_dry_run_no():
     mock_azure = MockAzure(disks=[mock_disk1])
     mock_virtual_machines = Mock()
     mock_virtual_machines.list.side_effect = mock_azure.mock_list_disks
-    mock_virtual_machines.list_all.side_effect = mock_azure.mock_list_all
-    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines), \
-            patch.object(ComputeManagementClient, 'virtual_machines', mock_virtual_machines):
+    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines):
         volume_run = UnattachedVolume()
         response = volume_run.run()
         assert len(response) > 0
@@ -67,9 +61,7 @@ def test_unattached_volume_dry_run_no_7_days_action():
     mock_azure = MockAzure(disks=[mock_disk1])
     mock_virtual_machines = Mock()
     mock_virtual_machines.list.side_effect = mock_azure.mock_list_disks
-    mock_virtual_machines.list_all.side_effect = mock_azure.mock_list_all
-    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines), \
-            patch.object(ComputeManagementClient, 'virtual_machines', mock_virtual_machines):
+    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines):
         volume_run = UnattachedVolume()
         response = volume_run.run()
         assert len(response) > 0
@@ -87,35 +79,10 @@ def test_unattached_volume_dry_run_no_skip():
     mock_azure = MockAzure(disks=[mock_disk1])
     mock_virtual_machines = Mock()
     mock_virtual_machines.list.side_effect = mock_azure.mock_list_disks
-    mock_virtual_machines.list_all.side_effect = mock_azure.mock_list_all
-    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines), \
-            patch.object(ComputeManagementClient, 'virtual_machines', mock_virtual_machines):
+    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines):
         volume_run = UnattachedVolume()
         response = volume_run.run()
         assert len(response) > 0
         response = response[0]
         assert response.get('ResourceDelete') == 'False'
         assert response.get('SkipPolicy') == 'NOTDELETE'
-
-
-def test_check_exists_cluster():
-    """
-    This tests verify skip the existing cluster volume
-    :return:
-    :rtype:
-    """
-    tags = {'kubernetes.io/cluster/test': 'owned'}
-    environment_variables.environment_variables_dict['policy'] = 'unattached_volume'
-    environment_variables.environment_variables_dict['dry_run'] = 'no'
-    environment_variables.environment_variables_dict['DAYS_TO_TAKE_ACTION'] = 1
-    mock_disk1 = MockDisk(disk_state='Unattached', disk_size_gb=4, tags=tags)
-    mock_vm1 = MockVirtualMachine(tags=tags)
-    mock_azure = MockAzure(disks=[mock_disk1], vms=[mock_vm1])
-    mock_virtual_machines = Mock()
-    mock_virtual_machines.list.side_effect = mock_azure.mock_list_disks
-    mock_virtual_machines.list_all.side_effect = mock_azure.mock_list_all
-    with patch.object(ComputeManagementClient, 'disks', mock_virtual_machines), \
-            patch.object(ComputeManagementClient, 'virtual_machines', mock_virtual_machines):
-        volume_run = UnattachedVolume()
-        response = volume_run.run()
-        assert len(response) == 0
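
Note on the calculate_days() change above: the extended signature computes start_date minus create_date in days, with start_date defaulting to today, which is how cluster_instances reports elapsed days for stopped instances. The standalone illustration below uses made-up dates; it is a simplified re-implementation for clarity only, not the repository method (it uses %m and a None default instead of the datetime.utcnow() default argument).

from datetime import datetime


def calculate_days_example(create_date, start_date=None):
    # Simplified stand-in for AbstractPolicyOperations.calculate_days(), for illustration only.
    if start_date is None:
        start_date = datetime.utcnow()
    if isinstance(create_date, str):
        create_date = datetime.strptime(create_date, "%Y-%m-%d")
    if isinstance(start_date, str):
        start_date = datetime.strptime(start_date, "%Y-%m-%d")
    # Days elapsed between the two calendar dates.
    return (start_date.date() - create_date.date()).days


# Original behaviour: days from the creation date until today.
print(calculate_days_example('2024-01-10'))
# New behaviour: days between two fixed dates via start_date.
print(calculate_days_example('2024-01-10', start_date='2024-01-25'))  # -> 15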
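
Note: the diff wires the new cluster_instances policy into main_operations but ships no unit test for it. Below is a minimal test sketch modeled on the existing @mock_ec2 tests for unattached_volume; it assumes ClusterInstances().run() returns the list built by run_policy_operations() (as UnattachedVolume().run() does), and the test name, AMI id, and tag values are illustrative only.

import boto3
from moto import mock_ec2

from cloud_governance.main.environment_variables import environment_variables
from cloud_governance.policy.aws.monitor.cluster_instances import ClusterInstances

region_name = 'us-east-2'


@mock_ec2
def test_cluster_instances_grouped_by_cluster_tag():
    """
    Sketch only: verifies that instances sharing a kubernetes.io/cluster tag
    are aggregated into a single cluster record.
    """
    tags = [{'Key': 'kubernetes.io/cluster/test', 'Value': 'owned'},
            {'Key': 'User', 'Value': 'unit-test'}]
    environment_variables.environment_variables_dict['AWS_DEFAULT_REGION'] = region_name
    environment_variables.environment_variables_dict['policy'] = 'cluster_instances'
    ec2_client = boto3.client('ec2', region_name=region_name)
    default_ami_id = 'ami-03cf127a'
    # Launch two instances that belong to the same (mock) cluster.
    ec2_client.run_instances(ImageId=default_ami_id, InstanceType='t2.micro', MaxCount=2,
                             MinCount=2, TagSpecifications=[{'ResourceType': 'instance', 'Tags': tags}])
    response = ClusterInstances().run()
    assert len(response) == 1
    cluster_record = response[0]
    assert cluster_record.get('ClusterTag') == 'kubernetes.io/cluster/test'
    assert cluster_record.get('InstanceCount') == 2
    assert cluster_record.get('Running') == 2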