diff --git a/.gitignore b/.gitignore
index 2676ecdc..52363a75 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,4 @@ local-conf.yml
.venv/
.venv.nosync/
.DS_Store
+!src/spaceone/inventory/metrics/Disks/disk
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 57734070..7398579c 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,4 +1,4 @@
-FROM cloudforet/python-core:1
+FROM cloudforet/python-core:2
ENV PYTHONUNBUFFERED 1
ENV CLOUDONE_PORT 50051
@@ -20,4 +20,4 @@ RUN python3 setup.py install && \
EXPOSE ${CLOUDONE_PORT}
ENTRYPOINT ["spaceone"]
-CMD ["grpc", "spaceone.inventory"]
+CMD ["run", "grpc-server", "spaceone.inventory"]
diff --git a/pkg/pip_requirements.txt b/pkg/pip_requirements.txt
index dc063090..5bf4bf43 100644
--- a/pkg/pip_requirements.txt
+++ b/pkg/pip_requirements.txt
@@ -1,6 +1,4 @@
schematics
-adal
-msrestazure
azure-identity
azure-mgmt-resource
azure-mgmt-compute
@@ -14,4 +12,4 @@ azure-keyvault-secrets
azure-mgmt-rdbms
azure-mgmt-cosmosdb
azure-mgmt-containerinstance
-azure-mgmt-webpubsub
\ No newline at end of file
+azure-mgmt-webpubsub
\ No newline at end of file
diff --git a/src/setup.py b/src/setup.py
index 894eb142..96eff2e2 100644
--- a/src/setup.py
+++ b/src/setup.py
@@ -50,6 +50,11 @@
"azure-mgmt-containerinstance",
"azure-mgmt-webpubsub",
],
- package_data={"spaceone": ["inventory/model/*/widget/*.yaml"]},
+ package_data={
+ "spaceone": [
+ "inventory/model/*/widget/*.yaml",
+ "inventory/metrics/**/**/*.yaml",
+ ]
+ },
zip_safe=False,
)
diff --git a/src/spaceone/inventory/api/plugin/__init__.py b/src/spaceone/inventory/api/plugin/__init__.py
deleted file mode 100644
index e69de29b..00000000
diff --git a/src/spaceone/inventory/conf/cloud_service_conf.py b/src/spaceone/inventory/conf/cloud_service_conf.py
index f54c7329..213486c0 100644
--- a/src/spaceone/inventory/conf/cloud_service_conf.py
+++ b/src/spaceone/inventory/conf/cloud_service_conf.py
@@ -1,29 +1,33 @@
MAX_WORKER = 20
-SUPPORTED_FEATURES = ['garbage_collection']
-SUPPORTED_SCHEDULES = ['hours']
-SUPPORTED_RESOURCE_TYPE = ['inventory.CloudService', 'inventory.CloudServiceType', 'inventory.Region']
+SUPPORTED_FEATURES = ["garbage_collection"]
+SUPPORTED_SCHEDULES = ["hours"]
+SUPPORTED_RESOURCE_TYPE = [
+ "inventory.CloudService",
+ "inventory.CloudServiceType",
+ "inventory.Region",
+]
FILTER_FORMAT = []
-ASSET_URL = 'https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure'
+ASSET_URL = "https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure"
CLOUD_SERVICE_GROUP_MAP = {
- 'ApplicationGateways': 'ApplicationGatewaysManager',
- 'ContainerInstances': 'ContainerInstancesManager',
- 'CosmosDB': 'CosmosDBManager',
- 'Disks': 'DisksManager',
- 'KeyVaults': 'KeyVaultsManager',
- 'LoadBalancers': 'LoadBalancersManager',
- 'MySQLServers': 'MySQLServersManager',
- 'SQLServers': 'SQLServersManager',
- 'SQLDatabases': 'SQLDatabasesManager',
- 'NATGateways': 'NATGatewaysManager',
- 'NetworkSecurityGroups': 'NetworkSecurityGroupsManager',
- 'PostgreSQLServers': 'PostgreSQLServersManager',
- 'PublicIPAddresses': 'PublicIPAddressesManager',
- 'Snapshots': 'SnapshotsManager',
- 'StorageAccounts': 'StorageAccountsManager',
- 'VirtualMachines': 'VirtualMachinesManager',
- 'VirtualNetworks': 'VirtualNetworksManager',
- 'VMScaleSets': 'VmScaleSetsManager',
- 'WebPubSubService': 'WebPubSubServiceManager',
+ "VirtualMachines": "VirtualMachinesManager",
+ "ApplicationGateways": "ApplicationGatewaysManager",
+ "ContainerInstances": "ContainerInstancesManager",
+ "CosmosDB": "CosmosDBManager",
+ "Disks": "DisksManager",
+ "KeyVaults": "KeyVaultsManager",
+ "LoadBalancers": "LoadBalancersManager",
+ "MySQLServers": "MySQLServersManager",
+ "SQLServers": "SQLServersManager",
+ "SQLDatabases": "SQLDatabasesManager",
+ "NATGateways": "NATGatewaysManager",
+ "NetworkSecurityGroups": "NetworkSecurityGroupsManager",
+ "PostgreSQLServers": "PostgreSQLServersManager",
+ "PublicIPAddresses": "PublicIPAddressesManager",
+ "Snapshots": "SnapshotsManager",
+ "StorageAccounts": "StorageAccountsManager",
+ "VirtualNetworks": "VirtualNetworksManager",
+ "VMScaleSets": "VmScaleSetsManager",
+ "WebPubSubService": "WebPubSubServiceManager",
}
diff --git a/src/spaceone/inventory/connector/__init__.py b/src/spaceone/inventory/connector/__init__.py
index 72309635..f115e398 100644
--- a/src/spaceone/inventory/connector/__init__.py
+++ b/src/spaceone/inventory/connector/__init__.py
@@ -5,9 +5,13 @@
from spaceone.inventory.connector.load_balancers import LoadBalancersConnector
from spaceone.inventory.connector.monitor import MonitorConnector
from spaceone.inventory.connector.virtual_networks import VirtualNetworksConnector
-from spaceone.inventory.connector.application_gateways import ApplicationGatewaysConnector
+from spaceone.inventory.connector.application_gateways import (
+ ApplicationGatewaysConnector,
+)
from spaceone.inventory.connector.public_ip_addresses import PublicIPAddressesConnector
-from spaceone.inventory.connector.network_security_groups import NetworkSecurityGroupsConnector
+from spaceone.inventory.connector.network_security_groups import (
+ NetworkSecurityGroupsConnector,
+)
from spaceone.inventory.connector.nat_gateways import NATGatewaysConnector
from spaceone.inventory.connector.storage_accounts import StorageAccountsConnector
from spaceone.inventory.connector.key_vaults import KeyVaultsConnector
@@ -19,3 +23,4 @@
from spaceone.inventory.connector.sql_databases import SQLDatabasesConnector
from spaceone.inventory.connector.container_instances import ContainerInstancesConnector
from spaceone.inventory.connector.web_pubsub_service import WebPubSubServiceConnector
+from spaceone.inventory.connector.resources import ResourcesConnector
diff --git a/src/spaceone/inventory/connector/key_vaults/connector.py b/src/spaceone/inventory/connector/key_vaults/connector.py
index 5b1855d1..47ed5ed2 100644
--- a/src/spaceone/inventory/connector/key_vaults/connector.py
+++ b/src/spaceone/inventory/connector/key_vaults/connector.py
@@ -1,39 +1,50 @@
import logging
+import azure.core.exceptions
+
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.error.custom import *
from azure.keyvault.secrets import SecretClient
from azure.keyvault.certificates import CertificateClient
from azure.identity import DefaultAzureCredential
-__all__ = ['KeyVaultsConnector']
+__all__ = ["KeyVaultsConnector"]
_LOGGER = logging.getLogger(__name__)
class KeyVaultsConnector(AzureConnector):
-
def __init__(self, **kwargs):
super().__init__(**kwargs)
- self.set_connect(kwargs.get('secret_data'))
- self.key_vault_secret_client = None
- self.key_vault_certificate_client = None
+ self.set_connect(kwargs.get("secret_data"))
+ # self.key_vault_secret_client = None
+ # self.key_vault_certificate_client = None
def init_key_vault_secret_client(self, subscription_id, vault_uri):
credential = DefaultAzureCredential()
- key_vault_secret_client = SecretClient(credential=credential, subscription_id=subscription_id, vault_url=vault_uri)
+ key_vault_secret_client = SecretClient(
+ credential=credential, subscription_id=subscription_id, vault_url=vault_uri
+ )
return key_vault_secret_client
def init_key_vault_certificate_client(self, subscription_id, vault_uri):
credential = DefaultAzureCredential()
- key_vault_certificate_client = CertificateClient(credential=credential, subscription_id=subscription_id, vault_url=vault_uri)
+ key_vault_certificate_client = CertificateClient(
+ credential=credential, subscription_id=subscription_id, vault_url=vault_uri
+ )
return key_vault_certificate_client
def list_all_key_vaults(self):
return self.key_vault_client.vaults.list_by_subscription()
+ def get_key_vaults(self):
+ return self.key_vault_client.vaults.get()
+
def list_keys(self, resource_group_name, vault_name):
- return self.key_vault_client.keys.list(resource_group_name=resource_group_name, vault_name=vault_name)
+ return self.key_vault_client.keys.list(
+ resource_group_name=resource_group_name, vault_name=vault_name
+ )
def list_secrets(self):
- return self.key_vault_secret_client.list_properties_of_secrets()
+ # return self.key_vault_secrets_client.list_properties_of_secrets()
+ return self.key_vault_client.secrets.list()
diff --git a/src/spaceone/inventory/connector/postgresql_servers/connector.py b/src/spaceone/inventory/connector/postgresql_servers/connector.py
index 80ba55c7..1f876bbe 100644
--- a/src/spaceone/inventory/connector/postgresql_servers/connector.py
+++ b/src/spaceone/inventory/connector/postgresql_servers/connector.py
@@ -2,28 +2,35 @@
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.error.custom import *
-__all__ = ['PostgreSQLServersConnector']
+
+__all__ = ["PostgreSQLServersConnector"]
_LOGGER = logging.getLogger(__name__)
class PostgreSQLServersConnector(AzureConnector):
-
def __init__(self, **kwargs):
super().__init__(**kwargs)
- self.set_connect(kwargs.get('secret_data'))
+ self.set_connect(kwargs.get("secret_data"))
def list_servers(self):
return self.postgre_sql_client.servers.list()
def list_firewall_rules_by_server(self, resource_group_name, server_name):
- return self.postgre_sql_client.firewall_rules.list_by_server(resource_group_name=resource_group_name, server_name=server_name)
+ return self.postgre_sql_client.firewall_rules.list_by_server(
+ resource_group_name=resource_group_name, server_name=server_name
+ )
def list_virtual_network_rules_by_server(self, resource_group_name, server_name):
- return self.postgre_sql_client.virtual_network_rules.list_by_server(resource_group_name=resource_group_name,
- server_name=server_name)
+ return self.postgre_sql_client.virtual_network_rules.list_by_server(
+ resource_group_name=resource_group_name, server_name=server_name
+ )
def list_replicas_by_server(self, resource_group_name, server_name):
- return self.postgre_sql_client.replicas.list_by_server(resource_group_name=resource_group_name, server_name=server_name)
+ return self.postgre_sql_client.replicas.list_by_server(
+ resource_group_name=resource_group_name, server_name=server_name
+ )
def list_server_administrators(self, resource_group_name, server_name):
- return self.postgre_sql_client.server_administrators.list(resource_group_name=resource_group_name, server_name=server_name)
+ return self.postgre_sql_client.server_administrators.list(
+ resource_group_name=resource_group_name, server_name=server_name
+ )
diff --git a/src/spaceone/inventory/connector/resources/__init__.py b/src/spaceone/inventory/connector/resources/__init__.py
new file mode 100644
index 00000000..a9bfc517
--- /dev/null
+++ b/src/spaceone/inventory/connector/resources/__init__.py
@@ -0,0 +1 @@
+from spaceone.inventory.connector.resources.connector import ResourcesConnector
diff --git a/src/spaceone/inventory/connector/resources/connector.py b/src/spaceone/inventory/connector/resources/connector.py
new file mode 100644
index 00000000..042cccc7
--- /dev/null
+++ b/src/spaceone/inventory/connector/resources/connector.py
@@ -0,0 +1,16 @@
+import logging
+
+from spaceone.inventory.libs.connector import AzureConnector
+from spaceone.inventory.error import *
+
+__all__ = ["ResourcesConnector"]
+_LOGGER = logging.getLogger(__name__)
+
+
+class ResourcesConnector(AzureConnector):
+ def __init__(self, **kwargs):
+        super().__init__(**kwargs)
+ self.set_connect(kwargs.get("secret_data"))
+
+ def list_resources(self) -> list:
+ return self.resource_client.resources.list()
diff --git a/src/spaceone/inventory/connector/storage_accounts/connector.py b/src/spaceone/inventory/connector/storage_accounts/connector.py
index 9da3a764..c6660791 100644
--- a/src/spaceone/inventory/connector/storage_accounts/connector.py
+++ b/src/spaceone/inventory/connector/storage_accounts/connector.py
@@ -1,21 +1,20 @@
import logging
from spaceone.inventory.libs.connector import AzureConnector
-from spaceone.inventory.error.custom import *
-__all__ = ['StorageAccountsConnector']
+__all__ = ["StorageAccountsConnector"]
_LOGGER = logging.getLogger(__name__)
class StorageAccountsConnector(AzureConnector):
-
def __init__(self, **kwargs):
super().__init__(**kwargs)
- self.set_connect(kwargs.get('secret_data'))
+ self.set_connect(kwargs.get("secret_data"))
def list_storage_accounts(self):
return self.storage_client.storage_accounts.list()
def list_blob_containers(self, rg_name, account_name):
- return self.storage_client.blob_containers.list(resource_group_name=rg_name, account_name=account_name)
-
+ return self.storage_client.blob_containers.list(
+ resource_group_name=rg_name, account_name=account_name
+ )
diff --git a/src/spaceone/inventory/connector/vm_scale_sets/connector.py b/src/spaceone/inventory/connector/vm_scale_sets/connector.py
index 636093f9..313f9233 100644
--- a/src/spaceone/inventory/connector/vm_scale_sets/connector.py
+++ b/src/spaceone/inventory/connector/vm_scale_sets/connector.py
@@ -3,26 +3,42 @@
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.error.custom import *
-__all__ = ['VmScaleSetsConnector']
+__all__ = ["VmScaleSetsConnector"]
_LOGGER = logging.getLogger(__name__)
class VmScaleSetsConnector(AzureConnector):
-
def __init__(self, **kwargs):
super().__init__(**kwargs)
- self.set_connect(kwargs.get('secret_data'))
+ self.set_connect(kwargs.get("secret_data"))
def list_vm_scale_sets(self):
return self.compute_client.virtual_machine_scale_sets.list_all()
def list_vm_scale_set_vms(self, resource_group, vm_scale_set_name):
- return self.compute_client.virtual_machine_scale_set_vms.list(resource_group, vm_scale_set_name)
-
- def get_vm_scale_set_instance_view(self, resource_group, vm_scale_set_name, instance_id):
- return self.compute_client.virtual_machine_scale_set_vms.get_instance_view(resource_group_name=resource_group,
- vm_scale_set_name=vm_scale_set_name,
- instance_id=instance_id)
+ return self.compute_client.virtual_machine_scale_set_vms.list(
+ resource_group, vm_scale_set_name
+ )
+
+ def get_vm_scale_set_instance_view(
+ self, resource_group, vm_scale_set_name, instance_id
+ ):
+ return self.compute_client.virtual_machine_scale_set_vms.get_instance_view(
+ resource_group_name=resource_group,
+ vm_scale_set_name=vm_scale_set_name,
+            instance_id=instance_id,
+ )
+
+ def list_vm_scale_set_instance_view(
+ self, resource_group, vm_scale_set_name, instance_id
+ ):
+ return self.compute_client.virtual_machine_scale_set_vms.list(
+ resource_group_name=resource_group,
+ vm_scale_set_name=vm_scale_set_name,
+ instance_id=instance_id,
+ )
def list_auto_scale_settings(self, resource_group):
- return self.monitor_client.autoscale_settings.list_by_resource_group(resource_group_name=resource_group)
\ No newline at end of file
+ return self.monitor_client.autoscale_settings.list_by_resource_group(
+ resource_group_name=resource_group
+ )
diff --git a/src/spaceone/inventory/info/__init__.py b/src/spaceone/inventory/info/__init__.py
index 3e799044..93b875f7 100644
--- a/src/spaceone/inventory/info/__init__.py
+++ b/src/spaceone/inventory/info/__init__.py
@@ -1,2 +1,3 @@
from spaceone.inventory.info.collector_info import *
+from spaceone.inventory.info.job_info import *
from spaceone.inventory.info.common_info import *
diff --git a/src/spaceone/inventory/info/job_info.py b/src/spaceone/inventory/info/job_info.py
new file mode 100644
index 00000000..18f24ab8
--- /dev/null
+++ b/src/spaceone/inventory/info/job_info.py
@@ -0,0 +1,18 @@
+__all__ = ["TasksInfo", "TaskInfo"]
+
+import functools
+from spaceone.api.inventory.plugin import job_pb2
+from spaceone.core.pygrpc.message_type import *
+
+
+def TaskInfo(task_data):
+ info = {"task_options": change_struct_type(task_data["task_options"])}
+ return job_pb2.TaskInfo(**info)
+
+
+def TasksInfo(result, **kwargs):
+ tasks_data = result.get("tasks", [])
+
+ return job_pb2.TasksInfo(
+ tasks=list(map(functools.partial(TaskInfo, **kwargs), tasks_data)),
+ )
diff --git a/src/spaceone/inventory/api/__init__.py b/src/spaceone/inventory/interface/__init__.py
similarity index 100%
rename from src/spaceone/inventory/api/__init__.py
rename to src/spaceone/inventory/interface/__init__.py
diff --git a/src/spaceone/inventory/interface/grpc/__init__.py b/src/spaceone/inventory/interface/grpc/__init__.py
new file mode 100644
index 00000000..edf6ec13
--- /dev/null
+++ b/src/spaceone/inventory/interface/grpc/__init__.py
@@ -0,0 +1,9 @@
+from spaceone.core.pygrpc.server import GRPCServer
+from spaceone.inventory.interface.grpc.collector import Collector
+from spaceone.inventory.interface.grpc.job import Job
+
+__all__ = ["app"]
+
+app = GRPCServer()
+app.add_service(Collector)
+app.add_service(Job)
diff --git a/src/spaceone/inventory/api/plugin/collector.py b/src/spaceone/inventory/interface/grpc/collector.py
similarity index 75%
rename from src/spaceone/inventory/api/plugin/collector.py
rename to src/spaceone/inventory/interface/grpc/collector.py
index 013d170c..05b6ddef 100644
--- a/src/spaceone/inventory/api/plugin/collector.py
+++ b/src/spaceone/inventory/interface/grpc/collector.py
@@ -15,25 +15,27 @@ class Collector(BaseAPI, collector_pb2_grpc.CollectorServicer):
def init(self, request, context):
params, metadata = self.parse_request(request, context)
- with self.locator.get_service('CollectorService', metadata) as collector_svc:
+ with self.locator.get_service("CollectorService", metadata) as collector_svc:
data = collector_svc.init(params)
- return self.locator.get_info('PluginInfo', data)
+ return self.locator.get_info("PluginInfo", data)
def verify(self, request, context):
params, metadata = self.parse_request(request, context)
- collector_svc: CollectorService = self.locator.get_service('CollectorService', metadata)
+ collector_svc: CollectorService = self.locator.get_service(
+ "CollectorService", metadata
+ )
with collector_svc:
collector_svc.verify(params)
- return self.locator.get_info('EmptyInfo')
+ return self.locator.get_info("EmptyInfo")
def collect(self, request, context):
params, metadata = self.parse_request(request, context)
- collector_svc: CollectorService = self.locator.get_service('CollectorService', metadata)
+ collector_svc: CollectorService = self.locator.get_service(
+ "CollectorService", metadata
+ )
with collector_svc:
for resource in collector_svc.collect(params):
- yield self.locator.get_info('ResourceInfo', resource.to_primitive())
-
-
+ yield self.locator.get_info("ResourceInfo", resource)
diff --git a/src/spaceone/inventory/interface/grpc/job.py b/src/spaceone/inventory/interface/grpc/job.py
new file mode 100644
index 00000000..df496f25
--- /dev/null
+++ b/src/spaceone/inventory/interface/grpc/job.py
@@ -0,0 +1,19 @@
+from spaceone.api.inventory.plugin import job_pb2_grpc, job_pb2
+from spaceone.core.pygrpc import BaseAPI
+from spaceone.core.pygrpc.message_type import *
+from spaceone.inventory.service import JobService
+import traceback
+import logging
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class Job(BaseAPI, job_pb2_grpc.JobServicer):
+ pb2 = job_pb2
+ pb2_grpc = job_pb2_grpc
+
+ def get_tasks(self, request, context):
+ params, metadata = self.parse_request(request, context)
+
+ with self.locator.get_service("JobService", metadata) as job_svc:
+ return self.locator.get_info("TasksInfo", job_svc.get_tasks(params))
diff --git a/src/spaceone/inventory/libs/connector.py b/src/spaceone/inventory/libs/connector.py
index 28448218..c92ff1e7 100644
--- a/src/spaceone/inventory/libs/connector.py
+++ b/src/spaceone/inventory/libs/connector.py
@@ -17,12 +17,11 @@
from azure.mgmt.webpubsub import WebPubSubManagementClient
from spaceone.core.connector import BaseConnector
-DEFAULT_SCHEMA = 'azure_client_secret'
+DEFAULT_SCHEMA = "azure_client_secret"
_LOGGER = logging.getLogger(__name__)
class AzureConnector(BaseConnector):
-
def __init__(self, *args, **kwargs):
"""
kwargs
@@ -54,31 +53,55 @@ def __init__(self, *args, **kwargs):
self.web_pubsub_service_client = None
def set_connect(self, secret_data):
- subscription_id = secret_data['subscription_id']
+ subscription_id = secret_data["subscription_id"]
os.environ["AZURE_SUBSCRIPTION_ID"] = subscription_id
- os.environ["AZURE_TENANT_ID"] = secret_data['tenant_id']
- os.environ["AZURE_CLIENT_ID"] = secret_data['client_id']
- os.environ["AZURE_CLIENT_SECRET"] = secret_data['client_secret']
+ os.environ["AZURE_TENANT_ID"] = secret_data["tenant_id"]
+ os.environ["AZURE_CLIENT_ID"] = secret_data["client_id"]
+ os.environ["AZURE_CLIENT_SECRET"] = secret_data["client_secret"]
credential = DefaultAzureCredential()
- self.compute_client = ComputeManagementClient(credential=credential, subscription_id=subscription_id)
- self.resource_client = ResourceManagementClient(credential=credential, subscription_id=subscription_id)
- self.network_client = NetworkManagementClient(credential=credential, subscription_id=subscription_id)
- self.subscription_client: SubscriptionClient = SubscriptionClient(credential=credential)
- self.sql_client = SqlManagementClient(credential=credential, subscription_id=subscription_id)
- self.monitor_client = MonitorManagementClient(credential=credential, subscription_id=subscription_id)
- self.storage_client = StorageManagementClient(credential=credential, subscription_id=subscription_id)
- self.key_vault_client = KeyVaultManagementClient(credential=credential, subscription_id=subscription_id)
- self.mysql_client = MySQLManagementClient(credential=credential, subscription_id=subscription_id)
- self.cosmosdb_client = CosmosDBManagementClient(credential=credential, subscription_id=subscription_id)
- self.postgre_sql_client = PostgreSQLManagementClient(credential=credential, subscription_id=subscription_id)
- self.container_instance_client = ContainerInstanceManagementClient(credential=credential,
- subscription_id=subscription_id)
- self.web_pubsub_service_client = WebPubSubManagementClient(credential=credential,
- subscription_id=subscription_id)
+ self.compute_client = ComputeManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.resource_client = ResourceManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.network_client = NetworkManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.subscription_client: SubscriptionClient = SubscriptionClient(
+ credential=credential
+ )
+ self.sql_client = SqlManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.monitor_client = MonitorManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.storage_client = StorageManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.key_vault_client = KeyVaultManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.mysql_client = MySQLManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.cosmosdb_client = CosmosDBManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.postgre_sql_client = PostgreSQLManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.container_instance_client = ContainerInstanceManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
+ self.web_pubsub_service_client = WebPubSubManagementClient(
+ credential=credential, subscription_id=subscription_id
+ )
def verify(self, **kwargs):
- self.set_connect(kwargs['secret_data'])
+ self.set_connect(kwargs["secret_data"])
return "ACTIVE"
diff --git a/src/spaceone/inventory/libs/manager.py b/src/spaceone/inventory/libs/manager.py
index 8d15efbe..75e91033 100644
--- a/src/spaceone/inventory/libs/manager.py
+++ b/src/spaceone/inventory/libs/manager.py
@@ -1,3 +1,7 @@
+import logging
+import json
+import os
+
from spaceone.core.manager import BaseManager
from spaceone.inventory.libs.connector import AzureConnector
from spaceone.inventory.libs.schema.region import RegionResource, RegionResponse
@@ -5,9 +9,6 @@
from spaceone.inventory.libs.schema.resource import ErrorResourceResponse
from spaceone.inventory.error.custom import *
from collections.abc import Iterable
-import json
-import logging
-
_LOGGER = logging.getLogger(__name__)
@@ -20,28 +21,33 @@ class AzureManager(BaseManager):
region_info = {}
def verify(self, options, secret_data, **kwargs):
- """ Check collector's status.
- """
- connector: AzureConnector = self.locator.get_connector('AzureConnector', secret_data=secret_data)
- params = {'secret_data': secret_data}
+ """Check collector's status."""
+ connector = AzureConnector()
+ params = {"secret_data": secret_data}
connector.verify(**params)
def collect_cloud_service_type(self, params):
- options = params.get('options', {})
+ options = params.get("options", {})
for cloud_service_type in self.cloud_service_types:
- if 'service_code_mappers' in options:
- svc_code_maps = options['service_code_mappers']
- if getattr(cloud_service_type.resource, 'service_code') and \
- cloud_service_type.resource.service_code in svc_code_maps:
- cloud_service_type.resource.service_code = svc_code_maps[cloud_service_type.resource.service_code]
-
- if 'custom_asset_url' in options:
+ if "service_code_mappers" in options:
+ svc_code_maps = options["service_code_mappers"]
+ if (
+ getattr(cloud_service_type.resource, "service_code")
+ and cloud_service_type.resource.service_code in svc_code_maps
+ ):
+ cloud_service_type.resource.service_code = svc_code_maps[
+ cloud_service_type.resource.service_code
+ ]
+
+ if "custom_asset_url" in options:
_tags = cloud_service_type.resource.tags
- if 'spaceone:icon' in _tags:
- _icon = _tags['spaceone:icon']
- _tags['spaceone:icon'] = f'{options["custom_asset_url"]}/{_icon.split("/")[-1]}'
+ if "spaceone:icon" in _tags:
+ _icon = _tags["spaceone:icon"]
+ _tags[
+ "spaceone:icon"
+ ] = f'{options["custom_asset_url"]}/{_icon.split("/")[-1]}'
yield cloud_service_type
@@ -52,7 +58,7 @@ def collect_resources(self, params) -> list:
total_resources = []
try:
- subscription_manager = self.locator.get_manager('SubscriptionsManager')
+ subscription_manager = self.locator.get_manager("SubscriptionsManager")
self.region_info = subscription_manager.list_location_info(params)
total_resources.extend(self.collect_cloud_service_type(params))
@@ -65,9 +71,13 @@ def collect_resources(self, params) -> list:
total_resources.extend(regions)
except Exception as e:
- error_resource_response = self.generate_error_response(e, self.cloud_service_types[0].resource.group, self.cloud_service_types[0].resource.name)
+ error_resource_response = self.generate_error_response(
+ e,
+ self.cloud_service_types[0].resource.group,
+ self.cloud_service_types[0].resource.name,
+ )
total_resources.append(error_resource_response)
- _LOGGER.error(f'[collect] {e}', exc_info=True)
+ _LOGGER.error(f"[collect] {e}", exc_info=True)
return total_resources
@@ -76,46 +86,54 @@ def collect_region(self):
try:
for region_code in self.collected_region_codes:
if region := self.match_region_info(region_code):
- results.append(RegionResourceResponse({'resource': region}))
+ results.append(RegionResourceResponse({"resource": region}))
except Exception as e:
- _LOGGER.error(f'[collect] {e}', exc_info=True)
+ _LOGGER.error(f"[collect] {e}", exc_info=True)
if type(e) is dict:
- error_resource_response = ErrorResourceResponse({
- 'message': json.dumps(e),
- 'resource': {'resource_type': 'inventory.Region'}
- })
+ error_resource_response = ErrorResourceResponse(
+ {
+ "message": json.dumps(e),
+ "resource": {"resource_type": "inventory.Region"},
+ }
+ )
else:
- error_resource_response = ErrorResourceResponse({
- 'message': str(e),
- 'resource': {'resource_type': 'inventory.Region'}
- })
+ error_resource_response = ErrorResourceResponse(
+ {
+ "message": str(e),
+ "resource": {"resource_type": "inventory.Region"},
+ }
+ )
results.append(error_resource_response)
return results
def set_region_code(self, region):
if region not in self.region_info:
- region = 'global'
+ region = "global"
if region not in self.collected_region_codes:
self.collected_region_codes.append(region)
def convert_nested_dictionary(self, cloud_svc_object):
cloud_svc_dict = {}
- if hasattr(cloud_svc_object, '__dict__'): # if cloud_svc_object is not a dictionary type but has dict method
+ if hasattr(
+ cloud_svc_object, "__dict__"
+ ): # if cloud_svc_object is not a dictionary type but has dict method
cloud_svc_dict = cloud_svc_object.__dict__
elif isinstance(cloud_svc_object, dict):
cloud_svc_dict = cloud_svc_object
- elif not isinstance(cloud_svc_object, list): # if cloud_svc_object is one of type like int, float, char, ...
+ elif not isinstance(
+ cloud_svc_object, list
+ ): # if cloud_svc_object is one of type like int, float, char, ...
return cloud_svc_object
# if cloud_svc_object is dictionary type
for key, value in cloud_svc_dict.items():
- if hasattr(value, '__dict__') or isinstance(value, dict):
+ if hasattr(value, "__dict__") or isinstance(value, dict):
cloud_svc_dict[key] = self.convert_nested_dictionary(value)
- if 'azure' in str(type(value)):
+ if "azure" in str(type(value)):
cloud_svc_dict[key] = self.convert_nested_dictionary(value)
elif isinstance(value, list):
value_list = []
@@ -130,9 +148,7 @@ def match_region_info(self, region_code):
if match_region_info:
region_info = match_region_info.copy()
- region_info.update({
- 'region_code': region_code
- })
+ region_info.update({"region_code": region_code})
return RegionResource(region_info, strict=False)
return None
@@ -143,10 +159,7 @@ def convert_tag_format(tags):
if tags:
for k, v in tags.items():
- convert_tags.append({
- 'key': k,
- 'value': v
- })
+ convert_tags.append({"key": k, "value": v})
return convert_tags
@@ -154,56 +167,67 @@ def convert_tag_format(tags):
def convert_dictionary(obj):
return vars(obj)
- # def convert_nested_dictionary(self, cloud_svc_object):
- # cloud_svc_dict = self.convert_dictionary(cloud_svc_object)
- # for k, v in cloud_svc_dict.items():
- # if isinstance(v, object): # object
- # if 'azure' in str(type(v)): # 1) if cloud_svc_object is azure defined model class
- # cloud_svc_dict[k] = self.convert_nested_dictionary(v)
- # elif isinstance(v, list): # 2) if cloud_svc_object is list
- # cloud_svc_converse_list = list()
- # for list_obj in v: # if cloud_svc object's child value is Azure defined model class or dict class
- # if hasattr(list_obj, '__dict__') or 'azure' in str(type(list_obj)):
- # cloud_svc_converse_dict = self.convert_nested_dictionary(list_obj)
- # cloud_svc_converse_list.append(cloud_svc_converse_dict)
- # else: # if cloud_svc_object's child value is simple list
- # cloud_svc_converse_list.append(list_obj)
- #
- # cloud_svc_dict[k] = cloud_svc_converse_list
- #
- # elif hasattr(v, '__dict__'): # if cloud_svc_object is not a list type, just a dict
- # cloud_svc_converse_dict = self.convert_nested_dictionary(v)
- # cloud_svc_dict[k] = cloud_svc_converse_dict
- #
- # return cloud_svc_dict
-
@staticmethod
def get_resource_group_from_id(dict_id):
- resource_group = dict_id.split('/')[4]
+ resource_group = dict_id.split("/")[4]
return resource_group
@staticmethod
def generate_error_response(e, cloud_service_group, cloud_service_type):
if type(e) is dict:
- error_resource_response = ErrorResourceResponse({'message': json.dumps(e),
- 'resource': {'cloud_service_group': cloud_service_group,
- 'cloud_service_type': cloud_service_type}})
+ error_resource_response = ErrorResourceResponse(
+ {
+ "message": json.dumps(e),
+ "resource": {
+ "cloud_service_group": cloud_service_group,
+ "cloud_service_type": cloud_service_type,
+ },
+ }
+ )
else:
- error_resource_response = ErrorResourceResponse({'message': str(e),
- 'resource': {'cloud_service_group': cloud_service_group,
- 'cloud_service_type': cloud_service_type}})
+ error_resource_response = ErrorResourceResponse(
+ {
+ "message": str(e),
+ "resource": {
+ "cloud_service_group": cloud_service_group,
+ "cloud_service_type": cloud_service_type,
+ },
+ }
+ )
return error_resource_response
@staticmethod
- def generate_resource_error_response(e, cloud_service_group, cloud_service_type, resource_id):
+ def generate_resource_error_response(
+ e, cloud_service_group, cloud_service_type, resource_id
+ ):
if type(e) is dict:
- error_resource_response = ErrorResourceResponse({'message': json.dumps(e),
- 'resource': {'cloud_service_group': cloud_service_group,
- 'cloud_service_type': cloud_service_type,
- 'resource_id': resource_id}})
+ error_resource_response = ErrorResourceResponse(
+ {
+ "message": json.dumps(e),
+ "resource": {
+ "cloud_service_group": cloud_service_group,
+ "cloud_service_type": cloud_service_type,
+ "resource_id": resource_id,
+ },
+ }
+ )
else:
- error_resource_response = ErrorResourceResponse({'message': str(e),
- 'resource': {'cloud_service_group': cloud_service_group,
- 'cloud_service_type': cloud_service_type,
- 'resource_id': resource_id}})
+ error_resource_response = ErrorResourceResponse(
+ {
+ "message": str(e),
+ "resource": {
+ "cloud_service_group": cloud_service_group,
+ "cloud_service_type": cloud_service_type,
+ "resource_id": resource_id,
+ },
+ }
+ )
return error_resource_response
+
+ @staticmethod
+ def update_tenant_id_from_secret_data(
+ cloud_service_data: dict, secret_data: dict
+ ) -> dict:
+ if tenant_id := secret_data.get("tenant_id"):
+ cloud_service_data.update({"tenant_id": tenant_id})
+ return cloud_service_data
diff --git a/src/spaceone/inventory/libs/schema/metadata/dynamic_field.py b/src/spaceone/inventory/libs/schema/metadata/dynamic_field.py
index 0f316ee1..04caa8bf 100644
--- a/src/spaceone/inventory/libs/schema/metadata/dynamic_field.py
+++ b/src/spaceone/inventory/libs/schema/metadata/dynamic_field.py
@@ -1,23 +1,111 @@
import math
from schematics import Model
-from schematics.types import ModelType, StringType, PolyModelType, DictType, BooleanType, BaseType
+from schematics.types import (
+ ModelType,
+ StringType,
+ PolyModelType,
+ DictType,
+ BooleanType,
+ BaseType,
+)
from spaceone.inventory.libs.schema.metadata.dynamic_search import BaseDynamicSearch
-
BACKGROUND_COLORS = [
- 'black', 'white',
- 'gray', 'gray.100', 'gray.200', 'gray.300', 'gray.400', 'gray.500', 'gray.600', 'gray.700', 'gray.800', 'gray.900',
- 'red', 'red.100', 'red.200', 'red.300', 'red.400', 'red.500', 'red.600', 'red.700', 'red.800', 'red.900',
- 'coral', 'coral.100', 'coral.200', 'coral.300', 'coral.400', 'coral.500', 'coral.600', 'coral.700', 'coral.800', 'coral.900',
- 'yellow', 'yellow.100', 'yellow.200', 'yellow.300', 'yellow.400', 'yellow.500', 'yellow.600', 'yellow.700', 'yellow.800', 'yellow.900',
- 'green', 'green.100', 'green.200', 'green.300', 'green.400', 'green.500', 'green.600', 'green.700', 'green.800', 'green.900',
- 'blue', 'blue.100', 'blue.200', 'blue.300', 'blue.400', 'blue.500', 'blue.600', 'blue.700', 'blue.800', 'blue.900',
- 'violet', 'violet.100', 'violet.200', 'violet.300', 'violet.400', 'violet.500', 'violet.600', 'violet.700', 'violet.800', 'violet.900',
- 'peacock', 'peacock.100', 'peacock.200', 'peacock.300', 'peacock.400', 'peacock.500', 'peacock.600', 'peacock.700', 'peacock.800', 'peacock.900',
- 'indigo', 'indigo.100', 'indigo.200', 'indigo.300', 'indigo.400', 'indigo.500', 'indigo.600', 'indigo.700', 'indigo.800', 'indigo.900',
+ "black",
+ "white",
+ "gray",
+ "gray.100",
+ "gray.200",
+ "gray.300",
+ "gray.400",
+ "gray.500",
+ "gray.600",
+ "gray.700",
+ "gray.800",
+ "gray.900",
+ "red",
+ "red.100",
+ "red.200",
+ "red.300",
+ "red.400",
+ "red.500",
+ "red.600",
+ "red.700",
+ "red.800",
+ "red.900",
+ "coral",
+ "coral.100",
+ "coral.200",
+ "coral.300",
+ "coral.400",
+ "coral.500",
+ "coral.600",
+ "coral.700",
+ "coral.800",
+ "coral.900",
+ "yellow",
+ "yellow.100",
+ "yellow.200",
+ "yellow.300",
+ "yellow.400",
+ "yellow.500",
+ "yellow.600",
+ "yellow.700",
+ "yellow.800",
+ "yellow.900",
+ "green",
+ "green.100",
+ "green.200",
+ "green.300",
+ "green.400",
+ "green.500",
+ "green.600",
+ "green.700",
+ "green.800",
+ "green.900",
+ "blue",
+ "blue.100",
+ "blue.200",
+ "blue.300",
+ "blue.400",
+ "blue.500",
+ "blue.600",
+ "blue.700",
+ "blue.800",
+ "blue.900",
+ "violet",
+ "violet.100",
+ "violet.200",
+ "violet.300",
+ "violet.400",
+ "violet.500",
+ "violet.600",
+ "violet.700",
+ "violet.800",
+ "violet.900",
+ "peacock",
+ "peacock.100",
+ "peacock.200",
+ "peacock.300",
+ "peacock.400",
+ "peacock.500",
+ "peacock.600",
+ "peacock.700",
+ "peacock.800",
+ "peacock.900",
+ "indigo",
+ "indigo.100",
+ "indigo.200",
+ "indigo.300",
+ "indigo.400",
+ "indigo.500",
+ "indigo.600",
+ "indigo.700",
+ "indigo.800",
+ "indigo.900",
]
-TYPE_BADGE = ['primary', 'indigo.500', 'coral.600', 'peacock.500', 'green.500']
+TYPE_BADGE = ["primary", "indigo.500", "coral.600", "peacock.500", "green.500"]
class FieldReference(Model):
@@ -27,14 +115,28 @@ class FieldReference(Model):
class Icon(Model):
image = StringType(serialize_when_none=False)
- color = StringType(default='green', choices=BACKGROUND_COLORS)
+ color = StringType(default="green", choices=BACKGROUND_COLORS)
class BaseField(Model):
- type = StringType(choices=["text", "state", "badge", "list", "dict",
- "datetime", "image", "enum", "progress", "size"],
- serialize_when_none=False)
- options = PolyModelType([Model, DictType(PolyModelType(Model))], serialize_when_none=False)
+ type = StringType(
+ choices=[
+ "text",
+ "state",
+ "badge",
+ "list",
+ "dict",
+ "datetime",
+ "image",
+ "enum",
+ "progress",
+ "size",
+ ],
+ serialize_when_none=False,
+ )
+ options = PolyModelType(
+ [Model, DictType(PolyModelType(Model))], serialize_when_none=False
+ )
class FieldViewOption(Model):
@@ -57,7 +159,7 @@ class BaseDynamicField(BaseField):
@classmethod
def data_source(cls, name, key, **kwargs):
- return cls({'key': key, 'name': name, **kwargs})
+ return cls({"key": key, "name": name, **kwargs})
class TextDyFieldOptions(FieldViewOption):
@@ -66,7 +168,7 @@ class TextDyFieldOptions(FieldViewOption):
class BadgeDyFieldOptions(FieldViewOption):
text_color = StringType(serialize_when_none=False)
- shape = StringType(serialize_when_none=False, choices=['SQUARE', 'ROUND'])
+ shape = StringType(serialize_when_none=False, choices=["SQUARE", "ROUND"])
outline_color = StringType(serialize_when_none=False, choices=BACKGROUND_COLORS)
background_color = StringType(serialize_when_none=False, choices=BACKGROUND_COLORS)
@@ -77,13 +179,13 @@ class StateDyFieldOptions(FieldViewOption):
class ImageDyFieldOptions(FieldViewOption):
- image_url = StringType(default='')
+ image_url = StringType(default="")
width = StringType(serialize_when_none=False)
height = StringType(serialize_when_none=False)
class DateTimeDyFieldOptions(FieldViewOption):
- source_type = StringType(default='timestamp', choices=['iso8601', 'timestamp'])
+ source_type = StringType(default="timestamp", choices=["iso8601", "timestamp"])
source_format = StringType(serialize_when_none=False)
display_format = StringType(serialize_when_none=False)
@@ -93,8 +195,12 @@ class ProgressFieldOptions(FieldViewOption):
class SizeFieldOptions(FieldViewOption):
- display_unit = StringType(serialize_when_none=False, choices=('BYTES', 'KB', 'MB', 'GB', 'TB', 'PB'))
- source_unit = StringType(serialize_when_none=False, choices=('BYTES', 'KB', 'MB', 'GB', 'TB', 'PB'))
+ display_unit = StringType(
+ serialize_when_none=False, choices=("BYTES", "KB", "MB", "GB", "TB", "PB")
+ )
+ source_unit = StringType(
+ serialize_when_none=False, choices=("BYTES", "KB", "MB", "GB", "TB", "PB")
+ )
class TextDyField(BaseDynamicField):
@@ -103,12 +209,12 @@ class TextDyField(BaseDynamicField):
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
- if 'options' in kwargs:
- _data_source.update({'options': TextDyFieldOptions(kwargs.get('options'))})
+ _data_source = {"key": key, "name": name}
+ if "options" in kwargs:
+ _data_source.update({"options": TextDyFieldOptions(kwargs.get("options"))})
- if 'reference' in kwargs:
- _data_source.update({'reference': kwargs.get('reference')})
+ if "reference" in kwargs:
+ _data_source.update({"reference": kwargs.get("reference")})
return cls(_data_source)
@@ -119,12 +225,12 @@ class StateDyField(BaseDynamicField):
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
- if 'options' in kwargs:
- _data_source.update({'options': StateDyFieldOptions(kwargs.get('options'))})
+ _data_source = {"key": key, "name": name}
+ if "options" in kwargs:
+ _data_source.update({"options": StateDyFieldOptions(kwargs.get("options"))})
- if 'reference' in kwargs:
- _data_source.update({'reference': kwargs.get('reference')})
+ if "reference" in kwargs:
+ _data_source.update({"reference": kwargs.get("reference")})
return cls(_data_source)
@@ -135,16 +241,21 @@ class BadgeDyField(BaseDynamicField):
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
+ _data_source = {"key": key, "name": name}
- if 'options' in kwargs:
- _data_source.update({'options': BadgeDyFieldOptions(kwargs.get('options'))})
+ if "options" in kwargs:
+ _data_source.update({"options": BadgeDyFieldOptions(kwargs.get("options"))})
else:
- _data_source.update({'options': BadgeDyFieldOptions({'background_color': 'gray.200',
- 'text_color': 'gray.900'})})
+ _data_source.update(
+ {
+ "options": BadgeDyFieldOptions(
+ {"background_color": "gray.200", "text_color": "gray.900"}
+ )
+ }
+ )
- if 'reference' in kwargs:
- _data_source.update({'reference': kwargs.get('reference')})
+ if "reference" in kwargs:
+ _data_source.update({"reference": kwargs.get("reference")})
return cls(_data_source)
@@ -155,12 +266,12 @@ class ImageDyField(BaseDynamicField):
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
- if 'options' in kwargs:
- _data_source.update({'options': ImageDyFieldOptions(kwargs.get('options'))})
+ _data_source = {"key": key, "name": name}
+ if "options" in kwargs:
+ _data_source.update({"options": ImageDyFieldOptions(kwargs.get("options"))})
- if 'reference' in kwargs:
- _data_source.update({'reference': kwargs.get('reference')})
+ if "reference" in kwargs:
+ _data_source.update({"reference": kwargs.get("reference")})
return cls(_data_source)
@@ -171,12 +282,14 @@ class DateTimeDyField(BaseDynamicField):
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
- if 'options' in kwargs:
- _data_source.update({'options': DateTimeDyFieldOptions(kwargs.get('options'))})
+ _data_source = {"key": key, "name": name}
+ if "options" in kwargs:
+ _data_source.update(
+ {"options": DateTimeDyFieldOptions(kwargs.get("options"))}
+ )
- if 'reference' in kwargs:
- _data_source.update({'reference': kwargs.get('reference')})
+ if "reference" in kwargs:
+ _data_source.update({"reference": kwargs.get("reference")})
return cls(_data_source)
@@ -192,7 +305,7 @@ class StateItemDyField(BaseField):
@classmethod
def set(cls, options):
- return cls({'options': StateDyFieldOptions(options)})
+ return cls({"options": StateDyFieldOptions(options)})
class BadgeItemDyField(BaseField):
@@ -201,7 +314,7 @@ class BadgeItemDyField(BaseField):
@classmethod
def set(cls, options):
- return cls({'options': BadgeDyFieldOptions(options)})
+ return cls({"options": BadgeDyFieldOptions(options)})
class ImageItemDyField(BaseField):
@@ -210,7 +323,7 @@ class ImageItemDyField(BaseField):
@classmethod
def set(cls, options):
- return cls({'options': ImageDyFieldOptions(options)})
+ return cls({"options": ImageDyFieldOptions(options)})
class DatetimeItemDyField(BaseField):
@@ -219,11 +332,14 @@ class DatetimeItemDyField(BaseField):
@classmethod
def set(cls, options):
- return cls({'options': DateTimeDyFieldOptions(options)})
+ return cls({"options": DateTimeDyFieldOptions(options)})
class ListDyFieldOptions(FieldViewOption):
- item = PolyModelType([BadgeItemDyField, StateDyField, DateTimeDyField, DictDyField], serialize_when_none=False)
+ item = PolyModelType(
+ [BadgeItemDyField, StateDyField, DateTimeDyField, DictDyField],
+ serialize_when_none=False,
+ )
sub_key = StringType(serialize_when_none=False)
delimiter = StringType(serialize_when_none=False)
@@ -234,109 +350,134 @@ class ListDyField(BaseDynamicField):
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
- if 'default_badge' in kwargs:
- _default_badge = kwargs.get('default_badge')
- _list_options = {'delimiter': ' '}
+ _data_source = {"key": key, "name": name}
+ if "default_badge" in kwargs:
+ _default_badge = kwargs.get("default_badge")
+ _list_options = {"delimiter": " "}
- if 'type' in _default_badge and _default_badge.get('type') == 'outline':
- _list_options.update({'item': BadgeItemDyField.set({'outline_color': 'violet.500'})})
- elif 'type' in _default_badge and _default_badge.get('type') == 'inline':
- _list_options.update({'item': BadgeItemDyField.set({'background_color': 'violet.500'})})
+ if "type" in _default_badge and _default_badge.get("type") == "outline":
+ _list_options.update(
+ {"item": BadgeItemDyField.set({"outline_color": "violet.500"})}
+ )
+ elif "type" in _default_badge and _default_badge.get("type") == "inline":
+ _list_options.update(
+ {"item": BadgeItemDyField.set({"background_color": "violet.500"})}
+ )
- if 'sub_key' in _default_badge:
- _list_options.update({'sub_key': _default_badge.get('sub_key')})
+ if "sub_key" in _default_badge:
+ _list_options.update({"sub_key": _default_badge.get("sub_key")})
- if 'delimiter' in _default_badge:
- _list_options.update({'delimiter': _default_badge.get('delimiter')})
+ if "delimiter" in _default_badge:
+ _list_options.update({"delimiter": _default_badge.get("delimiter")})
- _data_source.update({'options': ListDyFieldOptions(_list_options)})
+ _data_source.update({"options": ListDyFieldOptions(_list_options)})
- if 'options' in kwargs:
- _data_source.update({'options': ListDyFieldOptions(kwargs.get('options'))})
+ if "options" in kwargs:
+ _data_source.update({"options": ListDyFieldOptions(kwargs.get("options"))})
- if 'reference' in kwargs:
- _data_source.update({'reference': kwargs.get('reference')})
+ if "reference" in kwargs:
+ _data_source.update({"reference": kwargs.get("reference")})
return cls(_data_source)
class EnumOptionDyField(FieldViewOption):
- items = DictType(PolyModelType([StateItemDyField, BadgeItemDyField, ImageItemDyField, DatetimeItemDyField]),
- serialize_when_none=False, default={})
+ items = DictType(
+ PolyModelType(
+ [StateItemDyField, BadgeItemDyField, ImageItemDyField, DatetimeItemDyField]
+ ),
+ serialize_when_none=False,
+ default={},
+ )
class EnumDyField(BaseDynamicField):
type = StringType(default="enum")
- options = DictType(PolyModelType([StateItemDyField, BadgeItemDyField, ImageItemDyField, DatetimeItemDyField,
- EnumOptionDyField]),
- serialize_when_none=False,
- default={})
+ options = DictType(
+ PolyModelType(
+ [
+ StateItemDyField,
+ BadgeItemDyField,
+ ImageItemDyField,
+ DatetimeItemDyField,
+ EnumOptionDyField,
+ ]
+ ),
+ serialize_when_none=False,
+ default={},
+ )
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
- _default_badge = kwargs.get('default_badge', {})
- _default_state = kwargs.get('default_state', {})
- _default_outline_badge = kwargs.get('default_outline_badge', [])
+ _data_source = {"key": key, "name": name}
+ _default_badge = kwargs.get("default_badge", {})
+ _default_state = kwargs.get("default_state", {})
+ _default_outline_badge = kwargs.get("default_outline_badge", [])
_options_dic = {}
for _key in _default_outline_badge:
_round_index = len(TYPE_BADGE)
_index = _default_outline_badge.index(_key)
- _num = math.floor(_index/len(TYPE_BADGE))
+ _num = math.floor(_index / len(TYPE_BADGE))
if _num > 0:
- _round_index = len(TYPE_BADGE)*_num
+ _round_index = len(TYPE_BADGE) * _num
if _round_index - 1 < _index:
_index = _index - _round_index
- _options_dic[_key] = BadgeItemDyField.set({'outline_color': TYPE_BADGE[_index]})
+ _options_dic[_key] = BadgeItemDyField.set(
+ {"outline_color": TYPE_BADGE[_index]}
+ )
for _key in _default_badge:
for _badge in _default_badge[_key]:
- _options_dic[_badge] = BadgeItemDyField.set({'background_color': _key})
+ _options_dic[_badge] = BadgeItemDyField.set({"background_color": _key})
for _key in _default_state:
for _state in _default_state[_key]:
- _state_options = {'icon': {'color': 'gray.400'}}
-
- if _key == 'safe':
- _state_options = {'icon': {'color': 'green.500'}}
- elif _key == 'disable':
- _state_options.update({'text_color': 'gray.400'})
- elif _key == 'warning':
- _state_options = {'icon': {'color': 'yellow.500'}}
- elif _key == 'available':
- _state_options = {'icon': {'color': 'blue.400'}}
- elif _key == 'alert':
- _state_options = {'text_color': 'red.500', 'icon': {'color': 'red.500'}}
+ _state_options = {"icon": {"color": "gray.400"}}
+
+ if _key == "safe":
+ _state_options = {"icon": {"color": "green.500"}}
+ elif _key == "disable":
+ _state_options.update({"text_color": "gray.400"})
+ elif _key == "warning":
+ _state_options = {"icon": {"color": "yellow.500"}}
+ elif _key == "available":
+ _state_options = {"icon": {"color": "blue.400"}}
+ elif _key == "alert":
+ _state_options = {
+ "text_color": "red.500",
+ "icon": {"color": "red.500"},
+ }
_options_dic[_state] = StateItemDyField.set(_state_options)
- _data_source.update({'options': _options_dic})
+ _data_source.update({"options": _options_dic})
- if 'options' in kwargs:
- print(f'enum options {kwargs.get("options")}')
- _data_source.update({'options': kwargs.get('options')})
+ if "options" in kwargs:
+ _data_source.update({"options": kwargs.get("options")})
- if 'reference' in kwargs:
- _data_source.update({'reference': kwargs.get('reference')})
+ if "reference" in kwargs:
+ _data_source.update({"reference": kwargs.get("reference")})
return cls(_data_source)
class ProgressField(BaseDynamicField):
type = StringType(default="progress")
- options = PolyModelType(ProgressFieldOptions, serialize_when_none=False, )
+ options = PolyModelType(
+ ProgressFieldOptions,
+ serialize_when_none=False,
+ )
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
+ _data_source = {"key": key, "name": name}
- if 'options' in kwargs:
- _data_source.update({'options': kwargs.get('options')})
+ if "options" in kwargs:
+ _data_source.update({"options": kwargs.get("options")})
return cls(_data_source)
@@ -347,10 +488,10 @@ class SizeField(BaseDynamicField):
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
+ _data_source = {"key": key, "name": name}
- if 'options' in kwargs:
- _data_source.update({'options': kwargs.get('options')})
+ if "options" in kwargs:
+ _data_source.update({"options": kwargs.get("options")})
return cls(_data_source)
@@ -364,10 +505,10 @@ def set_field(cls, label=None, icon=None):
return_dic = {}
if label is not None:
- return_dic.update({'label': label})
+ return_dic.update({"label": label})
if icon is not None:
- return_dic.update({'icon': Icon(icon)})
+ return_dic.update({"icon": Icon(icon)})
return cls(return_dic)
@@ -377,17 +518,14 @@ class SearchField(BaseDynamicSearch):
reference = StringType(serialize_when_none=False)
@classmethod
- def set(cls, name='', key='', data_type=None, enums=None, reference=None):
- return_dic = {
- 'name': name,
- 'key': key
- }
+ def set(cls, name="", key="", data_type=None, enums=None, reference=None):
+ return_dic = {"name": name, "key": key}
if data_type is not None:
- return_dic.update({'data_type': data_type})
+ return_dic.update({"data_type": data_type})
if reference is not None:
- return_dic.update({'reference': reference})
+ return_dic.update({"reference": reference})
if enums is not None:
convert_enums = {}
@@ -395,15 +533,13 @@ def set(cls, name='', key='', data_type=None, enums=None, reference=None):
enum_v = enums[enum_key]
convert_enums[enum_key] = SearchEnumField.set_field(**enum_v)
- return_dic.update({
- 'enums': convert_enums
- })
+ return_dic.update({"enums": convert_enums})
return cls(return_dic)
class MoreLayoutField(Model):
- name = StringType(default='')
+ name = StringType(default="")
type = StringType(default="popup")
options = DictType(BaseType, serialize_when_none=False)
@@ -419,9 +555,9 @@ class MoreField(BaseDynamicField):
@classmethod
def data_source(cls, name, key, **kwargs):
- _data_source = {'key': key, 'name': name}
+ _data_source = {"key": key, "name": name}
- if 'options' in kwargs:
- _data_source.update({'options': kwargs.get('options')})
+ if "options" in kwargs:
+ _data_source.update({"options": kwargs.get("options")})
return cls(_data_source)
diff --git a/src/spaceone/inventory/libs/schema/metadata/dynamic_layout.py b/src/spaceone/inventory/libs/schema/metadata/dynamic_layout.py
index 57dde1cc..2c9987f9 100644
--- a/src/spaceone/inventory/libs/schema/metadata/dynamic_layout.py
+++ b/src/spaceone/inventory/libs/schema/metadata/dynamic_layout.py
@@ -1,6 +1,9 @@
from schematics import Model
from schematics.types import StringType, PolyModelType, ListType
-from spaceone.inventory.libs.schema.metadata.dynamic_field import BaseDynamicField, TextDyField
+from spaceone.inventory.libs.schema.metadata.dynamic_field import (
+ BaseDynamicField,
+ TextDyField,
+)
class LayoutOptions(Model):
@@ -13,15 +16,25 @@ class Options:
class BaseLayoutField(Model):
@staticmethod
def _set_fields(fields=[], **kwargs):
- _options = {'fields': fields}
+ _options = {"fields": fields}
for k, v in kwargs.items():
if v is not None:
_options[k] = v
return _options
- name = StringType(default='')
- type = StringType(default="item",
- choices=("item", "table", "query-search-table", "simple-table", "list", "raw", "html"))
+ name = StringType(default="")
+ type = StringType(
+ default="item",
+ choices=(
+ "item",
+ "table",
+ "query-search-table",
+ "simple-table",
+ "list",
+ "raw",
+ "html",
+ ),
+ )
options = PolyModelType(LayoutOptions, serialize_when_none=False)
@@ -56,106 +69,118 @@ class ListLayoutOption(LayoutOptions):
class ItemDynamicLayout(BaseLayoutField):
- type = StringType(default='item')
+ type = StringType(default="item")
options = PolyModelType(ItemLayoutOption)
@classmethod
- def set(cls, name='', root_path=''):
- return cls({'name': name, 'options': ItemLayoutOption({'root_path': root_path})})
+ def set(cls, name="", root_path=""):
+ return cls(
+ {"name": name, "options": ItemLayoutOption({"root_path": root_path})}
+ )
@classmethod
- def set_fields(cls, name='', root_path=None, fields=[]):
+ def set_fields(cls, name="", root_path=None, fields=[]):
_options = cls._set_fields(fields, root_path=root_path)
- return cls({'name': name, 'options': ItemLayoutOption(_options)})
+ return cls({"name": name, "options": ItemLayoutOption(_options)})
class TableDynamicLayout(BaseLayoutField):
- type = StringType(default='table')
+ type = StringType(default="table")
options = PolyModelType(TableLayoutOption)
@classmethod
- def set(cls, name='', root_path=''):
- return cls(name=name, root_path=root_path, options=TableLayoutOption({'root_path': root_path}))
+ def set(cls, name="", root_path=""):
+ return cls(
+ name=name,
+ root_path=root_path,
+ options=TableLayoutOption({"root_path": root_path}),
+ )
@classmethod
- def set_fields(cls, name='', root_path=None, fields=[]):
+ def set_fields(cls, name="", root_path=None, fields=[]):
_options = cls._set_fields(fields, root_path=root_path)
- return cls({'name': name, 'options': TableLayoutOption(_options)})
+ return cls({"name": name, "options": TableLayoutOption(_options)})
class QuerySearchTableDynamicLayout(BaseLayoutField):
- type = StringType(default='query-search-table')
+ type = StringType(default="query-search-table")
options = PolyModelType(QuerySearchTableLayoutOption)
@classmethod
- def set(cls, name=''):
- return cls(name=name, options=QuerySearchTableLayoutOption())
+ def set(cls, name="", root_path=""):
+ return cls(
+ name=name, options=QuerySearchTableLayoutOption({"root_path": root_path})
+ )
@classmethod
- def set_fields(cls, name='', fields=[]):
- _options = cls._set_fields(fields)
- return cls({'name': name, 'options': QuerySearchTableLayoutOption(_options)})
+ def set_fields(cls, name, fields: list = None, root_path=None):
+ if fields is None:
+ fields = []
+ _options = cls._set_fields(fields, root_path=root_path)
+ return cls({"name": name, "options": QuerySearchTableLayoutOption(_options)})
class SimpleTableDynamicLayout(BaseLayoutField):
- type = StringType(default='simple-table')
+ type = StringType(default="simple-table")
options = PolyModelType(SimpleTableLayoutOption)
@classmethod
- def set(cls, name='', root_path=''):
- return cls({'name': name, 'options': SimpleTableLayoutOption({'root_path': root_path})})
+ def set(cls, name="", root_path=""):
+ return cls(
+ {"name": name, "options": SimpleTableLayoutOption({"root_path": root_path})}
+ )
@classmethod
- def set_fields(cls, name='', root_path=None, fields=[]):
+ def set_fields(cls, name="", root_path=None, fields=[]):
_options = cls._set_fields(fields, root_path=root_path)
- return cls({'name': name, 'options': SimpleTableLayoutOption(_options)})
+ return cls({"name": name, "options": SimpleTableLayoutOption(_options)})
@classmethod
- def set_tags(cls, name='Tags', root_path='data.tags', fields=None):
+ def set_tags(cls, name="Tags", root_path="data.tags", fields=None):
if fields is None:
fields = [
- TextDyField.data_source('Key', 'key'),
- TextDyField.data_source('Value', 'value'),
+ TextDyField.data_source("Key", "key"),
+ TextDyField.data_source("Value", "value"),
]
return cls.set_fields(name, root_path, fields)
class ListDynamicLayout(BaseLayoutField):
- type = StringType(default='list')
+ type = StringType(default="list")
options = PolyModelType(ListLayoutOption)
@classmethod
- def set(cls, name='', layouts=[]):
- return cls(name=name, options=ListLayoutOption({'layouts': layouts}))
+ def set(cls, name="", layouts=[]):
+ return cls(name=name, options=ListLayoutOption({"layouts": layouts}))
@classmethod
- def set_layouts(cls, name='', layouts=[]):
- return cls({'name': name, 'options': ListLayoutOption({'layouts': layouts})})
+ def set_layouts(cls, name="", layouts=[]):
+ return cls({"name": name, "options": ListLayoutOption({"layouts": layouts})})
class RawDynamicLayout(BaseLayoutField):
- type = StringType(default='raw')
+ type = StringType(default="raw")
options = PolyModelType(RawLayoutOption)
@classmethod
- def set(cls, name='', root_path=None):
+ def set(cls, name="", root_path=None):
if root_path is None:
_options = RawLayoutOption()
else:
- _options = RawLayoutOption({'root_path': root_path})
+ _options = RawLayoutOption({"root_path": root_path})
- return cls({'name': name, 'options': _options})
+ return cls({"name": name, "options": _options})
class HTMLDynamicLayout(BaseLayoutField):
- type = StringType(default='html')
+ type = StringType(default="html")
options = PolyModelType(HTMLLayoutOption)
@classmethod
- def set(cls, name='', root_path=None):
+ def set(cls, name="", root_path=None):
if root_path is None:
_options = HTMLLayoutOption()
else:
- _options = HTMLLayoutOption({'root_path': root_path})
+ _options = HTMLLayoutOption({"root_path": root_path})
- return cls({'name': name, 'options': _options})
+ return cls({"name": name, "options": _options})
diff --git a/src/spaceone/inventory/libs/schema/resource.py b/src/spaceone/inventory/libs/schema/resource.py
index 8255b950..3da160c9 100644
--- a/src/spaceone/inventory/libs/schema/resource.py
+++ b/src/spaceone/inventory/libs/schema/resource.py
@@ -5,47 +5,59 @@
class ErrorResource(Model):
- resource_type = StringType(default='inventory.CloudService')
- provider = StringType(default='azure')
- cloud_service_group = StringType(default='')
- cloud_service_type = StringType(default='')
+ resource_type = StringType(default="inventory.CloudService")
+ provider = StringType(default="azure")
+ cloud_service_group = StringType(default="")
+ cloud_service_type = StringType(default="")
resource_id = StringType(serialize_when_none=False)
class ResourceResponse(Model):
state = StringType()
- message = StringType(default='')
+ message = StringType(default="")
resource_type = StringType()
match_rules = DictType(ListType(StringType), serialize_when_none=False)
resource = DictType(StringType, default={})
class CloudServiceResourceResponse(ResourceResponse):
- state = StringType(default='SUCCESS')
- resource_type = StringType(default='inventory.CloudService')
- match_rules = DictType(ListType(StringType), default={
- '1': ['reference.resource_id', 'provider', 'cloud_service_type', 'cloud_service_group']
- })
+ state = StringType(default="SUCCESS")
+ resource_type = StringType(default="inventory.CloudService")
+ match_rules = DictType(
+ ListType(StringType),
+ default={
+ "1": [
+ "reference.resource_id",
+ "provider",
+ "cloud_service_type",
+ "cloud_service_group",
+ ]
+ },
+ )
resource = PolyModelType(CloudServiceTypeResource)
class RegionResourceResponse(ResourceResponse):
- state = StringType(default='SUCCESS')
- resource_type = StringType(default='inventory.Region')
- match_rules = DictType(ListType(StringType), default={'1': ['region_code', 'provider']})
+ state = StringType(default="SUCCESS")
+ resource_type = StringType(default="inventory.Region")
+ match_rules = DictType(
+ ListType(StringType), default={"1": ["region_code", "provider"]}
+ )
resource = PolyModelType(RegionResource)
class CloudServiceTypeResourceResponse(ResourceResponse):
- state = StringType(default='SUCCESS')
- resource_type = StringType(default='inventory.CloudServiceType')
- match_rules = DictType(ListType(StringType), default={'1': ['name', 'group', 'provider']})
+ state = StringType(default="SUCCESS")
+ resource_type = StringType(default="inventory.CloudServiceType")
+ match_rules = DictType(
+ ListType(StringType), default={"1": ["name", "group", "provider"]}
+ )
resource = PolyModelType(CloudServiceTypeResource)
class ErrorResourceResponse(ResourceResponse):
- state = StringType(default='FAILURE')
- resource_type = StringType(default='inventory.ErrorResource')
+ state = StringType(default="FAILURE")
+ resource_type = StringType(default="inventory.ErrorResource")
resource = ModelType(ErrorResource, default={})
@@ -59,7 +71,8 @@ class AzureTags(Model):
class AzureCloudService(Model):
- resource_group = StringType(serialize_when_none=False)
+ tenant_id = StringType(serialize_when_none=False)
subscription_id = StringType(serialize_when_none=False)
subscription_name = StringType(serialize_when_none=False)
- azure_monitor = ModelType(AzureMonitorModel, serialize_when_none=False)
\ No newline at end of file
+ resource_group = StringType(serialize_when_none=False)
+ azure_monitor = ModelType(AzureMonitorModel, serialize_when_none=False)
diff --git a/src/spaceone/inventory/manager/__init__.py b/src/spaceone/inventory/manager/__init__.py
index 1ff8a3ed..c6b75e21 100644
--- a/src/spaceone/inventory/manager/__init__.py
+++ b/src/spaceone/inventory/manager/__init__.py
@@ -1,20 +1,49 @@
from spaceone.inventory.manager.disks.disk_manager import DisksManager
-from spaceone.inventory.manager.subscriptions.subscription_manager import SubscriptionsManager
+from spaceone.inventory.manager.subscriptions.subscription_manager import (
+ SubscriptionsManager,
+)
from spaceone.inventory.manager.snapshots.instance_manager import SnapshotsManager
-from spaceone.inventory.manager.vm_scale_sets.scale_set_manager import VmScaleSetsManager
-from spaceone.inventory.manager.load_balancers.instance_manager import LoadBalancersManager
-from spaceone.inventory.manager.sql_databases.database_manager import SQLDatabasesManager
+from spaceone.inventory.manager.vm_scale_sets.scale_set_manager import (
+ VmScaleSetsManager,
+)
+from spaceone.inventory.manager.load_balancers.instance_manager import (
+ LoadBalancersManager,
+)
+from spaceone.inventory.manager.sql_databases.database_manager import (
+ SQLDatabasesManager,
+)
from spaceone.inventory.manager.sql_servers.server_manager import SQLServersManager
-from spaceone.inventory.manager.virtual_networks.instance_manager import VirtualNetworksManager
-from spaceone.inventory.manager.application_gateways.instance_manager import ApplicationGatewaysManager
-from spaceone.inventory.manager.public_ip_addresses.ip_address_manager import PublicIPAddressesManager
-from spaceone.inventory.manager.network_security_groups.instance_manager import NetworkSecurityGroupsManager
+from spaceone.inventory.manager.virtual_networks.instance_manager import (
+ VirtualNetworksManager,
+)
+from spaceone.inventory.manager.application_gateways.instance_manager import (
+ ApplicationGatewaysManager,
+)
+from spaceone.inventory.manager.public_ip_addresses.ip_address_manager import (
+ PublicIPAddressesManager,
+)
+from spaceone.inventory.manager.network_security_groups.instance_manager import (
+ NetworkSecurityGroupsManager,
+)
from spaceone.inventory.manager.nat_gateways.instance_manager import NATGatewaysManager
-from spaceone.inventory.manager.storage_accounts.instance_manager import StorageAccountsManager
+from spaceone.inventory.manager.storage_accounts.instance_manager import (
+ StorageAccountsManager,
+)
from spaceone.inventory.manager.key_vaults.instance_manager import KeyVaultsManager
from spaceone.inventory.manager.mysql_servers.server_manager import MySQLServersManager
from spaceone.inventory.manager.cosmos_db.instance_manager import CosmosDBManager
-from spaceone.inventory.manager.postgresql_servers.server_manager import PostgreSQLServersManager
-from spaceone.inventory.manager.virtual_machines.instnace_manger import VirtualMachinesManager
-from spaceone.inventory.manager.container_instances.container_manager import ContainerInstancesManager
-from spaceone.inventory.manager.web_pubsub_service.service_manager import WebPubSubServiceManager
+from spaceone.inventory.manager.postgresql_servers.server_manager import (
+ PostgreSQLServersManager,
+)
+from spaceone.inventory.manager.virtual_machines.instnace_manger import (
+ VirtualMachinesManager,
+)
+from spaceone.inventory.manager.container_instances.container_manager import (
+ ContainerInstancesManager,
+)
+from spaceone.inventory.manager.web_pubsub_service.service_manager import (
+ WebPubSubServiceManager,
+)
+from spaceone.inventory.manager.resources_manager.resource_manager import (
+ ResourcesManager,
+)
diff --git a/src/spaceone/inventory/manager/application_gateways/instance_manager.py b/src/spaceone/inventory/manager/application_gateways/instance_manager.py
index d7af80df..c2e5ce6a 100644
--- a/src/spaceone/inventory/manager/application_gateways/instance_manager.py
+++ b/src/spaceone/inventory/manager/application_gateways/instance_manager.py
@@ -2,190 +2,311 @@
import logging
from spaceone.inventory.libs.manager import AzureManager
from spaceone.inventory.libs.schema.base import ReferenceModel
-from spaceone.inventory.connector.application_gateways import ApplicationGatewaysConnector
+from spaceone.inventory.connector.application_gateways import (
+ ApplicationGatewaysConnector,
+)
from spaceone.inventory.model.application_gateways.cloud_service import *
-from spaceone.inventory.model.application_gateways.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.application_gateways.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.application_gateways.data import *
_LOGGER = logging.getLogger(__name__)
class ApplicationGatewaysManager(AzureManager):
- connector_name = 'ApplicationGatewaysConnector'
+ connector_name = "ApplicationGatewaysConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : list of azure application gateway data resource information
- ErrorResourceResponse (list) : list of error resource information
- """
-
- _LOGGER.debug(f'** Application Gateway START **')
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : list of azure application gateway data resource information
+ ErrorResourceResponse (list) : list of error resource information
+ """
+
+ _LOGGER.debug(f"** Application Gateway START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- application_gateway_conn: ApplicationGatewaysConnector = self.locator.get_connector(self.connector_name, **params)
+ application_gateway_conn: ApplicationGatewaysConnector = (
+ self.locator.get_connector(self.connector_name, **params)
+ )
application_gateways_responses = []
error_responses = []
- application_gateways_list = application_gateway_conn.list_all_application_gateways()
+ application_gateways_list = (
+ application_gateway_conn.list_all_application_gateways()
+ )
for application_gateway in application_gateways_list:
- application_gateway_id = ''
+ application_gateway_id = ""
try:
- application_gateway_dict = self.convert_nested_dictionary(application_gateway)
- application_gateway_id = application_gateway_dict['id']
+ application_gateway_dict = self.convert_nested_dictionary(
+ application_gateway
+ )
+ application_gateway_id = application_gateway_dict["id"]
# update application_gateway_dict
- application_gateway_dict.update({
- 'resource_group': self.get_resource_group_from_id(application_gateway_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': application_gateway_id}
- })
-
- backend_address_pools = application_gateway_dict.get('backend_address_pools', [])
- url_path_maps = application_gateway_dict.get('url_path_maps', [])
- request_routing_rules = application_gateway_dict.get('request_routing_rules', [])
- rewrite_rule_sets = application_gateway_dict.get('rewrite_rule_sets', [])
- frontend_ip_configurations = application_gateway_dict.get('frontend_ip_configurations', [])
- ip_configurations = application_gateway_dict.get('gateway_ip_configurations', [])
+ application_gateway_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ application_gateway_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": application_gateway_id},
+ }
+ )
+
+ backend_address_pools = application_gateway_dict.get(
+ "backend_address_pools", []
+ )
+ url_path_maps = application_gateway_dict.get("url_path_maps", [])
+ request_routing_rules = application_gateway_dict.get(
+ "request_routing_rules", []
+ )
+ rewrite_rule_sets = application_gateway_dict.get(
+ "rewrite_rule_sets", []
+ )
+ frontend_ip_configurations = application_gateway_dict.get(
+ "frontend_ip_configurations", []
+ )
+ ip_configurations = application_gateway_dict.get(
+ "gateway_ip_configurations", []
+ )
for frontend_ip_configuration_dict in frontend_ip_configurations:
- if frontend_ip_configuration_dict.get('private_ip_address') is not None:
- application_gateway_dict.update({
- 'private_ip_address': frontend_ip_configuration_dict['private_ip_address']
- })
- frontend_ip_configuration_dict.update({
- 'ip_type': 'Private',
- 'ip_address': frontend_ip_configuration_dict['private_ip_address']
- })
- elif frontend_ip_configuration_dict.get('public_ip_address') is not None:
- public_ip_address_name = frontend_ip_configuration_dict['public_ip_address']['id'].split('/')[8]
- public_ip_address_dict = self.get_public_ip_address(application_gateway_conn, application_gateway_dict['resource_group'], public_ip_address_name)
- application_gateway_dict.update({
- 'public_ip_address': public_ip_address_dict
- })
- frontend_ip_configuration_dict.update({
- 'ip_type': 'Public',
- 'ip_address': f'{public_ip_address_dict.get("ip_address", "-")} ({public_ip_address_dict.get("name","")})',
- 'associated_listener': self.get_associated_listener(frontend_ip_configuration_dict, application_gateway_dict.get('http_listeners', []))
- })
+ if (
+ frontend_ip_configuration_dict.get("private_ip_address")
+ is not None
+ ):
+ application_gateway_dict.update(
+ {
+ "private_ip_address": frontend_ip_configuration_dict[
+ "private_ip_address"
+ ]
+ }
+ )
+ frontend_ip_configuration_dict.update(
+ {
+ "ip_type": "Private",
+ "ip_address": frontend_ip_configuration_dict[
+ "private_ip_address"
+ ],
+ }
+ )
+ elif (
+ frontend_ip_configuration_dict.get("public_ip_address")
+ is not None
+ ):
+ public_ip_address_name = frontend_ip_configuration_dict[
+ "public_ip_address"
+ ]["id"].split("/")[8]
+ public_ip_address_dict = self.get_public_ip_address(
+ application_gateway_conn,
+ application_gateway_dict["resource_group"],
+ public_ip_address_name,
+ )
+ application_gateway_dict.update(
+ {"public_ip_address": public_ip_address_dict}
+ )
+ frontend_ip_configuration_dict.update(
+ {
+ "ip_type": "Public",
+ "ip_address": f'{public_ip_address_dict.get("ip_address", "-")} ({public_ip_address_dict.get("name","")})',
+ "associated_listener": self.get_associated_listener(
+ frontend_ip_configuration_dict,
+ application_gateway_dict.get("http_listeners", []),
+ ),
+ }
+ )
for ip_configuration in ip_configurations:
- application_gateway_dict.update({
- 'virtual_network': ip_configuration.get('subnet')['id'].split('/')[8],
- 'subnet': ip_configuration.get('subnet')['id'].split('/')[10]
- })
-
- if application_gateway_dict.get('backend_http_settings_collection') is not None:
- for backend_setting in application_gateway_dict['backend_http_settings_collection']:
- if backend_setting.get('probe') is not None:
- custom_probe = backend_setting['probe']['id'].split('/')[10]
- backend_setting.update({
- 'custom_probe': custom_probe
- })
-
- if application_gateway_dict.get('http_listeners') is not None:
+ application_gateway_dict.update(
+ {
+ "virtual_network": ip_configuration.get("subnet")[
+ "id"
+ ].split("/")[8],
+ "subnet": ip_configuration.get("subnet")["id"].split("/")[
+ 10
+ ],
+ }
+ )
+
+ if (
+ application_gateway_dict.get("backend_http_settings_collection")
+ is not None
+ ):
+ for backend_setting in application_gateway_dict[
+ "backend_http_settings_collection"
+ ]:
+ if backend_setting.get("probe") is not None:
+ custom_probe = backend_setting["probe"]["id"].split("/")[10]
+ backend_setting.update({"custom_probe": custom_probe})
+
+ if application_gateway_dict.get("http_listeners") is not None:
custom_error_configurations_list = []
- for http_listener in application_gateway_dict['http_listeners']:
-
+ for http_listener in application_gateway_dict["http_listeners"]:
# Update Port information
- if http_listener.get('frontend_port') is not None:
- frontend_port_id = http_listener['frontend_port']['id']
- http_listener['frontend_port'].update({
- 'port': self.get_port(frontend_port_id, application_gateway_dict.get('frontend_ports', []))
- })
- http_listener.update({
- 'port': http_listener.get('frontend_port', {}).get('port', '')
- })
+ if http_listener.get("frontend_port") is not None:
+ frontend_port_id = http_listener["frontend_port"]["id"]
+ http_listener["frontend_port"].update(
+ {
+ "port": self.get_port(
+ frontend_port_id,
+ application_gateway_dict.get(
+ "frontend_ports", []
+ ),
+ )
+ }
+ )
+ http_listener.update(
+ {
+ "port": http_listener.get("frontend_port", {}).get(
+ "port", ""
+ )
+ }
+ )
# Update custom error configuration
- if http_listener.get('custom_error_configurations') is not None:
- for custom_error_conf in http_listener['custom_error_configurations']:
- custom_error_conf.update({'listener_name': http_listener['name']})
- custom_error_configurations_list.append(custom_error_conf)
-
- application_gateway_dict.update({
- 'custom_error_configurations': custom_error_configurations_list
- })
+ if http_listener.get("custom_error_configurations") is not None:
+ for custom_error_conf in http_listener[
+ "custom_error_configurations"
+ ]:
+ custom_error_conf.update(
+ {"listener_name": http_listener["name"]}
+ )
+ custom_error_configurations_list.append(
+ custom_error_conf
+ )
+
+ application_gateway_dict.update(
+ {
+ "custom_error_configurations": custom_error_configurations_list
+ }
+ )
for rewrite_rule in rewrite_rule_sets:
- rewrite_rule_id = rewrite_rule.get('id')
- rewrite_config_rule_displays = self.list_rewrite_config_rule_display(rewrite_rule)
- rewrite_rule.update({
- 'rewrite_rules_display': rewrite_config_rule_displays
- })
-
- rules_applied_list = self.list_rewrite_rule_rules_applied(rewrite_rule_id, request_routing_rules, url_path_maps)
- rewrite_rule.update({
- 'rules_applied': rules_applied_list
- })
+ rewrite_rule_id = rewrite_rule.get("id")
+ rewrite_config_rule_displays = (
+ self.list_rewrite_config_rule_display(rewrite_rule)
+ )
+ rewrite_rule.update(
+ {"rewrite_rules_display": rewrite_config_rule_displays}
+ )
+
+ rules_applied_list = self.list_rewrite_rule_rules_applied(
+ rewrite_rule_id, request_routing_rules, url_path_maps
+ )
+ rewrite_rule.update({"rules_applied": rules_applied_list})
# Update request routing rules
for request_routing_rule in request_routing_rules:
- if request_routing_rule.get('http_listener') is not None:
- request_routing_rule.update({
- 'http_listener_name': request_routing_rule['http_listener']['id'].split('/')[10]
- })
+ if request_routing_rule.get("http_listener") is not None:
+ request_routing_rule.update(
+ {
+ "http_listener_name": request_routing_rule[
+ "http_listener"
+ ]["id"].split("/")[10]
+ }
+ )
# Find http listener attached to this rule, and put rule's name to http_listeners dict
http_applied_rules_list = []
- http_listener_id = request_routing_rule['http_listener']['id']
-
- for request_routing_rule in application_gateway_dict.get('request_routing_rules', []):
- if http_listener_id in request_routing_rule.get('http_listener').get('id', ''):
- http_applied_rules_list.append(request_routing_rule['name'])
-
- self.update_http_listeners_list(application_gateway_dict['http_listeners'], http_listener_id, http_applied_rules_list)
+ http_listener_id = request_routing_rule["http_listener"]["id"]
+
+ for request_routing_rule in application_gateway_dict.get(
+ "request_routing_rules", []
+ ):
+ if http_listener_id in request_routing_rule.get(
+ "http_listener"
+ ).get("id", ""):
+ http_applied_rules_list.append(
+ request_routing_rule["name"]
+ )
+
+ self.update_http_listeners_list(
+ application_gateway_dict["http_listeners"],
+ http_listener_id,
+ http_applied_rules_list,
+ )
for backend_address_pool in backend_address_pools:
- backend_address_pool_associated_rules = self.get_backend_pool_associated_rules(backend_address_pool, url_path_maps, request_routing_rules)
- backend_address_pool.update({
- 'associated_rules': backend_address_pool_associated_rules
- })
-
- backend_addresses = backend_address_pool.get('backend_addresses', [])
- backend_addresses_display = [backend_address.get('fqdn') for backend_address in backend_addresses]
- backend_address_pool.update({
- 'backend_addresses_display': backend_addresses_display
- })
-
- application_gateway_data = ApplicationGateway(application_gateway_dict, strict=False)
- application_gateway_resource = ApplicationGatewayResource({
- 'data': application_gateway_data,
- 'tags': application_gateway_dict.get('tags', {}),
- 'region_code': application_gateway_data.location,
- 'reference': ReferenceModel(application_gateway_data.reference()),
- 'name': application_gateway_data.name,
- 'instance_type': application_gateway_data.sku.name,
- 'account': application_gateway_data.subscription_id
- })
+ backend_address_pool_associated_rules = (
+ self.get_backend_pool_associated_rules(
+ backend_address_pool, url_path_maps, request_routing_rules
+ )
+ )
+ backend_address_pool.update(
+ {"associated_rules": backend_address_pool_associated_rules}
+ )
+
+ backend_addresses = backend_address_pool.get(
+ "backend_addresses", []
+ )
+ backend_addresses_display = [
+ backend_address.get("fqdn")
+ for backend_address in backend_addresses
+ ]
+ backend_address_pool.update(
+ {"backend_addresses_display": backend_addresses_display}
+ )
+
+ application_gateway_data = ApplicationGateway(
+ application_gateway_dict, strict=False
+ )
+ application_gateway_resource = ApplicationGatewayResource(
+ {
+ "data": application_gateway_data,
+ "tags": application_gateway_dict.get("tags", {}),
+ "region_code": application_gateway_data.location,
+ "reference": ReferenceModel(
+ application_gateway_data.reference()
+ ),
+ "name": application_gateway_data.name,
+ "instance_type": application_gateway_data.sku.name,
+ "account": application_gateway_data.subscription_id,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(application_gateway_data['location'])
- application_gateways_responses.append(ApplicationGatewayResponse({'resource': application_gateway_resource}))
+ self.set_region_code(application_gateway_data["location"])
+ application_gateways_responses.append(
+ ApplicationGatewayResponse(
+ {"resource": application_gateway_resource}
+ )
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {application_gateway_id} {e}', exc_info=True)
- error_response = self.generate_resource_error_response(e, 'Network', 'ApplicationGateway', application_gateway_id)
+ _LOGGER.error(
+ f"[list_instances] {application_gateway_id} {e}", exc_info=True
+ )
+ error_response = self.generate_resource_error_response(
+ e, "Network", "ApplicationGateway", application_gateway_id
+ )
error_responses.append(error_response)
- _LOGGER.debug(f'** Application Gateway Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** Application Gateway Finished {time.time() - start_time} Seconds **"
+ )
return application_gateways_responses, error_responses
- def get_public_ip_address(self, application_gateway_conn, resource_group_name, pip_name):
- public_ip_address_obj = application_gateway_conn.get_public_ip_addresses(resource_group_name, pip_name)
+ def get_public_ip_address(
+ self, application_gateway_conn, resource_group_name, pip_name
+ ):
+ public_ip_address_obj = application_gateway_conn.get_public_ip_addresses(
+ resource_group_name, pip_name
+ )
public_ip_address_dict = self.convert_nested_dictionary(public_ip_address_obj)
# _LOGGER.debug(f'[Public IP Address]{public_ip_address_dict}')
@@ -196,70 +317,94 @@ def get_public_ip_address(self, application_gateway_conn, resource_group_name, p
def get_associated_listener(frontend_ip_configuration_dict, http_listeners_list):
associated_listener = []
for http_listener in http_listeners_list:
- if http_listener.get('frontend_ip_configuration') is not None:
- if frontend_ip_configuration_dict['id'] in http_listener.get('frontend_ip_configuration', {}).get('id', ''):
- associated_listener.append(http_listener.get('name'))
+ if http_listener.get("frontend_ip_configuration") is not None:
+ if frontend_ip_configuration_dict["id"] in http_listener.get(
+ "frontend_ip_configuration", {}
+ ).get("id", ""):
+ associated_listener.append(http_listener.get("name"))
return associated_listener
@staticmethod
def get_port(port_id, frontend_ports_list):
port = 0
for fe_port in frontend_ports_list:
- if port_id == fe_port.get('id'):
- port = fe_port.get('port')
+ if port_id == fe_port.get("id"):
+ port = fe_port.get("port")
break
return port
@staticmethod
- def get_backend_pool_associated_rules(backend_address_pool, url_path_maps, request_routing_rules):
+ def get_backend_pool_associated_rules(
+ backend_address_pool, url_path_maps, request_routing_rules
+ ):
backend_address_pool_associated_rules = []
- backend_address_pool_id = backend_address_pool.get('id')
+ backend_address_pool_id = backend_address_pool.get("id")
for url_path_map in url_path_maps:
- default_backend_address_pool = url_path_map.get('default_backend_address_pool')
- if default_backend_address_pool is not None and default_backend_address_pool.get(
- 'id') == backend_address_pool_id:
- backend_address_pool_associated_rules.append(url_path_map.get('name'))
+ default_backend_address_pool = url_path_map.get(
+ "default_backend_address_pool"
+ )
+ if (
+ default_backend_address_pool is not None
+ and default_backend_address_pool.get("id") == backend_address_pool_id
+ ):
+ backend_address_pool_associated_rules.append(url_path_map.get("name"))
for request_routing_rule in request_routing_rules:
- request_backend_address_pool = request_routing_rule.get('backend_address_pool')
- if request_backend_address_pool is not None and request_backend_address_pool.get(
- 'id') == backend_address_pool_id:
- backend_address_pool_associated_rules.append(request_routing_rule.get('name'))
+ request_backend_address_pool = request_routing_rule.get(
+ "backend_address_pool"
+ )
+ if (
+ request_backend_address_pool is not None
+ and request_backend_address_pool.get("id") == backend_address_pool_id
+ ):
+ backend_address_pool_associated_rules.append(
+ request_routing_rule.get("name")
+ )
return backend_address_pool_associated_rules
@staticmethod
- def update_http_listeners_list(http_listeners_list, http_listener_id, http_applied_rules):
+ def update_http_listeners_list(
+ http_listeners_list, http_listener_id, http_applied_rules
+ ):
for http_listener in http_listeners_list:
- if http_listener['id'] == http_listener_id:
- http_listener.update({
- 'associated_rules': http_applied_rules
- })
+ if http_listener["id"] == http_listener_id:
+ http_listener.update({"associated_rules": http_applied_rules})
@staticmethod
def list_rewrite_config_rule_display(rewrite_rule):
rewrite_config_rule_displays = []
- rewrite_rule_list = rewrite_rule.get('rewrite_rules', [])
+ rewrite_rule_list = rewrite_rule.get("rewrite_rules", [])
for rule in rewrite_rule_list:
- rewrite_config_rule_displays.append(str(rule.get('name')) + ", " + str(rule.get('rule_sequence')))
+ rewrite_config_rule_displays.append(
+ str(rule.get("name")) + ", " + str(rule.get("rule_sequence"))
+ )
return rewrite_config_rule_displays
@staticmethod
- def list_rewrite_rule_rules_applied(rewrite_rule_id, request_routing_rules, url_path_maps):
+ def list_rewrite_rule_rules_applied(
+ rewrite_rule_id, request_routing_rules, url_path_maps
+ ):
rules_applied_list = []
for request_routing_rule in request_routing_rules:
- if request_routing_rule.get('rewrite_rule_set') is not None:
- if request_routing_rule['rewrite_rule_set'].get('id') == rewrite_rule_id:
- rules_applied_list.append(request_routing_rule['name'])
+ if request_routing_rule.get("rewrite_rule_set") is not None:
+ if (
+ request_routing_rule["rewrite_rule_set"].get("id")
+ == rewrite_rule_id
+ ):
+ rules_applied_list.append(request_routing_rule["name"])
for url_path_map in url_path_maps:
- if url_path_map.get('default_rewrite_rule_set') is not None:
- if url_path_map['default_rewrite_rule_set'].get('id') == rewrite_rule_id:
- rules_applied_list.append(url_path_map['name'])
-
- if url_path_map.get('path_rules') is not None:
- for path_rule in url_path_map['path_rules']:
- if path_rule.get('rewrite_rule_set') is not None:
- if path_rule['rewrite_rule_set'].get('id') == rewrite_rule_id:
- rules_applied_list.append(url_path_map['name'])
+ if url_path_map.get("default_rewrite_rule_set") is not None:
+ if (
+ url_path_map["default_rewrite_rule_set"].get("id")
+ == rewrite_rule_id
+ ):
+ rules_applied_list.append(url_path_map["name"])
+
+ if url_path_map.get("path_rules") is not None:
+ for path_rule in url_path_map["path_rules"]:
+ if path_rule.get("rewrite_rule_set") is not None:
+ if path_rule["rewrite_rule_set"].get("id") == rewrite_rule_id:
+ rules_applied_list.append(url_path_map["name"])
return rules_applied_list
diff --git a/src/spaceone/inventory/manager/container_instances/container_manager.py b/src/spaceone/inventory/manager/container_instances/container_manager.py
index 3104a84a..e6d17068 100644
--- a/src/spaceone/inventory/manager/container_instances/container_manager.py
+++ b/src/spaceone/inventory/manager/container_instances/container_manager.py
@@ -3,7 +3,9 @@
from spaceone.inventory.libs.manager import AzureManager
from spaceone.inventory.connector.container_instances import ContainerInstancesConnector
from spaceone.inventory.model.container_instances.cloud_service import *
-from spaceone.inventory.model.container_instances.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.container_instances.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.container_instances.data import *
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.core.utils import *
@@ -12,47 +14,57 @@
class ContainerInstancesManager(AzureManager):
- connector_name = 'ContainerInstancesConnector'
+ connector_name = "ContainerInstancesConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : list of azure container instances data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : list of azure container instances data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
- _LOGGER.debug(f'** Container Instances START **')
+ _LOGGER.debug(f"** Container Instances START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ secret_data = params.get("secret_data", {})
+ subscription_info = params["subscription_info"]
- container_instances_conn: ContainerInstancesConnector = self.locator.get_connector(self.connector_name, **params)
+ container_instances_conn: ContainerInstancesConnector = (
+ self.locator.get_connector(self.connector_name, **params)
+ )
container_instances_responses = []
error_responses = []
container_instances = container_instances_conn.list_container_groups()
for container_instance in container_instances:
- container_instance_id = ''
+ container_instance_id = ""
try:
- container_instance_dict = self.convert_nested_dictionary(container_instance)
- container_instance_id = container_instance_dict['id']
+ container_instance_dict = self.convert_nested_dictionary(
+ container_instance
+ )
+ container_instance_id = container_instance_dict["id"]
# if bug fix these code will be deleted
- resource_group_name = self.get_resource_group_from_id(container_instance_id)
- container_group_name = container_instance_dict['name']
+ resource_group_name = self.get_resource_group_from_id(
+ container_instance_id
+ )
+ container_group_name = container_instance_dict["name"]
container_instance = container_instances_conn.get_container_groups(
resource_group_name=resource_group_name,
- container_group_name=container_group_name)
- container_instance_dict = self.convert_nested_dictionary(container_instance)
+ container_group_name=container_group_name,
+ )
+ container_instance_dict = self.convert_nested_dictionary(
+ container_instance
+ )
time.sleep(0.2) # end code
# Update data info in Container Instance's Raw Data
@@ -60,92 +72,137 @@ def collect_cloud_service(self, params):
_gpu_count_display = 0
_memory_size_display = 0.0
- for container in container_instance_dict['containers']:
- _cpu_count_display += int(container['resources']['requests']['cpu'])
- _memory_size_display += float(container['resources']['requests']['memory_in_gb'])
+ for container in container_instance_dict["containers"]:
+ _cpu_count_display += int(container["resources"]["requests"]["cpu"])
+ _memory_size_display += float(
+ container["resources"]["requests"]["memory_in_gb"]
+ )
_gpu_count_display += int(self._get_gpu_count_display(container))
# Set detail volume info for container
- if container_instance_dict['volumes'] is not None:
- for volume in container_instance_dict['volumes']:
- self._set_volumes_detail_info(volume, container_instance_dict['containers'])
+ if container_instance_dict["volumes"] is not None:
+ for volume in container_instance_dict["volumes"]:
+ self._set_volumes_detail_info(
+ volume, container_instance_dict["containers"]
+ )
# Set Container Instance volume type and volume count
- self._set_container_instance_volume_type(container_instance_dict['volumes'])
- container_instance_dict['volume_count_display'] = len(container_instance_dict['volumes'])
-
- container_instance_dict.update({
- 'resource_group': self.get_resource_group_from_id(container_instance_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': container_instance_id},
- 'container_count_display': len(container_instance_dict['containers']),
- 'cpu_count_display': _cpu_count_display,
- 'memory_size_display': _memory_size_display,
- 'gpu_count_display': _gpu_count_display,
- })
- container_instance_data = ContainerInstance(container_instance_dict, strict=False)
+ self._set_container_instance_volume_type(
+ container_instance_dict["volumes"]
+ )
+ container_instance_dict["volume_count_display"] = len(
+ container_instance_dict["volumes"]
+ )
+
+ container_instance_dict = self.update_tenant_id_from_secret_data(
+ container_instance_dict, secret_data
+ )
+
+ container_instance_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ container_instance_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": container_instance_id},
+ "container_count_display": len(
+ container_instance_dict["containers"]
+ ),
+ "cpu_count_display": _cpu_count_display,
+ "memory_size_display": _memory_size_display,
+ "gpu_count_display": _gpu_count_display,
+ }
+ )
+
+ container_instance_data = ContainerInstance(
+ container_instance_dict, strict=False
+ )
# Update resource info of Container Instance
- container_instance_resource = ContainerInstanceResource({
- 'name': container_instance_data.name,
- 'account': container_instance_dict['subscription_id'],
- 'data': container_instance_data,
- 'tags': container_instance_dict.get('tags', {}),
- 'region_code': container_instance_data.location,
- 'reference': ReferenceModel(container_instance_data.reference())
- })
-
- self.set_region_code(container_instance_data['location'])
+ container_instance_resource = ContainerInstanceResource(
+ {
+ "name": container_instance_data.name,
+ "account": container_instance_dict["subscription_id"],
+ "data": container_instance_data,
+ "tags": container_instance_dict.get("tags", {}),
+ "region_code": container_instance_data.location,
+ "reference": ReferenceModel(
+ container_instance_data.reference()
+ ),
+ }
+ )
+
+ self.set_region_code(container_instance_data["location"])
container_instances_responses.append(
- ContainerInstanceResponse({'resource': container_instance_resource}))
+ ContainerInstanceResponse({"resource": container_instance_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {container_instance_id} {e}', exc_info=True)
- error_response = self.generate_resource_error_response(e, 'Container', 'ContainerInstances',
- container_instance_id)
+ _LOGGER.error(
+ f"[list_instances] {container_instance_id} {e}", exc_info=True
+ )
+ error_response = self.generate_resource_error_response(
+ e, "Container", "ContainerInstances", container_instance_id
+ )
error_responses.append(error_response)
- _LOGGER.debug(f'** Container Instances Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** Container Instances Finished {time.time() - start_time} Seconds **"
+ )
return container_instances_responses, error_responses
@staticmethod
def _set_container_instance_volume_type(volumes):
for volume in volumes:
- if volume.get('git_repo') is not None:
- volume['volume_type'] = 'Git repo'
- elif volume.get('azure_file') is not None:
- volume['volume_type'] = 'Azure file'
- elif volume.get('empty_dir') is not None:
- volume['volume_type'] = 'Empty directory'
- elif volume.get('secret') is not None:
- volume['volume_type'] = 'Secret'
+ if volume.get("git_repo") is not None:
+ volume["volume_type"] = "Git repo"
+ elif volume.get("azure_file") is not None:
+ volume["volume_type"] = "Azure file"
+ elif volume.get("empty_dir") is not None:
+ volume["volume_type"] = "Empty directory"
+ elif volume.get("secret") is not None:
+ volume["volume_type"] = "Secret"
@staticmethod
def _set_volumes_detail_info(volume, containers):
for container in containers:
- if volume_mounts := container['volume_mounts']:
- container['volume_mount_count_display'] = len(volume_mounts)
+ if volume_mounts := container["volume_mounts"]:
+ container["volume_mount_count_display"] = len(volume_mounts)
for volume_mount in volume_mounts:
- if volume_mount['name'] == volume['name']:
- volume.update({
- 'mount_path': volume_mount['mount_path'],
- 'container_name': container['name']
- })
+ if volume_mount["name"] == volume["name"]:
+ volume.update(
+ {
+ "mount_path": volume_mount["mount_path"],
+ "container_name": container["name"],
+ }
+ )
return
@staticmethod
def _get_gpu_count_display(container):
_gpu_count = 0
- if _gpu_info := container.get('resources', {}).get('requests', {}).get('gpu', {}):
- _gpu_count = _gpu_info.get('count', 0)
+ if (
+ _gpu_info := container.get("resources", {})
+ .get("requests", {})
+ .get("gpu", {})
+ ):
+ _gpu_count = _gpu_info.get("count", 0)
return _gpu_count
@staticmethod
def _convert_start_time_datetime_to_iso861(container):
- if _start_time := container.get('instance_view', {}).get('current_state', {}).get('start_time'):
+ if (
+ _start_time := container.get("instance_view", {})
+ .get("current_state", {})
+ .get("start_time")
+ ):
_start_time = datetime_to_iso8601(_start_time)
- container['instance_view']['current_state']['start_time'] = _start_time
- elif _finish_time := container.get('instance_view', {}).get('current_state', {}).get('_finish_time'):
+ container["instance_view"]["current_state"]["start_time"] = _start_time
+ elif (
+ _finish_time := container.get("instance_view", {})
+ .get("current_state", {})
+ .get("_finish_time")
+ ):
_finish_time = datetime_to_iso8601(_finish_time)
- container['instance_view']['current_state']['finished_time'] = _finish_time
+ container["instance_view"]["current_state"]["finished_time"] = _finish_time
diff --git a/src/spaceone/inventory/manager/cosmos_db/instance_manager.py b/src/spaceone/inventory/manager/cosmos_db/instance_manager.py
index 69c7379d..dadc059d 100644
--- a/src/spaceone/inventory/manager/cosmos_db/instance_manager.py
+++ b/src/spaceone/inventory/manager/cosmos_db/instance_manager.py
@@ -12,30 +12,32 @@
class CosmosDBManager(AzureManager):
- connector_name = 'CosmosDBConnector'
+ connector_name = "CosmosDBConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure cosmosdb data resource information
- ErrorResourceResponse (list) : list of error resource information
-
-
- """
- _LOGGER.debug(f'** CosmosDB START **')
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure cosmosdb data resource information
+ ErrorResourceResponse (list) : list of error resource information
+
+
+ """
+ _LOGGER.debug(f"** CosmosDB START **")
start_time = time.time()
- subscription_info = params['subscription_info']
- cosmos_db_conn: CosmosDBConnector = self.locator.get_connector(self.connector_name, **params)
+ subscription_info = params["subscription_info"]
+ cosmos_db_conn: CosmosDBConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
cosmos_db_account_responses = []
error_responses = []
@@ -43,87 +45,144 @@ def collect_cloud_service(self, params):
cosmos_db_accounts_list = cosmos_db_conn.list_all_cosmos_db_accounts()
for cosmos_db_account in cosmos_db_accounts_list:
- cosmos_db_account_id = ''
+ cosmos_db_account_id = ""
try:
- cosmos_db_account_dict = self.convert_nested_dictionary(cosmos_db_account)
- cosmos_db_account_id = cosmos_db_account_dict.get('id')
- cosmos_db_account_dict['location'] = cosmos_db_account_dict['location'].replace(' ', '').lower()
+ cosmos_db_account_dict = self.convert_nested_dictionary(
+ cosmos_db_account
+ )
+ cosmos_db_account_id = cosmos_db_account_dict.get("id")
+ cosmos_db_account_dict["location"] = (
+ cosmos_db_account_dict["location"].replace(" ", "").lower()
+ )
# update cosmosdb_dict
- cosmos_db_account_dict.update({
- 'resource_group': self.get_resource_group_from_id(cosmos_db_account_dict['id']),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': cosmos_db_account_id}
- })
-
- if cosmos_db_account_dict.get('capabilities') is not None:
- cosmos_db_account_dict.update({
- 'capability_display': self.get_capability_type(cosmos_db_account_dict['capabilities'])
- })
-
- if cosmos_db_account_dict.get('virtual_network_rules') is not None:
- cosmos_db_account_dict.update({
- 'virtual_network_display': self.get_virtual_networks(
- cosmos_db_account_dict['virtual_network_rules'])
- })
-
- if cosmos_db_account_dict.get('private_endpoint_connections') is not None:
- for private_connection in cosmos_db_account_dict['private_endpoint_connections']:
- private_connection.update({
- 'private_endpoint': self.get_private_endpoint_name(private_connection['private_endpoint']),
- 'name': self.get_private_connection_name(private_connection['id'])
- })
- if cosmos_db_account_dict.get('cors') is not None:
- cosmos_db_account_dict.update({
- 'cors_display': self.get_cors_display(cosmos_db_account_dict['cors'])
- })
-
- if cosmos_db_account_dict.get('name') is not None:
- cosmos_db_account_dict.update({
- 'keys': self.get_keys(cosmos_db_conn, cosmos_db_account_dict['name'],
- cosmos_db_account_dict['resource_group']),
- 'sql_databases': self.get_sql_resources(cosmos_db_conn, cosmos_db_account_dict['name'],
- cosmos_db_account_dict['resource_group'])
- })
-
- # _LOGGER.debug(f'[COSMOS DB INFO]{cosmos_db_account_dict}')
- cosmos_db_account_data = DatabaseAccountGetResults(cosmos_db_account_dict, strict=False)
- cosmos_db_resource = CosmosDBResource({
- 'data': cosmos_db_account_data,
- 'tags': cosmos_db_account_dict.get('tags', {}),
- 'region_code': cosmos_db_account_data.location,
- 'reference': ReferenceModel(cosmos_db_account_data.reference()),
- 'name': cosmos_db_account_data.name,
- 'account': cosmos_db_account_data.subscription_id,
- 'instance_type': cosmos_db_account_data.database_account_offer_type,
- 'launched_at': datetime_to_iso8601(cosmos_db_account_data.system_data.created_at)
- })
+ cosmos_db_account_dict = self.update_tenant_id_from_secret_data(
+ cosmos_db_account_dict, params["secret_data"]
+ )
+ cosmos_db_account_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ cosmos_db_account_dict["id"]
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": cosmos_db_account_id},
+ }
+ )
+
+ if cosmos_db_account_dict.get("capabilities") is not None:
+ cosmos_db_account_dict.update(
+ {
+ "capability_display": self.get_capability_type(
+ cosmos_db_account_dict["capabilities"]
+ )
+ }
+ )
+
+ if cosmos_db_account_dict.get("virtual_network_rules") is not None:
+ cosmos_db_account_dict.update(
+ {
+ "virtual_network_display": self.get_virtual_networks(
+ cosmos_db_account_dict["virtual_network_rules"]
+ )
+ }
+ )
+
+ if (
+ cosmos_db_account_dict.get("private_endpoint_connections")
+ is not None
+ ):
+ for private_connection in cosmos_db_account_dict[
+ "private_endpoint_connections"
+ ]:
+ private_connection.update(
+ {
+ "private_endpoint": self.get_private_endpoint_name(
+ private_connection["private_endpoint"]
+ ),
+ "name": self.get_private_connection_name(
+ private_connection["id"]
+ ),
+ }
+ )
+ if cosmos_db_account_dict.get("cors") is not None:
+ cosmos_db_account_dict.update(
+ {
+ "cors_display": self.get_cors_display(
+ cosmos_db_account_dict["cors"]
+ )
+ }
+ )
+
+ if cosmos_db_account_dict.get("name") is not None:
+ sql_databases = self.get_sql_resources(
+ cosmos_db_conn,
+ cosmos_db_account_dict["name"],
+ cosmos_db_account_dict["resource_group"],
+ )
+
+ cosmos_db_account_dict.update(
+ {
+ # "keys": self.get_keys(
+ # cosmos_db_conn,
+ # cosmos_db_account_dict["name"],
+ # cosmos_db_account_dict["resource_group"],
+ # ),
+ "sql_databases": sql_databases,
+ "sql_databases_count_display": len(sql_databases),
+ }
+ )
+
+ cosmos_db_account_data = DatabaseAccountGetResults(
+ cosmos_db_account_dict, strict=False
+ )
+ cosmos_db_resource = CosmosDBResource(
+ {
+ "data": cosmos_db_account_data,
+ "tags": cosmos_db_account_dict.get("tags", {}),
+ "region_code": cosmos_db_account_data.location,
+ "reference": ReferenceModel(cosmos_db_account_data.reference()),
+ "name": cosmos_db_account_data.name,
+ "account": cosmos_db_account_data.subscription_id,
+ "instance_type": cosmos_db_account_data.database_account_offer_type,
+ "launched_at": datetime_to_iso8601(
+ cosmos_db_account_data.system_data.created_at
+ ),
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(cosmos_db_account_data['location'])
- cosmos_db_account_responses.append(CosmosDBResponse({'resource': cosmos_db_resource}))
+ self.set_region_code(cosmos_db_account_data["location"])
+ cosmos_db_account_responses.append(
+ CosmosDBResponse({"resource": cosmos_db_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {cosmos_db_account_id} {e}', exc_info=True)
- error_response = self.generate_resource_error_response(e, 'Database', 'AzureCosmosDB',
- cosmos_db_account_id)
+ _LOGGER.error(
+ f"[list_instances] {cosmos_db_account_id} {e}", exc_info=True
+ )
+ error_response = self.generate_resource_error_response(
+ e, "Database", "AzureCosmosDB", cosmos_db_account_id
+ )
error_responses.append(error_response)
- _LOGGER.debug(f'** CosmosDB Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** CosmosDB Finished {time.time() - start_time} Seconds **")
return cosmos_db_account_responses, error_responses
def get_keys(self, cosmos_db_conn, account_name, resource_group):
- keys_obj = cosmos_db_conn.list_keys(account_name=account_name, resource_group_name=resource_group)
+ keys_obj = cosmos_db_conn.list_keys(
+ account_name=account_name, resource_group_name=resource_group
+ )
key_dict = self.convert_nested_dictionary(keys_obj)
return key_dict
def get_sql_resources(self, cosmos_db_conn, account_name, resource_group):
sql_resources = []
- sql_resources_obj = cosmos_db_conn.list_sql_resources(account_name=account_name,
- resource_group_name=resource_group)
+ sql_resources_obj = cosmos_db_conn.list_sql_resources(
+ account_name=account_name, resource_group_name=resource_group
+ )
for sql in sql_resources_obj:
sql_dict = self.convert_nested_dictionary(sql)
@@ -135,34 +194,32 @@ def get_capability_type(capabilities):
if capabilities:
capability_str_list = []
for capability in capabilities:
- capability_str_list.append(capability.get('name'))
+ capability_str_list.append(capability.get("name"))
- if 'EnableServerless' in capability_str_list:
- return 'Serverless'
+ if "EnableServerless" in capability_str_list:
+ return "Serverless"
else:
- return 'Provisioned Throughput'
+ return "Provisioned Throughput"
@staticmethod
def get_virtual_networks(virtual_network_rules):
virtual_network_rules_display = []
for virtual_network in virtual_network_rules:
- virtual_network_name = virtual_network['id'].split('/')[8]
+ virtual_network_name = virtual_network["id"].split("/")[8]
virtual_network_rules_display.append(virtual_network_name)
return virtual_network_rules_display
@staticmethod
def get_private_endpoint_name(private_endpoint):
- if private_endpoint.get('id') is not None:
- private_endpoint.update({
- 'name': private_endpoint['id'].split('/')[8]
- })
+ if private_endpoint.get("id") is not None:
+ private_endpoint.update({"name": private_endpoint["id"].split("/")[8]})
return private_endpoint
@staticmethod
def get_private_connection_name(private_connection_id):
- private_connection_name = private_connection_id.split('/')[10]
+ private_connection_name = private_connection_id.split("/")[10]
return private_connection_name
@staticmethod
@@ -170,5 +227,5 @@ def get_cors_display(cors_list):
cors_display = []
for cors in cors_list:
- cors_display.append(cors.get('allowed_origins', ''))
+ cors_display.append(cors.get("allowed_origins", ""))
return cors_display
diff --git a/src/spaceone/inventory/manager/disks/disk_manager.py b/src/spaceone/inventory/manager/disks/disk_manager.py
index dca58a08..46f25cfa 100644
--- a/src/spaceone/inventory/manager/disks/disk_manager.py
+++ b/src/spaceone/inventory/manager/disks/disk_manager.py
@@ -11,143 +11,167 @@
class DisksManager(AzureManager):
- connector_name = 'DisksConnector'
+ connector_name = "DisksConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure disk data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure disk data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
- _LOGGER.debug(f'** Disk START **')
+ _LOGGER.debug(f"** Disk START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- disk_conn: DisksConnector = self.locator.get_connector(self.connector_name, **params)
+ disk_conn: DisksConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
disk_responses = []
error_responses = []
disks = disk_conn.list_disks()
for disk in disks:
- disk_id = ''
+ disk_id = ""
try:
disk_dict = self.convert_nested_dictionary(disk)
- disk_id = disk_dict['id']
+ disk_id = disk_dict["id"]
# Switch DiskStorageAccountType to disk_sku_name for user-friendly words. (ex.Premium SSD, Standard HDD..)
- if disk_dict.get('sku') is not None:
- sku_dict = disk_dict['sku']
- sku_dict.update({
- 'name': self.get_disk_sku_name(sku_dict['name'])
- })
- disk_dict.update({
- 'sku': sku_dict
- })
+ if disk_dict.get("sku") is not None:
+ sku_dict = disk_dict["sku"]
+ sku_dict.update({"name": self.get_disk_sku_name(sku_dict["name"])})
+ disk_dict.update({"sku": sku_dict})
# update disk_data dict
- disk_dict.update({
- 'resource_group': self.get_resource_group_from_id(disk_dict['id']), # parse resource_group from ID
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'size': disk_dict['disk_size_bytes'],
- 'tier_display': self.get_tier_display(disk_dict['disk_iops_read_write'],
- disk_dict['disk_m_bps_read_write']),
- 'azure_monitor': {'resource_id': disk_id},
- 'time_created': datetime_to_iso8601(disk_dict['time_created'])
- })
+ disk_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ disk_dict["id"]
+ ), # parse resource_group from ID
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "size": disk_dict["disk_size_bytes"],
+ "tier_display": self.get_tier_display(
+ disk_dict["disk_iops_read_write"],
+ disk_dict["disk_m_bps_read_write"],
+ ),
+ "azure_monitor": {"resource_id": disk_id},
+ "time_created": datetime_to_iso8601(disk_dict["time_created"]),
+ }
+ )
# Update Network access policy to user-friendly words
- if disk_dict.get('network_access_policy') is not None:
- disk_dict.update({
- 'network_access_policy_display': self.get_network_access_policy(
- disk_dict['network_access_policy'])
- })
+ if disk_dict.get("network_access_policy") is not None:
+ disk_dict.update(
+ {
+ "network_access_policy_display": self.get_network_access_policy(
+ disk_dict["network_access_policy"]
+ )
+ }
+ )
# get attached vm's name
- if disk_dict.get('managed_by') is not None:
- managed_by = disk_dict['managed_by']
- disk_dict.update({
- 'managed_by': self.get_attached_vm_name_from_managed_by(managed_by)
- })
-
- max_shares = disk_dict.get('max_shares')
+ if disk_dict.get("managed_by") is not None:
+ managed_by = disk_dict["managed_by"]
+ disk_dict.update(
+ {
+ "managed_by": self.get_attached_vm_name_from_managed_by(
+ managed_by
+ )
+ }
+ )
+
+ max_shares = disk_dict.get("max_shares")
if max_shares is not None and max_shares > 0:
- disk_dict.update({
- 'enable_shared_disk_display': True
- })
+ disk_dict.update({"enable_shared_disk_display": True})
+
+ if disk_dict.get("bursting_enabled") is None:
+ disk_dict["bursting_enabled"] = False
- if disk_dict.get('bursting_enabled') is None:
- disk_dict['bursting_enabled'] = False
+ disk_dict = self.update_tenant_id_from_secret_data(
+ disk_dict, params.get("secret_data", {})
+ )
disk_data = Disk(disk_dict, strict=False)
- disk_resource = DiskResource({
- 'data': disk_data,
- 'region_code': disk_data.location,
- 'reference': ReferenceModel(disk_data.reference()),
- 'tags': disk_dict.get('tags', {}),
- 'name': disk_data.name,
- 'account': disk_data.subscription_id,
- 'instance_type': disk_data.sku.name,
- 'instance_size': float(disk_data.disk_size_bytes)
- })
+ disk_resource = DiskResource(
+ {
+ "data": disk_data,
+ "region_code": disk_data.location,
+ "reference": ReferenceModel(disk_data.reference()),
+ "tags": disk_dict.get("tags", {}),
+ "name": disk_data.name,
+ "account": disk_data.subscription_id,
+ "instance_type": disk_data.sku.name,
+ "instance_size": float(disk_data.disk_size_bytes),
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(disk_data['location'])
+ self.set_region_code(disk_data["location"])
# _LOGGER.debug(f'[DISK INFO: {disk_resource.to_primitive()}]')
- disk_responses.append(DiskResponse({'resource': disk_resource}))
+ disk_responses.append(DiskResponse({"resource": disk_resource}))
except Exception as e:
- _LOGGER.error(f'[list_instances] {disk_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, resource_id=disk_id,
- cloud_service_group='Compute',
- cloud_service_type='Disk')
+ _LOGGER.error(f"[list_instances] {disk_id} {e}", exc_info=True)
+ error_resource_response = self.generate_resource_error_response(
+ e,
+ resource_id=disk_id,
+ cloud_service_group="Compute",
+ cloud_service_type="Disk",
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** Disk Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** Disk Finished {time.time() - start_time} Seconds **")
return disk_responses, error_responses
@staticmethod
def get_attached_vm_name_from_managed_by(managed_by):
- attached_vm_name = managed_by.split('/')[8]
+ attached_vm_name = managed_by.split("/")[8]
return attached_vm_name
@staticmethod
def get_disk_sku_name(sku_tier):
- if sku_tier == 'Premium_LRS':
- sku_name = 'Premium SSD'
- elif sku_tier == 'StandardSSD_LRS':
- sku_name = 'Standard SSD'
- elif sku_tier == 'Standard_LRS':
- sku_name = 'Standard HDD'
+ if sku_tier == "Premium_LRS":
+ sku_name = "Premium SSD"
+ elif sku_tier == "StandardSSD_LRS":
+ sku_name = "Standard SSD"
+ elif sku_tier == "Standard_LRS":
+ sku_name = "Standard HDD"
else:
- sku_name = 'Ultra SSD'
+ sku_name = "Ultra SSD"
return sku_name
@staticmethod
def get_network_access_policy(network_access_policy):
- network_access_policy_display = ''
- if network_access_policy == 'AllowAll':
- network_access_policy_display = 'Public endpoint (all network)'
- elif network_access_policy == 'AllowPrivate':
- network_access_policy_display = 'Private endpoint (through disk access)'
- elif network_access_policy == 'DenyAll':
- network_access_policy_display = 'Deny all'
+ network_access_policy_display = ""
+ if network_access_policy == "AllowAll":
+ network_access_policy_display = "Public endpoint (all network)"
+ elif network_access_policy == "AllowPrivate":
+ network_access_policy_display = "Private endpoint (through disk access)"
+ elif network_access_policy == "DenyAll":
+ network_access_policy_display = "Deny all"
return network_access_policy_display
@staticmethod
def get_tier_display(disk_iops_read_write, disk_m_bps_read_write):
- tier_display = str(disk_iops_read_write) + ' IOPS' + ', ' + str(disk_m_bps_read_write) + ' Mbps'
+ tier_display = (
+ str(disk_iops_read_write)
+ + " IOPS"
+ + ", "
+ + str(disk_m_bps_read_write)
+ + " Mbps"
+ )
return tier_display
diff --git a/src/spaceone/inventory/manager/key_vaults/instance_manager.py b/src/spaceone/inventory/manager/key_vaults/instance_manager.py
index c359a4c9..f826373e 100644
--- a/src/spaceone/inventory/manager/key_vaults/instance_manager.py
+++ b/src/spaceone/inventory/manager/key_vaults/instance_manager.py
@@ -1,5 +1,8 @@
import time
import logging
+
+import azure.core.exceptions
+
from spaceone.inventory.libs.manager import AzureManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.key_vaults import KeyVaultsConnector
@@ -11,109 +14,169 @@
class KeyVaultsManager(AzureManager):
- connector_name = 'KeyVaultsConnector'
+ connector_name = "KeyVaultsConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
- def collect_cloud_service(self, params):
+ def collect_cloud_service(self, params: dict):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure key vault data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure key vault data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
- _LOGGER.debug(f'** Key Vault START **')
+ _LOGGER.debug(f"** Key Vault START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
+
+ key_vault_conn: KeyVaultsConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
- key_vault_conn: KeyVaultsConnector = self.locator.get_connector(self.connector_name, **params)
key_vault_responses = []
error_responses = []
key_vaults_obj_list = key_vault_conn.list_all_key_vaults()
for key_vault in key_vaults_obj_list:
- key_vault_id = ''
+ key_vault_id = ""
try:
key_vault_dict = self.convert_nested_dictionary(key_vault)
- key_vault_id = key_vault_dict['id']
-
- key_vault_dict.update({
- 'resource_group': self.get_resource_group_from_id(key_vault_id), # parse resource_group from ID
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': key_vault_id}
- })
-
- resource_group_name = key_vault_dict.get('resource_group', '')
- subscription_id = key_vault_dict.get('subscription_id', '')
+ key_vault_id = key_vault_dict["id"]
+
+ key_vault_dict = self.update_tenant_id_from_secret_data(
+ key_vault_dict, params.get("secret_data", {})
+ )
+
+ key_vault_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ key_vault_id
+ ), # parse resource_group from ID
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": key_vault_id},
+ }
+ )
+
+ resource_group_name = key_vault_dict.get("resource_group", "")
+ subscription_id = key_vault_dict.get("subscription_id", "")
# Get list of keys, secrets
- if key_vault_dict.get('properties', {}).get('vault_uri') is not None:
- vault_name = key_vault_dict['name']
- vault_uri = key_vault_dict['properties']['vault_uri']
-
- keys = self.list_keys(key_vault_conn, resource_group_name, vault_name)
- secrets = self.list_secrets(key_vault_conn, subscription_id, vault_uri)
- certificates = self.list_certificates(key_vault_conn, subscription_id, vault_uri)
- key_vault_dict.update({
- 'keys': keys,
- 'secrets': secrets,
- 'certificates': certificates
- })
+ if key_vault_dict.get("properties", {}).get("vault_uri") is not None:
+ vault_name = key_vault_dict["name"]
+ vault_uri = key_vault_dict["properties"]["vault_uri"]
+
+ keys = self.list_keys(
+ key_vault_conn, resource_group_name, vault_name
+ )
+ secrets, secrets_permissions_display = self.list_secrets(
+ key_vault_conn, subscription_id, vault_uri
+ )
+ (
+ certificates,
+ certificates_permissions_display,
+ ) = self.list_certificates(
+ key_vault_conn, subscription_id, vault_uri
+ )
+
+ key_vault_dict.update(
+ {
+ "keys": keys,
+ "secrets": secrets,
+ "certificates": certificates,
+ "key_count": len(keys),
+ "secret_count": len(secrets),
+ "certificate_count": len(certificates),
+ "total_credentials_count": len(keys)
+ + len(secrets)
+ + len(certificates),
+ "keys_permissions_description_display": "Microsoft.KeyVault/vaults/read",
+ "secrets_permissions_description_display": secrets_permissions_display,
+ "certificates_permissions_description_display": certificates_permissions_display,
+ }
+ )
# Get name of private connection from ID
- if key_vault_dict.get('properties', {}).get('private_endpoint_connections') is not None:
- key_vault_dict['properties'].update({
- 'private_endpoint_connections': self.get_private_endpoint_name(
- key_vault_dict['properties']['private_endpoint_connections'])
- })
+ if (
+ key_vault_dict.get("properties", {}).get(
+ "private_endpoint_connections"
+ )
+ is not None
+ ):
+ key_vault_dict["properties"].update(
+ {
+ "private_endpoint_connections": self.get_private_endpoint_name(
+ key_vault_dict["properties"][
+ "private_endpoint_connections"
+ ]
+ )
+ }
+ )
# Change purge protection to user-friendly word
- if key_vault_dict.get('properties', {}).get('enable_purge_protection') is not None:
- key_vault_dict['properties'].update({
- 'enable_purge_protection_str': 'Disabled' if key_vault_dict['properties'][
- 'enable_purge_protection'] is False else 'Enabled'
- })
+ if (
+ key_vault_dict.get("properties", {}).get("enable_purge_protection")
+ is not None
+ ):
+ key_vault_dict["properties"].update(
+ {
+ "enable_purge_protection_str": "Disabled"
+ if key_vault_dict["properties"]["enable_purge_protection"]
+ is False
+ else "Enabled"
+ }
+ )
+ if sku := key_vault_dict.get("properties", {}).get("sku"):
+ key_vault_dict["sku"] = sku
# switch tags form
key_vault_data = KeyVault(key_vault_dict, strict=False)
- key_vault_resource = KeyVaultResource({
- 'data': key_vault_data,
- 'region_code': key_vault_data.location,
- 'reference': ReferenceModel(key_vault_data.reference()),
- 'name': key_vault_data.name,
- 'instance_type': key_vault_data.properties.sku.name,
- 'account': key_vault_data.subscription_id,
- 'tags': key_vault_dict.get('tags', {})
- })
+
+ key_vault_resource = KeyVaultResource(
+ {
+ "data": key_vault_data,
+ "region_code": key_vault_data.location,
+ "reference": ReferenceModel(key_vault_data.reference()),
+ "name": key_vault_data.name,
+ "instance_type": key_vault_data.properties.sku.name,
+ "account": key_vault_data.subscription_id,
+ "tags": key_vault_dict.get("tags", {}),
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(key_vault_data['location'])
+ self.set_region_code(key_vault_data["location"])
# _LOGGER.debug(f'[KEY VAULT INFO]{key_vault_resource.to_primitive()}')
- key_vault_responses.append(KeyVaultResponse({'resource': key_vault_resource}))
+ key_vault_responses.append(
+ KeyVaultResponse({"resource": key_vault_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {key_vault_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'KeyVault', 'KeyVault', key_vault_id)
+ _LOGGER.error(f"[list_instances] {key_vault_id} {e}", exc_info=True)
+ error_resource_response = self.generate_resource_error_response(
+ e, "KeyVault", "KeyVault", key_vault_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** Key Vault Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** Key Vault Finished {time.time() - start_time} Seconds **")
return key_vault_responses, error_responses
def list_keys(self, key_vault_conn, resource_group_name, vault_name):
keys = []
- keys_obj_list = key_vault_conn.list_keys(resource_group_name=resource_group_name, vault_name=vault_name)
+ keys_obj_list = key_vault_conn.list_keys(
+ resource_group_name=resource_group_name, vault_name=vault_name
+ )
if keys_obj_list:
for key in keys_obj_list:
@@ -122,36 +185,51 @@ def list_keys(self, key_vault_conn, resource_group_name, vault_name):
return keys
def list_secrets(self, key_vault_conn, subscription_id, vault_uri):
- key_vault_secret_client = key_vault_conn.init_key_vault_secret_client(subscription_id=subscription_id,
- vault_uri=vault_uri)
-
secrets = []
- secrets_obj_list = key_vault_secret_client.list_properties_of_secrets()
-
- if secrets_obj_list:
- for secret in secrets_obj_list:
- secret_dict = self.convert_nested_dictionary(secret)
- secrets.append(secret_dict)
- return secrets
+ permissions_display = "Microsoft.KeyVault/vaults/secrets/read, Microsoft.KeyVault/vaults/secrets/readMetadata/action"
- def list_certificates(self, key_vault_conn, subscription_id, vault_uri):
- key_vault_certificate_client = key_vault_conn.init_key_vault_certificate_client(subscription_id=subscription_id,
- vault_uri=vault_uri)
+ try:
+ key_vault_secret_client = key_vault_conn.init_key_vault_secret_client(
+ subscription_id=subscription_id, vault_uri=vault_uri
+ )
- certificates = []
- certificate_obj_list = key_vault_certificate_client.list_properties_of_certificates()
+ secrets_obj_list = key_vault_secret_client.list_properties_of_secrets()
- if certificate_obj_list:
- for certificate in certificate_obj_list:
- secret_dict = self.convert_nested_dictionary(certificate)
- certificates.append(secret_dict)
+ if secrets_obj_list:
+ for secret in secrets_obj_list:
+ secret_dict = self.convert_nested_dictionary(secret)
+ secrets.append(secret_dict)
+ except azure.core.exceptions.HttpResponseError as e:
+ _LOGGER.error(f"[list_secrets] {e}", exc_info=True)
+ permissions_display = "If you want to see the secretes list, please grant 'List' permission(Microsoft.KeyVault/vaults/secrets/read, Microsoft.KeyVault/vaults/secrets/readMetadata/action) to the service principal. or assign built-in role KeyVault Reader to the service principal.(https://learn.microsoft.com/en-us/azure/key-vault/general/rbac-guide?tabs=azure-cli#azure-built-in-roles-for-key-vault-data-plane-operations)"
+ return secrets, permissions_display
- return certificates
+ def list_certificates(self, key_vault_conn, subscription_id, vault_uri):
+ certificates = []
+ permissions_display = "Microsoft.KeyVault/vaults/secrets/readMetadata/action, Microsoft.KeyVault/vaults/certificates/read"
+ try:
+ key_vault_certificate_client = (
+ key_vault_conn.init_key_vault_certificate_client(
+ subscription_id=subscription_id, vault_uri=vault_uri
+ )
+ )
+
+ certificate_obj_list = (
+ key_vault_certificate_client.list_properties_of_certificates()
+ )
+
+ if certificate_obj_list:
+ for certificate in certificate_obj_list:
+ secret_dict = self.convert_nested_dictionary(certificate)
+ certificates.append(secret_dict)
+ except azure.core.exceptions.HttpResponseError as e:
+ _LOGGER.error(f"[list_secrets] {e}", exc_info=True)
+ permissions_display = "If you want to see the secretes list, please grant 'List' permission(Microsoft.KeyVault/vaults/secrets/read, Microsoft.KeyVault/vaults/secrets/readMetadata/action) to the service principal. or assign built-in role 'KeyVault Reader' to the service principal.(https://learn.microsoft.com/en-us/azure/key-vault/general/rbac-guide?tabs=azure-cli#azure-built-in-roles-for-key-vault-data-plane-operations)"
+
+ return certificates, permissions_display
@staticmethod
def get_private_endpoint_name(private_endpoint_connections):
for private_endpoint in private_endpoint_connections:
- private_endpoint.update({
- 'name': private_endpoint['id'].split('/')[10]
- })
+ private_endpoint.update({"name": private_endpoint["id"].split("/")[10]})
return private_endpoint_connections
diff --git a/src/spaceone/inventory/manager/load_balancers/instance_manager.py b/src/spaceone/inventory/manager/load_balancers/instance_manager.py
index 82f75a23..26336136 100644
--- a/src/spaceone/inventory/manager/load_balancers/instance_manager.py
+++ b/src/spaceone/inventory/manager/load_balancers/instance_manager.py
@@ -4,229 +4,316 @@
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.load_balancers import LoadBalancersConnector
from spaceone.inventory.model.load_balancers.cloud_service import *
-from spaceone.inventory.model.load_balancers.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.load_balancers.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.load_balancers.data import *
_LOGGER = logging.getLogger(__name__)
class LoadBalancersManager(AzureManager):
- connector_name = 'LoadBalancersConnector'
+ connector_name = "LoadBalancersConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
- """"
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure load balancer data resource information
- ErrorResourceResponse (list) : list of error resource information
+ """ "
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure load balancer data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
- _LOGGER.debug(f'** LoadBalancer START **')
+ _LOGGER.debug(f"** LoadBalancer START **")
start_time = time.time()
- subscription_info = params['subscription_info']
- load_balancer_conn: LoadBalancersConnector = self.locator.get_connector(self.connector_name, **params)
+ subscription_info = params["subscription_info"]
+ load_balancer_conn: LoadBalancersConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
load_balancer_responses = []
error_responses = []
load_balancers = load_balancer_conn.list_load_balancers()
for load_balancer in load_balancers:
- load_balancer_id = ''
+ load_balancer_id = ""
try:
load_balancer_dict = self.convert_nested_dictionary(load_balancer)
- load_balancer_id = load_balancer_dict['id']
-
- load_balancer_dict.update({
- 'resource_group': self.get_resource_group_from_id(load_balancer_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': load_balancer_id}
- })
+ load_balancer_dict = self.update_tenant_id_from_secret_data(
+ load_balancer_dict, params["secret_data"]
+ )
+
+ load_balancer_id = load_balancer_dict["id"]
+
+ load_balancer_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ load_balancer_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": load_balancer_id},
+ }
+ )
# Get Network Interfaces attached in this load balancer
- load_balancer_dict.update({
- 'network_interfaces': self.get_network_interfaces(load_balancer_conn,
- load_balancer_dict['resource_group'],
- load_balancer_dict['name'])
- })
+ load_balancer_dict.update(
+ {
+ "network_interfaces": self.get_network_interfaces(
+ load_balancer_conn,
+ load_balancer_dict["resource_group"],
+ load_balancer_dict["name"],
+ )
+ }
+ )
# Get Frontend IP Configurations information
- if load_balancer_dict.get('frontend_ip_configurations') is not None:
+ if load_balancer_dict.get("frontend_ip_configurations") is not None:
private_ip_address_list = list()
used_by_list = list()
- for fic in load_balancer_dict['frontend_ip_configurations']:
+ for fic in load_balancer_dict["frontend_ip_configurations"]:
if fic.get(
- 'subnet'): # If the 'public' type, Skip this part because there isn't subnet information for them.
- fic['subnet']['address_prefix'] = self.get_frontend_address_prefix(load_balancer_conn,
- fic['subnet'])
- fic['subnet']['name'] = self.get_frontend_ip_subnet_name(fic['subnet']['id'])
+ "subnet"
+ ): # If the 'public' type, Skip this part because there isn't subnet information for them.
+ fic["subnet"][
+ "address_prefix"
+ ] = self.get_frontend_address_prefix(
+ load_balancer_conn, fic["subnet"]
+ )
+ fic["subnet"]["name"] = self.get_frontend_ip_subnet_name(
+ fic["subnet"]["id"]
+ )
# Get used inbound NAT rules
- if fic.get('inbound_nat_rules') is not None:
- load_balancer_dict.update({
- 'frontend_ip_configurations_used_by_display': self.get_frontend_ip_configurations_used_by_display(
- used_by_list, fic['inbound_nat_rules'])
- })
+ if fic.get("inbound_nat_rules") is not None:
+ load_balancer_dict.update(
+ {
+ "frontend_ip_configurations_used_by_display": self.get_frontend_ip_configurations_used_by_display(
+ used_by_list, fic["inbound_nat_rules"]
+ )
+ }
+ )
# Get used load balancing NAT rules
- if fic.get('load_balancing_rules') is not None:
- load_balancer_dict.update({
- 'frontend_ip_configurations_used_by_display': self.get_frontend_ip_configurations_used_by_display(
- used_by_list, fic['load_balancing_rules']),
- })
+ if fic.get("load_balancing_rules") is not None:
+ load_balancer_dict.update(
+ {
+ "frontend_ip_configurations_used_by_display": self.get_frontend_ip_configurations_used_by_display(
+ used_by_list, fic["load_balancing_rules"]
+ ),
+ }
+ )
# Get all of private ip addresses
- private_ip_address_list.append(fic['private_ip_address'])
+ private_ip_address_list.append(fic["private_ip_address"])
- load_balancer_dict.update({
- 'private_ip_address_display': private_ip_address_list
- })
+ load_balancer_dict.update(
+ {"private_ip_address_display": private_ip_address_list}
+ )
# Since Azure python sdk returns only one backend pool, delete the backend pool list first, and then use the new API connection
- if load_balancer_dict.get('backend_address_pools') is not None:
- load_balancer_dict['backend_address_pools'].clear()
- load_balancer_dict.update({
- 'backend_address_pools': self.list_load_balancer_backend_address_pools(load_balancer_conn,
- load_balancer_dict[
- 'resource_group'],
- load_balancer_dict[
- 'name'])
- })
+ if load_balancer_dict.get("backend_address_pools") is not None:
+ load_balancer_dict["backend_address_pools"].clear()
+ load_balancer_dict.update(
+ {
+ "backend_address_pools": self.list_load_balancer_backend_address_pools(
+ load_balancer_conn,
+ load_balancer_dict["resource_group"],
+ load_balancer_dict["name"],
+ )
+ }
+ )
# get backend address pool's count
- load_balancer_dict.update({
- 'backend_address_pools_count_display': self.get_backend_address_pools_count(
- load_balancer_dict['backend_address_pools'])
- })
+ load_balancer_dict.update(
+ {
+ "backend_address_pools_count_display": self.get_backend_address_pools_count(
+ load_balancer_dict["backend_address_pools"]
+ )
+ }
+ )
# Get load balancing Rules for display
- if load_balancer_dict.get('load_balancing_rules') is not None:
- load_balancer_dict.update({
- 'load_balancing_rules_display': self.get_load_balancing_rules_display(
- load_balancer_dict['load_balancing_rules']),
- })
-
- for lbr in load_balancer_dict['load_balancing_rules']:
- if lbr.get('backend_address_pool') is not None:
- lbr.update({
- 'backend_address_pool_display': self.get_backend_address_pool_name(
- lbr['backend_address_pool']),
- })
-
- if lbr.get('load_distribution') is not None:
- lbr.update({
- 'load_distribution_display': self.get_load_distribution_display(
- lbr['load_distribution'])
- })
-
- if lbr.get('frontend_ip_configuration') is not None:
- lbr.update({
- 'frontend_ip_configuration_display': self.get_frontend_ip_configuration_display(
- lbr['frontend_ip_configuration'])
- })
+ if load_balancer_dict.get("load_balancing_rules") is not None:
+ load_balancer_dict.update(
+ {
+ "load_balancing_rules_display": self.get_load_balancing_rules_display(
+ load_balancer_dict["load_balancing_rules"]
+ ),
+ }
+ )
+
+ for lbr in load_balancer_dict["load_balancing_rules"]:
+ if lbr.get("backend_address_pool") is not None:
+ lbr.update(
+ {
+ "backend_address_pool_display": self.get_backend_address_pool_name(
+ lbr["backend_address_pool"]
+ ),
+ }
+ )
+
+ if lbr.get("load_distribution") is not None:
+ lbr.update(
+ {
+ "load_distribution_display": self.get_load_distribution_display(
+ lbr["load_distribution"]
+ )
+ }
+ )
+
+ if lbr.get("frontend_ip_configuration") is not None:
+ lbr.update(
+ {
+ "frontend_ip_configuration_display": self.get_frontend_ip_configuration_display(
+ lbr["frontend_ip_configuration"]
+ )
+ }
+ )
# Get Inbound NAT Rules for display
- if load_balancer_dict.get('inbound_nat_rules') is not None:
- load_balancer_dict.update({
- 'inbound_nat_rules_display': self.get_nat_rules_display(load_balancer_dict['inbound_nat_rules'])
- })
- for inr in load_balancer_dict['inbound_nat_rules']:
- inr.update({
- 'frontend_ip_configuration_display': self.get_frontend_ip_configuration_display(
- inr['frontend_ip_configuration']),
- 'port_mapping_display': self.get_port_mapping_display(inr['frontend_port'],
- inr['backend_port']),
- 'target_virtual_machine': self.get_matched_vm_info(inr['backend_ip_configuration']['id'],
- load_balancer_dict['network_interfaces'])
- })
+ if load_balancer_dict.get("inbound_nat_rules") is not None:
+ load_balancer_dict.update(
+ {
+ "inbound_nat_rules_display": self.get_nat_rules_display(
+ load_balancer_dict["inbound_nat_rules"]
+ )
+ }
+ )
+ for inr in load_balancer_dict["inbound_nat_rules"]:
+ inr.update(
+ {
+ "frontend_ip_configuration_display": self.get_frontend_ip_configuration_display(
+ inr["frontend_ip_configuration"]
+ ),
+ "port_mapping_display": self.get_port_mapping_display(
+ inr["frontend_port"], inr["backend_port"]
+ ),
+ "target_virtual_machine": self.get_matched_vm_info(
+ inr["backend_ip_configuration"]["id"],
+ load_balancer_dict["network_interfaces"],
+ ),
+ }
+ )
# Get Health Probes for display
- if load_balancer_dict.get('probes') is not None:
- load_balancer_dict.update({
- 'probes_display': self.get_probe_display_list(load_balancer_dict['probes'])
- })
+ if load_balancer_dict.get("probes") is not None:
+ load_balancer_dict.update(
+ {
+ "probes_display": self.get_probe_display_list(
+ load_balancer_dict["probes"]
+ )
+ }
+ )
load_balancer_data = LoadBalancer(load_balancer_dict, strict=False)
- load_balancer_resource = LoadBalancerResource({
- 'data': load_balancer_data,
- 'region_code': load_balancer_data.location,
- 'reference': ReferenceModel(load_balancer_data.reference()),
- 'tags': load_balancer_dict.get('tags', {}),
- 'name': load_balancer_data.name,
- 'instance_type': load_balancer_data.sku.name,
- 'account': load_balancer_data.subscription_id
- })
+ load_balancer_resource = LoadBalancerResource(
+ {
+ "data": load_balancer_data,
+ "region_code": load_balancer_data.location,
+ "reference": ReferenceModel(load_balancer_data.reference()),
+ "tags": load_balancer_dict.get("tags", {}),
+ "name": load_balancer_data.name,
+ "instance_type": load_balancer_data.sku.name,
+ "account": load_balancer_data.subscription_id,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(load_balancer_data['location'])
+ self.set_region_code(load_balancer_data["location"])
# _LOGGER.debug(f'[LOAD BALANCER INFO] {load_balancer_resource.to_primitive()}')
- load_balancer_responses.append(LoadBalancerResponse({'resource': load_balancer_resource}))
+ load_balancer_responses.append(
+ LoadBalancerResponse({"resource": load_balancer_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {load_balancer_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Network', 'LoadBalancer',
- load_balancer_id)
+ _LOGGER.error(f"[list_instances] {load_balancer_id} {e}", exc_info=True)
+ error_resource_response = self.generate_resource_error_response(
+ e, "Network", "LoadBalancer", load_balancer_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** LoadBalancer Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** LoadBalancer Finished {time.time() - start_time} Seconds **")
return load_balancer_responses, error_responses
def get_network_interfaces(self, load_balancer_conn, rg_name, lb_name):
- network_interface_object_list = list(load_balancer_conn.list_load_balancer_network_interfaces(rg_name, lb_name))
+ network_interface_object_list = list(
+ load_balancer_conn.list_load_balancer_network_interfaces(rg_name, lb_name)
+ )
network_interface_list = []
# network_interfaces >> network_interfaces >> ip_configurations
for nil in network_interface_object_list:
network_interface_dict = self.convert_nested_dictionary(nil)
- nic_rg_name = network_interface_dict.get('id', '').split('/')[4]
-
- if network_interface_dict.get('ip_configurations') is not None:
+ nic_rg_name = network_interface_dict.get("id", "").split("/")[4]
+ if network_interface_dict.get("ip_configurations") is not None:
# Get LB's name, VMs name attached to Backend Pool
- for ip_configuration in network_interface_dict['ip_configurations']:
- if ip_configuration.get('load_balancer_backend_address_pools') is not None:
- for ic in ip_configuration['load_balancer_backend_address_pools']:
+ for ip_configuration in network_interface_dict["ip_configurations"]:
+ if (
+ ip_configuration.get("load_balancer_backend_address_pools")
+ is not None
+ ):
+ for ic in ip_configuration[
+ "load_balancer_backend_address_pools"
+ ]:
# Get backend address vm name
- backend_pool_vm_name = ic['id'].split('/')[10]
+ backend_pool_vm_name = ic["id"].split("/")[10]
- network_interface_dict.update({
- 'load_balancer_backend_address_pools_name_display': backend_pool_vm_name,
- })
+ network_interface_dict.update(
+ {
+ "load_balancer_backend_address_pools_name_display": backend_pool_vm_name,
+ }
+ )
# Get the primary ip configuration from a network interface card
- network_interface_dict.update({
- 'private_ip_display': self.get_ip_configuration_display(network_interface_dict['ip_configurations'])
- })
+ network_interface_dict.update(
+ {
+ "private_ip_display": self.get_ip_configuration_display(
+ network_interface_dict["ip_configurations"]
+ )
+ }
+ )
# 2) Get VM's name which is attached to this network interface card
- if network_interface_dict.get('virtual_machine') is not None:
- network_interface_dict.update({
- 'virtual_machine_name_display': network_interface_dict['virtual_machine']['id'].split('/')[8]
- })
+ if network_interface_dict.get("virtual_machine") is not None:
+ network_interface_dict.update(
+ {
+ "virtual_machine_name_display": network_interface_dict[
+ "virtual_machine"
+ ]["id"].split("/")[8]
+ }
+ )
network_interface_list.append(network_interface_dict)
return network_interface_list
- def get_ip_configurations_list(self, load_balancer_conn, rg_name, network_interface_name):
+ def get_ip_configurations_list(
+ self, load_balancer_conn, rg_name, network_interface_name
+ ):
ip_configuration_list = []
if network_interface_name:
- ip_configurations_object = load_balancer_conn.list_network_interface_ip_configurations(rg_name,
- network_interface_name)
+ ip_configurations_object = (
+ load_balancer_conn.list_network_interface_ip_configurations(
+ rg_name, network_interface_name
+ )
+ )
ip_configurations_object_list = list(ip_configurations_object)
if ip_configurations_object_list:
for ip_configuration_object in ip_configurations_object_list:
- ip_object_dict = self.convert_nested_dictionary(ip_configuration_object)
+ ip_object_dict = self.convert_nested_dictionary(
+ ip_configuration_object
+ )
ip_configuration_list.append(ip_object_dict)
return ip_configuration_list
@@ -234,9 +321,12 @@ def get_ip_configurations_list(self, load_balancer_conn, rg_name, network_interf
def list_load_balancer_backend_address_pools(self, conn, rg_name, lb_name):
backend_pools_list = list() # return result list
- backend_pools_object = conn.list_load_balancer_backend_address_pools(rg_name, lb_name)
+ backend_pools_object = conn.list_load_balancer_backend_address_pools(
+ rg_name, lb_name
+ )
backend_pools_object_list = list(
- backend_pools_object) # Since return type is ItemPagedClass, change to the list before convert dictionary
+ backend_pools_object
+ ) # Since return type is ItemPagedClass, change to the list before convert dictionary
# Loop for converting backend pools objects to dictionary
for bp in backend_pools_object_list:
@@ -249,17 +339,16 @@ def list_load_balancer_backend_address_pools(self, conn, rg_name, lb_name):
def get_ip_configuration_display(ip_configurations_list):
ic_list = list()
for ic in ip_configurations_list:
- ic_list.append(ic['private_ip_address'])
+ ic_list.append(ic["private_ip_address"])
return ic_list
@staticmethod
def get_frontend_address_prefix(conn, subnet):
-
# Parse Vnet, LB name from subnet id
- subnet_id = subnet['id']
- resource_group_name = subnet_id.split('/')[4]
- vnet_name = subnet_id.split('/')[8]
- subnet_name = subnet_id.split('/')[10]
+ subnet_id = subnet["id"]
+ resource_group_name = subnet_id.split("/")[4]
+ vnet_name = subnet_id.split("/")[8]
+ subnet_name = subnet_id.split("/")[10]
# API request for subnet dictionary
subnet = conn.get_subnets(resource_group_name, vnet_name, subnet_name)
@@ -268,13 +357,13 @@ def get_frontend_address_prefix(conn, subnet):
@staticmethod
def get_frontend_ip_subnet_name(subnet_id):
- subnet_name = subnet_id.split('/')[10]
+ subnet_name = subnet_id.split("/")[10]
return subnet_name
@staticmethod
def get_frontend_ip_configurations_used_by_display(used_by_list, used_object_list):
for used_object in used_object_list:
- used_by_list.append(used_object['id'].split('/')[10])
+ used_by_list.append(used_object["id"].split("/")[10])
return used_by_list
@@ -283,9 +372,13 @@ def get_backend_address_pools_count(backend_address_dict):
backend_address_pools_count = len(backend_address_dict)
if backend_address_pools_count == 1:
- backend_address_pools_count_display = str(backend_address_pools_count) + " backend pool"
+ backend_address_pools_count_display = (
+ str(backend_address_pools_count) + " backend pool"
+ )
else:
- backend_address_pools_count_display = str(backend_address_pools_count) + " backend pools"
+ backend_address_pools_count_display = (
+ str(backend_address_pools_count) + " backend pools"
+ )
return backend_address_pools_count_display
@@ -293,24 +386,29 @@ def get_backend_address_pools_count(backend_address_dict):
def get_matched_vm_info(find_key, find_list_pool):
matched_vm_list = list()
for find_object in find_list_pool:
- if find_object[
- 'id'] in find_key: # if network interface card's id matches to the backend configuration's id
- if find_object.get('virtual_machine') is not None:
- matched_vm_list.append((find_object['virtual_machine']['id']).split('/')[8])
+ if (
+ find_object["id"] in find_key
+ ): # if network interface card's id matches to the backend configuration's id
+ if find_object.get("virtual_machine") is not None:
+ matched_vm_list.append(
+ (find_object["virtual_machine"]["id"]).split("/")[8]
+ )
return matched_vm_list
@staticmethod
def get_probe_display_list(probes_list):
probe_display_list = list()
for probe in probes_list:
- probe_display_list.append(probe['name'])
+ probe_display_list.append(probe["name"])
return probe_display_list
@staticmethod
def get_load_balancing_rules_display(load_balancing_rules_list):
lbr_name_list = list()
for lbr in load_balancing_rules_list:
- lbr_name_list.append(lbr['name']) # 'name' key always exists if there are load balancing rules.
+ lbr_name_list.append(
+ lbr["name"]
+ ) # 'name' key always exists if there are load balancing rules.
return lbr_name_list
@@ -318,34 +416,37 @@ def get_load_balancing_rules_display(load_balancing_rules_list):
def get_nat_rules_display(inbound_nat_rules_list):
nat_rules_list = list()
for inr in inbound_nat_rules_list:
- nat_rules_list.append(inr['name']) # 'name' key always exists if there are inbound NAT rules.
+ nat_rules_list.append(
+ inr["name"]
+ ) # 'name' key always exists if there are inbound NAT rules.
return nat_rules_list
@staticmethod
def get_backend_address_pool_name(
- lbr_backend_address_pool): # id must exist if there is a backend address pool object
- return lbr_backend_address_pool['id'].split('/')[10]
+ lbr_backend_address_pool,
+ ): # id must exist if there is a backend address pool object
+ return lbr_backend_address_pool["id"].split("/")[10]
@staticmethod
def get_load_distribution_display(lbr_load_distribution):
- if lbr_load_distribution == 'Default':
- lbr_load_distribution_display = 'None'
- elif lbr_load_distribution == 'SourceIPProtocol':
- lbr_load_distribution_display = 'Client IP and Protocol'
- elif lbr_load_distribution == 'SourceIP':
- lbr_load_distribution_display = 'Client IP'
+ if lbr_load_distribution == "Default":
+ lbr_load_distribution_display = "None"
+ elif lbr_load_distribution == "SourceIPProtocol":
+ lbr_load_distribution_display = "Client IP and Protocol"
+ elif lbr_load_distribution == "SourceIP":
+ lbr_load_distribution_display = "Client IP"
return lbr_load_distribution_display
@staticmethod
def get_frontend_ip_configuration_display(lbr_frontend_ip_configuration_dict):
- return lbr_frontend_ip_configuration_dict['id'].split('/')[10]
+ return lbr_frontend_ip_configuration_dict["id"].split("/")[10]
@staticmethod
def get_port_mapping_display(frontend_port, backend_port):
if frontend_port == backend_port:
- port_mapping_display = 'Default'
+ port_mapping_display = "Default"
else:
- port_mapping_display = 'Custom'
+ port_mapping_display = "Custom"
return port_mapping_display
diff --git a/src/spaceone/inventory/manager/mysql_servers/server_manager.py b/src/spaceone/inventory/manager/mysql_servers/server_manager.py
index d6f2c7b2..83868dda 100644
--- a/src/spaceone/inventory/manager/mysql_servers/server_manager.py
+++ b/src/spaceone/inventory/manager/mysql_servers/server_manager.py
@@ -4,100 +4,132 @@
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.mysql_servers import MySQLServersConnector
from spaceone.inventory.model.mysql_servers.cloud_service import *
-from spaceone.inventory.model.mysql_servers.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.mysql_servers.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.mysql_servers.data import *
_LOGGER = logging.getLogger(__name__)
class MySQLServersManager(AzureManager):
- connector_name = 'MySQLServersConnector'
+ connector_name = "MySQLServersConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (dict) : dictionary of mysql servers data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (dict) : dictionary of mysql servers data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
- _LOGGER.debug(f'** MySQL Servers START **')
+ _LOGGER.debug(f"** MySQL Servers START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- mysql_servers_conn: MySQLServersConnector = self.locator.get_connector(self.connector_name, **params)
+ mysql_servers_conn: MySQLServersConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
mysql_server_responses = []
error_responses = []
mysql_servers_obj_list = mysql_servers_conn.list_servers()
for mysql_server in mysql_servers_obj_list:
- mysql_server_id = ''
+ mysql_server_id = ""
try:
mysql_server_dict = self.convert_nested_dictionary(mysql_server)
- mysql_server_id = mysql_server_dict['id']
-
- mysql_server_dict.update({
- 'resource_group': self.get_resource_group_from_id(mysql_server_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': mysql_server_id}
- })
-
- if mysql_server_dict.get('name') is not None:
- resource_group = mysql_server_dict.get('resource_group', '')
- server_name = mysql_server_dict['name']
- mysql_server_dict.update({
- 'firewall_rules': self.get_firewall_rules_by_server(mysql_servers_conn, resource_group, server_name),
- })
-
- if mysql_server_dict.get('firewall_rules') is not None:
- mysql_server_dict.update({
- 'allow_azure_services_access': self.get_azure_service_access(mysql_server_dict['firewall_rules'])
- })
-
- if mysql_server_dict.get('storage_profile') is not None:
- mysql_server_dict['storage_profile'].update({
- 'storage_gb': self.get_storage_gb(mysql_server_dict['storage_profile'].get('storage_mb', ''))
- })
+ mysql_server_id = mysql_server_dict["id"]
+
+ mysql_server_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ mysql_server_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": mysql_server_id},
+ }
+ )
+
+ if mysql_server_dict.get("name") is not None:
+ resource_group = mysql_server_dict.get("resource_group", "")
+ server_name = mysql_server_dict["name"]
+ mysql_server_dict.update(
+ {
+ "firewall_rules": self.get_firewall_rules_by_server(
+ mysql_servers_conn, resource_group, server_name
+ ),
+ }
+ )
+
+ if mysql_server_dict.get("firewall_rules") is not None:
+ mysql_server_dict.update(
+ {
+ "allow_azure_services_access": self.get_azure_service_access(
+ mysql_server_dict["firewall_rules"]
+ )
+ }
+ )
+
+ if mysql_server_dict.get("storage_profile") is not None:
+ mysql_server_dict["storage_profile"].update(
+ {
+ "storage_gb": self.get_storage_gb(
+ mysql_server_dict["storage_profile"].get(
+ "storage_mb", ""
+ )
+ )
+ }
+ )
mysql_server_data = MySQLServer(mysql_server_dict, strict=False)
- mysql_server_resource = MySQLServerResource({
- 'data': mysql_server_data,
- 'tags': mysql_server_dict.get('tags', {}),
- 'region_code': mysql_server_data.location,
- 'reference': ReferenceModel(mysql_server_data.reference()),
- 'name': mysql_server_data.name,
- 'account': mysql_server_data.subscription_id,
- 'instance_type': mysql_server_data.sku.tier
- })
+ mysql_server_resource = MySQLServerResource(
+ {
+ "data": mysql_server_data,
+ "tags": mysql_server_dict.get("tags", {}),
+ "region_code": mysql_server_data.location,
+ "reference": ReferenceModel(mysql_server_data.reference()),
+ "name": mysql_server_data.name,
+ "account": mysql_server_data.subscription_id,
+ "instance_type": mysql_server_data.sku.tier,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(mysql_server_data['location'])
+ self.set_region_code(mysql_server_data["location"])
# _LOGGER.debug(f'[MYSQL SERVER INFO] {mysql_server_resource.to_primitive()}')
- mysql_server_responses.append(MySQLServerResponse({'resource': mysql_server_resource}))
+ mysql_server_responses.append(
+ MySQLServerResponse({"resource": mysql_server_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {mysql_server_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Database', 'MySQLServer', mysql_server_id)
+ _LOGGER.error(f"[list_instances] {mysql_server_id} {e}", exc_info=True)
+ error_resource_response = self.generate_resource_error_response(
+ e, "Database", "MySQLServer", mysql_server_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** MySQL Server Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** MySQL Server Finished {time.time() - start_time} Seconds **")
return mysql_server_responses, error_responses
- def get_firewall_rules_by_server(self, mysql_servers_conn, resource_group, server_name):
+ def get_firewall_rules_by_server(
+ self, mysql_servers_conn, resource_group, server_name
+ ):
firewall_rules = []
- firewall_rules_obj = mysql_servers_conn.list_firewall_rules_by_server(resource_group_name=resource_group, server_name=server_name)
+ firewall_rules_obj = mysql_servers_conn.list_firewall_rules_by_server(
+ resource_group_name=resource_group, server_name=server_name
+ )
for firewall_rule in firewall_rules_obj:
firewall_dict = self.convert_nested_dictionary(firewall_rule)
firewall_rules.append(firewall_dict)
@@ -109,10 +141,10 @@ def get_azure_service_access(firewall_rules):
firewall_rule_name_list = []
for firewall_rule in firewall_rules:
- if firewall_rule.get('name') is not None:
- firewall_rule_name_list.append(firewall_rule['name'])
+ if firewall_rule.get("name") is not None:
+ firewall_rule_name_list.append(firewall_rule["name"])
- if 'AllowAllWindowsAzureIps' in firewall_rule_name_list:
+ if "AllowAllWindowsAzureIps" in firewall_rule_name_list:
return True
return False
diff --git a/src/spaceone/inventory/manager/nat_gateways/instance_manager.py b/src/spaceone/inventory/manager/nat_gateways/instance_manager.py
index 33d2f0c0..9948eea6 100644
--- a/src/spaceone/inventory/manager/nat_gateways/instance_manager.py
+++ b/src/spaceone/inventory/manager/nat_gateways/instance_manager.py
@@ -11,126 +11,170 @@
class NATGatewaysManager(AzureManager):
- connector_name = 'NATGatewaysConnector'
+ connector_name = "NATGatewaysConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (dict) : dictionary of azure nat gateway data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (dict) : dictionary of azure nat gateway data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
- _LOGGER.debug(f'** NAT Gateway START **')
+ _LOGGER.debug(f"** NAT Gateway START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- nat_gateway_conn: NATGatewaysConnector = self.locator.get_connector(self.connector_name, **params)
+ nat_gateway_conn: NATGatewaysConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
nat_gateway_responses = []
error_responses = []
nat_gateways = nat_gateway_conn.list_all_nat_gateways()
for nat_gateway in nat_gateways:
- nat_gateway_id = ''
+ nat_gateway_id = ""
try:
nat_gateway_dict = self.convert_nested_dictionary(nat_gateway)
- nat_gateway_id = nat_gateway_dict['id']
+ nat_gateway_id = nat_gateway_dict["id"]
+
+ if (
+ sku_tier := nat_gateway_dict["sku"]
+                    .get("additional_properties", {})
+ .get("tier")
+ ):
+ nat_gateway_dict["sku"]["tier"] = sku_tier
+
+ nat_gateway_dict = self.update_tenant_id_from_secret_data(
+ nat_gateway_dict, params["secret_data"]
+ )
# update application_gateway_dict
- nat_gateway_dict.update({
- 'resource_group': self.get_resource_group_from_id(nat_gateway_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': nat_gateway_id}
- })
-
- if nat_gateway_dict.get('public_ip_addresses') is not None:
+ nat_gateway_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ nat_gateway_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": nat_gateway_id},
+ }
+ )
+
+ if nat_gateway_dict.get("public_ip_addresses") is not None:
# Get Count of Public IP Address
- nat_gateway_dict.update({
- 'public_ip_addresses_count': len(nat_gateway_dict['public_ip_addresses'])
- })
+ nat_gateway_dict.update(
+ {
+ "public_ip_addresses_count": len(
+ nat_gateway_dict["public_ip_addresses"]
+ )
+ }
+ )
# Get Public IP Address Dictionary
- if not nat_gateway_dict['public_ip_addresses']:
+ if not nat_gateway_dict["public_ip_addresses"]:
break
pip_list = []
- for pip in nat_gateway_dict['public_ip_addresses']:
- public_ip_prefixes_id = pip['id']
- pip_dict = self.get_public_ip_address_dict(nat_gateway_conn, public_ip_prefixes_id)
+ for pip in nat_gateway_dict["public_ip_addresses"]:
+ public_ip_prefixes_id = pip["id"]
+ pip_dict = self.get_public_ip_address_dict(
+ nat_gateway_conn, public_ip_prefixes_id
+ )
pip_list.append(pip_dict)
- nat_gateway_dict['public_ip_addresses'] = pip_list
+ nat_gateway_dict["public_ip_addresses"] = pip_list
- if nat_gateway_dict.get('public_ip_prefixes') is not None:
- nat_gateway_dict.update({
- 'public_ip_prefixes_count': len(nat_gateway_dict['public_ip_addresses'])
- })
+ if nat_gateway_dict.get("public_ip_prefixes") is not None:
+ nat_gateway_dict.update(
+ {
+ "public_ip_prefixes_count": len(
+                            nat_gateway_dict["public_ip_prefixes"]
+ )
+ }
+ )
# Get Public IP Address Dictionary
- if not nat_gateway_dict['public_ip_prefixes']:
+ if not nat_gateway_dict["public_ip_prefixes"]:
break
pip_list = []
- for pip in nat_gateway_dict['public_ip_prefixes']:
- public_ip_prefixes_id = pip['id']
- pip_dict = self.get_public_ip_prefixes_dict(nat_gateway_conn, public_ip_prefixes_id)
+ for pip in nat_gateway_dict["public_ip_prefixes"]:
+ public_ip_prefixes_id = pip["id"]
+ pip_dict = self.get_public_ip_prefixes_dict(
+ nat_gateway_conn, public_ip_prefixes_id
+ )
pip_list.append(pip_dict)
- nat_gateway_dict['public_ip_prefixes'] = pip_list
+ nat_gateway_dict["public_ip_prefixes"] = pip_list
- if nat_gateway_dict.get('subnets') is not None:
- nat_gateway_dict.update({
- 'subnets': self.get_subnets(nat_gateway_conn, nat_gateway_dict['subnets'])
- })
+ if nat_gateway_dict.get("subnets") is not None:
+ nat_gateway_dict.update(
+ {
+ "subnets": self.get_subnets(
+ nat_gateway_conn, nat_gateway_dict["subnets"]
+ ),
+ "subnets_count": len(nat_gateway_dict["subnets"]),
+ }
+ )
nat_gateway_data = NatGateway(nat_gateway_dict, strict=False)
- nat_gateway_resource = NatGatewayResource({
- 'data': nat_gateway_data,
- 'tags': nat_gateway_dict.get('tags', {}),
- 'region_code': nat_gateway_data.location,
- 'reference': ReferenceModel(nat_gateway_data.reference()),
- 'name': nat_gateway_data.name,
- 'account': nat_gateway_data.subscription_id,
- 'instance_type': nat_gateway_data.sku.name
- })
+ nat_gateway_resource = NatGatewayResource(
+ {
+ "data": nat_gateway_data,
+ "tags": nat_gateway_dict.get("tags", {}),
+ "region_code": nat_gateway_data.location,
+ "reference": ReferenceModel(nat_gateway_data.reference()),
+ "name": nat_gateway_data.name,
+ "account": nat_gateway_data.subscription_id,
+ "instance_type": nat_gateway_data.sku.name,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(nat_gateway_data['location'])
+ self.set_region_code(nat_gateway_data["location"])
# _LOGGER.debug(f'[NAT GATEWAYS INFO] {nat_gateway_resource.to_primitive()}')
- nat_gateway_responses.append(NatGatewayResponse({'resource': nat_gateway_resource}))
+ nat_gateway_responses.append(
+ NatGatewayResponse({"resource": nat_gateway_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {nat_gateway_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Network', 'NATGateway', nat_gateway_id)
+ _LOGGER.error(f"[list_instances] {nat_gateway_id} {e}", exc_info=True)
+ error_resource_response = self.generate_resource_error_response(
+ e, "Network", "NATGateway", nat_gateway_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** NAT Gateway Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** NAT Gateway Finished {time.time() - start_time} Seconds **")
return nat_gateway_responses, error_responses
def get_public_ip_address_dict(self, nat_gateway_conn, pip_id):
- pip_name = pip_id.split('/')[8]
- resource_group_name = pip_id.split('/')[4]
- pip_obj = nat_gateway_conn.get_public_ip_addresses(resource_group_name=resource_group_name, public_ip_address_name=pip_name)
+ pip_name = pip_id.split("/")[8]
+ resource_group_name = pip_id.split("/")[4]
+ pip_obj = nat_gateway_conn.get_public_ip_addresses(
+ resource_group_name=resource_group_name, public_ip_address_name=pip_name
+ )
pip_dict = self.convert_nested_dictionary(pip_obj)
return pip_dict
def get_public_ip_prefixes_dict(self, nat_gateway_conn, pip_id):
- pip_name = pip_id.split('/')[8]
- resource_group_name = pip_id.split('/')[4]
- pip_obj = nat_gateway_conn.get_public_ip_prefixes(resource_group_name=resource_group_name, public_ip_prefixes_name=pip_name)
+ pip_name = pip_id.split("/")[8]
+ resource_group_name = pip_id.split("/")[4]
+ pip_obj = nat_gateway_conn.get_public_ip_prefixes(
+ resource_group_name=resource_group_name, public_ip_prefixes_name=pip_name
+ )
pip_dict = self.convert_nested_dictionary(pip_obj)
return pip_dict
@@ -139,15 +183,17 @@ def get_subnets(self, nat_gateway_conn, subnets):
subnet_list = []
for subnet in subnets:
- resource_group_name = subnet['id'].split('/')[4]
- subnet_name = subnet['id'].split('/')[10]
- vnet_name = subnet['id'].split('/')[8]
-
- subnet_obj = nat_gateway_conn.get_subnet(resource_group_name=resource_group_name, subnet_name=subnet_name, vnet_name=vnet_name)
+ resource_group_name = subnet["id"].split("/")[4]
+ subnet_name = subnet["id"].split("/")[10]
+ vnet_name = subnet["id"].split("/")[8]
+
+ subnet_obj = nat_gateway_conn.get_subnet(
+ resource_group_name=resource_group_name,
+ subnet_name=subnet_name,
+ vnet_name=vnet_name,
+ )
subnet_dict = self.convert_nested_dictionary(subnet_obj)
- subnet_dict.update({
- 'virtual_network': vnet_name
- })
+ subnet_dict.update({"virtual_network": vnet_name})
subnet_list.append(subnet_dict)
diff --git a/src/spaceone/inventory/manager/network_security_groups/instance_manager.py b/src/spaceone/inventory/manager/network_security_groups/instance_manager.py
index af2e357d..68e61580 100644
--- a/src/spaceone/inventory/manager/network_security_groups/instance_manager.py
+++ b/src/spaceone/inventory/manager/network_security_groups/instance_manager.py
@@ -2,78 +2,100 @@
import logging
from spaceone.inventory.libs.manager import AzureManager
from spaceone.inventory.libs.schema.base import ReferenceModel
-from spaceone.inventory.connector.network_security_groups import NetworkSecurityGroupsConnector
+from spaceone.inventory.connector.network_security_groups import (
+ NetworkSecurityGroupsConnector,
+)
from spaceone.inventory.model.network_security_groups.cloud_service import *
-from spaceone.inventory.model.network_security_groups.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.network_security_groups.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.network_security_groups.data import *
_LOGGER = logging.getLogger(__name__)
class NetworkSecurityGroupsManager(AzureManager):
- connector_name = 'NetworkSecurityGroupsConnector'
+ connector_name = "NetworkSecurityGroupsConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure network security group data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure network security group data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
_LOGGER.debug("** Network Security Group START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
# cloud_service_info = self.get_cloud
- network_security_group_conn: NetworkSecurityGroupsConnector = self.locator.get_connector(self.connector_name, **params)
+ network_security_group_conn: NetworkSecurityGroupsConnector = (
+ self.locator.get_connector(self.connector_name, **params)
+ )
network_security_group_responses = []
error_responses = []
- network_security_groups = network_security_group_conn.list_all_network_security_groups()
- network_interfaces = [self.convert_nested_dictionary(ni) for ni in network_security_group_conn.list_all_network_interfaces()]
+ network_security_groups = (
+ network_security_group_conn.list_all_network_security_groups()
+ )
+ network_interfaces = [
+ self.convert_nested_dictionary(ni)
+ for ni in network_security_group_conn.list_all_network_interfaces()
+ ]
for network_security_group in network_security_groups:
- network_security_group_id = ''
+ network_security_group_id = ""
try:
- network_security_group_dict = self.convert_nested_dictionary(network_security_group)
- network_security_group_id = network_security_group_dict['id']
+ network_security_group_dict = self.convert_nested_dictionary(
+ network_security_group
+ )
+ network_security_group_id = network_security_group_dict["id"]
inbound_rules = []
outbound_rules = []
- if network_security_group_dict.get('security_rules') is not None:
+ if network_security_group_dict.get("security_rules") is not None:
# update custom security rules
- inbound, outbound = self.split_security_rules(network_security_group_dict, 'security_rules')
+ inbound, outbound = self.split_security_rules(
+ network_security_group_dict, "security_rules"
+ )
for ib in inbound:
inbound_rules.append(ib)
for ob in outbound:
outbound_rules.append(ob)
# update default security rules
- if network_security_group_dict.get('default_security_rules') is not None:
- inbound, outbound = self.split_security_rules(network_security_group_dict, 'default_security_rules')
+ if (
+ network_security_group_dict.get("default_security_rules")
+ is not None
+ ):
+ inbound, outbound = self.split_security_rules(
+ network_security_group_dict, "default_security_rules"
+ )
for ib in inbound:
inbound_rules.append(ib)
for ob in outbound:
outbound_rules.append(ob)
- network_security_group_dict.update({
- 'inbound_security_rules': inbound_rules,
- 'outbound_security_rules': outbound_rules
- })
+ network_security_group_dict.update(
+ {
+ "inbound_security_rules": inbound_rules,
+ "outbound_security_rules": outbound_rules,
+ }
+ )
# TODO : update network interface name
- '''
+ """
# get network interfaces
if network_security_group_dict.get('network_interfaces') is not None:
new_network_interfaces_list, virtual_machines_display_str = self.get_network_interfaces(self, network_security_group_conn, network_security_group_dict['network_interfaces'])
@@ -81,84 +103,131 @@ def collect_cloud_service(self, params):
network_security_group_dict.update({
'virtual_machines_display': virtual_machines_display_str
})
- '''
+ """
- virtual_machines_display_str = self.get_virtual_machine_name(network_interfaces, network_security_group_id)
+ virtual_machines_display_str = self.get_virtual_machine_name(
+ network_interfaces, network_security_group_id
+ )
if virtual_machines_display_str is not None:
- network_security_group_dict.update({
- 'virtual_machines_display': virtual_machines_display_str
- })
+ network_security_group_dict.update(
+ {"virtual_machines_display": virtual_machines_display_str}
+ )
# Change Subnet models to ID
- if network_security_group_dict.get('network_interfaces') is not None:
- self.replace_subnet_model_to_id(network_security_group_dict['network_interfaces'])
+ if network_security_group_dict.get("network_interfaces") is not None:
+ self.replace_subnet_model_to_id(
+ network_security_group_dict["network_interfaces"]
+ )
# Get private ip address and public ip address
- if network_security_group_dict.get('network_interfaces') is not None:
- self.get_ip_addresses(network_security_group_dict['network_interfaces'])
+ if network_security_group_dict.get("network_interfaces") is not None:
+ self.get_ip_addresses(
+ network_security_group_dict["network_interfaces"]
+ )
# Get Subnet information
- if network_security_group_dict.get('subnets') is not None:
- network_security_group_dict['subnets'] = self.get_subnet(network_security_group_conn, network_security_group_dict['subnets'])
-
- if network_security_group_dict.get('subnets'):
- for subnet in network_security_group_dict['subnets']:
- subnet.update({
- 'virtual_network': self.get_virtual_network(subnet['id'])
- })
+ if network_security_group_dict.get("subnets") is not None:
+ network_security_group_dict["subnets"] = self.get_subnet(
+ network_security_group_conn,
+ network_security_group_dict["subnets"],
+ )
+
+ if network_security_group_dict.get("subnets"):
+ for subnet in network_security_group_dict["subnets"]:
+ subnet.update(
+ {
+ "virtual_network": self.get_virtual_network(
+ subnet["id"]
+ )
+ }
+ )
# update application_gateway_dict
- network_security_group_dict.update({
- 'resource_group': self.get_resource_group_from_id(network_security_group_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': network_security_group_id}
- })
-
- network_security_group_data = NetworkSecurityGroup(network_security_group_dict, strict=False)
- network_security_group_resource = NetworkSecurityGroupResource({
- 'data': network_security_group_data,
- 'tags': network_security_group_dict.get('tags', {}),
- 'region_code': network_security_group_data.location,
- 'reference': ReferenceModel(network_security_group_data.reference()),
- 'name': network_security_group_data.name,
- 'account': network_security_group_data.subscription_id
- })
+ network_security_group_dict = self.update_tenant_id_from_secret_data(
+ network_security_group_dict, params["secret_data"]
+ )
+ network_security_group_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ network_security_group_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": network_security_group_id},
+ }
+ )
+
+ network_security_group_data = NetworkSecurityGroup(
+ network_security_group_dict, strict=False
+ )
+
+ network_security_group_resource = NetworkSecurityGroupResource(
+ {
+ "data": network_security_group_data,
+ "tags": network_security_group_dict.get("tags", {}),
+ "region_code": network_security_group_data.location,
+ "reference": ReferenceModel(
+ network_security_group_data.reference()
+ ),
+ "name": network_security_group_data.name,
+ "account": network_security_group_data.subscription_id,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(network_security_group_data['location'])
+ self.set_region_code(network_security_group_data["location"])
# _LOGGER.debug(f'[NETWORK SECURITY GROUP INFO] {network_security_group_resource.to_primitive()}')
- network_security_group_responses.append(NetworkSecurityGroupResponse({'resource': network_security_group_resource}))
+ network_security_group_responses.append(
+ NetworkSecurityGroupResponse(
+ {"resource": network_security_group_resource}
+ )
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {network_security_group_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Network', 'NetworkSecurityGroup', network_security_group_id)
+ _LOGGER.error(
+ f"[list_instances] {network_security_group_id} {e}", exc_info=True
+ )
+ error_resource_response = self.generate_resource_error_response(
+ e, "Network", "NetworkSecurityGroup", network_security_group_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** Network Security Group Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** Network Security Group Finished {time.time() - start_time} Seconds **"
+ )
return network_security_group_responses, error_responses
- def get_network_interfaces(self, network_security_group_conn, network_interfaces_list):
+ def get_network_interfaces(
+ self, network_security_group_conn, network_interfaces_list
+ ):
network_interfaces_new_list = []
virtual_machines_display_list = []
- virtual_machines_str = ''
+ virtual_machines_str = ""
for network_interface in network_interfaces_list:
- resource_group = network_interface['id'].split('/')[4]
- network_interface_name = network_interface['id'].split('/')[8] # TODO : network interface name diverse
- network_interface_obj = network_security_group_conn.get_network_interfaces(network_interface_name, resource_group)
- network_interface_dict = self.convert_nested_dictionary(network_interface_obj)
-
- if network_interface_dict['id'] == network_interface['id']:
+ resource_group = network_interface["id"].split("/")[4]
+ network_interface_name = network_interface["id"].split("/")[
+ 8
+ ] # TODO : network interface name diverse
+ network_interface_obj = network_security_group_conn.get_network_interfaces(
+ network_interface_name, resource_group
+ )
+ network_interface_dict = self.convert_nested_dictionary(
+ network_interface_obj
+ )
+
+ if network_interface_dict["id"] == network_interface["id"]:
# Get virtual machine display
- if network_interface_dict.get('virtual_machine') is not None:
- virtual_machine_display = network_interface_dict['virtual_machine']['id'].split('/')[8]
+ if network_interface_dict.get("virtual_machine") is not None:
+ virtual_machine_display = network_interface_dict["virtual_machine"][
+ "id"
+ ].split("/")[8]
virtual_machines_display_list.append(virtual_machine_display)
- network_interface_dict.update({
- 'virtual_machine_display': virtual_machine_display
- })
+ network_interface_dict.update(
+ {"virtual_machine_display": virtual_machine_display}
+ )
network_interfaces_new_list.append(network_interface_dict)
- virtual_machines_str = ', '.join(virtual_machines_display_list)
+ virtual_machines_str = ", ".join(virtual_machines_display_list)
return network_interfaces_new_list, virtual_machines_str
@@ -166,11 +235,13 @@ def get_subnet(self, network_security_group_conn, subnets_list):
subnets_full_list = []
if subnets_list:
for subnet in subnets_list:
- resource_group_name = subnet['id'].split('/')[4]
- subnet_name = subnet['id'].split('/')[10]
- virtual_network_name = subnet['id'].split('/')[8]
+ resource_group_name = subnet["id"].split("/")[4]
+ subnet_name = subnet["id"].split("/")[10]
+ virtual_network_name = subnet["id"].split("/")[8]
- subnet_obj = network_security_group_conn.get_subnet(resource_group_name, subnet_name, virtual_network_name)
+ subnet_obj = network_security_group_conn.get_subnet(
+ resource_group_name, subnet_name, virtual_network_name
+ )
subnet_dict = self.convert_nested_dictionary(subnet_obj)
subnets_full_list.append(subnet_dict)
@@ -183,15 +254,15 @@ def split_security_rules(network_security_group_dict, mode):
outbound_security_rules = []
rule_list = []
- if mode == 'security_rules':
- rule_list = network_security_group_dict['security_rules']
- elif mode == 'default_security_rules':
- rule_list = network_security_group_dict['default_security_rules']
+ if mode == "security_rules":
+ rule_list = network_security_group_dict["security_rules"]
+ elif mode == "default_security_rules":
+ rule_list = network_security_group_dict["default_security_rules"]
for security_rule in rule_list:
- if security_rule.get('direction', '') == 'Inbound':
+ if security_rule.get("direction", "") == "Inbound":
inbound_security_rules.append(security_rule)
- elif security_rule.get('direction', '') == 'Outbound':
+ elif security_rule.get("direction", "") == "Outbound":
outbound_security_rules.append(security_rule)
return inbound_security_rules, outbound_security_rules
@@ -199,40 +270,48 @@ def split_security_rules(network_security_group_dict, mode):
@staticmethod
def replace_subnet_model_to_id(network_interfaces_list):
for network_interface in network_interfaces_list:
- if network_interface.get('ip_configurations') is not None:
- for ip_configuration in network_interface['ip_configurations']:
- ip_configuration['subnet'] = ip_configuration.get('subnet', {}).get('id', '')
+ if network_interface.get("ip_configurations") is not None:
+ for ip_configuration in network_interface["ip_configurations"]:
+ ip_configuration["subnet"] = ip_configuration.get("subnet", {}).get(
+ "id", ""
+ )
return
@staticmethod
def get_ip_addresses(network_interfaces_list):
if network_interfaces_list:
for network_interface in network_interfaces_list:
- if network_interface.get('ip_configurations') is not None:
- for ip_configuration in network_interface['ip_configurations']:
- private_ip_address = ip_configuration['private_ip_address']
- network_interface.update({
- 'private_ip_address': private_ip_address
- })
-
- if ip_configuration.get('public_ip_address') is not None:
- public_ip_address = ip_configuration['public_ip_address']['id'].split('/')[8]
- network_interface.update({
- 'public_ip_address': public_ip_address,
- })
+ if network_interface.get("ip_configurations") is not None:
+ for ip_configuration in network_interface["ip_configurations"]:
+ private_ip_address = ip_configuration["private_ip_address"]
+ network_interface.update(
+ {"private_ip_address": private_ip_address}
+ )
+
+ if ip_configuration.get("public_ip_address") is not None:
+ public_ip_address = ip_configuration["public_ip_address"][
+ "id"
+ ].split("/")[8]
+ network_interface.update(
+ {
+ "public_ip_address": public_ip_address,
+ }
+ )
return
@staticmethod
def get_virtual_network(subnet_id):
- virtual_network = subnet_id.split('/')[8]
+ virtual_network = subnet_id.split("/")[8]
return virtual_network
@staticmethod
def get_virtual_machine_name(network_interfaces, network_security_group_id):
virtual_machine_name = None
for network_interface in network_interfaces:
- if _network_security_group := network_interface['network_security_group']:
- if _network_security_group['id'].split('/')[-1] == network_security_group_id.split('/')[-1]:
+ if _network_security_group := network_interface["network_security_group"]:
+ if (
+ _network_security_group["id"].split("/")[-1]
+ == network_security_group_id.split("/")[-1]
+ ):
return virtual_machine_name
return virtual_machine_name
-
diff --git a/src/spaceone/inventory/manager/postgresql_servers/server_manager.py b/src/spaceone/inventory/manager/postgresql_servers/server_manager.py
index c4ccf043..5878c0c1 100644
--- a/src/spaceone/inventory/manager/postgresql_servers/server_manager.py
+++ b/src/spaceone/inventory/manager/postgresql_servers/server_manager.py
@@ -4,100 +4,136 @@
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.postgresql_servers import PostgreSQLServersConnector
from spaceone.inventory.model.postgresql_servers.cloud_service import *
-from spaceone.inventory.model.postgresql_servers.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.postgresql_servers.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.postgresql_servers.data import *
_LOGGER = logging.getLogger(__name__)
class PostgreSQLServersManager(AzureManager):
- connector_name = 'PostgreSQLServersConnector'
+ connector_name = "PostgreSQLServersConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure postgresql servers data resource information
- ErrorResourceResponse (list) : list of error resource information
-
-
- """
- _LOGGER.debug(f'** Postgre SQL Servers START **')
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure postgresql servers data resource information
+ ErrorResourceResponse (list) : list of error resource information
+
+
+ """
+ _LOGGER.debug(f"** Postgre SQL Servers START **")
start_time = time.time()
- subscription_info = params['subscription_info']
- postgre_sql_conn: PostgreSQLServersConnector = self.locator.get_connector(self.connector_name, **params)
+ subscription_info = params["subscription_info"]
+ postgre_sql_conn: PostgreSQLServersConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
postgre_sql_server_responses = []
error_responses = []
postgre_sql_servers = postgre_sql_conn.list_servers()
for postgre_sql_server in postgre_sql_servers:
- postgre_sql_server_id = ''
+ postgre_sql_server_id = ""
try:
- postgre_sql_server_dict = self.convert_nested_dictionary(postgre_sql_server)
- postgre_sql_server_id = postgre_sql_server_dict['id']
+ postgre_sql_server_dict = self.convert_nested_dictionary(
+ postgre_sql_server
+ )
+ postgre_sql_server_id = postgre_sql_server_dict["id"]
# update application_gateway_dict
- postgre_sql_server_dict.update({
- 'resource_group': self.get_resource_group_from_id(postgre_sql_server_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': postgre_sql_server_id}
- })
-
- if postgre_sql_server_dict.get('name') is not None:
- resource_group = postgre_sql_server_dict['resource_group']
- server_name = postgre_sql_server_dict['name']
- postgre_sql_server_dict.update({
- 'firewall_rules': self.list_firewall_rules_by_server(postgre_sql_conn, resource_group,
- server_name),
- 'virtual_network_rules': self.list_virtual_network_rules_by_server(postgre_sql_conn,
- resource_group, server_name),
- 'replicas': self.list_replicas_by_server(postgre_sql_conn, resource_group, server_name),
- 'server_administrators': self.list_server_administrators(postgre_sql_conn, resource_group,
- server_name)
- })
-
- postgre_sql_server_data = PostgreSQLServer(postgre_sql_server_dict, strict=False)
- postgre_sql_server_resource = PostgreSQLServerResource({
- 'data': postgre_sql_server_data,
- 'region_code': postgre_sql_server_data.location,
- 'reference': ReferenceModel(postgre_sql_server_data.reference()),
- 'tags': postgre_sql_server_dict.get('tags', {}),
- 'name': postgre_sql_server_data.name,
- 'account': postgre_sql_server_data.subscription_id,
- 'instance_type': postgre_sql_server_data.sku.tier,
- 'instance_size': float(postgre_sql_server_data.storage_profile.storage_mb)
- })
+ postgre_sql_server_dict = self.update_tenant_id_from_secret_data(
+ postgre_sql_server_dict, params["secret_data"]
+ )
+ postgre_sql_server_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ postgre_sql_server_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": postgre_sql_server_id},
+ }
+ )
+
+ if postgre_sql_server_dict.get("name") is not None:
+ resource_group = postgre_sql_server_dict["resource_group"]
+ server_name = postgre_sql_server_dict["name"]
+ postgre_sql_server_dict.update(
+ {
+ "firewall_rules": self.list_firewall_rules_by_server(
+ postgre_sql_conn, resource_group, server_name
+ ),
+ "virtual_network_rules": self.list_virtual_network_rules_by_server(
+ postgre_sql_conn, resource_group, server_name
+ ),
+ "replicas": self.list_replicas_by_server(
+ postgre_sql_conn, resource_group, server_name
+ ),
+ "server_administrators": self.list_server_administrators(
+ postgre_sql_conn, resource_group, server_name
+ ),
+ }
+ )
+
+ postgre_sql_server_data = PostgreSQLServer(
+ postgre_sql_server_dict, strict=False
+ )
+ postgre_sql_server_resource = PostgreSQLServerResource(
+ {
+ "data": postgre_sql_server_data,
+ "region_code": postgre_sql_server_data.location,
+ "reference": ReferenceModel(
+ postgre_sql_server_data.reference()
+ ),
+ "tags": postgre_sql_server_dict.get("tags", {}),
+ "name": postgre_sql_server_data.name,
+ "account": postgre_sql_server_data.subscription_id,
+ "instance_type": postgre_sql_server_data.sku.tier,
+ "instance_size": float(
+ postgre_sql_server_data.storage_profile.storage_mb
+ ),
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(postgre_sql_server_data['location'])
+ self.set_region_code(postgre_sql_server_data["location"])
# _LOGGER.debug(f'[POSTGRESQL SERVERS INFO] {postgre_sql_server_resource.to_primitive()}')
- postgre_sql_server_responses.append(PostgreSQLServerResponse({'resource': postgre_sql_server_resource}))
+ postgre_sql_server_responses.append(
+ PostgreSQLServerResponse({"resource": postgre_sql_server_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {postgre_sql_server_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Database', 'PostgreSQLServer',
- postgre_sql_server_id)
+ _LOGGER.error(
+ f"[list_instances] {postgre_sql_server_id} {e}", exc_info=True
+ )
+ error_resource_response = self.generate_resource_error_response(
+ e, "Database", "PostgreSQLServer", postgre_sql_server_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** PostgreSQLServer Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** PostgreSQLServer Finished {time.time() - start_time} Seconds **"
+ )
return postgre_sql_server_responses, error_responses
def get_sql_resources(self, cosmos_db_conn, account_name, resource_group):
sql_resources = []
- sql_resources_obj = cosmos_db_conn.list_sql_resources(account_name=account_name,
- resource_group_name=resource_group)
+ sql_resources_obj = cosmos_db_conn.list_sql_resources(
+ account_name=account_name, resource_group_name=resource_group
+ )
for sql in sql_resources_obj:
sql_dict = self.convert_nested_dictionary(sql)
@@ -106,8 +142,9 @@ def get_sql_resources(self, cosmos_db_conn, account_name, resource_group):
def list_firewall_rules_by_server(self, postgresql_conn, resource_group, name):
firewall_rules = []
- firewall_rules_obj = postgresql_conn.list_firewall_rules_by_server(resource_group_name=resource_group,
- server_name=name)
+ firewall_rules_obj = postgresql_conn.list_firewall_rules_by_server(
+ resource_group_name=resource_group, server_name=name
+ )
for firewall_rule in firewall_rules_obj:
firewall_rule_dict = self.convert_nested_dictionary(firewall_rule)
@@ -115,40 +152,57 @@ def list_firewall_rules_by_server(self, postgresql_conn, resource_group, name):
return firewall_rules
- def list_virtual_network_rules_by_server(self, postgresql_conn, resource_group, name):
+ def list_virtual_network_rules_by_server(
+ self, postgresql_conn, resource_group, name
+ ):
virtual_network_rules = []
- virtual_network_rules_obj = postgresql_conn.list_virtual_network_rules_by_server(
- resource_group_name=resource_group, server_name=name)
+ virtual_network_rules_obj = (
+ postgresql_conn.list_virtual_network_rules_by_server(
+ resource_group_name=resource_group, server_name=name
+ )
+ )
for virtual_network in virtual_network_rules_obj:
virtual_network_dict = self.convert_nested_dictionary(virtual_network)
- if virtual_network_dict.get('virtual_network_subnet_id') is not None:
- virtual_network_dict.update({
- 'subnet_name': self.get_subnet_name(virtual_network_dict['virtual_network_subnet_id']),
- 'virtual_network_name_display': self.get_virtual_network_name(
- virtual_network_dict['virtual_network_subnet_id'])
- })
+ if virtual_network_dict.get("virtual_network_subnet_id") is not None:
+ virtual_network_dict.update(
+ {
+ "subnet_name": self.get_subnet_name(
+ virtual_network_dict["virtual_network_subnet_id"]
+ ),
+ "virtual_network_name_display": self.get_virtual_network_name(
+ virtual_network_dict["virtual_network_subnet_id"]
+ ),
+ }
+ )
virtual_network_rules.append(virtual_network_dict)
return virtual_network_rules
def list_replicas_by_server(self, postgresql_conn, resource_group, name):
replicas_list = []
- replicas_obj = postgresql_conn.list_replicas_by_server(resource_group_name=resource_group, server_name=name)
+ replicas_obj = postgresql_conn.list_replicas_by_server(
+ resource_group_name=resource_group, server_name=name
+ )
for replica in replicas_obj:
replica_dict = self.convert_nested_dictionary(replica)
- if replica_dict.get('master_server_id') is not None:
- replica_dict.update({
- 'master_server_name': self.get_replica_master_server_name(replica_dict['master_server_id'])
- })
+ if replica_dict.get("master_server_id") is not None:
+ replica_dict.update(
+ {
+ "master_server_name": self.get_replica_master_server_name(
+ replica_dict["master_server_id"]
+ )
+ }
+ )
replicas_list.append(replica_dict)
return replicas_list
def list_server_administrators(self, postgresql_conn, resource_group, name):
server_administrators = []
- server_admin_obj = postgresql_conn.list_server_administrators(resource_group_name=resource_group,
- server_name=name)
+ server_admin_obj = postgresql_conn.list_server_administrators(
+ resource_group_name=resource_group, server_name=name
+ )
for server_admin in server_admin_obj:
server_admin_dict = self.convert_nested_dictionary(server_admin)
server_administrators.append(server_admin_dict)
@@ -157,19 +211,19 @@ def list_server_administrators(self, postgresql_conn, resource_group, name):
@staticmethod
def get_subnet_name(subnet_id):
- subnet_name = ''
+ subnet_name = ""
if subnet_id:
- subnet_name = subnet_id.split('/')[10]
+ subnet_name = subnet_id.split("/")[10]
return subnet_name
@staticmethod
def get_virtual_network_name(subnet_id):
- virtual_network_name = ''
+ virtual_network_name = ""
if subnet_id:
- virtual_network_name = subnet_id.split('/')[8]
+ virtual_network_name = subnet_id.split("/")[8]
return virtual_network_name
@staticmethod
def get_replica_master_server_name(master_server_id):
- master_server_name = master_server_id.split('/')[8]
+ master_server_name = master_server_id.split("/")[8]
return master_server_name
diff --git a/src/spaceone/inventory/manager/public_ip_addresses/ip_address_manager.py b/src/spaceone/inventory/manager/public_ip_addresses/ip_address_manager.py
index 00db5c03..17873a44 100644
--- a/src/spaceone/inventory/manager/public_ip_addresses/ip_address_manager.py
+++ b/src/spaceone/inventory/manager/public_ip_addresses/ip_address_manager.py
@@ -4,85 +4,111 @@
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.public_ip_addresses import PublicIPAddressesConnector
from spaceone.inventory.model.public_ip_addresses.cloud_service import *
-from spaceone.inventory.model.public_ip_addresses.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.public_ip_addresses.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.public_ip_addresses.data import *
_LOGGER = logging.getLogger(__name__)
class PublicIPAddressesManager(AzureManager):
- connector_name = 'PublicIPAddressesConnector'
+ connector_name = "PublicIPAddressesConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure public ip address data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure public ip address data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
_LOGGER.debug("** Public IP Address START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- public_ip_address_conn: PublicIPAddressesConnector = self.locator.get_connector(self.connector_name,**params)
+ public_ip_address_conn: PublicIPAddressesConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
public_ip_address_responses = []
error_responses = []
public_ip_addresses_list = public_ip_address_conn.list_all_public_ip_addresses()
for public_ip_address in public_ip_addresses_list:
- public_ip_address_id = ''
+ public_ip_address_id = ""
try:
- public_ip_address_dict = self.convert_nested_dictionary(public_ip_address)
- public_ip_address_id = public_ip_address_dict['id']
+ public_ip_address_dict = self.convert_nested_dictionary(
+ public_ip_address
+ )
+ public_ip_address_id = public_ip_address_dict["id"]
# update application_gateway_dict
- public_ip_address_dict.update({
- 'resource_group': self.get_resource_group_from_id(public_ip_address_id),
- # parse resource_group from ID
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': public_ip_address_id}
- })
-
- if public_ip_address_dict.get('ip_configuration') is not None:
- associated_to = public_ip_address_dict['ip_configuration']['id'].split('/')[8]
+ public_ip_address_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ public_ip_address_id
+ ),
+ # parse resource_group from ID
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": public_ip_address_id},
+ }
+ )
+
+ if public_ip_address_dict.get("ip_configuration") is not None:
+ associated_to = public_ip_address_dict["ip_configuration"][
+ "id"
+ ].split("/")[8]
if associated_to:
- public_ip_address_dict.update({
- 'associated_to': associated_to
- })
-
- public_ip_address_data = PublicIPAddress(public_ip_address_dict, strict=False)
- public_ip_address_resource = PublicIPAddressResource({
- 'data': public_ip_address_data,
- 'tags': public_ip_address_dict.get('tags', {}),
- 'region_code': public_ip_address_data.location,
- 'reference': ReferenceModel(public_ip_address_data.reference()),
- 'name': public_ip_address_data.name,
- 'account': public_ip_address_data.subscription_id,
- 'instance_type': public_ip_address_data.sku.name
- })
+ public_ip_address_dict.update({"associated_to": associated_to})
+
+ public_ip_address_dict = self.update_tenant_id_from_secret_data(
+ public_ip_address_dict, params.get("secret_data", {})
+ )
+
+ public_ip_address_data = PublicIPAddress(
+ public_ip_address_dict, strict=False
+ )
+ public_ip_address_resource = PublicIPAddressResource(
+ {
+ "data": public_ip_address_data,
+ "tags": public_ip_address_dict.get("tags", {}),
+ "region_code": public_ip_address_data.location,
+ "reference": ReferenceModel(public_ip_address_data.reference()),
+ "name": public_ip_address_data.name,
+ "account": public_ip_address_data.subscription_id,
+ "instance_type": public_ip_address_data.sku.name,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(public_ip_address_data['location'])
+ self.set_region_code(public_ip_address_data["location"])
# _LOGGER.debug(f'[PUBLIC IP ADDRESS INFO IN PIP MANAGER] {public_ip_address_resource.to_primitive()}')
- public_ip_address_responses.append(PublicIPAddressResponse({'resource': public_ip_address_resource}))
+ public_ip_address_responses.append(
+ PublicIPAddressResponse({"resource": public_ip_address_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {public_ip_address_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Network', 'PublicIPAddress', public_ip_address_id)
+ _LOGGER.error(
+ f"[list_instances] {public_ip_address_id} {e}", exc_info=True
+ )
+ error_resource_response = self.generate_resource_error_response(
+ e, "Network", "PublicIPAddress", public_ip_address_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** Public IP Address Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** Public IP Address Finished {time.time() - start_time} Seconds **"
+ )
return public_ip_address_responses, error_responses
diff --git a/src/spaceone/inventory/manager/resources_manager/__init__.py b/src/spaceone/inventory/manager/resources_manager/__init__.py
new file mode 100644
index 00000000..8b137891
--- /dev/null
+++ b/src/spaceone/inventory/manager/resources_manager/__init__.py
@@ -0,0 +1 @@
+
diff --git a/src/spaceone/inventory/manager/resources_manager/resource_manager.py b/src/spaceone/inventory/manager/resources_manager/resource_manager.py
new file mode 100644
index 00000000..86372dd0
--- /dev/null
+++ b/src/spaceone/inventory/manager/resources_manager/resource_manager.py
@@ -0,0 +1,27 @@
+import logging
+from spaceone.inventory.libs.manager import AzureManager
+from spaceone.inventory.connector.resources import ResourcesConnector
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class ResourcesManager(AzureManager):
+    connector_name = "ResourcesConnector"
+
+    def collect_exist_resources(self, params: dict) -> list:
+        """Return the distinct resource types reported by the Resources connector."""
+        resource_types = []
+        resources_conn: ResourcesConnector = self.locator.get_connector(
+            self.connector_name, **params
+        )
+
+        resources_obj = resources_conn.list_resources()
+
+        for resource_obj in resources_obj:
+            resource_info = self.convert_nested_dictionary(resource_obj)
+            # "Microsoft.Compute/disks" -> "disks"; skip malformed or missing types
+            parts = (resource_info.get("type") or "").split("/")
+            if len(parts) > 1 and parts[1] not in resource_types:
+                resource_types.append(parts[1])
+
+        return resource_types
diff --git a/src/spaceone/inventory/manager/snapshots/instance_manager.py b/src/spaceone/inventory/manager/snapshots/instance_manager.py
index eeb00ca8..f5157356 100644
--- a/src/spaceone/inventory/manager/snapshots/instance_manager.py
+++ b/src/spaceone/inventory/manager/snapshots/instance_manager.py
@@ -11,163 +11,198 @@
class SnapshotsManager(AzureManager):
- connector_name = 'SnapshotsConnector'
+ connector_name = "SnapshotsConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure snapshot data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure snapshot data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
_LOGGER.debug("** Snapshot START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- snapshot_conn: SnapshotsConnector = self.locator.get_connector(self.connector_name, **params)
+ snapshot_conn: SnapshotsConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
snapshot_responses = []
error_responses = []
snapshots = snapshot_conn.list_snapshots()
for snapshot in snapshots:
- snapshot_id = ''
+ snapshot_id = ""
try:
snapshot_dict = self.convert_nested_dictionary(snapshot)
- snapshot_id = snapshot_dict['id']
+ snapshot_id = snapshot_dict["id"]
# update sku_dict
# switch SnapshotStorageAccountType to snapshot_sku_name for user-friendly words.
# (ex.Premium_LRS -> Premium SSD, Standard HDD..)
- sku_dict = snapshot_dict.get('sku', {})
- sku_dict.update({
- 'name': self.get_disk_sku_name(sku_dict.get('name', ''))
- })
+ sku_dict = snapshot_dict.get("sku", {})
+ sku_dict.update(
+ {"name": self.get_disk_sku_name(sku_dict.get("name", ""))}
+ )
# update encryption_dict type to user-friendly words
# (ex.EncryptionAtRestWithPlatformKey -> Platform-managed key...)
- if snapshot_dict.get('encryption', {}).get('type') is not None:
- type = snapshot_dict['encryption']['type']
- encryption_type = ''
- if type == 'EncryptionAtRestWithPlatformKey':
- encryption_type = 'Platform-managed key'
- elif type == 'EncryptionAtRestWithPlatformAndCustomerKeys':
- encryption_type = 'Platform and customer managed key'
- elif type == 'EncryptionAtRestWithCustomerKey':
- encryption_type = 'Customer-managed key'
-
- snapshot_dict['encryption'].update({
- 'type_display': encryption_type
- })
+ if snapshot_dict.get("encryption", {}).get("type") is not None:
+ type = snapshot_dict["encryption"]["type"]
+ encryption_type = ""
+ if type == "EncryptionAtRestWithPlatformKey":
+ encryption_type = "Platform-managed key"
+ elif type == "EncryptionAtRestWithPlatformAndCustomerKeys":
+ encryption_type = "Platform and customer managed key"
+ elif type == "EncryptionAtRestWithCustomerKey":
+ encryption_type = "Customer-managed key"
+
+ snapshot_dict["encryption"].update(
+ {"type_display": encryption_type}
+ )
# update snapshot_dict
- snapshot_dict.update({
- 'resource_group': self.get_resource_group_from_id(snapshot_id), # parse resource_group from ID
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'size': snapshot_dict['disk_size_bytes'],
- 'sku': sku_dict,
- 'incremental_display': self.get_incremental_display(snapshot_dict['incremental']),
- 'azure_monitor': {'resource_id': snapshot_id},
- 'time_created': datetime_to_iso8601(snapshot_dict['time_created'])
- })
-
- if snapshot_dict.get('network_access_policy') is not None:
- snapshot_dict.update({
- 'network_access_policy_display': self.get_network_access_policy(snapshot_dict['network_access_policy'])
- })
+ snapshot_dict = self.update_tenant_id_from_secret_data(
+                    snapshot_dict, params.get("secret_data", {})
+ )
+ snapshot_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ snapshot_id
+ ), # parse resource_group from ID
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "size": snapshot_dict["disk_size_bytes"],
+ "sku": sku_dict,
+ "incremental_display": self.get_incremental_display(
+ snapshot_dict["incremental"]
+ ),
+ "azure_monitor": {"resource_id": snapshot_id},
+ "time_created": datetime_to_iso8601(
+ snapshot_dict["time_created"]
+ ),
+ }
+ )
+
+ if snapshot_dict.get("network_access_policy") is not None:
+ snapshot_dict.update(
+ {
+ "network_access_policy_display": self.get_network_access_policy(
+ snapshot_dict["network_access_policy"]
+ )
+ }
+ )
# get attached vm's name
- if snapshot_dict.get('managed_by') is not None:
- snapshot_dict.update({
- 'managed_by': self.get_attached_vm_name_from_managed_by(snapshot_dict['managed_by'])
- })
+ if snapshot_dict.get("managed_by") is not None:
+ snapshot_dict.update(
+ {
+ "managed_by": self.get_attached_vm_name_from_managed_by(
+ snapshot_dict["managed_by"]
+ )
+ }
+ )
# get source_disk_name from source_resource_id
- if snapshot_dict.get('creation_data') is not None:
- source_resource_id = snapshot_dict['creation_data'].get('source_resource_id', '')
- snapshot_dict.update({
- 'source_disk_name': self.get_source_disk_name(source_resource_id)
- })
+ if snapshot_dict.get("creation_data") is not None:
+ source_resource_id = snapshot_dict["creation_data"].get(
+ "source_resource_id", ""
+ )
+ snapshot_dict.update(
+ {
+ "source_disk_name": self.get_source_disk_name(
+ source_resource_id
+ )
+ }
+ )
snapshot_data = Snapshot(snapshot_dict, strict=False)
- snapshot_resource = SnapshotResource({
- 'data': snapshot_data,
- 'region_code': snapshot_data.location,
- 'reference': ReferenceModel(snapshot_data.reference()),
- 'tags': snapshot_dict.get('tags', {}),
- 'name': snapshot_data.name,
- 'account': snapshot_data.subscription_id,
- 'instance_size': float(snapshot_data.disk_size_bytes),
- 'instance_type': snapshot_data.sku.name
- })
+ snapshot_resource = SnapshotResource(
+ {
+ "data": snapshot_data,
+ "region_code": snapshot_data.location,
+ "reference": ReferenceModel(snapshot_data.reference()),
+ "tags": snapshot_dict.get("tags", {}),
+ "name": snapshot_data.name,
+ "account": snapshot_data.subscription_id,
+ "instance_size": float(snapshot_data.disk_size_bytes),
+ "instance_type": snapshot_data.sku.name,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(snapshot_data['location'])
+ self.set_region_code(snapshot_data["location"])
# _LOGGER.debug(f'[SNAPSHOT INFO] {snapshot_resource.to_primitive()}')
- snapshot_responses.append(SnapshotResponse({'resource': snapshot_resource}))
+ snapshot_responses.append(
+ SnapshotResponse({"resource": snapshot_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {snapshot_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Compute', 'Snapshot', snapshot_id)
+ _LOGGER.error(f"[list_instances] {snapshot_id} {e}", exc_info=True)
+ error_resource_response = self.generate_resource_error_response(
+ e, "Compute", "Snapshot", snapshot_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** Snapshot Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** Snapshot Finished {time.time() - start_time} Seconds **")
return snapshot_responses, error_responses
@staticmethod
def get_attached_vm_name_from_managed_by(managed_by):
- attached_vm_name = ''
+ attached_vm_name = ""
if managed_by:
- attached_vm_name = managed_by.split('/')[8] # parse attached_ from ID
+ attached_vm_name = managed_by.split("/")[8] # parse attached_ from ID
return attached_vm_name
@staticmethod
def get_disk_sku_name(sku_tier):
- sku_name = ''
- if sku_tier == 'Premium_LRS':
- sku_name = 'Premium SSD'
- elif sku_tier == 'Standard_ZRS':
- sku_name = 'Standard zone'
- elif sku_tier == 'Standard_LRS':
- sku_name = 'Standard HDD'
+ sku_name = ""
+ if sku_tier == "Premium_LRS":
+ sku_name = "Premium SSD"
+ elif sku_tier == "Standard_ZRS":
+ sku_name = "Standard zone"
+ elif sku_tier == "Standard_LRS":
+ sku_name = "Standard HDD"
return sku_name
@staticmethod
def get_network_access_policy(network_access_policy):
- network_access_policy_display = ''
- if network_access_policy == 'AllowAll':
- network_access_policy_display = 'Public endpoint (all network)'
- elif network_access_policy == 'AllowPrivate':
- network_access_policy_display = 'Private endpoint (through disk access)'
- elif network_access_policy == 'DenyAll':
- network_access_policy_display = 'Deny all'
+ network_access_policy_display = ""
+ if network_access_policy == "AllowAll":
+ network_access_policy_display = "Public endpoint (all network)"
+ elif network_access_policy == "AllowPrivate":
+ network_access_policy_display = "Private endpoint (through disk access)"
+ elif network_access_policy == "DenyAll":
+ network_access_policy_display = "Deny all"
return network_access_policy_display
@staticmethod
def get_incremental_display(incremental):
if incremental is False:
- incremental_display = 'Full'
+ incremental_display = "Full"
else:
- incremental_display = 'Incremental'
+ incremental_display = "Incremental"
return incremental_display
@staticmethod
def get_source_disk_name(source_resource_id):
- source_disk_name = ''
+ source_disk_name = ""
if source_resource_id:
- source_disk_name = source_resource_id.split('/')[8] # parse source_disk_name from source_resource_id
+ source_disk_name = source_resource_id.split("/")[
+ 8
+ ] # parse source_disk_name from source_resource_id
return source_disk_name
diff --git a/src/spaceone/inventory/manager/sql_servers/server_manager.py b/src/spaceone/inventory/manager/sql_servers/server_manager.py
index a4cd692d..f8285a37 100644
--- a/src/spaceone/inventory/manager/sql_servers/server_manager.py
+++ b/src/spaceone/inventory/manager/sql_servers/server_manager.py
@@ -8,40 +8,46 @@
from spaceone.inventory.model.sql_servers.cloud_service_type import CLOUD_SERVICE_TYPES
from spaceone.inventory.model.sql_databases.data import *
from spaceone.inventory.model.sql_servers.data import *
-from spaceone.inventory.manager.sql_databases.database_manager import SQLDatabasesManager
+from spaceone.inventory.manager.sql_databases.database_manager import (
+ SQLDatabasesManager,
+)
from spaceone.core.utils import *
_LOGGER = logging.getLogger(__name__)
class SQLServersManager(AzureManager):
- connector_name = 'SQLServersConnector'
- monitor_connector_name = 'MonitorConnector'
+ connector_name = "SQLServersConnector"
+ monitor_connector_name = "MonitorConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure sql servers data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure sql servers data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
- _LOGGER.debug(f'** SQL Servers START **')
+        _LOGGER.debug("** SQL Servers START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- sql_servers_conn: SQLServersConnector = self.locator.get_connector(self.connector_name, **params)
- sql_servers_monitor_conn: MonitorConnector = self.locator.get_connector(self.monitor_connector_name, **params)
+ sql_servers_conn: SQLServersConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
+ sql_servers_monitor_conn: MonitorConnector = self.locator.get_connector(
+ self.monitor_connector_name, **params
+ )
sql_server_responses = []
error_responses = []
@@ -49,121 +55,171 @@ def collect_cloud_service(self, params):
sql_servers = sql_servers_conn.list_servers()
for sql_server in sql_servers:
- sql_server_id = ''
+ sql_server_id = ""
try:
sql_server_dict = self.convert_nested_dictionary(sql_server)
- sql_server_id = sql_server_dict['id']
+ sql_server_id = sql_server_dict["id"]
# update sql_servers_data dict
- sql_server_dict.update({
- 'resource_group': self.get_resource_group_from_id(sql_server_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': sql_server_id}
- })
-
- resource_group_name = sql_server_dict['resource_group']
- name = sql_server_dict['name']
+ sql_server_dict = self.update_tenant_id_from_secret_data(
+                    sql_server_dict, params.get("secret_data", {})
+ )
+ sql_server_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ sql_server_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": sql_server_id},
+ }
+ )
+
+ resource_group_name = sql_server_dict["resource_group"]
+ name = sql_server_dict["name"]
# Get Server Auditing Settings, Failover groups. azure ad administrators
- server_auditing_settings_dict = self.get_server_auditing_settings(sql_servers_conn, resource_group_name,
- name)
- failover_group_list = self.list_failover_groups(sql_servers_conn, resource_group_name, name)
- transparent_data_encryption_dict = self.list_encryption_protectors(sql_servers_conn,
- resource_group_name, name)
- azure_ad_admin_list = self.list_azure_ad_administrators(sql_servers_conn, resource_group_name, name)
- server_automatic_tuning_dict = self.get_server_automatic_tuning(sql_servers_conn, resource_group_name,
- name)
- databases_list = self.list_databases(sql_servers_conn=sql_servers_conn,
- sql_monitor_conn=sql_servers_monitor_conn,
- resource_group_name=resource_group_name, server_name=name,
- server_admin_name=sql_server_dict.get('administrator_login'))
- elastic_pools_list = self.list_elastic_pools(sql_servers_conn, resource_group_name, name)
- deleted_databases_list = self.list_deleted_databases(sql_servers_conn, resource_group_name, name)
- virtual_network_rules_list = self.list_virtual_network_rules(sql_servers_conn, resource_group_name,
- name)
- firewall_rules_list = self.list_firewall_rules(sql_servers_conn, resource_group_name, name)
-
- sql_server_dict.update({
- 'azure_ad_administrators': azure_ad_admin_list,
- 'server_auditing_settings': server_auditing_settings_dict,
- 'failover_groups': failover_group_list,
- 'server_automatic_tuning': server_automatic_tuning_dict,
- 'databases': databases_list,
- 'elastic_pools': elastic_pools_list,
- 'deleted_databases': deleted_databases_list,
- 'virtual_network_rules': virtual_network_rules_list,
- 'firewall_rules': firewall_rules_list,
- 'encryption_protectors': transparent_data_encryption_dict
- })
-
- if sql_server_dict.get('azure_ad_administrators') is not None:
- sql_server_dict.update({
- 'azure_ad_admin_name': self.get_azure_ad_admin_name(sql_server_dict['azure_ad_administrators'])
- })
-
- if sql_server_dict.get('private_endpoint_connections') is not None:
- sql_server_dict.update({
- 'private_endpoint_connections': self.get_private_endpoint_connections(sql_server_dict[
- 'private_endpoint_connections'])
- })
+ server_auditing_settings_dict = self.get_server_auditing_settings(
+ sql_servers_conn, resource_group_name, name
+ )
+ failover_group_list = self.list_failover_groups(
+ sql_servers_conn, resource_group_name, name
+ )
+ transparent_data_encryption_dict = self.list_encryption_protectors(
+ sql_servers_conn, resource_group_name, name
+ )
+ azure_ad_admin_list = self.list_azure_ad_administrators(
+ sql_servers_conn, resource_group_name, name
+ )
+ server_automatic_tuning_dict = self.get_server_automatic_tuning(
+ sql_servers_conn, resource_group_name, name
+ )
+ databases_list = self.list_databases(
+ sql_servers_conn=sql_servers_conn,
+ sql_monitor_conn=sql_servers_monitor_conn,
+ resource_group_name=resource_group_name,
+ server_name=name,
+ server_admin_name=sql_server_dict.get("administrator_login"),
+ )
+ elastic_pools_list = self.list_elastic_pools(
+ sql_servers_conn, resource_group_name, name
+ )
+ deleted_databases_list = self.list_deleted_databases(
+ sql_servers_conn, resource_group_name, name
+ )
+ virtual_network_rules_list = self.list_virtual_network_rules(
+ sql_servers_conn, resource_group_name, name
+ )
+ firewall_rules_list = self.list_firewall_rules(
+ sql_servers_conn, resource_group_name, name
+ )
+
+ sql_server_dict.update(
+ {
+ "azure_ad_administrators": azure_ad_admin_list,
+ "server_auditing_settings": server_auditing_settings_dict,
+ "failover_groups": failover_group_list,
+ "server_automatic_tuning": server_automatic_tuning_dict,
+ "databases": databases_list,
+ "elastic_pools": elastic_pools_list,
+ "deleted_databases": deleted_databases_list,
+ "virtual_network_rules": virtual_network_rules_list,
+ "firewall_rules": firewall_rules_list,
+ "encryption_protectors": transparent_data_encryption_dict,
+ }
+ )
+
+ if sql_server_dict.get("azure_ad_administrators") is not None:
+ sql_server_dict.update(
+ {
+ "azure_ad_admin_name": self.get_azure_ad_admin_name(
+ sql_server_dict["azure_ad_administrators"]
+ )
+ }
+ )
+
+ if sql_server_dict.get("private_endpoint_connections") is not None:
+ sql_server_dict.update(
+ {
+ "private_endpoint_connections": self.get_private_endpoint_connections(
+ sql_server_dict["private_endpoint_connections"]
+ )
+ }
+ )
sql_server_data = SQLServer(sql_server_dict, strict=False)
- sql_server_resource = SQLServerResource({
- 'data': sql_server_data,
- 'region_code': sql_server_data.location,
- 'reference': ReferenceModel(sql_server_data.reference()),
- 'tags': sql_server_dict.get('tags', {}),
- 'name': sql_server_data.name,
- 'account': sql_server_data.subscription_id
- })
- sql_server_responses.append(SQLServerResponse({'resource': sql_server_resource}))
+ sql_server_resource = SQLServerResource(
+ {
+ "data": sql_server_data,
+ "region_code": sql_server_data.location,
+ "reference": ReferenceModel(sql_server_data.reference()),
+ "tags": sql_server_dict.get("tags", {}),
+ "name": sql_server_data.name,
+ "account": sql_server_data.subscription_id,
+ }
+ )
+ sql_server_responses.append(
+ SQLServerResponse({"resource": sql_server_resource})
+ )
# _LOGGER.debug(f'[SQL SERVER INFO] {sql_server_resource.to_primitive()}')
# Must set_region_code method for region collection
- self.set_region_code(sql_server_data['location'])
+ self.set_region_code(sql_server_data["location"])
except Exception as e:
- _LOGGER.error(f'[list_instances] {sql_server_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Database', 'SQLServer',
- sql_server_id)
+ _LOGGER.error(f"[list_instances] {sql_server_id} {e}", exc_info=True)
+ error_resource_response = self.generate_resource_error_response(
+ e, "Database", "SQLServer", sql_server_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** SQL Servers Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** SQL Servers Finished {time.time() - start_time} Seconds **")
return sql_server_responses, error_responses
def list_elastic_pools(self, sql_servers_conn, rg_name, server_name):
elastic_pools_list = []
- elastic_pools = sql_servers_conn.list_elastic_pools_by_server(resource_group=rg_name, server_name=server_name)
+ elastic_pools = sql_servers_conn.list_elastic_pools_by_server(
+ resource_group=rg_name, server_name=server_name
+ )
for elastic_pool in elastic_pools:
elastic_pool_dict = self.convert_nested_dictionary(elastic_pool)
# Get Databases list by elastic pool
- elastic_pool_dict['databases'] = self.get_databases_by_elastic_pools(sql_servers_conn,
- elastic_pool_dict['name'], rg_name,
- server_name)
+ elastic_pool_dict["databases"] = self.get_databases_by_elastic_pools(
+ sql_servers_conn, elastic_pool_dict["name"], rg_name, server_name
+ )
# Get pricing tier for display
- if elastic_pool_dict.get('per_database_settings') is not None:
- elastic_pool_dict.update({
- 'pricing_tier_display': self.get_pricing_tier_display(elastic_pool_dict['sku']),
- 'per_db_settings_display': self.get_per_db_settings(elastic_pool_dict['per_database_settings']),
- 'number_of_databases': len(elastic_pool_dict['databases']),
- 'unit_display': elastic_pool_dict['sku']['tier'],
- 'server_name_display': elastic_pool_dict['id'].split('/')[8],
- 'resource_group_display': elastic_pool_dict['id'].split('/')[4],
- 'max_size_gb': elastic_pool_dict['max_size_bytes'] / 1073741824
- })
+ if elastic_pool_dict.get("per_database_settings") is not None:
+ elastic_pool_dict.update(
+ {
+ "pricing_tier_display": self.get_pricing_tier_display(
+ elastic_pool_dict["sku"]
+ ),
+ "per_db_settings_display": self.get_per_db_settings(
+ elastic_pool_dict["per_database_settings"]
+ ),
+ "number_of_databases": len(elastic_pool_dict["databases"]),
+ "unit_display": elastic_pool_dict["sku"]["tier"],
+ "server_name_display": elastic_pool_dict["id"].split("/")[8],
+ "resource_group_display": elastic_pool_dict["id"].split("/")[4],
+ "max_size_gb": elastic_pool_dict["max_size_bytes"] / 1073741824,
+ }
+ )
elastic_pools_list.append(elastic_pool_dict)
return elastic_pools_list
- def get_databases_by_elastic_pools(self, sql_servers_conn, elastic_pool_name, rg_name, server_name):
- databases_obj = sql_servers_conn.list_databases_by_elastic_pool(elastic_pool_name, rg_name, server_name)
+ def get_databases_by_elastic_pools(
+ self, sql_servers_conn, elastic_pool_name, rg_name, server_name
+ ):
+ databases_obj = sql_servers_conn.list_databases_by_elastic_pool(
+ elastic_pool_name, rg_name, server_name
+ )
databases_list = []
for database in databases_obj:
database_dict = self.convert_nested_dictionary(database)
@@ -172,8 +228,11 @@ def get_databases_by_elastic_pools(self, sql_servers_conn, elastic_pool_name, rg
return databases_list
def list_deleted_databases(self, sql_servers_conn, rg_name, server_name):
- deleted_databases_obj = sql_servers_conn.list_restorable_dropped_databases_by_server(resource_group=rg_name,
- server_name=server_name)
+ deleted_databases_obj = (
+ sql_servers_conn.list_restorable_dropped_databases_by_server(
+ resource_group=rg_name, server_name=server_name
+ )
+ )
deleted_databases_list = []
for deleted_database in deleted_databases_obj:
deleted_database_dict = self.convert_nested_dictionary(deleted_database)
@@ -182,7 +241,9 @@ def list_deleted_databases(self, sql_servers_conn, rg_name, server_name):
return deleted_databases_list
def list_firewall_rules(self, sql_servers_conn, rg_name, server_name):
- firewall_obj = sql_servers_conn.list_firewall_rules_by_server(resource_group=rg_name, server_name=server_name)
+ firewall_obj = sql_servers_conn.list_firewall_rules_by_server(
+ resource_group=rg_name, server_name=server_name
+ )
firewall_list = []
for firewall in firewall_obj:
firewall_rule_dict = self.convert_nested_dictionary(firewall)
@@ -191,39 +252,55 @@ def list_firewall_rules(self, sql_servers_conn, rg_name, server_name):
return firewall_list
def list_virtual_network_rules(self, sql_servers_conn, rg_name, server_name):
- virtual_network_rule_obj = sql_servers_conn.list_virtual_network_rules_by_server(resource_group=rg_name,
- server_name=server_name)
+ virtual_network_rule_obj = (
+ sql_servers_conn.list_virtual_network_rules_by_server(
+ resource_group=rg_name, server_name=server_name
+ )
+ )
virtual_network_rules_list = []
for virtual_network_rule in virtual_network_rule_obj:
- virtual_network_rule_dict = self.convert_nested_dictionary(virtual_network_rule)
-
- if virtual_network_rule_dict.get('id') is not None: # Get Virtual Network's name
- virtual_network_rule_dict.update({
- 'virtual_network_name_display': virtual_network_rule_dict['virtual_network_subnet_id'].split('/')[
- 8],
- 'subscription_id': virtual_network_rule_dict['id'].split('/')[2],
- 'resource_group': virtual_network_rule_dict['id'].split('/')[4]
- })
+ virtual_network_rule_dict = self.convert_nested_dictionary(
+ virtual_network_rule
+ )
+
+ if (
+ virtual_network_rule_dict.get("id") is not None
+ ): # Get Virtual Network's name
+ virtual_network_rule_dict.update(
+ {
+ "virtual_network_name_display": virtual_network_rule_dict[
+ "virtual_network_subnet_id"
+ ].split("/")[8],
+ "subscription_id": virtual_network_rule_dict["id"].split("/")[
+ 2
+ ],
+ "resource_group": virtual_network_rule_dict["id"].split("/")[4],
+ }
+ )
virtual_network_rules_list.append(virtual_network_rule_dict)
return virtual_network_rules_list
def list_encryption_protectors(self, sql_servers_conn, rg_name, server_name):
encryption_protectors_list = []
- encryption_protectors_obj = sql_servers_conn.list_encryption_protectors(resource_group=rg_name,
- server_name=server_name)
+ encryption_protectors_obj = sql_servers_conn.list_encryption_protectors(
+ resource_group=rg_name, server_name=server_name
+ )
for encryption_protector in encryption_protectors_obj:
- encryption_protectors_dict = self.convert_nested_dictionary(encryption_protector)
+ encryption_protectors_dict = self.convert_nested_dictionary(
+ encryption_protector
+ )
encryption_protectors_list.append(encryption_protectors_dict)
return encryption_protectors_list
def list_azure_ad_administrators(self, sql_servers_conn, rg_name, server_name):
- ad_admin_list = [] # return list
- ad_admin_obj = sql_servers_conn.list_server_azure_ad_administrators(resource_group=rg_name,
- server_name=server_name)
+ ad_admin_list = [] # return list
+ ad_admin_obj = sql_servers_conn.list_server_azure_ad_administrators(
+ resource_group=rg_name, server_name=server_name
+ )
for ad_admin in ad_admin_obj:
ad_admin_list.append(self.convert_dictionary(ad_admin))
@@ -231,29 +308,33 @@ def list_azure_ad_administrators(self, sql_servers_conn, rg_name, server_name):
return ad_admin_list
def get_server_automatic_tuning(self, sql_servers_conn, rg_name, server_name):
- server_automatic_tuning_obj = sql_servers_conn.get_server_automatic_tuning(rg_name, server_name)
- server_automatic_tuning_dict = self.convert_nested_dictionary(server_automatic_tuning_obj)
- server_automatic_tuning_dict.update({
- 'options': self.get_server_automatic_tuning_options(server_automatic_tuning_dict['options'])
- })
+ server_automatic_tuning_obj = sql_servers_conn.get_server_automatic_tuning(
+ rg_name, server_name
+ )
+ server_automatic_tuning_dict = self.convert_nested_dictionary(
+ server_automatic_tuning_obj
+ )
+ server_automatic_tuning_dict.update(
+ {
+ "options": self.get_server_automatic_tuning_options(
+ server_automatic_tuning_dict["options"]
+ )
+ }
+ )
return server_automatic_tuning_dict
def get_server_automatic_tuning_options(self, options_dict):
options_list = []
- created_index_dict = self.convert_nested_dictionary(options_dict['createIndex'])
- drop_index_dict = self.convert_nested_dictionary(options_dict['dropIndex'])
- force_plan_dict = self.convert_nested_dictionary(options_dict['forceLastGoodPlan'])
-
- created_index_dict.update({
- 'tuning_type': 'createIndex'
- })
- drop_index_dict.update({
- 'tuning_type': 'dropIndex'
- })
- force_plan_dict.update({
- 'tuning_type': 'forceLastGoodPlan'
- })
+ created_index_dict = self.convert_nested_dictionary(options_dict["createIndex"])
+ drop_index_dict = self.convert_nested_dictionary(options_dict["dropIndex"])
+ force_plan_dict = self.convert_nested_dictionary(
+ options_dict["forceLastGoodPlan"]
+ )
+
+ created_index_dict.update({"tuning_type": "createIndex"})
+ drop_index_dict.update({"tuning_type": "dropIndex"})
+ force_plan_dict.update({"tuning_type": "forceLastGoodPlan"})
options_list.append(created_index_dict)
options_list.append(drop_index_dict)
@@ -262,43 +343,62 @@ def get_server_automatic_tuning_options(self, options_dict):
return options_list
def get_server_auditing_settings(self, sql_servers_conn, rg_name, server_name):
- server_auditing_settings_obj = sql_servers_conn.get_server_auditing_settings(rg_name, server_name)
- server_auditing_settings_dict = self.convert_nested_dictionary(server_auditing_settings_obj)
+ server_auditing_settings_obj = sql_servers_conn.get_server_auditing_settings(
+ rg_name, server_name
+ )
+ server_auditing_settings_dict = self.convert_nested_dictionary(
+ server_auditing_settings_obj
+ )
return server_auditing_settings_dict
def list_failover_groups(self, sql_servers_conn, rg_name, server_name):
failover_groups_list = []
- failover_groups_obj = sql_servers_conn.list_failover_groups(rg_name, server_name)
+ failover_groups_obj = sql_servers_conn.list_failover_groups(
+ rg_name, server_name
+ )
for failover in failover_groups_obj:
failover_dict = self.convert_nested_dictionary(failover)
- if failover_dict.get('id') is not None: # Get Primary server's name
- failover_dict.update({
- 'primary_server': failover_dict['id'].split('/')[8]
- })
-
- if failover_dict.get('partner_servers') is not None: # Get Secondary Server's name
- failover_dict.update({
- 'secondary_server': self.get_failover_secondary_server(failover_dict['partner_servers'])
- })
-
- if failover_dict.get('read_write_endpoint') is not None:
- failover_dict.update({
- 'failover_policy_display': failover_dict['read_write_endpoint'].get('failover_policy'),
- 'grace_period_display': failover_dict['read_write_endpoint'].get(
- 'failover_with_data_loss_grace_period_minutes')
- })
+ if failover_dict.get("id") is not None: # Get Primary server's name
+ failover_dict.update(
+ {"primary_server": failover_dict["id"].split("/")[8]}
+ )
+
+ if (
+ failover_dict.get("partner_servers") is not None
+ ): # Get Secondary Server's name
+ failover_dict.update(
+ {
+ "secondary_server": self.get_failover_secondary_server(
+ failover_dict["partner_servers"]
+ )
+ }
+ )
+
+ if failover_dict.get("read_write_endpoint") is not None:
+ failover_dict.update(
+ {
+ "failover_policy_display": failover_dict[
+ "read_write_endpoint"
+ ].get("failover_policy"),
+ "grace_period_display": failover_dict[
+ "read_write_endpoint"
+ ].get("failover_with_data_loss_grace_period_minutes"),
+ }
+ )
failover_groups_list.append(failover_dict)
return failover_groups_list
- def list_data_masking_rules(self, sql_servers_conn, rg_name, server_name, database_name):
+ def list_data_masking_rules(
+ self, sql_servers_conn, rg_name, server_name, database_name
+ ):
data_masking_rules_list = []
- data_masking_rule_obj = sql_servers_conn.list_data_masking_rules_by_database(resource_group=rg_name,
- server_name=server_name,
- database_name=database_name)
+ data_masking_rule_obj = sql_servers_conn.list_data_masking_rules_by_database(
+ resource_group=rg_name, server_name=server_name, database_name=database_name
+ )
for data_masking_rule in data_masking_rule_obj:
data_masking_dict = self.convert_nested_dictionary(data_masking_rule)
@@ -306,90 +406,132 @@ def list_data_masking_rules(self, sql_servers_conn, rg_name, server_name, databa
return data_masking_rules_list
- def list_databases(self, sql_servers_conn, sql_monitor_conn, resource_group_name, server_name, server_admin_name):
+ def list_databases(
+ self,
+ sql_servers_conn,
+ sql_monitor_conn,
+ resource_group_name,
+ server_name,
+ server_admin_name,
+ ):
databases_list = []
- databases = sql_servers_conn.list_databases_by_server(resource_group_name=resource_group_name,
- server_name=server_name)
+ databases = sql_servers_conn.list_databases_by_server(
+ resource_group_name=resource_group_name, server_name=server_name
+ )
for database in databases:
database_dict = self.convert_nested_dictionary(database)
- if database_dict.get('sku'):
- if database_dict.get('name') != 'master': # No pricing tier for system database
- database_dict.update({
- 'pricing_tier_display': self.get_pricing_tier_display(database_dict['sku']),
- 'service_tier_display': database_dict['sku'].get('tier')
- })
-
- if db_id := database_dict.get('id'):
- database_dict.update({
- 'server_name': db_id.split('/')[8],
- 'subscription_id': db_id.split('/')[2],
- 'resource_group': db_id.split('/')[4],
- 'azure_monitor': {'resource_id': db_id}
- })
-
- if compute_tier := database_dict.get('kind'):
- database_dict.update({
- 'compute_tier': self.get_db_compute_tier(compute_tier)
- })
-
- if database_dict.get('max_size_bytes'):
- database_dict.update({
- 'max_size_gb': database_dict['max_size_bytes'] / 1073741824 # 2의 30승
- })
+ if database_dict.get("sku"):
+ if (
+ database_dict.get("name") != "master"
+ ): # No pricing tier for system database
+ database_dict.update(
+ {
+ "pricing_tier_display": self.get_pricing_tier_display(
+ database_dict["sku"]
+ ),
+ "service_tier_display": database_dict["sku"].get("tier"),
+ }
+ )
+
+ if db_id := database_dict.get("id"):
+ database_dict.update(
+ {
+ "server_name": db_id.split("/")[8],
+ "subscription_id": db_id.split("/")[2],
+ "resource_group": db_id.split("/")[4],
+ "azure_monitor": {"resource_id": db_id},
+ }
+ )
+
+ if compute_tier := database_dict.get("kind"):
+ database_dict.update(
+ {"compute_tier": self.get_db_compute_tier(compute_tier)}
+ )
+
+ if database_dict.get("max_size_bytes"):
+ database_dict.update(
+ {
+ "max_size_gb": database_dict["max_size_bytes"]
+ / 1073741824 # 2의 30승
+ }
+ )
# Get Sync Groups by databases
- if database_dict.get('service_tier_display') != 'DataWarehouse':
- database_dict.update({
- 'sync_group': self.get_sync_group_by_databases(sql_servers_conn, resource_group_name, server_name,
- database_dict['name']),
- })
-
- if database_dict.get('sync_group'):
- database_dict.update({
- 'sync_group_display': self.get_sync_group_display(database_dict['sync_group'])
- })
+ if database_dict.get("service_tier_display") != "DataWarehouse":
+ database_dict.update(
+ {
+ "sync_group": self.get_sync_group_by_databases(
+ sql_servers_conn,
+ resource_group_name,
+ server_name,
+ database_dict["name"],
+ ),
+ }
+ )
+
+ if database_dict.get("sync_group"):
+ database_dict.update(
+ {
+ "sync_group_display": self.get_sync_group_display(
+ database_dict["sync_group"]
+ )
+ }
+ )
# Get Sync Agents by servers
- database_dict.update({
- 'sync_agent': self.get_sync_agent_by_servers(sql_servers_conn, resource_group_name, server_name)
- })
-
- if database_dict.get('sync_agent'):
- database_dict.update({
- 'sync_agent_display': self.get_sync_agent_display(database_dict['sync_agent'])
- })
- '''
+ database_dict.update(
+ {
+ "sync_agent": self.get_sync_agent_by_servers(
+ sql_servers_conn, resource_group_name, server_name
+ )
+ }
+ )
+
+ if database_dict.get("sync_agent"):
+ database_dict.update(
+ {
+ "sync_agent_display": self.get_sync_agent_display(
+ database_dict["sync_agent"]
+ )
+ }
+ )
+ """
# Get Data masking rules
database_dict.update({
'data_masking_rules': self.list_data_masking_rules(self, sql_servers_conn, rg_name, server_name, database_dict['name'])
})
- '''
+ """
# Get Diagnostic Settings
- database_dict.update({
- 'diagnostic_settings_resource': self.list_diagnostics_settings(sql_monitor_conn,
- database_dict['id'])
- })
+ database_dict.update(
+ {
+ "diagnostic_settings_resource": self.list_diagnostics_settings(
+ sql_monitor_conn, database_dict["id"]
+ )
+ }
+ )
# Get Database Replication Type
- database_dict.update({
- 'replication_link': self.list_replication_link(sql_servers_conn, resource_group_name, server_name,
- database_dict['name'])
- })
+ database_dict.update(
+ {
+ "replication_link": self.list_replication_link(
+ sql_servers_conn,
+ resource_group_name,
+ server_name,
+ database_dict["name"],
+ )
+ }
+ )
# Get azure_ad_admin name
if server_admin_name is not None:
- database_dict.update({
- 'administrator_login': server_admin_name
- })
+ database_dict.update({"administrator_login": server_admin_name})
# switch tags form
- tags = database_dict.get('tags', {})
+ tags = database_dict.get("tags", {})
_tags = self.convert_tag_format(tags)
- database_dict.update({
- 'tags': _tags
- })
+ database_dict.update({"tags": _tags})
databases_list.append(database_dict)
@@ -397,7 +539,9 @@ def list_databases(self, sql_servers_conn, sql_monitor_conn, resource_group_name
def get_sync_agent_by_servers(self, sql_servers_conn, rg_name, server_name):
sync_agent_list = []
- sync_agent_obj = sql_servers_conn.list_sync_agents_by_server(rg_name, server_name)
+ sync_agent_obj = sql_servers_conn.list_sync_agents_by_server(
+ rg_name, server_name
+ )
for sync_agent in sync_agent_obj:
sync_agent_dict = self.convert_nested_dictionary(sync_agent)
@@ -407,7 +551,9 @@ def get_sync_agent_by_servers(self, sql_servers_conn, rg_name, server_name):
def list_diagnostics_settings(self, sql_monitor_conn, resource_uri):
diagnostic_settings_list = []
- diagnostic_settings_objs = sql_monitor_conn.list_diagnostic_settings(resource_uri=resource_uri)
+ diagnostic_settings_objs = sql_monitor_conn.list_diagnostic_settings(
+ resource_uri=resource_uri
+ )
for diagnostic_setting in diagnostic_settings_objs:
diagnostic_setting_dict = self.convert_nested_dictionary(diagnostic_setting)
@@ -415,9 +561,13 @@ def list_diagnostics_settings(self, sql_monitor_conn, resource_uri):
return diagnostic_settings_list
- def list_replication_link(self, sql_servers_conn, rg_name, server_name, database_name):
+ def list_replication_link(
+ self, sql_servers_conn, rg_name, server_name, database_name
+ ):
replication_link_list = []
- replication_link_obj = sql_servers_conn.list_replication_link(rg_name, server_name, database_name)
+ replication_link_obj = sql_servers_conn.list_replication_link(
+ rg_name, server_name, database_name
+ )
for replication_link in replication_link_obj:
replication_link_dict = self.convert_nested_dictionary(replication_link)
@@ -425,10 +575,14 @@ def list_replication_link(self, sql_servers_conn, rg_name, server_name, database
return replication_link_list
- def get_sync_group_by_databases(self, sql_servers_conn, resource_group_name, server_name, database_name):
- sync_group_obj = sql_servers_conn.list_sync_groups_by_databases(resource_group=resource_group_name,
- server_name=server_name,
- database_name=database_name)
+ def get_sync_group_by_databases(
+ self, sql_servers_conn, resource_group_name, server_name, database_name
+ ):
+ sync_group_obj = sql_servers_conn.list_sync_groups_by_databases(
+ resource_group=resource_group_name,
+ server_name=server_name,
+ database_name=database_name,
+ )
sync_group_list = []
for sync_group in sync_group_obj:
sync_group_dict = self.convert_nested_dictionary(sync_group)
@@ -438,24 +592,33 @@ def get_sync_group_by_databases(self, sql_servers_conn, resource_group_name, ser
@staticmethod
def get_private_endpoint_connections(private_endpoint_connection_list):
for pec in private_endpoint_connection_list:
- if pec.get('id') is not None:
- pec.update({
- 'connection_id': pec['id'].split('/')[10]
- })
-
- if pec.get('properties') is not None:
- pec.update({
- 'private_endpoint_name': pec['properties'].get('private_endpoint').get('id').split('/')[8],
- 'description': pec['properties'].get('private_link_service_connection_state').get('description'),
- 'status': pec['properties'].get('private_link_service_connection_state').get('status')
- })
+ if pec.get("id") is not None:
+ pec.update({"connection_id": pec["id"].split("/")[10]})
+
+ if pec.get("properties") is not None:
+ pec.update(
+ {
+ "private_endpoint_name": pec["properties"]
+ .get("private_endpoint")
+ .get("id")
+ .split("/")[8],
+ "description": pec["properties"]
+ .get("private_link_service_connection_state")
+ .get("description"),
+ "status": pec["properties"]
+ .get("private_link_service_connection_state")
+ .get("status"),
+ }
+ )
return private_endpoint_connection_list
@staticmethod
def get_per_db_settings(per_database_settings_dict):
- per_db_settings = f"{str(per_database_settings_dict['min_capacity'])} - " \
- f"{str(per_database_settings_dict['max_capacity'])} vCores"
+ per_db_settings = (
+ f"{str(per_database_settings_dict['min_capacity'])} - "
+ f"{str(per_database_settings_dict['max_capacity'])} vCores"
+ )
return per_db_settings
@staticmethod
@@ -463,27 +626,27 @@ def get_failover_secondary_server(partner_servers):
secondary_server = None
for partner_server in partner_servers:
- if partner_server['replication_role'] == 'Secondary':
- secondary_server = partner_server['id'].split('/')[8]
+ if partner_server["replication_role"] == "Secondary":
+ secondary_server = partner_server["id"].split("/")[8]
return secondary_server
@staticmethod
def get_azure_ad_admin_name(azure_ad_administrators_list):
- az_admin_name = ''
+ az_admin_name = ""
for az_admin in azure_ad_administrators_list:
- if az_admin.get('login') is not None:
- az_admin_name = az_admin.get('login')
+ if az_admin.get("login") is not None:
+ az_admin_name = az_admin.get("login")
return az_admin_name
@staticmethod
def get_db_compute_tier(kind):
- if 'serverless' in kind:
- compute_tier = 'Serverless'
+ if "serverless" in kind:
+ compute_tier = "Serverless"
else:
- compute_tier = 'Provisioned'
+ compute_tier = "Provisioned"
return compute_tier
@@ -499,7 +662,7 @@ def get_sync_agent_display(sync_agent_list):
@staticmethod
def get_pricing_tier_display(sku_dict):
pricing_tier = None
- if sku_dict.get('capacity') is not None:
+ if sku_dict.get("capacity") is not None:
pricing_tier = f'{str(sku_dict["tier"])} : {str(sku_dict["family"])} , {str(sku_dict["capacity"])} vCores'
return pricing_tier
diff --git a/src/spaceone/inventory/manager/storage_accounts/instance_manager.py b/src/spaceone/inventory/manager/storage_accounts/instance_manager.py
index cc3a9d11..8383aa21 100644
--- a/src/spaceone/inventory/manager/storage_accounts/instance_manager.py
+++ b/src/spaceone/inventory/manager/storage_accounts/instance_manager.py
@@ -7,37 +7,43 @@
from spaceone.inventory.connector.storage_accounts import StorageAccountsConnector
from spaceone.inventory.connector.monitor import MonitorConnector
from spaceone.inventory.model.storage_accounts.cloud_service import *
-from spaceone.inventory.model.storage_accounts.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.storage_accounts.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.storage_accounts.data import *
_LOGGER = logging.getLogger(__name__)
class StorageAccountsManager(AzureManager):
- connector_name = 'StorageAccountsConnector'
+ connector_name = "StorageAccountsConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure storage account data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure storage account data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
_LOGGER.debug("** Storage Account START **")
start_time = time.time()
- subscription_info = params['subscription_info']
- storage_account_conn: StorageAccountsConnector = self.locator.get_connector(self.connector_name, **params)
- monitor_conn: MonitorConnector = self.locator.get_connector('MonitorConnector', **params)
+ subscription_info = params["subscription_info"]
+ storage_account_conn: StorageAccountsConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
+ monitor_conn: MonitorConnector = self.locator.get_connector(
+ "MonitorConnector", **params
+ )
storage_account_responses = []
error_responses = []
@@ -45,114 +51,152 @@ def collect_cloud_service(self, params):
storage_accounts = storage_account_conn.list_storage_accounts()
for storage_account in storage_accounts:
- storage_account_id = ''
+ storage_account_id = ""
try:
storage_account_dict = self.convert_nested_dictionary(storage_account)
- storage_account_id = storage_account_dict['id']
+ kind = storage_account_dict.get("kind")
+ storage_account_id = storage_account_dict["id"]
resource_group = self.get_resource_group_from_id(storage_account_id)
- if storage_account_dict.get('network_rule_set') is not None:
- storage_account_dict.update({
- 'network_rule_set': self.get_network_rule_set(storage_account_dict['network_rule_set'])
- })
-
- if storage_account_dict.get('name') is not None:
- container_count = self.get_blob_containers_count(storage_account_conn, resource_group,
- storage_account_dict['name'])
-
- storage_account_dict.update({'container_count_display': container_count})
-
- if storage_account_dict.get('routing_preference') is not None:
- storage_account_dict.update({
- 'routing_preference_display': 'Internet routing'
- })
+ if storage_account_dict.get("network_rule_set") is not None:
+ storage_account_dict.update(
+ {
+ "network_rule_set": self.get_network_rule_set(
+ storage_account_dict["network_rule_set"]
+ )
+ }
+ )
+
+ # https://learn.microsoft.com/en-us/rest/api/storagerp/storage-accounts/list?view=rest-storagerp-2023-01-01&tabs=HTTP#kind
+ if storage_account_dict.get("name") is not None and kind not in [
+ "FileStorage"
+ ]:
+ container_count = self.get_blob_containers_count(
+ storage_account_conn,
+ resource_group,
+ storage_account_dict["name"],
+ )
+
+ storage_account_dict.update(
+ {"container_count_display": container_count}
+ )
+
+ if storage_account_dict.get("routing_preference") is not None:
+ storage_account_dict.update(
+ {"routing_preference_display": "Internet routing"}
+ )
else:
- storage_account_dict.update({
- 'routing_preference_display': 'Microsoft network routing'
- })
-
- storage_account_dict.update({
- 'resource_group': resource_group,
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': storage_account_id},
- 'blob_count_display': self._get_blob_count_from_monitoring(monitor_conn, storage_account_id),
- 'blob_size_display': self._get_blob_size_from_monitoring(monitor_conn, storage_account_id),
- })
-
- storage_account_data = StorageAccount(storage_account_dict, strict=False)
- storage_account_resource = StorageAccountResource({
- 'data': storage_account_data,
- 'tags': storage_account_dict.get('tags', {}),
- 'region_code': storage_account_data.location,
- 'reference': ReferenceModel(storage_account_data.reference()),
- 'name': storage_account_data.name,
- 'account': storage_account_data.subscription_id,
- 'instance_type': storage_account_data.sku.tier
- })
-
+ storage_account_dict.update(
+ {"routing_preference_display": "Microsoft network routing"}
+ )
+
+ storage_account_dict = self.update_tenant_id_from_secret_data(
+ storage_account_dict, params["secret_data"]
+ )
+ storage_account_dict.update(
+ {
+ "resource_group": resource_group,
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": storage_account_id},
+ "blob_count_display": self._get_blob_count_from_monitoring(
+ monitor_conn, storage_account_id
+ ),
+ "blob_size_display": self._get_blob_size_from_monitoring(
+ monitor_conn, storage_account_id
+ ),
+ }
+ )
+
+ storage_account_data = StorageAccount(
+ storage_account_dict, strict=False
+ )
+ storage_account_resource = StorageAccountResource(
+ {
+ "data": storage_account_data,
+ "tags": storage_account_dict.get("tags", {}),
+ "region_code": storage_account_data.location,
+ "reference": ReferenceModel(storage_account_data.reference()),
+ "name": storage_account_data.name,
+ "account": storage_account_data.subscription_id,
+ "instance_type": storage_account_data.sku.tier,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(storage_account_data['location'])
- storage_account_responses.append(StorageAccountResponse({'resource': storage_account_resource}))
+ self.set_region_code(storage_account_data["location"])
+ storage_account_responses.append(
+ StorageAccountResponse({"resource": storage_account_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {storage_account_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Storage', 'StorageAccount', storage_account_id)
+ _LOGGER.error(
+ f"[list_instances] {storage_account_id} {e}", exc_info=True
+ )
+ error_resource_response = self.generate_resource_error_response(
+ e, "Storage", "StorageAccount", storage_account_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** Storage Account Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** Storage Account Finished {time.time() - start_time} Seconds **"
+ )
return storage_account_responses, error_responses
- def get_public_ip_address(self, application_gateway_conn, resource_group_name, pip_name):
- public_ip_address_obj = application_gateway_conn.get_public_ip_addresses(resource_group_name, pip_name)
+ def get_public_ip_address(
+ self, application_gateway_conn, resource_group_name, pip_name
+ ):
+ public_ip_address_obj = application_gateway_conn.get_public_ip_addresses(
+ resource_group_name, pip_name
+ )
public_ip_address_dict = self.convert_nested_dictionary(public_ip_address_obj)
- _LOGGER.debug(f'[Public IP Address]{public_ip_address_dict}')
-
return public_ip_address_dict
def get_network_rule_set(self, network_rule_dict):
- if network_rule_dict.get('virtual_network_rules') is not None:
- network_rule_dict.update({
- 'virtual_networks': self.get_virtual_network_names(network_rule_dict['virtual_network_rules']),
- 'is_public_access_allowed': False
- })
+ if network_rule_dict.get("virtual_network_rules") is not None:
+ network_rule_dict.update(
+ {
+ "virtual_networks": self.get_virtual_network_names(
+ network_rule_dict["virtual_network_rules"]
+ ),
+ "is_public_access_allowed": False,
+ }
+ )
if not network_rule_dict.get(
- 'virtual_network_rules'): # if virtual_network_rules are empty, this SA is public allowable
- network_rule_dict.update({
- 'is_public_access_allowed': True
- })
+ "virtual_network_rules"
+ ): # if virtual_network_rules are empty, this SA is public allowable
+ network_rule_dict.update({"is_public_access_allowed": True})
- if network_rule_dict.get('ip_rules') is not None:
+ if network_rule_dict.get("ip_rules") is not None:
firewall_address_list = []
- for rule in network_rule_dict['ip_rules']:
- firewall_address_list.append(rule['ip_address_or_range'])
+ for rule in network_rule_dict["ip_rules"]:
+ firewall_address_list.append(rule["ip_address_or_range"])
- network_rule_dict.update({
- 'firewall_address_range': firewall_address_list
- })
+ network_rule_dict.update({"firewall_address_range": firewall_address_list})
- if network_rule_dict.get('resource_access_rules') is not None:
+ if network_rule_dict.get("resource_access_rules") is not None:
resource_access_rules_list = []
- for rule in network_rule_dict['resource_access_rules']:
+ for rule in network_rule_dict["resource_access_rules"]:
try:
- resource_type = rule.get('resource_id').split('/')[6]
+ resource_type = rule.get("resource_id").split("/")[6]
resource_access_rules_list.append(resource_type)
except Exception as e:
- _LOGGER.error(f'[ERROR: Azure Storage Account Network Rules]: {e}')
+ _LOGGER.error(f"[ERROR: Azure Storage Account Network Rules]: {e}")
- network_rule_dict.update({
- 'resource_access_rules_display': resource_access_rules_list
- })
+ network_rule_dict.update(
+ {"resource_access_rules_display": resource_access_rules_list}
+ )
return network_rule_dict
def list_blob_containers(self, storage_conn, rg_name, account_name):
blob_containers_list = []
- blob_containers_obj = storage_conn.list_blob_containers(rg_name=rg_name, account_name=account_name)
+ blob_containers_obj = storage_conn.list_blob_containers(
+ rg_name=rg_name, account_name=account_name
+ )
for blob_container in blob_containers_obj:
blob_dict = self.convert_nested_dictionary(blob_container)
blob_containers_list.append(blob_dict)
@@ -161,44 +205,65 @@ def list_blob_containers(self, storage_conn, rg_name, account_name):
def _get_blob_count_from_monitoring(self, monitor_conn, storage_account_id):
timespan = self._get_timespan_from_now(1)
- aggregation = 'total'
- interval = 'PT1H'
- container_blob_count_metric = self._get_metric_data(monitor_conn, f'{storage_account_id}/blobServices/default',
- metricnames='BlobCount', aggregation=aggregation,
- timespan=timespan, interval=interval)
-
- container_blob_count_metric_dict = self.convert_nested_dictionary(container_blob_count_metric)
- return self._get_timeseries_data_from_metric(container_blob_count_metric_dict, aggregation)
+ aggregation = "total"
+ interval = "PT1H"
+ container_blob_count_metric = self._get_metric_data(
+ monitor_conn,
+ f"{storage_account_id}/blobServices/default",
+ metricnames="BlobCount",
+ aggregation=aggregation,
+ timespan=timespan,
+ interval=interval,
+ )
+
+ container_blob_count_metric_dict = self.convert_nested_dictionary(
+ container_blob_count_metric
+ )
+ return self._get_timeseries_data_from_metric(
+ container_blob_count_metric_dict, aggregation
+ )
def _get_blob_size_from_monitoring(self, monitor_conn, storage_account_id):
timespan = self._get_timespan_from_now(1)
- aggregation = 'total'
- interval = 'PT1H'
- container_blob_capacity_metric = self._get_metric_data(monitor_conn,
- f'{storage_account_id}/blobServices/default',
- metricnames='BlobCapacity', aggregation=aggregation,
- timespan=timespan, interval=interval)
- container_blob_capacity_metric_dict = self.convert_nested_dictionary(container_blob_capacity_metric)
- return self._get_timeseries_data_from_metric(container_blob_capacity_metric_dict, aggregation)
+ aggregation = "total"
+ interval = "PT1H"
+ container_blob_capacity_metric = self._get_metric_data(
+ monitor_conn,
+ f"{storage_account_id}/blobServices/default",
+ metricnames="BlobCapacity",
+ aggregation=aggregation,
+ timespan=timespan,
+ interval=interval,
+ )
+ container_blob_capacity_metric_dict = self.convert_nested_dictionary(
+ container_blob_capacity_metric
+ )
+ return self._get_timeseries_data_from_metric(
+ container_blob_capacity_metric_dict, aggregation
+ )
@staticmethod
def _get_timeseries_data_from_metric(metric_dict, aggregation):
try:
- timeseries_data = metric_dict['value'][0]['timeseries'][0]['data'][0].get(aggregation)
+ timeseries_data = metric_dict["value"][0]["timeseries"][0]["data"][0].get(
+ aggregation
+ )
return timeseries_data if timeseries_data is not None else 0
except Exception as e:
- _LOGGER.warning(f'[_get_timeseries_data_from_metric]: {e}')
+ _LOGGER.warning(f"[_get_timeseries_data_from_metric]: {e}")
return 0
@staticmethod
def get_associated_listener(frontend_ip_configuration_dict, http_listeners_list):
- associated_listener = ''
+ associated_listener = ""
for http_listener in http_listeners_list:
- if http_listener.get('frontend_ip_configuration') is not None:
- if frontend_ip_configuration_dict['id'] in http_listener.get('frontend_ip_configuration', {}).get('id', ''):
- associated_listener = http_listener.get('name', '-')
+ if http_listener.get("frontend_ip_configuration") is not None:
+ if frontend_ip_configuration_dict["id"] in http_listener.get(
+ "frontend_ip_configuration", {}
+ ).get("id", ""):
+ associated_listener = http_listener.get("name", "-")
else:
- associated_listener = '-'
+ associated_listener = "-"
return associated_listener
@@ -206,8 +271,8 @@ def get_associated_listener(frontend_ip_configuration_dict, http_listeners_list)
def get_port(port_id, frontend_ports_list):
port = 0
for fe_port in frontend_ports_list:
- if port_id in fe_port['id']:
- port = fe_port.get('port', 0)
+ if port_id in fe_port["id"]:
+ port = fe_port.get("port", 0)
return port
else:
return port
@@ -215,43 +280,54 @@ def get_port(port_id, frontend_ports_list):
@staticmethod
def update_backend_pool_dict(backend_pool_list, backend_pool_id, request_rules):
for backend_pool in backend_pool_list:
- if backend_pool['id'] == backend_pool_id:
- backend_pool.update({
- 'associated_rules': request_rules
- })
+ if backend_pool["id"] == backend_pool_id:
+ backend_pool.update({"associated_rules": request_rules})
@staticmethod
- def update_rewrite_ruleset_dict(rewrite_rule_sets_list, rewrite_rule_id, applied_rules_list):
+ def update_rewrite_ruleset_dict(
+ rewrite_rule_sets_list, rewrite_rule_id, applied_rules_list
+ ):
for rewrite_rule in rewrite_rule_sets_list:
- if rewrite_rule['id'] == rewrite_rule_id:
- rewrite_rule.update({
- 'rules_applied': applied_rules_list
- })
+ if rewrite_rule["id"] == rewrite_rule_id:
+ rewrite_rule.update({"rules_applied": applied_rules_list})
@staticmethod
- def update_http_listeners_list(http_listeners_list, http_listener_id, http_applied_rules):
+ def update_http_listeners_list(
+ http_listeners_list, http_listener_id, http_applied_rules
+ ):
for http_listener in http_listeners_list:
- if http_listener['id'] == http_listener_id:
- http_listener.update({
- 'associated_rules': http_applied_rules
- })
+ if http_listener["id"] == http_listener_id:
+ http_listener.update({"associated_rules": http_applied_rules})
@staticmethod
def get_virtual_network_names(virtual_network_rules):
names = []
try:
for virtual_network_rule in virtual_network_rules:
- name = virtual_network_rule['virtual_network_resource_id'].split('/')[8]
+ name = virtual_network_rule["virtual_network_resource_id"].split("/")[8]
names.append(name)
except Exception as e:
- _LOGGER.error(f'[ERROR: Azure Storage Account Network Rule Get Name]: {e}')
+ _LOGGER.error(f"[ERROR: Azure Storage Account Network Rule Get Name]: {e}")
return names
@staticmethod
- def _get_metric_data(monitor_conn, resource_uri, metricnames, aggregation=None, timespan=None, interval=None):
- return monitor_conn.list_metrics(resource_uri, metricnames=metricnames, aggregation=aggregation, timespan=timespan, interval=interval)
+ def _get_metric_data(
+ monitor_conn,
+ resource_uri,
+ metricnames,
+ aggregation=None,
+ timespan=None,
+ interval=None,
+ ):
+ return monitor_conn.list_metrics(
+ resource_uri,
+ metricnames=metricnames,
+ aggregation=aggregation,
+ timespan=timespan,
+ interval=interval,
+ )
@staticmethod
def _get_timespan_from_now(hours):
@@ -261,6 +337,7 @@ def _get_timespan_from_now(hours):
@staticmethod
def get_blob_containers_count(storage_conn, rg_name, account_name):
- blob_containers_obj = storage_conn.list_blob_containers(rg_name=rg_name, account_name=account_name)
+ blob_containers_obj = storage_conn.list_blob_containers(
+ rg_name=rg_name, account_name=account_name
+ )
return len(list(blob_containers_obj))
-
diff --git a/src/spaceone/inventory/manager/virtual_machines/__init__.py b/src/spaceone/inventory/manager/virtual_machines/__init__.py
index 146aaffb..5296d8a4 100644
--- a/src/spaceone/inventory/manager/virtual_machines/__init__.py
+++ b/src/spaceone/inventory/manager/virtual_machines/__init__.py
@@ -1,9 +1,18 @@
-from spaceone.inventory.manager.virtual_machines.disk_manager import VirtualMachineDiskManager
-from spaceone.inventory.manager.virtual_machines.load_balancer_manager import VirtualMachineLoadBalancerManager
-from spaceone.inventory.manager.virtual_machines.network_security_group_manager import VirtualMachineNetworkSecurityGroupManager
-from spaceone.inventory.manager.virtual_machines.nic_manager import VirtualMachineNICManager
-from spaceone.inventory.manager.virtual_machines.vm_manager import VirtualMachineVmManager
-from spaceone.inventory.manager.virtual_machines.vmss_manager import VirtualMachineVMScaleSetManager
-from spaceone.inventory.manager.virtual_machines.vnet_manager import VirtualMachineVNetManager
-
-
+from spaceone.inventory.manager.virtual_machines.disk_manager import (
+ VirtualMachineDiskManager,
+)
+from spaceone.inventory.manager.virtual_machines.load_balancer_manager import (
+ VirtualMachineLoadBalancerManager,
+)
+from spaceone.inventory.manager.virtual_machines.network_security_group_manager import (
+ VirtualMachineNetworkSecurityGroupManager,
+)
+from spaceone.inventory.manager.virtual_machines.nic_manager import (
+ VirtualMachineNICManager,
+)
+from spaceone.inventory.manager.virtual_machines.vm_manager import (
+ VirtualMachineVmManager,
+)
+from spaceone.inventory.manager.virtual_machines.vnet_manager import (
+ VirtualMachineVNetManager,
+)
diff --git a/src/spaceone/inventory/manager/virtual_machines/instnace_manger.py b/src/spaceone/inventory/manager/virtual_machines/instnace_manger.py
index b138be9e..ff715fff 100644
--- a/src/spaceone/inventory/manager/virtual_machines/instnace_manger.py
+++ b/src/spaceone/inventory/manager/virtual_machines/instnace_manger.py
@@ -1,38 +1,49 @@
import logging
from spaceone.inventory.libs.manager import AzureManager
from spaceone.inventory.connector.virtual_machines import VirtualMachinesConnector
-from spaceone.inventory.manager.virtual_machines import VirtualMachineDiskManager, VirtualMachineLoadBalancerManager,\
- VirtualMachineNetworkSecurityGroupManager, VirtualMachineNICManager, \
- VirtualMachineVmManager, VirtualMachineVMScaleSetManager, VirtualMachineVNetManager
+from spaceone.inventory.manager.virtual_machines import (
+ VirtualMachineDiskManager,
+ VirtualMachineLoadBalancerManager,
+ VirtualMachineNetworkSecurityGroupManager,
+ VirtualMachineNICManager,
+ VirtualMachineVmManager,
+ VirtualMachineVNetManager,
+)
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.model.virtual_machines.data import *
-from spaceone.inventory.libs.schema.resource import ErrorResourceResponse, CloudServiceResourceResponse, AzureMonitorModel
+from spaceone.inventory.libs.schema.resource import (
+ ErrorResourceResponse,
+ CloudServiceResourceResponse,
+ AzureMonitorModel,
+)
from spaceone.core.utils import *
-from spaceone.inventory.model.virtual_machines.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.virtual_machines.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.virtual_machines.cloud_service import *
_LOGGER = logging.getLogger(__name__)
class VirtualMachinesManager(AzureManager):
- connector_name = 'VirtualMachinesConnector'
+ connector_name = "VirtualMachinesConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
# refactoring
def collect_cloud_service(self, params):
- '''
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure vm scale set data resource information
- ErrorResourceResponse (list) : list of error resource information
- '''
+ """
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure vm scale set data resource information
+ ErrorResourceResponse (list) : list of error resource information
+ """
_LOGGER.debug("** VirtualMachine START **")
start_time = time.time()
@@ -40,38 +51,52 @@ def collect_cloud_service(self, params):
servers = []
errors = []
- azure_vm_connector: VirtualMachinesConnector = self.locator.get_connector(self.connector_name, **params)
- azure_vm_connector.set_connect(params['secret_data'])
+ azure_vm_connector: VirtualMachinesConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
# call all managers
- vm_manager: VirtualMachineVmManager = VirtualMachineVmManager(params, azure_vm_connector=azure_vm_connector)
- disk_manager: VirtualMachineDiskManager = VirtualMachineDiskManager(params,
- azure_vm_connector=azure_vm_connector)
- load_balancer_manager: VirtualMachineLoadBalancerManager = \
- VirtualMachineLoadBalancerManager(params, azure_vm_connector=azure_vm_connector)
- network_security_group_manager: VirtualMachineNetworkSecurityGroupManager = \
- VirtualMachineNetworkSecurityGroupManager(params, azure_vm_connector=azure_vm_connector)
- nic_manager: VirtualMachineNICManager = VirtualMachineNICManager(params, azure_vm_connector=azure_vm_connector)
- # vmss_manager: AzureVMScaleSetManager = AzureVMScaleSetManager(params, azure_vm_connector=azure_vm_connector)
- vnet_manager: VirtualMachineVNetManager = VirtualMachineVNetManager(params,
- azure_vm_connector=azure_vm_connector)
+ vm_manager: VirtualMachineVmManager = VirtualMachineVmManager(
+ params, azure_vm_connector=azure_vm_connector
+ )
+ disk_manager: VirtualMachineDiskManager = VirtualMachineDiskManager(
+ params, azure_vm_connector=azure_vm_connector
+ )
+ load_balancer_manager: VirtualMachineLoadBalancerManager = (
+ VirtualMachineLoadBalancerManager(
+ params, azure_vm_connector=azure_vm_connector
+ )
+ )
+ network_security_group_manager: VirtualMachineNetworkSecurityGroupManager = (
+ VirtualMachineNetworkSecurityGroupManager(
+ params, azure_vm_connector=azure_vm_connector
+ )
+ )
+ nic_manager: VirtualMachineNICManager = VirtualMachineNICManager(
+ params, azure_vm_connector=azure_vm_connector
+ )
+ vnet_manager: VirtualMachineVNetManager = VirtualMachineVNetManager(
+ params, azure_vm_connector=azure_vm_connector
+ )
vms = list(azure_vm_connector.list_all_vms())
resource_groups = list(azure_vm_connector.list_resource_groups())
load_balancers = list(azure_vm_connector.list_load_balancers())
- network_security_groups = list(azure_vm_connector.list_network_security_groups())
+ network_security_groups = list(
+ azure_vm_connector.list_network_security_groups()
+ )
network_interfaces = list(azure_vm_connector.list_network_interfaces())
disks = list(azure_vm_connector.list_disks())
public_ip_addresses = list(azure_vm_connector.list_public_ip_addresses())
virtual_networks = list(azure_vm_connector.list_virtual_networks())
skus = list(azure_vm_connector.list_skus())
- subscription_id = params['secret_data'].get('subscription_id')
+ subscription_id = params["secret_data"].get("subscription_id")
subscription_info = azure_vm_connector.get_subscription_info(subscription_id)
subscription_data = {
- 'subscription_id': subscription_info.subscription_id,
- 'subscription_name': subscription_info.display_name,
- 'tenant_id': subscription_info.tenant_id
+ "subscription_id": subscription_info.subscription_id,
+ "subscription_name": subscription_info.display_name,
+ "tenant_id": subscription_info.tenant_id,
}
for vm in vms:
@@ -79,80 +104,125 @@ def collect_cloud_service(self, params):
vnet_data = None
subnet_data = None
lb_vos = []
- resource_group, resource_group_name = self.get_resource_info_in_vm(vm, resource_groups)
+ resource_group, resource_group_name = self.get_resource_info_in_vm(
+ vm, resource_groups
+ )
skus_dict = self.get_skus_resource(skus)
disk_vos = disk_manager.get_disk_info(vm, disks)
- nic_vos, primary_ip = nic_manager.get_nic_info(vm, network_interfaces, public_ip_addresses,
- virtual_networks)
-
- vm_resource = vm_manager.get_vm_info(vm, disk_vos, nic_vos, resource_group, subscription_id,
- network_security_groups, primary_ip, skus_dict)
+ nic_vos, primary_ip = nic_manager.get_nic_info(
+ vm, network_interfaces, public_ip_addresses, virtual_networks
+ )
+
+ vm_resource = vm_manager.get_vm_info(
+ vm,
+ disk_vos,
+ nic_vos,
+ resource_group,
+ subscription_id,
+ network_security_groups,
+ primary_ip,
+ skus_dict,
+ )
if load_balancers is not None:
- lb_vos = load_balancer_manager.get_load_balancer_info(vm, load_balancers, public_ip_addresses)
+ lb_vos = load_balancer_manager.get_load_balancer_info(
+ vm, load_balancers, public_ip_addresses
+ )
- nsg_vos = network_security_group_manager.get_network_security_group_info(vm, network_security_groups,
- network_interfaces)
+ nsg_vos = (
+ network_security_group_manager.get_network_security_group_info(
+ vm, network_security_groups, network_interfaces
+ )
+ )
- nic_name = vm.network_profile.network_interfaces[0].id.split('/')[-1]
+ nic_name = vm.network_profile.network_interfaces[0].id.split("/")[-1]
if nic_name is not None:
- vnet_subnet_dict = vnet_manager.get_vnet_subnet_info(nic_name, network_interfaces, virtual_networks)
+ vnet_subnet_dict = vnet_manager.get_vnet_subnet_info(
+ nic_name, network_interfaces, virtual_networks
+ )
- if vnet_subnet_dict.get('vnet_info'):
- vnet_data = vnet_subnet_dict['vnet_info']
+ if vnet_subnet_dict.get("vnet_info"):
+ vnet_data = vnet_subnet_dict["vnet_info"]
- if vnet_subnet_dict.get('subnet_info'):
- subnet_data = vnet_subnet_dict['subnet_info']
+ if vnet_subnet_dict.get("subnet_info"):
+ subnet_data = vnet_subnet_dict["subnet_info"]
- vm_resource.update({
- 'tags': self.get_tags(vm.tags)
- })
+ vm_resource.update({"tags": self.get_tags(vm.tags)})
resource_id = f'/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.Compute/virtualMachines/{vm_resource["name"]}'
- vm_resource['data'].update({
- 'load_balancer': lb_vos,
- 'security_group': nsg_vos,
- 'vnet': vnet_data,
- 'subnet': subnet_data,
- 'subscription': Subscription(subscription_data, strict=False),
- 'azure_monitor': AzureMonitorModel({
- 'resource_id': resource_id
- }, strict=False),
- 'activity_log': ActivityLog({
- 'resource_uri': resource_id
- }, strict=False)
- })
-
- vm_resource['data']['compute']['account'] = subscription_data['subscription_name']
- vm_resource.update({
- 'reference': ReferenceModel({
- 'resource_id': vm_resource['data']['compute']['instance_id'],
- 'external_link': f"https://portal.azure.com/#@.onmicrosoft.com/resource/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.Compute/virtualMachines/{vm_resource['data']['compute']['instance_name']}/overview"
- }),
- 'account': subscription_data['subscription_id'],
- 'instance_type': vm_resource['data']['compute']['instance_type'],
- 'launched_at': datetime_to_iso8601(vm_resource['data']['compute']['launched_at']),
- 'tags': vm.tags
- })
-
- self.set_region_code(vm_resource['region_code'])
+ # update vm_resource data
+ vm_resource["data"].update(
+ {
+ "tenant_id": subscription_data["tenant_id"],
+ "subscription_name": subscription_data["subscription_name"],
+ "subscription_id": subscription_data["subscription_id"],
+ "resource_group": resource_group_name,
+ }
+ )
+
+ vm_resource["data"].update(
+ {
+ "load_balancer": lb_vos,
+ "security_group": nsg_vos,
+ "vnet": vnet_data,
+ "subnet": subnet_data,
+ "azure_monitor": AzureMonitorModel(
+ {"resource_id": resource_id}, strict=False
+ ),
+ "activity_log": ActivityLog(
+ {"resource_uri": resource_id}, strict=False
+ ),
+ }
+ )
+
+ vm_resource["data"]["compute"]["account"] = subscription_data[
+ "subscription_name"
+ ]
+ vm_resource.update(
+ {
+ "reference": ReferenceModel(
+ {
+ "resource_id": vm_resource["data"]["compute"][
+ "instance_id"
+ ],
+ "external_link": f"https://portal.azure.com/#@.onmicrosoft.com/resource/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.Compute/virtualMachines/{vm_resource['data']['compute']['instance_name']}/overview",
+ }
+ ),
+ "account": subscription_data["subscription_id"],
+ "instance_type": vm_resource["data"]["compute"][
+ "instance_type"
+ ],
+ "launched_at": datetime_to_iso8601(
+ vm_resource["data"]["compute"]["launched_at"]
+ ),
+ "tags": vm.tags,
+ }
+ )
+
+ self.set_region_code(vm_resource["region_code"])
vm_resource_vo = VirtualMachineResource(vm_resource, strict=False)
- servers.append(VirtualMachineResponse({'resource': vm_resource_vo}))
+ servers.append(VirtualMachineResponse({"resource": vm_resource_vo}))
except Exception as e:
- _LOGGER.error(f'[list_instances] [{vm.id}] {e}')
+ _LOGGER.error(f"[list_instances] [{vm.id}] {e}")
if type(e) is dict:
- error_resource_response = ErrorResourceResponse({'message': json.dumps(e)})
+ error_resource_response = ErrorResourceResponse(
+ {"message": json.dumps(e)}
+ )
else:
- error_resource_response = ErrorResourceResponse({'message': str(e), 'resource': {'resource_id': vm.id}})
+ error_resource_response = ErrorResourceResponse(
+ {"message": str(e), "resource": {"resource_id": vm.id}}
+ )
errors.append(error_resource_response)
- _LOGGER.debug(f'** VirtualMachine Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** VirtualMachine Finished {time.time() - start_time} Seconds **"
+ )
return servers, errors
@staticmethod
@@ -160,17 +230,14 @@ def get_tags(tags):
tags_result = []
if tags:
for k, v in tags.items():
- tags_result.append({
- 'key': k,
- 'value': v
- })
+ tags_result.append({"key": k, "value": v})
return tags_result
@staticmethod
def get_resource_info_in_vm(vm, resource_groups):
for rg in resource_groups:
- vm_info = vm.id.split('/')
+ vm_info = vm.id.split("/")
for info in vm_info:
if info == rg.name.upper():
resource_group = rg
@@ -181,7 +248,7 @@ def get_resource_info_in_vm(vm, resource_groups):
def get_resources_in_resource_group(resources, resource_group_name):
infos = []
for resource in resources:
- id_info = resource.id.split('/')
+ id_info = resource.id.split("/")
for info in id_info:
if info == resource_group_name.upper():
infos.append(resource)
@@ -191,26 +258,28 @@ def get_resources_in_resource_group(resources, resource_group_name):
def get_skus_resource(skus):
skus_dict = {}
for sku in skus:
- if sku.resource_type == 'virtualMachines':
+ if sku.resource_type == "virtualMachines":
location = sku.locations[0].lower()
if location not in skus_dict:
skus_dict[location] = []
info = {}
- # get sku information for discriminating instance type
- info.update({
- 'resource_type': sku.resource_type,
- 'name': sku.name,
- 'tier': sku.tier,
- 'size': sku.size,
- 'family': sku.family,
- })
+ # get sku information for discriminating Instance type
+ info.update(
+ {
+ "resource_type": sku.resource_type,
+ "name": sku.name,
+ "tier": sku.tier,
+ "size": sku.size,
+ "family": sku.family,
+ }
+ )
# get cpu and memory information
for capa in sku.capabilities:
- if capa.name == 'vCPUs':
- info['core'] = capa.value
- elif capa.name == 'MemoryGB':
- info['memory'] = capa.value
+ if capa.name == "vCPUs":
+ info["core"] = capa.value
+ elif capa.name == "MemoryGB":
+ info["memory"] = capa.value
skus_dict[location].append(info)
return skus_dict
diff --git a/src/spaceone/inventory/manager/virtual_machines/load_balancer_manager.py b/src/spaceone/inventory/manager/virtual_machines/load_balancer_manager.py
index 03efc1a3..898b1a18 100644
--- a/src/spaceone/inventory/manager/virtual_machines/load_balancer_manager.py
+++ b/src/spaceone/inventory/manager/virtual_machines/load_balancer_manager.py
@@ -4,7 +4,6 @@
class VirtualMachineLoadBalancerManager(BaseManager):
-
def __init__(self, params, azure_vm_connector=None, **kwargs):
super().__init__(**kwargs)
self.params = params
@@ -25,23 +24,23 @@ def get_load_balancer_info(self, vm, load_balancers, public_ip_addresses):
}
"""
lb_data = []
- match_load_balancers = self.get_load_balancers_from_nic(vm.network_profile.network_interfaces, load_balancers)
+ match_load_balancers = self.get_load_balancers_from_nic(
+ vm.network_profile.network_interfaces, load_balancers
+ )
for match_load_balancer in match_load_balancers:
ports, protocols = self.get_lb_port_protocol(match_load_balancer)
load_balancer_data = {
- 'type': 'network',
- 'scheme': self.get_lb_scheme(match_load_balancer),
- 'endpoint': self.get_lb_endpoint(match_load_balancer, public_ip_addresses),
- 'port': ports,
- 'name': match_load_balancer.name,
- 'protocol': protocols,
- 'tags': {
- 'lb_id': match_load_balancer.id
- }
-
+ "type": "network",
+ "scheme": self.get_lb_scheme(match_load_balancer),
+ "endpoint": self.get_lb_endpoint(
+ match_load_balancer, public_ip_addresses
+ ),
+ "port": ports,
+ "name": match_load_balancer.name,
+ "protocol": protocols,
+ "tags": {"lb_id": match_load_balancer.id},
}
- # pprint.pprint(load_balancer_data)
lb_data.append(LoadBalancer(load_balancer_data, strict=False))
return lb_data
@@ -55,18 +54,18 @@ def get_lb_endpoint(self, match_load_balancer, public_ip_addresses):
if frontend_ip_configurations:
frontend_ip_configurations = []
- if self.get_lb_scheme(match_load_balancer) == 'internet-facing':
+ if self.get_lb_scheme(match_load_balancer) == "internet-facing":
for ip in frontend_ip_configurations:
- public_ip_address_name = ip.public_ip_address.id.split('/')[-1]
+ public_ip_address_name = ip.public_ip_address.id.split("/")[-1]
for pub_ip in public_ip_addresses:
if public_ip_address_name == pub_ip.name:
return pub_ip.ip_address
- elif self.get_lb_scheme(match_load_balancer) == 'internal':
+ elif self.get_lb_scheme(match_load_balancer) == "internal":
for ip in frontend_ip_configurations:
return ip.private_ip_address
- return ''
+ return ""
@staticmethod
def get_load_balancers_from_nic(network_interfaces, load_balancers):
@@ -80,7 +79,7 @@ def get_load_balancers_from_nic(network_interfaces, load_balancers):
vm_nics = []
for nic in network_interfaces:
- vm_nics.append(nic.id.split('/')[-1])
+ vm_nics.append(nic.id.split("/")[-1])
for vm_nic in vm_nics:
for lb in load_balancers:
@@ -88,7 +87,7 @@ def get_load_balancers_from_nic(network_interfaces, load_balancers):
for be in lb.backend_address_pools:
if be.backend_ip_configurations:
for ip_conf in be.backend_ip_configurations:
- nic_name = ip_conf.id.split('/')[-3]
+ nic_name = ip_conf.id.split("/")[-3]
if nic_name == vm_nic:
match_load_balancers.append(lb)
@@ -99,9 +98,9 @@ def get_lb_scheme(match_load_balancer):
frontend_ip_configurations = match_load_balancer.frontend_ip_configurations
for fe_ip_conf in frontend_ip_configurations:
if fe_ip_conf.public_ip_address:
- return 'internet-facing'
+ return "internet-facing"
else:
- return 'internal'
+ return "internal"
@staticmethod
def get_lb_port_protocol(match_load_balancer):
@@ -114,4 +113,3 @@ def get_lb_port_protocol(match_load_balancer):
protocols.append(lbr.protocol.upper())
return ports, protocols
-
diff --git a/src/spaceone/inventory/manager/virtual_machines/network_security_group_manager.py b/src/spaceone/inventory/manager/virtual_machines/network_security_group_manager.py
index a7016169..709ae778 100644
--- a/src/spaceone/inventory/manager/virtual_machines/network_security_group_manager.py
+++ b/src/spaceone/inventory/manager/virtual_machines/network_security_group_manager.py
@@ -4,14 +4,15 @@
class VirtualMachineNetworkSecurityGroupManager(BaseManager):
-
def __init__(self, params, azure_vm_connector=None, **kwargs):
super().__init__(**kwargs)
self.params = params
self.azure_vm_connector: VirtualMachinesConnector = azure_vm_connector
- def get_network_security_group_info(self, vm, network_security_groups, network_interfaces):
- '''
+ def get_network_security_group_info(
+ self, vm, network_security_groups, network_interfaces
+ ):
+ """
nsg_data = {
"protocol" = "",
"remote" = "",
@@ -26,20 +27,23 @@ def get_network_security_group_info(self, vm, network_security_groups, network_i
"port" = "",
"priority" = 0
}
- '''
+ """
nsg_data = []
network_security_groups_data = []
- if getattr(vm.network_profile, 'network_interfaces') and vm.network_profile.network_interfaces:
+ if (
+ getattr(vm.network_profile, "network_interfaces")
+ and vm.network_profile.network_interfaces
+ ):
vm_network_interfaces = vm.network_profile.network_interfaces
else:
vm_network_interfaces = []
- match_network_security_groups = self.get_network_security_group_from_nic(vm_network_interfaces,
- network_interfaces,
- network_security_groups)
+ match_network_security_groups = self.get_network_security_group_from_nic(
+ vm_network_interfaces, network_interfaces, network_security_groups
+ )
for network_security_group in match_network_security_groups:
sg_id = network_security_group.id
@@ -48,11 +52,11 @@ def get_network_security_group_info(self, vm, network_security_groups, network_i
network_security_groups_data.extend(security_data)
default_security_rules = network_security_group.default_security_rules
- default_security_data = self.get_nsg_security_rules(default_security_rules, sg_id)
+ default_security_data = self.get_nsg_security_rules(
+ default_security_rules, sg_id
+ )
network_security_groups_data.extend(default_security_data)
- # pprint.pprint(network_security_groups_data)
-
for nsg in network_security_groups_data:
nsg_data.append(SecurityGroup(nsg, strict=False))
@@ -62,14 +66,14 @@ def get_nsg_security_rules(self, security_rules, sg_id):
result = []
for s_rule in security_rules:
security_rule_data = {
- 'protocol': self.get_nsg_protocol(s_rule.protocol),
- 'remote_id': s_rule.id,
- 'security_group_name': s_rule.id.split('/')[-3],
- 'description': s_rule.description,
- 'direction': s_rule.direction.lower(),
- 'priority': s_rule.priority,
- 'security_group_id': sg_id,
- 'action': s_rule.access.lower()
+ "protocol": self.get_nsg_protocol(s_rule.protocol),
+ "remote_id": s_rule.id,
+ "security_group_name": s_rule.id.split("/")[-3],
+ "description": s_rule.description,
+ "direction": s_rule.direction.lower(),
+ "priority": s_rule.priority,
+ "security_group_id": sg_id,
+ "action": s_rule.access.lower(),
}
remote_data = self.get_nsg_remote(s_rule)
@@ -83,20 +87,24 @@ def get_nsg_security_rules(self, security_rules, sg_id):
@staticmethod
def get_nsg_protocol(protocol):
- if protocol == '*':
- return 'ALL'
+ if protocol == "*":
+ return "ALL"
return protocol
@staticmethod
- def get_network_security_group_from_nic(vm_network_interfaces, network_interfaces,
- network_security_groups):
+ def get_network_security_group_from_nic(
+ vm_network_interfaces, network_interfaces, network_security_groups
+ ):
nsgs = []
for vm_nic in vm_network_interfaces:
- vm_nic_name = vm_nic.id.split('/')[-1]
+ vm_nic_name = vm_nic.id.split("/")[-1]
for nic in network_interfaces:
if vm_nic_name == nic.name:
- if getattr(nic, 'network_security_group') and nic.network_security_group:
- nsg_name = nic.network_security_group.id.split('/')[-1]
+ if (
+ getattr(nic, "network_security_group")
+ and nic.network_security_group
+ ):
+ nsg_name = nic.network_security_group.id.split("/")[-1]
for nsg in network_security_groups:
if nsg.name == nsg_name:
nsgs.append(nsg)
@@ -109,36 +117,30 @@ def get_network_security_group_from_nic(vm_network_interfaces, network_interface
def get_nsg_remote(s_rule):
remote_result = {}
if s_rule.source_address_prefix is not None:
- if '/' in s_rule.source_address_prefix:
- remote_result.update({
- 'remote': s_rule.source_address_prefix,
- 'remote_cidr': s_rule.source_address_prefix
- })
- elif s_rule.source_address_prefix == '*':
- remote_result.update({
- 'remote': '*',
- 'remote_cidr': '*'
- })
+ if "/" in s_rule.source_address_prefix:
+ remote_result.update(
+ {
+ "remote": s_rule.source_address_prefix,
+ "remote_cidr": s_rule.source_address_prefix,
+ }
+ )
+ elif s_rule.source_address_prefix == "*":
+ remote_result.update({"remote": "*", "remote_cidr": "*"})
else:
- remote_result.update({
- 'remote': s_rule.source_address_prefix
- })
+ remote_result.update({"remote": s_rule.source_address_prefix})
else:
address_prefixes = s_rule.source_address_prefixes
- remote = ''
+ remote = ""
if address_prefixes:
for prfx in address_prefixes:
remote += prfx
- remote += ', '
+ remote += ", "
remote = remote[:-2]
- remote_result.update({
- 'remote': remote,
- 'remote_cidr': remote
- })
+ remote_result.update({"remote": remote, "remote_cidr": remote})
if len(remote_result) > 0:
return remote_result
@@ -149,41 +151,49 @@ def get_nsg_remote(s_rule):
def get_nsg_port(s_rule):
port_result = {}
- if getattr(s_rule, 'destination_port_range') and s_rule.destination_port_range is not None:
- if '-' in s_rule.destination_port_range:
- port_min = s_rule.destination_port_range.split('-')[0]
- port_max = s_rule.destination_port_range.split('-')[1]
- port_result.update({
- 'port_range_min': port_min,
- 'port_range_max': port_max,
- 'port': s_rule.destination_port_range
- })
- elif s_rule.destination_port_range == '*':
- port_result.update({
- 'port_range_min': 0,
- 'port_range_max': 0,
- 'port': '*'
- })
+ if (
+ getattr(s_rule, "destination_port_range")
+ and s_rule.destination_port_range is not None
+ ):
+ if "-" in s_rule.destination_port_range:
+ port_min = s_rule.destination_port_range.split("-")[0]
+ port_max = s_rule.destination_port_range.split("-")[1]
+ port_result.update(
+ {
+ "port_range_min": port_min,
+ "port_range_max": port_max,
+ "port": s_rule.destination_port_range,
+ }
+ )
+ elif s_rule.destination_port_range == "*":
+ port_result.update(
+ {"port_range_min": 0, "port_range_max": 0, "port": "*"}
+ )
else:
- port_result.update({
- 'port_range_min': s_rule.destination_port_range,
- 'port_range_max': s_rule.destination_port_range,
- 'port': s_rule.destination_port_range
- })
+ port_result.update(
+ {
+ "port_range_min": s_rule.destination_port_range,
+ "port_range_max": s_rule.destination_port_range,
+ "port": s_rule.destination_port_range,
+ }
+ )
else:
- if getattr(s_rule, "destination_port_ranges") and s_rule.destination_port_ranges:
+ if (
+ getattr(s_rule, "destination_port_ranges")
+ and s_rule.destination_port_ranges
+ ):
port_ranges = s_rule.destination_port_ranges
if not port_ranges:
port_ranges = []
port_min = 0
port_max = 0
- all_port = ''
+ all_port = ""
ports = []
for port in port_ranges:
- if '-' in port: # ex. ['33-55']
- for i in port.split('-'):
+ if "-" in port: # ex. ['33-55']
+ for i in port.split("-"):
ports.append(i)
else: # ex. ['8080']
ports.append(port)
@@ -196,11 +206,13 @@ def get_nsg_port(s_rule):
all_port = ", ".join(map(str, ports)) # Update string
- port_result.update({
- 'port_range_min': port_min,
- 'port_range_max': port_max,
- 'port': all_port
- })
+ port_result.update(
+ {
+ "port_range_min": port_min,
+ "port_range_max": port_max,
+ "port": all_port,
+ }
+ )
if len(port_result) > 0:
return port_result
diff --git a/src/spaceone/inventory/manager/virtual_machines/nic_manager.py b/src/spaceone/inventory/manager/virtual_machines/nic_manager.py
index aa424bf7..80525382 100644
--- a/src/spaceone/inventory/manager/virtual_machines/nic_manager.py
+++ b/src/spaceone/inventory/manager/virtual_machines/nic_manager.py
@@ -2,18 +2,17 @@
from spaceone.inventory.model.virtual_machines.data import NIC, NICTags
from spaceone.inventory.connector.virtual_machines import VirtualMachinesConnector
-import pprint
-
class VirtualMachineNICManager(BaseManager):
-
def __init__(self, params, azure_vm_connector=None, **kwargs):
super().__init__(**kwargs)
self.params = params
self.azure_vm_connector: VirtualMachinesConnector = azure_vm_connector
- def get_nic_info(self, vm, network_interfaces, public_ip_addresses, virtual_networks):
- '''
+ def get_nic_info(
+ self, vm, network_interfaces, public_ip_addresses, virtual_networks
+ ):
+ """
nic_data = {
"device_index": 0,
"device": "",
@@ -26,7 +25,7 @@ def get_nic_info(self, vm, network_interfaces, public_ip_addresses, virtual_netw
"nic_id": ""
}
}
- '''
+ """
nic_data = []
index = 0
@@ -36,22 +35,27 @@ def get_nic_info(self, vm, network_interfaces, public_ip_addresses, virtual_netw
if vm_network_interfaces is None:
vm_network_interfaces = []
- match_network_interfaces = self.get_network_interfaces(vm_network_interfaces, network_interfaces)
+ match_network_interfaces = self.get_network_interfaces(
+ vm_network_interfaces, network_interfaces
+ )
for vm_nic in match_network_interfaces:
ip_configurations = self.get_ip_configurations(vm_nic)
network_data = {
- 'device_index': index,
- 'cidr': self.get_nic_cidr(ip_configurations, virtual_networks),
- 'ip_addresses': self.get_nic_ip_addresses(ip_configurations),
- 'mac_address': vm_nic.mac_address,
- 'public_ip_address': self.get_nic_public_ip_addresses(ip_configurations,
- public_ip_addresses),
- 'tags': self.get_tags(vm_nic)
+ "device_index": index,
+ "cidr": self.get_nic_cidr(ip_configurations, virtual_networks),
+ "ip_addresses": self.get_nic_ip_addresses(ip_configurations),
+ "mac_address": vm_nic.mac_address,
+ "public_ip_address": self.get_nic_public_ip_addresses(
+ ip_configurations, public_ip_addresses
+ ),
+ "tags": self.get_tags(vm_nic),
}
- primary_ip = self.get_primary_ip_addresses(self.get_ip_configurations(vm_nic))
+ primary_ip = self.get_primary_ip_addresses(
+ self.get_ip_configurations(vm_nic)
+ )
index += 1
nic_data.append(NIC(network_data, strict=False))
@@ -61,8 +65,8 @@ def get_nic_info(self, vm, network_interfaces, public_ip_addresses, virtual_netw
@staticmethod
def get_nic_public_ip_addresses(ip_configurations, public_ip_addresses):
for ip_conf in ip_configurations:
- if getattr(ip_conf, 'public_ip_address') and ip_conf.public_ip_address:
- ip_name = ip_conf.public_ip_address.id.split('/')[-1]
+ if getattr(ip_conf, "public_ip_address") and ip_conf.public_ip_address:
+ ip_name = ip_conf.public_ip_address.id.split("/")[-1]
for pub_ip in public_ip_addresses:
if ip_name == pub_ip.name:
return pub_ip.ip_address
@@ -72,7 +76,7 @@ def get_nic_public_ip_addresses(ip_configurations, public_ip_addresses):
@staticmethod
def get_nic_cidr(ip_configurations, virtual_networks):
if ip_configurations:
- subnet_name = ip_configurations[0].subnet.id.split('/')[-1]
+ subnet_name = ip_configurations[0].subnet.id.split("/")[-1]
for vnet in virtual_networks:
for subnet in vnet.subnets:
if subnet_name == subnet.name:
@@ -95,16 +99,14 @@ def get_nic_ip_addresses(ip_configurations):
def get_primary_ip_addresses(ip_configurations):
result = {}
for ip_conf in ip_configurations:
- result.update({
- ip_conf.private_ip_address: ip_conf.primary
- })
+ result.update({ip_conf.private_ip_address: ip_conf.primary})
return result
@staticmethod
def get_ip_configurations(vm_nic):
result = []
- if getattr(vm_nic, 'ip_configurations') and vm_nic.ip_configurations:
+ if getattr(vm_nic, "ip_configurations") and vm_nic.ip_configurations:
for ip in vm_nic.ip_configurations:
result.append(ip)
@@ -113,10 +115,10 @@ def get_ip_configurations(vm_nic):
@staticmethod
def get_tags(vm_nic):
return {
- 'name': vm_nic.name,
- 'etag': vm_nic.etag,
- 'enable_accelerated_networking': vm_nic.enable_accelerated_networking,
- 'enable_ip_forwarding': vm_nic.enable_ip_forwarding
+ "name": vm_nic.name,
+ "etag": vm_nic.etag,
+ "enable_accelerated_networking": vm_nic.enable_accelerated_networking,
+ "enable_ip_forwarding": vm_nic.enable_ip_forwarding,
}
@staticmethod
@@ -124,7 +126,7 @@ def get_network_interfaces(vm_network_interfaces, network_interfaces):
result = []
for vm_nic in vm_network_interfaces:
for nic in network_interfaces:
- if vm_nic.id.split('/')[-1] == nic.name:
+ if vm_nic.id.split("/")[-1] == nic.name:
result.append(nic)
break
diff --git a/src/spaceone/inventory/manager/virtual_machines/vmss_manager.py b/src/spaceone/inventory/manager/virtual_machines/vmss_manager.py
deleted file mode 100644
index 4c6952bf..00000000
--- a/src/spaceone/inventory/manager/virtual_machines/vmss_manager.py
+++ /dev/null
@@ -1,25 +0,0 @@
-from spaceone.core.manager import BaseManager
-from spaceone.inventory.model.virtual_machines.data import VMSS
-from spaceone.inventory.connector.virtual_machines import VirtualMachinesConnector
-
-
-class VirtualMachineVMScaleSetManager(BaseManager):
-
- def __init__(self, params, azure_vm_connector=None, **kwargs):
- super().__init__(**kwargs)
- self.params = params
- self.azure_vm_connector: VirtualMachinesConnector = azure_vm_connector
-
- def get_vmss_info(self, vmss):
- '''
- vmss_data = {
- "scale_set_name": ""
- "capacity": ""
- "admin_username": ""
- "unique_id": ""
- }
- '''
-
- vmss_data = {}
- return VMSS(vmss_data, strict=False)
-
diff --git a/src/spaceone/inventory/manager/virtual_networks/instance_manager.py b/src/spaceone/inventory/manager/virtual_networks/instance_manager.py
index d7ec3d26..d4f6d97f 100644
--- a/src/spaceone/inventory/manager/virtual_networks/instance_manager.py
+++ b/src/spaceone/inventory/manager/virtual_networks/instance_manager.py
@@ -4,128 +4,156 @@
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.virtual_networks import VirtualNetworksConnector
from spaceone.inventory.model.virtual_networks.cloud_service import *
-from spaceone.inventory.model.virtual_networks.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.virtual_networks.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.virtual_networks.data import *
_LOGGER = logging.getLogger(__name__)
class VirtualNetworksManager(AzureManager):
- connector_name = 'VirtualNetworksConnector'
+ connector_name = "VirtualNetworksConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of virtual network data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of virtual network data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
_LOGGER.debug("** Vnet START **")
start_time = time.time()
- secret_data = params['secret_data']
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- vnet_conn: VirtualNetworksConnector = self.locator.get_connector(self.connector_name, **params)
+ vnet_conn: VirtualNetworksConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
virtual_network_responses = []
error_responses = []
virtual_networks = vnet_conn.list_all_virtual_networks()
for virtual_network in virtual_networks:
- virtual_network_id = ''
+ virtual_network_id = ""
try:
vnet_dict = self.convert_nested_dictionary(virtual_network)
- virtual_network_id = vnet_dict['id']
+ virtual_network_id = vnet_dict["id"]
# update vnet_dict
- vnet_dict.update({
- 'resource_group': self.get_resource_group_from_id(virtual_network_id),
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': virtual_network_id}
- })
+ vnet_dict = self.update_tenant_id_from_secret_data(
+ vnet_dict, params["secret_data"]
+ )
+ vnet_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ virtual_network_id
+ ),
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": virtual_network_id},
+ }
+ )
- if vnet_dict.get('subnets') is not None:
- subnets = vnet_dict['subnets']
- resource_group = vnet_dict['resource_group']
+ if vnet_dict.get("subnets") is not None:
+ subnets = vnet_dict["subnets"]
+ resource_group = vnet_dict["resource_group"]
# Change attached network interfaces objects to id
self.change_subnet_object_to_ids_list(subnets)
- vnet_dict.update({
- 'subnets': self.update_subnet_info(subnets),
- 'service_endpoints': self.get_service_endpoints(subnets),
- 'private_endpoints': self.get_private_endpoints(subnets),
- 'azure_firewall': self.get_azure_firewall(vnet_conn, subnets, resource_group),
- 'connected_devices': self.get_connected_devices(subnets)
- })
+ vnet_dict.update(
+ {
+ "subnets": self.update_subnet_info(subnets),
+ "service_endpoints": self.get_service_endpoints(subnets),
+ "private_endpoints": self.get_private_endpoints(subnets),
+ "azure_firewall": self.get_azure_firewall(
+ vnet_conn, subnets, resource_group
+ ),
+ "connected_devices": self.get_connected_devices(subnets),
+ }
+ )
# If not 'custom dns servers', add default azure dns server dict to vnet
- if vnet_dict.get('dhcp_options') is None:
- dhcp_option_dict = {
- 'dns_servers': ['Azure provided DNS service']
- }
- vnet_dict.update({
- 'dhcp_options': dhcp_option_dict
- })
+ if vnet_dict.get("dhcp_options") is None:
+ dhcp_option_dict = {"dns_servers": ["Azure provided DNS service"]}
+ vnet_dict.update({"dhcp_options": dhcp_option_dict})
- '''
+ """
# Get IP Address Range, Count
if vnet_dict.get('address_space') is not None:
if vnet_dict['address_space'].get('address_prefixes') is not None:
for address_space in vnet_dict['address_space']['address_prefixes']: # ex. address_space = '10.0.0.0/16'
ip = IPNetwork(address_space)
# vnet_dict['address_space']['address_count'] = ip.size
- '''
+ """
vnet_data = VirtualNetwork(vnet_dict, strict=False)
- vnet_resource = VirtualNetworkResource({
- 'data': vnet_data,
- 'region_code': vnet_data.location,
- 'reference': ReferenceModel(vnet_data.reference()),
- 'name': vnet_data.name,
- 'account': vnet_data.subscription_id,
- 'tags': vnet_dict.get('tags', {})
- })
+ vnet_resource = VirtualNetworkResource(
+ {
+ "data": vnet_data,
+ "region_code": vnet_data.location,
+ "reference": ReferenceModel(vnet_data.reference()),
+ "name": vnet_data.name,
+ "account": vnet_data.subscription_id,
+ "tags": vnet_dict.get("tags", {}),
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(vnet_data['location'])
+ self.set_region_code(vnet_data["location"])
# _LOGGER.debug(f'[VNET INFO] {vnet_resource.to_primitive()}')
- virtual_network_responses.append(VirtualNetworkResponse({'resource': vnet_resource}))
+ virtual_network_responses.append(
+ VirtualNetworkResponse({"resource": vnet_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {virtual_network_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Network', 'VirtualNetwork', virtual_network_id)
+ _LOGGER.error(
+ f"[list_instances] {virtual_network_id} {e}", exc_info=True
+ )
+ error_resource_response = self.generate_resource_error_response(
+ e, "Network", "VirtualNetwork", virtual_network_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** Virtual Network Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** Virtual Network Finished {time.time() - start_time} Seconds **"
+ )
return virtual_network_responses, error_responses
def get_azure_firewall(self, vnet_conn, subnet_list, resource_group_name):
# Get Azure firewall information
azure_firewall_list = []
for subnet in subnet_list:
- if subnet.get('connected_devices_list'):
- for device in subnet['connected_devices_list']:
- if device['type'] == 'azureFirewalls': # The subnet which has 'AzureFirewall' is typed as 'azureFirewalls'
- firewall_obj = vnet_conn.list_all_firewalls(resource_group_name) # List all firewalls in the resource group
+ if subnet.get("connected_devices_list"):
+ for device in subnet["connected_devices_list"]:
+ if (
+ device["type"] == "azureFirewalls"
+ ): # The subnet which has 'AzureFirewall' is typed as 'azureFirewalls'
+ firewall_obj = vnet_conn.list_all_firewalls(
+ resource_group_name
+ ) # List all firewalls in the resource group
for firewall in firewall_obj:
firewall_dict = self.convert_nested_dictionary(firewall)
- for ip_configuration in firewall_dict['ip_configurations']:
- if ip_configuration.get('subnet') is not None:
- if subnet['id'] in ip_configuration['subnet']['id']: # If subnet id matches the firewall's subnet id
- firewall_dict['subnet'] = subnet['id'].split('/')[10]
+ for ip_configuration in firewall_dict["ip_configurations"]:
+ if ip_configuration.get("subnet") is not None:
+ if (
+ subnet["id"] in ip_configuration["subnet"]["id"]
+ ): # If subnet id matches the firewall's subnet id
+ firewall_dict["subnet"] = subnet["id"].split(
+ "/"
+ )[10]
azure_firewall_list.append(firewall_dict)
return azure_firewall_list
@@ -134,20 +162,20 @@ def get_azure_firewall(self, vnet_conn, subnet_list, resource_group_name):
def change_subnet_object_to_ids_list(subnets_dict):
subnet_id_list = []
for subnet in subnets_dict:
- subnet_id_list.append(subnet['id'])
- if subnet.get('private_endpoints') is not None:
- for private_endpoint in subnet['private_endpoints']:
- if private_endpoint.get('network_interfaces') is not None:
- for ni in private_endpoint['network_interfaces']:
- if ni.get('network_security_group') is not None:
- ni['network_interfaces'] = ni['id']
- ni['subnets'] = subnet_id_list
+ subnet_id_list.append(subnet["id"])
+ if subnet.get("private_endpoints") is not None:
+ for private_endpoint in subnet["private_endpoints"]:
+ if private_endpoint.get("network_interfaces") is not None:
+ for ni in private_endpoint["network_interfaces"]:
+ if ni.get("network_security_group") is not None:
+ ni["network_interfaces"] = ni["id"]
+ ni["subnets"] = subnet_id_list
return subnet_id_list
@staticmethod
def update_subnet_info(subnet_list):
- '''
+ """
: subnets_dict = {
ip_configurations= [
{
@@ -173,21 +201,25 @@ def update_subnet_info(subnet_list):
}
}
...
- '''
+ """
for subnet in subnet_list:
# Get network security group's name
- if subnet.get('network_security_group') is not None:
- subnet['network_security_group']['name'] = subnet['network_security_group']['id'].split('/')[8]
+ if subnet.get("network_security_group") is not None:
+ subnet["network_security_group"]["name"] = subnet[
+ "network_security_group"
+ ]["id"].split("/")[8]
# Get private endpoints
- if subnet.get('private_endpoints') is not None:
- for private_endpoint in subnet['private_endpoints']:
- private_endpoint.update({
- 'name': private_endpoint['id'].split('/')[8],
- 'subnet': subnet['name'],
- 'resource_group': private_endpoint['id'].split('/')[4]
- })
+ if subnet.get("private_endpoints") is not None:
+ for private_endpoint in subnet["private_endpoints"]:
+ private_endpoint.update(
+ {
+ "name": private_endpoint["id"].split("/")[8],
+ "subnet": subnet["name"],
+ "resource_group": private_endpoint["id"].split("/")[4],
+ }
+ )
return subnet_list
@@ -196,9 +228,9 @@ def get_service_endpoints(subnet_list):
service_endpoint_list = []
for subnet in subnet_list:
# Put subnet name to service endpoints dictionary
- if subnet.get('service_endpoints') is not None:
- for service_endpoint in subnet['service_endpoints']:
- service_endpoint['subnet'] = subnet['name']
+ if subnet.get("service_endpoints") is not None:
+ for service_endpoint in subnet["service_endpoints"]:
+ service_endpoint["subnet"] = subnet["name"]
service_endpoint_list.append(service_endpoint)
return service_endpoint_list
@@ -207,8 +239,8 @@ def get_service_endpoints(subnet_list):
def get_private_endpoints(subnet_list):
private_endpoint_list = []
for subnet in subnet_list:
- if subnet.get('private_endpoints') is not None:
- for private_endpoint in subnet['private_endpoints']:
+ if subnet.get("private_endpoints") is not None:
+ for private_endpoint in subnet["private_endpoints"]:
private_endpoint_list.append(private_endpoint)
return private_endpoint_list
@@ -219,11 +251,11 @@ def get_connected_devices(subnet_list):
for subnet in subnet_list:
device_dict = {}
- if subnet.get('ip_configurations') is not None:
- for ip_configuration in subnet['ip_configurations']:
- device_dict['name'] = subnet['name']
- device_dict['type'] = ip_configuration['id'].split('/')[7]
- device_dict['device'] = ip_configuration['id'].split('/')[8]
+ if subnet.get("ip_configurations") is not None:
+ for ip_configuration in subnet["ip_configurations"]:
+ device_dict["name"] = subnet["name"]
+ device_dict["type"] = ip_configuration["id"].split("/")[7]
+ device_dict["device"] = ip_configuration["id"].split("/")[8]
connected_devices_list.append(device_dict)
return connected_devices_list
diff --git a/src/spaceone/inventory/manager/vm_scale_sets/scale_set_manager.py b/src/spaceone/inventory/manager/vm_scale_sets/scale_set_manager.py
index b060f623..92463cb4 100644
--- a/src/spaceone/inventory/manager/vm_scale_sets/scale_set_manager.py
+++ b/src/spaceone/inventory/manager/vm_scale_sets/scale_set_manager.py
@@ -4,164 +4,261 @@
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.inventory.connector.vm_scale_sets import VmScaleSetsConnector
from spaceone.inventory.model.vm_scale_sets.cloud_service import *
-from spaceone.inventory.model.vm_scale_sets.cloud_service_type import CLOUD_SERVICE_TYPES
+from spaceone.inventory.model.vm_scale_sets.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
from spaceone.inventory.model.vm_scale_sets.data import *
_LOGGER = logging.getLogger(__name__)
class VmScaleSetsManager(AzureManager):
- connector_name = 'VmScaleSetsConnector'
+ connector_name = "VmScaleSetsConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
- Response:
- CloudServiceResponse (list) : dictionary of azure vm scale set data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+ Response:
+ CloudServiceResponse (list) : dictionary of azure vm scale set data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
_LOGGER.debug("** VmScaleSet START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
- vm_scale_set_conn: VmScaleSetsConnector = self.locator.get_connector(self.connector_name, **params)
+ vm_scale_set_conn: VmScaleSetsConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
vm_scale_set_responses = []
error_responses = []
vm_scale_sets = vm_scale_set_conn.list_vm_scale_sets()
for vm_scale_set in vm_scale_sets:
- vm_scale_set_id = ''
+ vm_scale_set_id = ""
try:
vm_scale_set_dict = self.convert_nested_dictionary(vm_scale_set)
- vm_scale_set_id = vm_scale_set_dict['id']
+ vm_scale_set_id = vm_scale_set_dict["id"]
# update vm_scale_set_dict
- vm_scale_set_dict.update({
- 'resource_group': self.get_resource_group_from_id(vm_scale_set_id), # parse resource_group from ID
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': vm_scale_set_id}
- })
-
- if vm_scale_set_dict.get('proximity_placement_group'): # if it has a key -> get value -> check if it isn't None / if no 'Key' -> return None
- vm_scale_set_dict.update({
- 'proximity_placement_group_display': self.get_proximity_placement_group_name(vm_scale_set_dict['proximity_placement_group']['id'])
- })
+ vm_scale_set_dict.update(
+ {
+ "resource_group": self.get_resource_group_from_id(
+ vm_scale_set_id
+ ), # parse resource_group from ID
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": vm_scale_set_id},
+ }
+ )
+
+ if vm_scale_set_dict.get(
+ "proximity_placement_group"
+ ): # if it has a key -> get value -> check if it isn't None / if no 'Key' -> return None
+ vm_scale_set_dict.update(
+ {
+ "proximity_placement_group_display": self.get_proximity_placement_group_name(
+ vm_scale_set_dict["proximity_placement_group"]["id"]
+ )
+ }
+ )
# Get Instance termination notification display
- if vm_scale_set_dict.get('virtual_machine_profile') is not None:
- if vm_scale_set_dict['virtual_machine_profile'].get('scheduled_events_profile') is not None:
- if vm_scale_set.virtual_machine_profile['scheduled_events_profile']['terminate_notification_profile']['enable']:
- terminate_notification_display = 'On'
+ if vm_scale_set_dict.get("virtual_machine_profile") is not None:
+ if (
+ vm_scale_set_dict["virtual_machine_profile"].get(
+ "scheduled_events_profile"
+ )
+ is not None
+ ):
+ if vm_scale_set.virtual_machine_profile[
+ "scheduled_events_profile"
+ ]["terminate_notification_profile"]["enable"]:
+ terminate_notification_display = "On"
else:
- terminate_notification_display = 'Off'
+ terminate_notification_display = "Off"
- vm_scale_set_dict.update({
- 'terminate_notification_display': terminate_notification_display
- })
+ vm_scale_set_dict.update(
+ {
+ "terminate_notification_display": terminate_notification_display
+ }
+ )
# Convert disks' sku-dict to string display
- if vm_scale_set_dict['virtual_machine_profile'].get('storage_profile') is not None:
- if vm_scale_set_dict['virtual_machine_profile']['storage_profile'].get('image_reference'):
- image_reference_dict = vm_scale_set_dict['virtual_machine_profile']['storage_profile']['image_reference']
- image_reference_str = \
- str(image_reference_dict['publisher']) + " / " + str(image_reference_dict['offer']) + " / " + str(image_reference_dict['sku']) + " / " + str(image_reference_dict['version'])
- vm_scale_set_dict['virtual_machine_profile']['storage_profile'].update({
- 'image_reference_display': image_reference_str
- })
+ if (
+ vm_scale_set_dict["virtual_machine_profile"].get(
+ "storage_profile"
+ )
+ is not None
+ ):
+ if vm_scale_set_dict["virtual_machine_profile"][
+ "storage_profile"
+ ].get("image_reference"):
+ image_reference_dict = vm_scale_set_dict[
+ "virtual_machine_profile"
+ ]["storage_profile"]["image_reference"]
+ image_reference_str = (
+ str(image_reference_dict["publisher"])
+ + " / "
+ + str(image_reference_dict["offer"])
+ + " / "
+ + str(image_reference_dict["sku"])
+ + " / "
+ + str(image_reference_dict["version"])
+ )
+ vm_scale_set_dict["virtual_machine_profile"][
+ "storage_profile"
+ ].update({"image_reference_display": image_reference_str})
# switch storage_account_type to storage_account_type for user-friendly words.
# (ex.Premium LRS -> Premium SSD, Standard HDD..)
- if vm_scale_set_dict['virtual_machine_profile']['storage_profile'].get('data_disks'):
- for data_disk in vm_scale_set_dict['virtual_machine_profile']['storage_profile']['data_disks']:
- data_disk['managed_disk'].update({
- 'storage_type': self.get_disk_storage_type(data_disk['managed_disk']['storage_account_type'])
- })
+ if vm_scale_set_dict["virtual_machine_profile"][
+ "storage_profile"
+ ].get("data_disks"):
+ for data_disk in vm_scale_set_dict[
+ "virtual_machine_profile"
+ ]["storage_profile"]["data_disks"]:
+ data_disk["managed_disk"].update(
+ {
+ "storage_type": self.get_disk_storage_type(
+ data_disk["managed_disk"][
+ "storage_account_type"
+ ]
+ )
+ }
+ )
# Get VM Profile's operating_system type (Linux or Windows)
- if vm_scale_set_dict['virtual_machine_profile'].get('os_profile') is not None:
- vm_scale_set_dict['virtual_machine_profile']['os_profile'].update({
- 'operating_system': self.get_operating_system(vm_scale_set_dict['virtual_machine_profile']['os_profile'])
- })
+ if (
+ vm_scale_set_dict["virtual_machine_profile"].get("os_profile")
+ is not None
+ ):
+ vm_scale_set_dict["virtual_machine_profile"][
+ "os_profile"
+ ].update(
+ {
+ "operating_system": self.get_operating_system(
+ vm_scale_set_dict["virtual_machine_profile"][
+ "os_profile"
+ ]
+ )
+ }
+ )
                # Get VM Profile's primary Vnet
- if vm_scale_set_dict['virtual_machine_profile'].get('network_profile') is not None:
- vmss_vm_network_profile_dict = vm_scale_set_dict['virtual_machine_profile']['network_profile']
-
- if primary_vnet := self.get_primary_vnet(vmss_vm_network_profile_dict['network_interface_configurations']):
- vmss_vm_network_profile_dict.update({'primary_vnet': primary_vnet})
+ if (
+ vm_scale_set_dict["virtual_machine_profile"].get(
+ "network_profile"
+ )
+ is not None
+ ):
+ vmss_vm_network_profile_dict = vm_scale_set_dict[
+ "virtual_machine_profile"
+ ]["network_profile"]
+
+ if primary_vnet := self.get_primary_vnet(
+ vmss_vm_network_profile_dict[
+ "network_interface_configurations"
+ ]
+ ):
+ vmss_vm_network_profile_dict.update(
+ {"primary_vnet": primary_vnet}
+ )
# Add vm instances list attached to VMSS
vm_instances_list = list()
instance_count = 0
- resource_group = vm_scale_set_dict['resource_group']
- name = vm_scale_set_dict['name']
+ resource_group = vm_scale_set_dict["resource_group"]
+ name = vm_scale_set_dict["name"]
- for vm_instance in vm_scale_set_conn.list_vm_scale_set_vms(resource_group, name):
+ for vm_instance in vm_scale_set_conn.list_vm_scale_set_vms(
+ resource_group, name
+ ):
instance_count += 1
- vm_scale_set_dict.update({
- 'instance_count': instance_count
- })
+ vm_scale_set_dict.update({"instance_count": instance_count})
- vm_instance_dict = self.get_vm_instance_dict(vm_instance, vm_scale_set_conn, resource_group, name)
+ vm_instance_dict = self.get_vm_instance_dict(
+ vm_instance, vm_scale_set_conn, resource_group, name
+ )
vm_instances_list.append(vm_instance_dict)
- vm_scale_set_dict['vm_instances'] = vm_instances_list
+ vm_scale_set_dict["vm_instances"] = vm_instances_list
# Get auto scale settings by resource group and vm id
- vm_scale_set_dict.update({
- 'autoscale_settings': self.list_auto_scale_settings_obj(vm_scale_set_conn, resource_group, vm_scale_set_id)
- })
+ vm_scale_set_dict.update(
+ {
+ "autoscale_settings": self.list_auto_scale_settings_obj(
+ vm_scale_set_conn, resource_group, vm_scale_set_id
+ )
+ }
+ )
# Set virtual_machine_scale_set_power_state information
- if vm_scale_set_dict.get('autoscale_settings') is not None:
- vm_scale_set_dict.update({
- 'virtual_machine_scale_set_power_state': self.list_virtual_machine_scale_set_power_state(vm_scale_set_dict['autoscale_settings']),
- })
+ if vm_scale_set_dict.get("autoscale_settings") is not None:
+ vm_scale_set_dict.update(
+ {
+ "virtual_machine_scale_set_power_state": self.list_virtual_machine_scale_set_power_state(
+ vm_scale_set_dict["autoscale_settings"]
+ ),
+ }
+ )
# update auto_scale_settings to autoscale_setting_resource_collection
auto_scale_setting_resource_col_dict = dict()
- auto_scale_setting_resource_col_dict.update({
- 'value': self.list_auto_scale_settings(vm_scale_set_conn, resource_group, vm_scale_set_id)
- })
-
- vm_scale_set_dict.update({
- 'autoscale_setting_resource_collection': auto_scale_setting_resource_col_dict
- })
-
- vm_scale_set_data = VirtualMachineScaleSet(vm_scale_set_dict, strict=False)
- vm_scale_set_resource = VmScaleSetResource({
- 'data': vm_scale_set_data,
- 'region_code': vm_scale_set_data.location,
- 'reference': ReferenceModel(vm_scale_set_data.reference()),
- 'tags': vm_scale_set_dict.get('tags', {}),
- 'name': vm_scale_set_data.name,
- 'account': vm_scale_set_data.subscription_id,
- 'instance_type': vm_scale_set_data.sku.name
- })
+ auto_scale_setting_resource_col_dict.update(
+ {
+ "value": self.list_auto_scale_settings(
+ vm_scale_set_conn, resource_group, vm_scale_set_id
+ )
+ }
+ )
+
+ vm_scale_set_dict.update(
+ {
+ "autoscale_setting_resource_collection": auto_scale_setting_resource_col_dict
+ }
+ )
+
+ vm_scale_set_data = VirtualMachineScaleSet(
+ vm_scale_set_dict, strict=False
+ )
+ vm_scale_set_resource = VmScaleSetResource(
+ {
+ "data": vm_scale_set_data,
+ "region_code": vm_scale_set_data.location,
+ "reference": ReferenceModel(vm_scale_set_data.reference()),
+ "tags": vm_scale_set_dict.get("tags", {}),
+ "name": vm_scale_set_data.name,
+ "account": vm_scale_set_data.subscription_id,
+ "instance_type": vm_scale_set_data.sku.name,
+ }
+ )
# Must set_region_code method for region collection
- self.set_region_code(vm_scale_set_data['location'])
+ self.set_region_code(vm_scale_set_data["location"])
# _LOGGER.debug(f'[VM_SCALE_SET_INFO] {vm_scale_set_resource.to_primitive()}')
- vm_scale_set_responses.append(VmScaleSetResponse({'resource': vm_scale_set_resource}))
+ vm_scale_set_responses.append(
+ VmScaleSetResponse({"resource": vm_scale_set_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {vm_scale_set_id} {e}', exc_info=True)
- error_resource_response = self.generate_resource_error_response(e, 'Compute', 'VMScaleSet', vm_scale_set_id)
+ _LOGGER.error(f"[list_instances] {vm_scale_set_id} {e}", exc_info=True)
+ error_resource_response = self.generate_resource_error_response(
+ e, "Compute", "VMScaleSet", vm_scale_set_id
+ )
error_responses.append(error_resource_response)
- _LOGGER.debug(f'** VmScaleSet Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(f"** VmScaleSet Finished {time.time() - start_time} Seconds **")
return vm_scale_set_responses, error_responses
def get_autoscale_rules(self, rules_dict):
@@ -172,54 +269,97 @@ def get_autoscale_rules(self, rules_dict):
return rule_list
# Get instances of a virtual machine from a VM scale set
- def get_vm_instance_dict(self, vm_instance, vm_instance_conn, resource_group, vm_scale_set_name):
+ def get_vm_instance_dict(
+ self, vm_instance, vm_instance_conn, resource_group, vm_scale_set_name
+ ):
vm_instance_dict = self.convert_nested_dictionary(vm_instance)
- # Get instance view of a virtual machine from a VM scale set instance
- if vm_instance_dict.get('instance_id') is not None:
- vm_instance_dict.update({
- 'vm_instance_status_profile': self.get_vm_instance_view_dict(vm_instance_conn, resource_group, vm_scale_set_name, vm_instance.instance_id)
- })
- if vm_instance_dict.get('vm_instance_status_profile') is not None:
- if vm_instance_dict['vm_instance_status_profile'].get('vm_agent') is not None:
- vm_instance_dict.update({
- 'vm_instance_status_display': vm_instance_dict['vm_instance_status_profile']['vm_agent']['display_status']
- })
+ # Get Instance view of a virtual machine from a VM scale set Instance
+ # todo : remove
+ # issue ticket : https://github.com/Azure/azure-sdk-for-python/issues/35789
+ # if vm_instance_dict.get("instance_id") is not None:
+ # vm_instance_dict.update(
+ # {
+ # "vm_instance_status_profile": self.get_vm_instance_view_dict(
+ # vm_instance_conn,
+ # resource_group,
+ # vm_scale_set_name,
+ # "0:",
+ # )
+ # }
+ # )
+ if vm_instance_dict.get("vm_instance_status_profile") is not None:
+ if (
+ vm_instance_dict["vm_instance_status_profile"].get("vm_agent")
+ is not None
+ ):
+ vm_instance_dict.update(
+ {
+ "vm_instance_status_display": vm_instance_dict[
+ "vm_instance_status_profile"
+ ]["vm_agent"]["display_status"]
+ }
+ )
# Get Primary Vnet display
- if getattr(vm_instance, 'network_profile_configuration') is not None:
- if primary_vnet := self.get_primary_vnet(vm_instance_dict['network_profile_configuration']['network_interface_configurations']):
- vm_instance_dict.update({'primary_vnet': primary_vnet})
+ if getattr(vm_instance, "network_profile_configuration") is not None:
+ if primary_vnet := self.get_primary_vnet(
+ vm_instance_dict["network_profile_configuration"][
+ "network_interface_configurations"
+ ]
+ ):
+ vm_instance_dict.update({"primary_vnet": primary_vnet})
return vm_instance_dict
- # Get instance view of a virtual machine from a VM scale set instance
- def get_vm_instance_view_dict(self, vm_instance_conn, resource_group, vm_scale_set_name, instance_id):
- vm_instance_status_profile = vm_instance_conn.get_vm_scale_set_instance_view(resource_group, vm_scale_set_name, instance_id)
- vm_instance_status_profile_dict = self.convert_nested_dictionary(vm_instance_status_profile)
+ # Get Instance view of a virtual machine from a VM scale set Instance
+ def get_vm_instance_view_dict(
+ self, vm_instance_conn, resource_group, vm_scale_set_name, instance_id
+ ):
+ vm_instance_status_profile = vm_instance_conn.get_vm_scale_set_instance_view(
+ resource_group, vm_scale_set_name, instance_id
+ )
+ vm_instance_status_profile_dict = self.convert_nested_dictionary(
+ vm_instance_status_profile
+ )
if vm_instance_status_profile.vm_agent is not None:
status_str = None
- for status in vm_instance_status_profile_dict.get('vm_agent').get('statuses'):
- status_str = status['display_status']
+ for status in vm_instance_status_profile_dict.get("vm_agent").get(
+ "statuses"
+ ):
+ status_str = status["display_status"]
if status_str:
- vm_instance_status_profile_dict['vm_agent'].update({'display_status': status_str})
+ vm_instance_status_profile_dict["vm_agent"].update(
+ {"display_status": status_str}
+ )
return vm_instance_status_profile_dict
- def list_auto_scale_settings(self, vm_scale_set_conn, resource_group_name, vm_scale_set_id):
+ def list_auto_scale_settings(
+ self, vm_scale_set_conn, resource_group_name, vm_scale_set_id
+ ):
auto_scale_settings_list = list()
- auto_scale_settings_obj = vm_scale_set_conn.list_auto_scale_settings(resource_group=resource_group_name) # List all of the Auto scaling Rules in this resource group
+ auto_scale_settings_obj = vm_scale_set_conn.list_auto_scale_settings(
+ resource_group=resource_group_name
+ ) # List all of the Auto scaling Rules in this resource group
- ''''''
+ """"""
for auto_scale_setting in auto_scale_settings_obj:
auto_scale_setting_dict = self.convert_nested_dictionary(auto_scale_setting)
- auto_scale_setting_dict.update({
- 'profiles_display': self.get_autoscale_profiles_display(auto_scale_setting_dict['profiles'])
- })
- if auto_scale_setting_dict['target_resource_uri'].lower() == vm_scale_set_id.lower(): # Compare resources' id
+ auto_scale_setting_dict.update(
+ {
+ "profiles_display": self.get_autoscale_profiles_display(
+ auto_scale_setting_dict["profiles"]
+ )
+ }
+ )
+ if (
+ auto_scale_setting_dict["target_resource_uri"].lower()
+ == vm_scale_set_id.lower()
+ ): # Compare resources' id
auto_scale_settings_list.append(auto_scale_setting_dict)
return auto_scale_settings_list
@@ -237,51 +377,63 @@ def list_virtual_machine_scale_set_power_state(self, autoscale_obj_list):
power_state_list = list()
for autoscale_setting in autoscale_obj_list:
- power_state_dict.update({
- 'location': autoscale_setting.location,
- 'profiles': self.get_autoscale_profiles_list(autoscale_setting), # profiles_list
- 'enabled': autoscale_setting.enabled,
- 'name': autoscale_setting.name,
- 'notifications': autoscale_setting.notifications,
- 'target_resource_uri': autoscale_setting.target_resource_uri,
- 'tags': autoscale_setting.tags
- })
-
- if power_state_dict.get('profiles') is not None:
- power_state_dict.update({
- 'profiles_display': self.get_autoscale_profiles_display(power_state_dict['profiles'])
- })
+ power_state_dict.update(
+ {
+ "location": autoscale_setting.location,
+ "profiles": self.get_autoscale_profiles_list(
+ autoscale_setting
+ ), # profiles_list
+ "enabled": autoscale_setting.enabled,
+ "name": autoscale_setting.name,
+ "notifications": autoscale_setting.notifications,
+ "target_resource_uri": autoscale_setting.target_resource_uri,
+ "tags": autoscale_setting.tags,
+ }
+ )
+
+ if power_state_dict.get("profiles") is not None:
+ power_state_dict.update(
+ {
+ "profiles_display": self.get_autoscale_profiles_display(
+ power_state_dict["profiles"]
+ )
+ }
+ )
power_state_list.append(power_state_dict)
return power_state_list
@staticmethod
def get_proximity_placement_group_name(placement_group_id):
- placement_group_name = placement_group_id.split('/')[8] # parse placement_group_name from placement_group_id
+ placement_group_name = placement_group_id.split("/")[
+ 8
+ ] # parse placement_group_name from placement_group_id
return placement_group_name
@staticmethod
def get_source_disk_name(source_resource_id):
- source_disk_name = source_resource_id.split('/')[8] # parse source_disk_name from source_resource_id
+ source_disk_name = source_resource_id.split("/")[
+ 8
+ ] # parse source_disk_name from source_resource_id
return source_disk_name
@staticmethod
def get_disk_storage_type(sku_tier):
- if sku_tier == 'Premium_LRS':
- sku_name = 'Premium SSD'
- elif sku_tier == 'StandardSSD_LRS':
- sku_name = 'Standard SSD'
- elif sku_tier == 'Standard_LRS':
- sku_name = 'Standard HDD'
+ if sku_tier == "Premium_LRS":
+ sku_name = "Premium SSD"
+ elif sku_tier == "StandardSSD_LRS":
+ sku_name = "Standard SSD"
+ elif sku_tier == "Standard_LRS":
+ sku_name = "Standard HDD"
else:
- sku_name = 'Ultra SSD'
+ sku_name = "Ultra SSD"
return sku_name
@staticmethod
def get_operating_system(os_profile_dictionary):
- if os_profile_dictionary['linux_configuration'] is None:
- operating_system = 'Windows'
+ if os_profile_dictionary["linux_configuration"] is None:
+ operating_system = "Windows"
else:
- operating_system = 'Linux'
+ operating_system = "Linux"
return operating_system
@staticmethod
@@ -290,22 +442,29 @@ def get_primary_vnet(network_interface_configurations):
# 1) Find Primary NIC
for nic in network_interface_configurations:
- if nic['primary'] is True:
+ if nic["primary"] is True:
# 2) Find primary ip configurations
- for ip_configuration in nic['ip_configurations']:
- if ip_configuration['primary'] is True:
- vnet_id = ip_configuration['subnet']['id'].split('/')[8]
+ for ip_configuration in nic["ip_configurations"]:
+ if ip_configuration["primary"] is True:
+ vnet_id = ip_configuration["subnet"]["id"].split("/")[8]
return vnet_id
@staticmethod
- def list_auto_scale_settings_obj(vm_scale_set_conn, resource_group_name, vm_scale_set_id):
+ def list_auto_scale_settings_obj(
+ vm_scale_set_conn, resource_group_name, vm_scale_set_id
+ ):
auto_scale_settings_obj_list = list()
# all List of the Auto scaling Rules in this resource group
- auto_scale_settings_obj = vm_scale_set_conn.list_auto_scale_settings(resource_group=resource_group_name)
+ auto_scale_settings_obj = vm_scale_set_conn.list_auto_scale_settings(
+ resource_group=resource_group_name
+ )
for auto_scale_setting in auto_scale_settings_obj:
- if auto_scale_setting.target_resource_uri.lower() == vm_scale_set_id.lower():
+ if (
+ auto_scale_setting.target_resource_uri.lower()
+ == vm_scale_set_id.lower()
+ ):
auto_scale_settings_obj_list.append(auto_scale_setting)
return auto_scale_settings_obj_list
@@ -314,6 +473,17 @@ def list_auto_scale_settings_obj(vm_scale_set_conn, resource_group_name, vm_scal
def get_autoscale_profiles_display(power_state_profiles):
profiles_list = list()
for profile in power_state_profiles:
- profiles_list.append('minimum : ' + str(profile['capacity']['minimum']) + ' / ' + 'maximum : ' + str(profile['capacity']['maximum'] + ' / ' + 'default : ' + profile['capacity']['default']))
+ profiles_list.append(
+ "minimum : "
+ + str(profile["capacity"]["minimum"])
+ + " / "
+ + "maximum : "
+            + str(profile["capacity"]["maximum"])
+            + " / "
+            + "default : "
+            + str(
+                profile["capacity"]["default"]
+            )
+ )
return profiles_list
diff --git a/src/spaceone/inventory/manager/web_pubsub_service/service_manager.py b/src/spaceone/inventory/manager/web_pubsub_service/service_manager.py
index 4ee04e58..dd3fc5f2 100644
--- a/src/spaceone/inventory/manager/web_pubsub_service/service_manager.py
+++ b/src/spaceone/inventory/manager/web_pubsub_service/service_manager.py
@@ -4,8 +4,12 @@
from spaceone.inventory.libs.manager import AzureManager
from spaceone.inventory.libs.schema.base import ReferenceModel
from spaceone.core.utils import *
-from spaceone.inventory.model.web_pubsub_service.cloud_service_type import CLOUD_SERVICE_TYPES
-from spaceone.inventory.connector.web_pubsub_service.connector import WebPubSubServiceConnector
+from spaceone.inventory.model.web_pubsub_service.cloud_service_type import (
+ CLOUD_SERVICE_TYPES,
+)
+from spaceone.inventory.connector.web_pubsub_service.connector import (
+ WebPubSubServiceConnector,
+)
from spaceone.inventory.model.web_pubsub_service.cloud_service import *
from spaceone.inventory.model.web_pubsub_service.data import *
@@ -13,142 +17,205 @@
class WebPubSubServiceManager(AzureManager):
- connector_name = 'WebPubSubServiceConnector'
+ connector_name = "WebPubSubServiceConnector"
cloud_service_types = CLOUD_SERVICE_TYPES
def collect_cloud_service(self, params):
"""
- Args:
- params (dict):
- - 'options' : 'dict'
- - 'schema' : 'str'
- - 'secret_data' : 'dict'
- - 'filter' : 'dict'
- - 'zones' : 'list'
- - 'subscription_info' : 'dict'
-
- Response:
- CloudServiceResponse (list) : list of azure web pubsub service data resource information
- ErrorResourceResponse (list) : list of error resource information
+ Args:
+ params (dict):
+ - 'options' : 'dict'
+ - 'schema' : 'str'
+ - 'secret_data' : 'dict'
+ - 'filter' : 'dict'
+ - 'zones' : 'list'
+ - 'subscription_info' : 'dict'
+
+ Response:
+ CloudServiceResponse (list) : list of azure web pubsub service data resource information
+ ErrorResourceResponse (list) : list of error resource information
"""
- _LOGGER.debug(f'** Web PubSub Service START **')
+ _LOGGER.debug(f"** Web PubSub Service START **")
start_time = time.time()
- subscription_info = params['subscription_info']
+ subscription_info = params["subscription_info"]
web_pubsub_responses = []
error_responses = []
- web_pubsub_service_conn: WebPubSubServiceConnector = self.locator.get_connector(self.connector_name, **params)
+ web_pubsub_service_conn: WebPubSubServiceConnector = self.locator.get_connector(
+ self.connector_name, **params
+ )
web_pubsub_services = web_pubsub_service_conn.list_by_subscription()
for web_pubsub_service in web_pubsub_services:
- web_pubsub_service_id = ''
+ web_pubsub_service_id = ""
try:
- web_pubsub_service_dict = self.convert_nested_dictionary(web_pubsub_service)
- web_pubsub_service_id = web_pubsub_service_dict['id']
- resource_group_name = self.get_resource_group_from_id(web_pubsub_service_id)
- resource_name = web_pubsub_service_dict['name']
+ web_pubsub_service_dict = self.convert_nested_dictionary(
+ web_pubsub_service
+ )
+ web_pubsub_service_id = web_pubsub_service_dict["id"]
+ resource_group_name = self.get_resource_group_from_id(
+ web_pubsub_service_id
+ )
+ resource_name = web_pubsub_service_dict["name"]
# Update data info in Container Instance's Raw Data
# Make private endpoint name
- if private_endpoints := web_pubsub_service_dict.get('private_endpoint_connections', []):
+ if private_endpoints := web_pubsub_service_dict.get(
+ "private_endpoint_connections", []
+ ):
for private_endpoint in private_endpoints:
- private_endpoint['private_endpoint'][
- 'private_endpoint_name_display'] = self.get_resource_name_from_id(
- private_endpoint['private_endpoint']['id'])
+ private_endpoint["private_endpoint"][
+ "private_endpoint_name_display"
+ ] = self.get_resource_name_from_id(
+ private_endpoint["private_endpoint"]["id"]
+ )
# Collect Web PubSub Hub resource
- web_pubsub_hubs = web_pubsub_service_conn.list_hubs(resource_group_name=resource_group_name, resource_name=resource_name)
- web_pubsub_hubs_dict = [self.convert_nested_dictionary(hub) for hub in web_pubsub_hubs]
-
- _hub_responses, _hub_errors = self._collect_web_pubsub_hub(web_pubsub_hubs_dict, subscription_info, web_pubsub_service_dict['location'])
+ web_pubsub_hubs = web_pubsub_service_conn.list_hubs(
+ resource_group_name=resource_group_name, resource_name=resource_name
+ )
+ web_pubsub_hubs_dict = [
+ self.convert_nested_dictionary(hub) for hub in web_pubsub_hubs
+ ]
+
+ _hub_responses, _hub_errors = self._collect_web_pubsub_hub(
+ web_pubsub_hubs_dict,
+ subscription_info,
+ web_pubsub_service_dict["location"],
+ params["secret_data"]["tenant_id"],
+ )
web_pubsub_responses.extend(_hub_responses)
error_responses.extend(_hub_errors)
# Add Web PubSub Hub info in data
- web_pubsub_hub_datas = [WebPubSubHub(hub_dict, strict=False) for hub_dict in web_pubsub_hubs_dict]
+ web_pubsub_hub_datas = [
+ WebPubSubHub(hub_dict, strict=False)
+ for hub_dict in web_pubsub_hubs_dict
+ ]
# Add Web PubSub Key info in data
- web_pubsub_key = web_pubsub_service_conn.list_keys(resource_group_name=resource_group_name,
- resource_name=resource_name)
-
- web_pubsub_service_dict.update({
- 'resource_group': resource_group_name,
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': web_pubsub_service_id},
- 'web_pubsub_hubs': web_pubsub_hub_datas,
- 'web_pubsub_hub_count_display': len(web_pubsub_hub_datas),
- 'web_pubsub_key': WebPubSubKey(self.convert_nested_dictionary(web_pubsub_key), strict=False)
- })
-
- web_pubsub_service_data = WebPubSubService(web_pubsub_service_dict, strict=False)
+ # web_pubsub_key = web_pubsub_service_conn.list_keys(
+ # resource_group_name=resource_group_name, resource_name=resource_name
+ # )
+ web_pubsub_key = {}
+ if web_pubsub_key:
+ web_pubsub_service_dict["web_pubsub_key"] = (
+ WebPubSubKey(
+ self.convert_nested_dictionary(web_pubsub_key), strict=False
+                        )
+ )
+
+ web_pubsub_service_dict = self.update_tenant_id_from_secret_data(
+ web_pubsub_service_dict, params["secret_data"]
+ )
+ web_pubsub_service_dict.update(
+ {
+ "resource_group": resource_group_name,
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": web_pubsub_service_id},
+ "web_pubsub_hubs": web_pubsub_hub_datas,
+ "web_pubsub_hub_count_display": len(web_pubsub_hub_datas),
+ }
+ )
+
+ web_pubsub_service_data = WebPubSubService(
+ web_pubsub_service_dict, strict=False
+ )
# Update resource info of Container Instance
- web_pubsub_service_resource = WebPubSubServiceResource({
- 'name': resource_name,
- 'account': web_pubsub_service_dict['subscription_id'],
- 'data': web_pubsub_service_data,
- 'tags': web_pubsub_service_dict.get('tags', {}),
- 'region_code': web_pubsub_service_data.location,
- 'reference': ReferenceModel(web_pubsub_service_data.reference())
- })
-
- self.set_region_code(web_pubsub_service_data['location'])
- web_pubsub_responses.append(WebPubSubServiceResponse({'resource': web_pubsub_service_resource}))
+ web_pubsub_service_resource = WebPubSubServiceResource(
+ {
+ "name": resource_name,
+ "account": web_pubsub_service_dict["subscription_id"],
+ "data": web_pubsub_service_data,
+ "tags": web_pubsub_service_dict.get("tags", {}),
+ "region_code": web_pubsub_service_data.location,
+ "reference": ReferenceModel(
+ web_pubsub_service_data.reference()
+ ),
+ }
+ )
+
+ self.set_region_code(web_pubsub_service_data["location"])
+ web_pubsub_responses.append(
+ WebPubSubServiceResponse({"resource": web_pubsub_service_resource})
+ )
except Exception as e:
- _LOGGER.error(f'[list_instances] {web_pubsub_service_id} {e}', exc_info=True)
- error_response = self.generate_resource_error_response(e, 'Service', 'WebPubSubService',
- web_pubsub_service_id)
+ _LOGGER.error(
+ f"[list_instances] {web_pubsub_service_id} {e}", exc_info=True
+ )
+ error_response = self.generate_resource_error_response(
+ e, "Service", "WebPubSubService", web_pubsub_service_id
+ )
error_responses.append(error_response)
- _LOGGER.debug(f'** Web PubSub Service Finished {time.time() - start_time} Seconds **')
+ _LOGGER.debug(
+ f"** Web PubSub Service Finished {time.time() - start_time} Seconds **"
+ )
return web_pubsub_responses, error_responses
- def _collect_web_pubsub_hub(self, web_pubsub_hubs_dict, subscription_info, location):
+ def _collect_web_pubsub_hub(
+ self, web_pubsub_hubs_dict, subscription_info, location, tenant_id
+ ):
web_pubsub_hub_responses = []
error_responses = []
for web_pubsub_hub_dict in web_pubsub_hubs_dict:
- web_pubsub_hub_id = ''
+ web_pubsub_hub_id = ""
try:
- web_pubsub_hub_id = web_pubsub_hub_dict['id']
+ web_pubsub_hub_id = web_pubsub_hub_dict["id"]
resource_group_name = self.get_resource_group_from_id(web_pubsub_hub_id)
- web_pubsub_hub_dict.update({
- 'location': location,
- 'resource_group': resource_group_name,
- 'subscription_id': subscription_info['subscription_id'],
- 'subscription_name': subscription_info['subscription_name'],
- 'azure_monitor': {'resource_id': web_pubsub_hub_id},
- 'web_pubsub_svc_name': self.get_web_pubsub_name_from_id(web_pubsub_hub_id),
- 'web_pubsub_hub_evnet_handler_count_display': len(web_pubsub_hub_dict.get('properties', {}).get('event_handlers', []))
- })
+ web_pubsub_hub_dict.update(
+ {
+ "tenant_id": tenant_id,
+ "location": location,
+ "resource_group": resource_group_name,
+ "subscription_id": subscription_info["subscription_id"],
+ "subscription_name": subscription_info["subscription_name"],
+ "azure_monitor": {"resource_id": web_pubsub_hub_id},
+ "web_pubsub_svc_name": self.get_web_pubsub_name_from_id(
+ web_pubsub_hub_id
+ ),
+ "web_pubsub_hub_evnet_handler_count_display": len(
+ web_pubsub_hub_dict.get("properties", {}).get(
+ "event_handlers", []
+ )
+ ),
+ }
+ )
web_pubsub_hub_data = WebPubSubHub(web_pubsub_hub_dict, strict=False)
- web_pubsub_hub_resource = WebPubSubHubResource({
- 'name': web_pubsub_hub_data.name,
- 'account': web_pubsub_hub_dict['subscription_id'],
- 'data': web_pubsub_hub_data,
- 'tags': web_pubsub_hub_dict.get('tags', {}),
- 'region_code': web_pubsub_hub_data.location,
- 'reference': ReferenceModel(web_pubsub_hub_data.reference())
- })
- web_pubsub_hub_responses.append(WebPubSubHubResponse({'resource': web_pubsub_hub_resource}))
+ web_pubsub_hub_resource = WebPubSubHubResource(
+ {
+ "name": web_pubsub_hub_data.name,
+ "account": web_pubsub_hub_dict["subscription_id"],
+ "data": web_pubsub_hub_data,
+ "tags": web_pubsub_hub_dict.get("tags", {}),
+ "region_code": web_pubsub_hub_data.location,
+ "reference": ReferenceModel(web_pubsub_hub_data.reference()),
+ }
+ )
+ web_pubsub_hub_responses.append(
+ WebPubSubHubResponse({"resource": web_pubsub_hub_resource})
+ )
except Exception as e:
- print(e)
- _LOGGER.error(f'[list_instances] {web_pubsub_hub_id} {e}', exc_info=True)
- error_response = self.generate_resource_error_response(e, 'Hub', 'WebPubSubService', web_pubsub_hub_id)
+ _LOGGER.error(
+ f"[list_instances] {web_pubsub_hub_id} {e}", exc_info=True
+ )
+ error_response = self.generate_resource_error_response(
+ e, "Hub", "WebPubSubService", web_pubsub_hub_id
+ )
error_responses.append(error_response)
return web_pubsub_hub_responses, error_responses
@staticmethod
def get_resource_name_from_id(dict_id):
- resource_name = dict_id.split('/')[-1]
+ resource_name = dict_id.split("/")[-1]
return resource_name
@staticmethod
def get_web_pubsub_name_from_id(dict_id):
- svc_name = dict_id.split('/')[-3]
+ svc_name = dict_id.split("/")[-3]
return svc_name
-
-
diff --git a/src/spaceone/inventory/metrics/ApplicationGateways/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/ApplicationGateways/Instance/instance_count.yaml
new file mode 100644
index 00000000..3bd083ba
--- /dev/null
+++ b/src/spaceone/inventory/metrics/ApplicationGateways/Instance/instance_count.yaml
@@ -0,0 +1,32 @@
+---
+metric_id: metric-azure-ags-instance-count
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.ApplicationGateways.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ default: true
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group.resource_group_name
+ name: Resource Group
+ - key: data.operational_state
+ name: Operational State
+ default: true
+ - key: data.subnet
+ name: Subnet
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-ags-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/ApplicationGateways/Instance/namespace.yaml b/src/spaceone/inventory/metrics/ApplicationGateways/Instance/namespace.yaml
new file mode 100644
index 00000000..3659a5f1
--- /dev/null
+++ b/src/spaceone/inventory/metrics/ApplicationGateways/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-ags-instance
+name: ApplicationGateways/Instance
+category: ASSET
+resource_type: inventory.CloudService:azure.ApplicationGateways.Instance
+group: azure
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-application-gateways.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/ContainerInstances/Container/container_count.yaml b/src/spaceone/inventory/metrics/ContainerInstances/Container/container_count.yaml
new file mode 100644
index 00000000..58dcc669
--- /dev/null
+++ b/src/spaceone/inventory/metrics/ContainerInstances/Container/container_count.yaml
@@ -0,0 +1,39 @@
+---
+metric_id: metric-azure-cis-container-count
+name: Container Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.ContainerInstances.Container
+query_options:
+ unwind:
+ path: data.zones
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.zones
+ name: Zone
+ - key: data.sku
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.os_type
+ name: OS Type
+ fields:
+ value:
+ key: data.container_count_display
+ operator: count
+unit: Count
+namespace_id: ns-azure-cis-container
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/ContainerInstances/Container/gpu_size.yaml b/src/spaceone/inventory/metrics/ContainerInstances/Container/gpu_size.yaml
new file mode 100644
index 00000000..572a4b8d
--- /dev/null
+++ b/src/spaceone/inventory/metrics/ContainerInstances/Container/gpu_size.yaml
@@ -0,0 +1,38 @@
+---
+metric_id: metric-azure-cis-container-gpu-size
+name: GPU Size
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.ContainerInstances.Container
+query_options:
+ unwind:
+ path: data.zones
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.zones
+ name: Zone
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku
+ name: Sku Tier
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.os_type
+ name: OS Type
+ fields:
+ value:
+ key: data.gpu_count_display
+ operator: sum
+unit: Count
+namespace_id: ns-azure-cis-container
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/ContainerInstances/Container/memory_size.yaml b/src/spaceone/inventory/metrics/ContainerInstances/Container/memory_size.yaml
new file mode 100644
index 00000000..7a7cbe74
--- /dev/null
+++ b/src/spaceone/inventory/metrics/ContainerInstances/Container/memory_size.yaml
@@ -0,0 +1,38 @@
+---
+metric_id: metric-azure-cis-container-memory-size
+name: Memory Size
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.ContainerInstances.Container
+query_options:
+ unwind:
+ path: data.zones
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.zones
+ name: Zone
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku
+ name: Sku Tier
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.os_type
+ name: OS Type
+ fields:
+ value:
+ key: data.memory_size_display
+ operator: sum
+unit: GB
+namespace_id: ns-azure-cis-container
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/ContainerInstances/Container/namespace.yaml b/src/spaceone/inventory/metrics/ContainerInstances/Container/namespace.yaml
new file mode 100644
index 00000000..1f40fbe2
--- /dev/null
+++ b/src/spaceone/inventory/metrics/ContainerInstances/Container/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-cis-container
+name: ContainerInstances/Container
+resource_type: inventory.CloudService:azure.ContainerInstances.Container
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-container-instances.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/ContainerInstances/Container/vcpu_count.yaml b/src/spaceone/inventory/metrics/ContainerInstances/Container/vcpu_count.yaml
new file mode 100644
index 00000000..e1481fc9
--- /dev/null
+++ b/src/spaceone/inventory/metrics/ContainerInstances/Container/vcpu_count.yaml
@@ -0,0 +1,38 @@
+---
+metric_id: metric-azure-cis-container-vcpu-count
+name: vCPU Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.ContainerInstances.Container
+query_options:
+ unwind:
+ path: data.zones
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.zones
+ name: Zone
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku
+ name: Sku Tier
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.os_type
+ name: OS Type
+ fields:
+ value:
+ key: data.cpu_count_display
+ operator: sum
+unit: Core
+namespace_id: ns-azure-cis-container
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/CosmosDB/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/CosmosDB/Instance/instance_count.yaml
new file mode 100644
index 00000000..abf0ea46
--- /dev/null
+++ b/src/spaceone/inventory/metrics/CosmosDB/Instance/instance_count.yaml
@@ -0,0 +1,31 @@
+---
+metric_id: metric-azure-cosmos-db-instance-count
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.CosmosDB.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.kind
+ name: Kind
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-cosmos-db-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/CosmosDB/Instance/namespace.yaml b/src/spaceone/inventory/metrics/CosmosDB/Instance/namespace.yaml
new file mode 100644
index 00000000..3aa32269
--- /dev/null
+++ b/src/spaceone/inventory/metrics/CosmosDB/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-cosmos-db-instance
+name: CosmosDB/Instance
+resource_type: inventory.CloudService:azure.CosmosDB.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-cosmos-db.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/CosmosDB/Instance/sql_database_count.yaml b/src/spaceone/inventory/metrics/CosmosDB/Instance/sql_database_count.yaml
new file mode 100644
index 00000000..fa2967bb
--- /dev/null
+++ b/src/spaceone/inventory/metrics/CosmosDB/Instance/sql_database_count.yaml
@@ -0,0 +1,34 @@
+---
+metric_id: metric-azure-cosmos-db-instance-sql-db-count
+name: SQL Database Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.CosmosDB.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.instance_type
+ name: Instance Type
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.kind
+ name: Kind
+ fields:
+ value:
+ key: data.sql_databases_count_display
+ operator: sum
+unit: Count
+namespace_id: ns-azure-cosmos-db-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/Disks/Disk/disk_count.yaml b/src/spaceone/inventory/metrics/Disks/Disk/disk_count.yaml
new file mode 100644
index 00000000..4744998a
--- /dev/null
+++ b/src/spaceone/inventory/metrics/Disks/Disk/disk_count.yaml
@@ -0,0 +1,46 @@
+---
+metric_id: metric-azure-disks-disk-count
+name: Disk Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.Disks.Disk
+query_options:
+ unwind:
+ path: data.zones
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.zones
+ name: Zone
+ - key: data.sku.name
+ name: Sku name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: instance_type
+ name: Disk Type
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.disk_state
+ name: Disk State
+ default: true
+ - key: data.os_type
+ name: OS Type
+ - key: data.managed_by
+ name: Managed By
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-disks-disk
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/Disks/Disk/disk_size.yaml b/src/spaceone/inventory/metrics/Disks/Disk/disk_size.yaml
new file mode 100644
index 00000000..7b949a25
--- /dev/null
+++ b/src/spaceone/inventory/metrics/Disks/Disk/disk_size.yaml
@@ -0,0 +1,47 @@
+---
+metric_id: metric-azure-disks-disk-size
+name: Disk Size
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.Disks.Disk
+query_options:
+  unwind:
+    path: data.zones
+  group_by:
+    - key: region_code
+      name: Region
+      reference:
+        resource_type: inventory.Region
+        reference_key: region_code
+    - key: data.sku.name
+      name: Sku name
+    - key: data.sku.tier
+      name: Sku Tier
+    - key: data.zones
+      name: Zone
+    - key: data.tenant_id
+      name: Tenant ID
+    - key: data.subscription_name
+      name: Subscription Name
+    - key: account
+      name: Subscription ID
+    - key: data.resource_group
+      name: Resource Group
+    - key: instance_type
+      name: Disk Type
+    - key: data.provisioning_state
+      name: Provisioning State
+      default: true
+    - key: data.disk_state
+      name: Disk State
+      default: true
+    - key: data.os_type
+      name: OS Type
+    - key: data.managed_by
+      name: Managed By
+  fields:
+    value:
+      key: data.disk_size_gb
+      operator: sum
+unit: GB
+namespace_id: ns-azure-disks-disk
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/Disks/Disk/namespace.yaml b/src/spaceone/inventory/metrics/Disks/Disk/namespace.yaml
new file mode 100644
index 00000000..a2c4f412
--- /dev/null
+++ b/src/spaceone/inventory/metrics/Disks/Disk/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-disks-disk
+name: Disks/Disk
+resource_type: inventory.CloudService:azure.Disks.Disk
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-disk.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/KeyVaults/Instance/certificates_count.yaml b/src/spaceone/inventory/metrics/KeyVaults/Instance/certificates_count.yaml
new file mode 100644
index 00000000..7a400a02
--- /dev/null
+++ b/src/spaceone/inventory/metrics/KeyVaults/Instance/certificates_count.yaml
@@ -0,0 +1,31 @@
+---
+metric_id: metric-azure-kvs-instance-certificate-count
+name: Certificate Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.KeyVaults.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku.name
+ name: Sku Name
+ - key: data.properties.provisioning_state
+ name: Provisioning State
+ fields:
+ value:
+ key: data.certificate_count
+ operator: sum
+unit: Count
+namespace_id: ns-azure-key-vaults-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/KeyVaults/Instance/credentials_count.yaml b/src/spaceone/inventory/metrics/KeyVaults/Instance/credentials_count.yaml
new file mode 100644
index 00000000..a21492ad
--- /dev/null
+++ b/src/spaceone/inventory/metrics/KeyVaults/Instance/credentials_count.yaml
@@ -0,0 +1,31 @@
+---
+metric_id: metric-azure-kvs-instance-credentials-count
+name: Credentials Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.KeyVaults.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku.name
+ name: Sku Name
+ - key: data.properties.provisioning_state
+ name: Provisioning State
+ fields:
+ value:
+ key: data.total_credentials_count
+ operator: sum
+unit: Count
+namespace_id: ns-azure-key-vaults-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/KeyVaults/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/KeyVaults/Instance/instance_count.yaml
new file mode 100644
index 00000000..42e987fb
--- /dev/null
+++ b/src/spaceone/inventory/metrics/KeyVaults/Instance/instance_count.yaml
@@ -0,0 +1,32 @@
+---
+metric_id: metric-azure-kvs-instance-count
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.KeyVaults.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku.name
+ name: Sku Name
+ - key: data.properties.provisioning_state
+ name: Provisioning State
+ - key: data.system_data.created_by
+ name: Created By
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-key-vaults-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/KeyVaults/Instance/key_count.yaml b/src/spaceone/inventory/metrics/KeyVaults/Instance/key_count.yaml
new file mode 100644
index 00000000..7b547101
--- /dev/null
+++ b/src/spaceone/inventory/metrics/KeyVaults/Instance/key_count.yaml
@@ -0,0 +1,31 @@
+---
+metric_id: metric-azure-kvs-instance-key-count
+name: Key Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.KeyVaults.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku.name
+ name: Sku Name
+ - key: data.properties.provisioning_state
+ name: Provisioning State
+ fields:
+ value:
+ key: data.key_count
+      operator: sum
+unit: Count
+namespace_id: ns-azure-key-vaults-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/KeyVaults/Instance/namespace.yaml b/src/spaceone/inventory/metrics/KeyVaults/Instance/namespace.yaml
new file mode 100644
index 00000000..06748e76
--- /dev/null
+++ b/src/spaceone/inventory/metrics/KeyVaults/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-key-vaults-instance
+name: KeyVaults/Instance
+resource_type: inventory.CloudService:azure.KeyVaults.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-key-vault.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/KeyVaults/Instance/secret_count.yaml b/src/spaceone/inventory/metrics/KeyVaults/Instance/secret_count.yaml
new file mode 100644
index 00000000..079a74f9
--- /dev/null
+++ b/src/spaceone/inventory/metrics/KeyVaults/Instance/secret_count.yaml
@@ -0,0 +1,31 @@
+---
+metric_id: metric-azure-kvs-instance-secret-count
+name: Secret Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.KeyVaults.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku.name
+ name: Sku Name
+ - key: data.properties.provisioning_state
+ name: Provisioning State
+ fields:
+ value:
+ key: data.secret_count
+      operator: sum
+unit: Count
+namespace_id: ns-azure-key-vaults-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/LoadBalancers/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/LoadBalancers/Instance/instance_count.yaml
new file mode 100644
index 00000000..61392477
--- /dev/null
+++ b/src/spaceone/inventory/metrics/LoadBalancers/Instance/instance_count.yaml
@@ -0,0 +1,29 @@
+---
+metric_id: metric-azure-lbs-instance-count
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.LoadBalancers.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-lbs-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/LoadBalancers/Instance/namespace.yaml b/src/spaceone/inventory/metrics/LoadBalancers/Instance/namespace.yaml
new file mode 100644
index 00000000..66e413dd
--- /dev/null
+++ b/src/spaceone/inventory/metrics/LoadBalancers/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-lbs-instance
+name: LoadBalancers/Instance
+resource_type: inventory.CloudService:azure.LoadBalancers.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-loadbalancers.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/MySQLServers/Server/namespace.yaml b/src/spaceone/inventory/metrics/MySQLServers/Server/namespace.yaml
new file mode 100644
index 00000000..fc075c5b
--- /dev/null
+++ b/src/spaceone/inventory/metrics/MySQLServers/Server/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-mysql-servers-server
+name: MySQLServers/Server
+resource_type: inventory.CloudService:azure.MySQLServers.Server
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-mysql-servers.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/MySQLServers/Server/server_count.yaml b/src/spaceone/inventory/metrics/MySQLServers/Server/server_count.yaml
new file mode 100644
index 00000000..852f3834
--- /dev/null
+++ b/src/spaceone/inventory/metrics/MySQLServers/Server/server_count.yaml
@@ -0,0 +1,29 @@
+---
+metric_id: metric-azure-mysql-servers-server-count
+name: Server Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.MySQLServers.Server
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-mysql-servers-server
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/NATGateways/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/NATGateways/Instance/instance_count.yaml
new file mode 100644
index 00000000..5f3d533a
--- /dev/null
+++ b/src/spaceone/inventory/metrics/NATGateways/Instance/instance_count.yaml
@@ -0,0 +1,39 @@
+---
+metric_id: metric-azure-nat-gateways-instance
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.NATGateways.Instance
+query_options:
+ unwind:
+ path: data.zones
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.zones
+ name: Zone
+ - key: data.sku.name
+ name: SKU Name
+ - key: data.sku.tier
+ name: SKU Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ default: true
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-nat-gateways-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/NATGateways/Instance/namespace.yaml b/src/spaceone/inventory/metrics/NATGateways/Instance/namespace.yaml
new file mode 100644
index 00000000..dc841110
--- /dev/null
+++ b/src/spaceone/inventory/metrics/NATGateways/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-nat-gateways-instance
+name: NATGateways/Instance
+resource_type: inventory.CloudService:azure.NATGateways.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-nat.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/NetworkSecurityGroups/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/NetworkSecurityGroups/Instance/instance_count.yaml
new file mode 100644
index 00000000..bcf19aeb
--- /dev/null
+++ b/src/spaceone/inventory/metrics/NetworkSecurityGroups/Instance/instance_count.yaml
@@ -0,0 +1,30 @@
+---
+metric_id: metric-azure-network-sgs-instance
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.NetworkSecurityGroups.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ default: true
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-network-sgs-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/NetworkSecurityGroups/Instance/namespace.yaml b/src/spaceone/inventory/metrics/NetworkSecurityGroups/Instance/namespace.yaml
new file mode 100644
index 00000000..81a69da7
--- /dev/null
+++ b/src/spaceone/inventory/metrics/NetworkSecurityGroups/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-network-sgs-instance
+name: NetworkSecurityGroups/Instance
+resource_type: inventory.CloudService:azure.NetworkSecurityGroups.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-network-security-groups.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/PostgreSQLServers/Server/namespace.yaml b/src/spaceone/inventory/metrics/PostgreSQLServers/Server/namespace.yaml
new file mode 100644
index 00000000..cd6ff421
--- /dev/null
+++ b/src/spaceone/inventory/metrics/PostgreSQLServers/Server/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-postgresql-servers-server
+name: PostgreSQLServers/Server
+resource_type: inventory.CloudService:azure.PostgreSQLServers.Server
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-sql-postgresql-server.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/PostgreSQLServers/Server/server_count.yaml b/src/spaceone/inventory/metrics/PostgreSQLServers/Server/server_count.yaml
new file mode 100644
index 00000000..2b3cee47
--- /dev/null
+++ b/src/spaceone/inventory/metrics/PostgreSQLServers/Server/server_count.yaml
@@ -0,0 +1,29 @@
+---
+metric_id: metric-azure-postgresql-servers-server-count
+name: Server Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.PostgreSQLServers.Server
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-postgresql-servers-server
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/PublicIPAddresses/IpAddress/ip_addresses_count.yaml b/src/spaceone/inventory/metrics/PublicIPAddresses/IpAddress/ip_addresses_count.yaml
new file mode 100644
index 00000000..9d3d3471
--- /dev/null
+++ b/src/spaceone/inventory/metrics/PublicIPAddresses/IpAddress/ip_addresses_count.yaml
@@ -0,0 +1,39 @@
+---
+metric_id: metric-azure-pubipaddrs-ipaddrs-count
+name: IP Address Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.PublicIPAddresses.IPAddress
+query_options:
+ unwind:
+ path: data.zones
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.zones
+ name: Zone
+ - key: data.sku.name
+ name: Sku name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ - key: data.associated_to
+ name: Associated To
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-pubipaddrs-ipaddrs
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/PublicIPAddresses/IpAddress/namespace.yaml b/src/spaceone/inventory/metrics/PublicIPAddresses/IpAddress/namespace.yaml
new file mode 100644
index 00000000..54add6ef
--- /dev/null
+++ b/src/spaceone/inventory/metrics/PublicIPAddresses/IpAddress/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-pubipaddrs-ipaddrs
+name: PublicIPAddresses/IPAddress
+resource_type: inventory.CloudService:azure.PublicIPAddresses.IPAddress
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-public-ip-address.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/SQLDatabases/Database/database_count.yaml b/src/spaceone/inventory/metrics/SQLDatabases/Database/database_count.yaml
new file mode 100644
index 00000000..be4494d8
--- /dev/null
+++ b/src/spaceone/inventory/metrics/SQLDatabases/Database/database_count.yaml
@@ -0,0 +1,42 @@
+---
+metric_id: metric-azure-sql-databases-database-count
+name: Database Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.SQLDatabases.Database
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.sku.name
+ name: Sku Name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.status
+ name: Status
+ default: true
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.server_name
+ name: Server Name
+ - key: data.compute_tier
+ name: Compute Tier
+ - key: data.default_secondary_location
+ name: Default Secondary Location
+ - key: data.read_scale
+ name: Read Scale
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-sql-databases-database
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/SQLDatabases/Database/database_size.yaml b/src/spaceone/inventory/metrics/SQLDatabases/Database/database_size.yaml
new file mode 100644
index 00000000..7a9ef8dd
--- /dev/null
+++ b/src/spaceone/inventory/metrics/SQLDatabases/Database/database_size.yaml
@@ -0,0 +1,43 @@
+---
+metric_id: metric-azure-sql-databases-database-size
+name: Database Size
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.SQLDatabases.Database
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.sku.name
+ name: Sku Name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.status
+ name: Status
+ default: true
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.server_name
+ name: Server Name
+ - key: data.compute_tier
+ name: Compute Tier
+ - key: data.default_secondary_location
+ name: Default Secondary Location
+ - key: data.read_scale
+ name: Read Scale
+ fields:
+ value:
+ key: data.max_size_gb
+ operator: sum
+unit: GB
+namespace_id: ns-azure-sql-databases-database
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/SQLDatabases/Database/namespace.yaml b/src/spaceone/inventory/metrics/SQLDatabases/Database/namespace.yaml
new file mode 100644
index 00000000..278f21e6
--- /dev/null
+++ b/src/spaceone/inventory/metrics/SQLDatabases/Database/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-sql-databases-database
+name: SQLDatabases/Database
+resource_type: inventory.CloudService:azure.SQLDatabases.Database
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-sql-databases.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/SQLServers/Server/namespace.yaml b/src/spaceone/inventory/metrics/SQLServers/Server/namespace.yaml
new file mode 100644
index 00000000..f19ed717
--- /dev/null
+++ b/src/spaceone/inventory/metrics/SQLServers/Server/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-sql-servers-server
+name: SQLServers/Server
+resource_type: inventory.CloudService:azure.SQLServers.Server
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-sql-servers.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/SQLServers/Server/server_count.yaml b/src/spaceone/inventory/metrics/SQLServers/Server/server_count.yaml
new file mode 100644
index 00000000..69a632fd
--- /dev/null
+++ b/src/spaceone/inventory/metrics/SQLServers/Server/server_count.yaml
@@ -0,0 +1,36 @@
+---
+metric_id: metric-azure-sql-servers-server-count
+name: Server Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.SQLServers.Server
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.state
+ name: Server State
+ default: true
+ - key: data.version
+ name: Version
+ - key: data.minimal_tls_version
+ name: Minimal TLS Version
+ - key: data.administrators.principal_type
+ name: Administrator Principal Type
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-sql-servers-server
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/Snapshots/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/Snapshots/Instance/instance_count.yaml
new file mode 100644
index 00000000..d774af4c
--- /dev/null
+++ b/src/spaceone/inventory/metrics/Snapshots/Instance/instance_count.yaml
@@ -0,0 +1,47 @@
+---
+metric_id: metric-azure-snapshots-instance-count
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.Snapshots.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.sku.name
+ name: Sku name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: instance_type
+ name: Disk Type
+ - key: data.provisioning_state
+ name: Provisioning State
+ - key: data.os_type
+ name: OS Type
+ - key: data.disk_state
+ name: Disk State
+ - key: data.source_disk_name
+ name: Source Disk Name
+ - key: data.supports_hibernation
+ name: Supports Hibernation
+ - key: data.supported_capabilities.architecture
+ name: Architecture
+ - key: data.hyper_v_generation
+ name: Hyper-V Generation
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-snapshots-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/Snapshots/Instance/instance_size.yaml b/src/spaceone/inventory/metrics/Snapshots/Instance/instance_size.yaml
new file mode 100644
index 00000000..a9011873
--- /dev/null
+++ b/src/spaceone/inventory/metrics/Snapshots/Instance/instance_size.yaml
@@ -0,0 +1,48 @@
+---
+metric_id: metric-azure-snapshots-instance-size
+name: Instance Size
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.Snapshots.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.sku.name
+ name: Sku name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: instance_type
+ name: Disk Type
+ - key: data.provisioning_state
+ name: Provisioning State
+ - key: data.os_type
+ name: OS Type
+ - key: data.disk_state
+ name: Disk State
+ - key: data.source_disk_name
+ name: Source Disk Name
+ - key: data.supports_hibernation
+ name: Supports Hibernation
+ - key: data.supported_capabilities.architecture
+ name: Architecture
+ - key: data.hyper_v_generation
+ name: Hyper-V Generation
+ fields:
+ value:
+ key: data.disk_size_gb
+ operator: sum
+unit: GB
+namespace_id: ns-azure-snapshots-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/Snapshots/Instance/namespace.yaml b/src/spaceone/inventory/metrics/Snapshots/Instance/namespace.yaml
new file mode 100644
index 00000000..0ad0482b
--- /dev/null
+++ b/src/spaceone/inventory/metrics/Snapshots/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-snapshots-instance
+name: Snapshots/Instance
+resource_type: inventory.CloudService:azure.Snapshots.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-disk-snapshot.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/StorageAccounts/Instance/blob_count.yaml b/src/spaceone/inventory/metrics/StorageAccounts/Instance/blob_count.yaml
new file mode 100644
index 00000000..bc579c81
--- /dev/null
+++ b/src/spaceone/inventory/metrics/StorageAccounts/Instance/blob_count.yaml
@@ -0,0 +1,35 @@
+---
+metric_id: metric-azure-stg-accounts-instance-blob-count
+name: Blob Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.StorageAccounts.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.kind
+ name: Account Kind
+ - key: data.access_tier
+ name: Access Tier
+ fields:
+ value:
+ key: data.blob_count_display
+ operator: sum
+unit: Count
+namespace_id: ns-azure-stg-accounts-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/StorageAccounts/Instance/blob_size.yaml b/src/spaceone/inventory/metrics/StorageAccounts/Instance/blob_size.yaml
new file mode 100644
index 00000000..ca447be7
--- /dev/null
+++ b/src/spaceone/inventory/metrics/StorageAccounts/Instance/blob_size.yaml
@@ -0,0 +1,36 @@
+---
+metric_id: metric-azure-stg-accounts-instance-blob-size
+name: Blob Size
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.StorageAccounts.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.kind
+ name: Account Kind
+ - key: data.access_tier
+ name: Access Tier
+
+ fields:
+ value:
+ key: data.blob_size_display
+ operator: sum
+unit: Bytes
+namespace_id: ns-azure-stg-accounts-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/StorageAccounts/Instance/container_count.yaml b/src/spaceone/inventory/metrics/StorageAccounts/Instance/container_count.yaml
new file mode 100644
index 00000000..bea57a8f
--- /dev/null
+++ b/src/spaceone/inventory/metrics/StorageAccounts/Instance/container_count.yaml
@@ -0,0 +1,36 @@
+---
+metric_id: metric-azure-stg-accounts-instance-container-count
+name: Container Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.StorageAccounts.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.kind
+ name: Account Kind
+ - key: data.access_tier
+ name: Access Tier
+
+ fields:
+ value:
+ key: data.container_count_display
+ operator: sum
+unit: Count
+namespace_id: ns-azure-stg-accounts-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/StorageAccounts/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/StorageAccounts/Instance/instance_count.yaml
new file mode 100644
index 00000000..cbcef9d6
--- /dev/null
+++ b/src/spaceone/inventory/metrics/StorageAccounts/Instance/instance_count.yaml
@@ -0,0 +1,35 @@
+---
+metric_id: metric-azure-stg-accounts-instance
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.StorageAccounts.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ - key: data.kind
+ name: Account Kind
+ - key: data.access_tier
+ name: Access Tier
+
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-stg-accounts-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/StorageAccounts/Instance/namespace.yaml b/src/spaceone/inventory/metrics/StorageAccounts/Instance/namespace.yaml
new file mode 100644
index 00000000..bc3fccc7
--- /dev/null
+++ b/src/spaceone/inventory/metrics/StorageAccounts/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-stg-accounts-instance
+name: StorageAccounts/Instance
+resource_type: inventory.CloudService:azure.StorageAccounts.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-service-accounts.svg
+version: '1.0'
diff --git a/src/spaceone/inventory/metrics/VMScaleSets/ScaleSet/namespace.yaml b/src/spaceone/inventory/metrics/VMScaleSets/ScaleSet/namespace.yaml
new file mode 100644
index 00000000..b3c6c85e
--- /dev/null
+++ b/src/spaceone/inventory/metrics/VMScaleSets/ScaleSet/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-vm-scale-sets-scale-set
+name: VMScaleSets/ScaleSet
+resource_type: inventory.CloudService:azure.VMScaleSets.ScaleSet
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-vm-scale-set.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/VMScaleSets/ScaleSet/scale_set_count.yaml b/src/spaceone/inventory/metrics/VMScaleSets/ScaleSet/scale_set_count.yaml
new file mode 100644
index 00000000..d607b249
--- /dev/null
+++ b/src/spaceone/inventory/metrics/VMScaleSets/ScaleSet/scale_set_count.yaml
@@ -0,0 +1,36 @@
+---
+metric_id: metric-azure-vm-scale-sets-scale-set-count
+name: Scale Set Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.VMScaleSets.ScaleSet
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.sku.name
+ name: Sku Name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.sku
+ name: Sku Tier
+ - key: data.provisioning_state
+ name: Provisioning State
+ - key: instance_type
+ name: Instance Type
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-vm-scale-sets-scale-set
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/VirtualMachines/Instance/instance_count.yaml b/src/spaceone/inventory/metrics/VirtualMachines/Instance/instance_count.yaml
new file mode 100644
index 00000000..5b0a284b
--- /dev/null
+++ b/src/spaceone/inventory/metrics/VirtualMachines/Instance/instance_count.yaml
@@ -0,0 +1,34 @@
+---
+metric_id: metric-azure-vms-instance-count
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.VirtualMachines.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.subscription.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group.resource_group_name
+ name: Resource Group
+ - key: instance_type
+ name: Instance Type
+ default: true
+ - key: data.compute.instance_state
+ name: Instance State
+ default: true
+ - key: data.os.os_type
+ name: OS Type
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-vms-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/VirtualMachines/Instance/memory_size.yaml b/src/spaceone/inventory/metrics/VirtualMachines/Instance/memory_size.yaml
new file mode 100644
index 00000000..76c3ee9c
--- /dev/null
+++ b/src/spaceone/inventory/metrics/VirtualMachines/Instance/memory_size.yaml
@@ -0,0 +1,35 @@
+---
+metric_id: metric-azure-vms-instance-memory-size
+name: Memory Size
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.VirtualMachines.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.subscription.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group.resource_group_name
+ name: Resource Group
+ - key: instance_type
+ name: Instance Type
+ default: true
+ - key: data.compute.instance_state
+ name: Instance State
+ default: true
+ - key: data.os.os_type
+ name: OS Type
+ fields:
+ value:
+ key: data.hardware.memory
+ operator: sum
+unit: GB
+namespace_id: ns-azure-vms-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/VirtualMachines/Instance/namespace.yaml b/src/spaceone/inventory/metrics/VirtualMachines/Instance/namespace.yaml
new file mode 100644
index 00000000..ffe23faa
--- /dev/null
+++ b/src/spaceone/inventory/metrics/VirtualMachines/Instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-vms-instance
+name: VirtualMachines/Instance
+resource_type: inventory.CloudService:azure.VirtualMachines.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-vm.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/VirtualMachines/Instance/vcpu_count.yaml b/src/spaceone/inventory/metrics/VirtualMachines/Instance/vcpu_count.yaml
new file mode 100644
index 00000000..e16207f1
--- /dev/null
+++ b/src/spaceone/inventory/metrics/VirtualMachines/Instance/vcpu_count.yaml
@@ -0,0 +1,30 @@
+---
+metric_id: metric-azure-vms-instance-vcpu-count
+name: vCPU Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.VirtualMachines.Instance
+query_options:
+ group_by:
+ - key: data.subscription.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group.resource_group_name
+ name: Resource Group
+ - key: instance_type
+ name: Instance Type
+ default: true
+ - key: data.compute.instance_state
+ name: Instance State
+ default: true
+ - key: data.os.os_type
+ name: OS Type
+ fields:
+ value:
+ key: data.hardware.core
+ operator: sum
+unit: Core
+namespace_id: ns-azure-vms-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/VirtualNetworks/instance/instance_count.yaml b/src/spaceone/inventory/metrics/VirtualNetworks/instance/instance_count.yaml
new file mode 100644
index 00000000..bcc9923e
--- /dev/null
+++ b/src/spaceone/inventory/metrics/VirtualNetworks/instance/instance_count.yaml
@@ -0,0 +1,32 @@
+---
+metric_id: metric-azure-vnets-instance-count
+name: Instance Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.VirtualNetworks.Instance
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.sku
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-vnets-instance
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/VirtualNetworks/instance/namespace.yaml b/src/spaceone/inventory/metrics/VirtualNetworks/instance/namespace.yaml
new file mode 100644
index 00000000..181f1e29
--- /dev/null
+++ b/src/spaceone/inventory/metrics/VirtualNetworks/instance/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-vnets-instance
+name: VirtualNetworks/Instance
+resource_type: inventory.CloudService:azure.VirtualNetworks.Instance
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-virtual-networks.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/WebPubSubService/Hub/hub_count.yaml b/src/spaceone/inventory/metrics/WebPubSubService/Hub/hub_count.yaml
new file mode 100644
index 00000000..197bde1d
--- /dev/null
+++ b/src/spaceone/inventory/metrics/WebPubSubService/Hub/hub_count.yaml
@@ -0,0 +1,31 @@
+---
+metric_id: metric-azure-web-pub-sub-service-hub
+name: Hub Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.WebPubSubService.Hub
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.sku.name
+      name: Sku Name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-web-pub-sub-service-hub
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/WebPubSubService/Hub/namespace.yaml b/src/spaceone/inventory/metrics/WebPubSubService/Hub/namespace.yaml
new file mode 100644
index 00000000..52dfc59a
--- /dev/null
+++ b/src/spaceone/inventory/metrics/WebPubSubService/Hub/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-web-pub-sub-service-hub
+name: WebPubSubService/Hub
+resource_type: inventory.CloudService:azure.WebPubSubService.Hub
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-web-pubsub-service.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/WebPubSubService/Service/namespace.yaml b/src/spaceone/inventory/metrics/WebPubSubService/Service/namespace.yaml
new file mode 100644
index 00000000..c4465194
--- /dev/null
+++ b/src/spaceone/inventory/metrics/WebPubSubService/Service/namespace.yaml
@@ -0,0 +1,8 @@
+---
+namespace_id: ns-azure-web-pub-sub-service-service
+name: WebPubSubService/Service
+resource_type: inventory.CloudService:azure.WebPubSubService.Service
+group: azure
+category: ASSET
+icon: https://spaceone-custom-assets.s3.ap-northeast-2.amazonaws.com/console-assets/icons/cloud-services/azure/azure-web-pubsub-service.svg
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/metrics/WebPubSubService/Service/service_count.yaml b/src/spaceone/inventory/metrics/WebPubSubService/Service/service_count.yaml
new file mode 100644
index 00000000..77dada5f
--- /dev/null
+++ b/src/spaceone/inventory/metrics/WebPubSubService/Service/service_count.yaml
@@ -0,0 +1,34 @@
+---
+metric_id: metric-azure-web-pub-sub-service-service
+name: Service Count
+metric_type: GAUGE
+resource_type: inventory.CloudService:azure.WebPubSubService.Service
+query_options:
+ group_by:
+ - key: region_code
+ name: Region
+ reference:
+ resource_type: inventory.Region
+ reference_key: region_code
+ - key: data.sku.name
+      name: Sku Name
+ - key: data.sku.tier
+ name: Sku Tier
+ - key: data.tenant_id
+ name: Tenant ID
+ - key: data.subscription_name
+ name: Subscription Name
+ default: true
+ - key: account
+ name: Subscription ID
+ - key: data.resource_group
+ name: Resource Group
+ - key: data.provisioning_state
+ name: Provisioning State
+ default: true
+ fields:
+ value:
+ operator: count
+unit: Count
+namespace_id: ns-azure-web-pub-sub-service-service
+version: '1.0'
\ No newline at end of file
diff --git a/src/spaceone/inventory/model/cosmos_db/data.py b/src/spaceone/inventory/model/cosmos_db/data.py
index 88e7d9c2..e325137c 100644
--- a/src/spaceone/inventory/model/cosmos_db/data.py
+++ b/src/spaceone/inventory/model/cosmos_db/data.py
@@ -1,6 +1,14 @@
from schematics import Model
-from schematics.types import ModelType, ListType, StringType, IntType, BooleanType, DateTimeType, UTCDateTimeType, \
- FloatType
+from schematics.types import (
+ ModelType,
+ ListType,
+ StringType,
+ IntType,
+ BooleanType,
+ DateTimeType,
+ UTCDateTimeType,
+ FloatType,
+)
from spaceone.inventory.libs.schema.resource import AzureCloudService
@@ -20,18 +28,30 @@ class ApiProperties(Model):
class ManagedServiceIdentity(Model):
principal_id = StringType(serialize_when_none=False)
tenant_id = StringType(serialize_when_none=False)
- type = StringType(choices=('None', 'SystemAssigned', 'SystemAssigned, UserAssigned', 'UserAssigned'), serialize_when_none=False)
+ type = StringType(
+ choices=(
+ "None",
+ "SystemAssigned",
+ "SystemAssigned, UserAssigned",
+ "UserAssigned",
+ ),
+ serialize_when_none=False,
+ )
user_assigned_identities = StringType(serialize_when_none=False)
class PeriodicModeProperties(Model):
backup_interval_in_minutes = IntType(serialize_when_none=False)
backup_retention_interval_in_hours = IntType(serialize_when_none=False)
- backup_storage_redundancy = StringType(choices=('Geo', 'Local', 'Zone'), serialize_when_none=False)
+ backup_storage_redundancy = StringType(
+ choices=("Geo", "Local", "Zone"), serialize_when_none=False
+ )
class PeriodicModeBackupPolicy(Model):
- periodic_mode_properties = ModelType(PeriodicModeProperties, serialize_when_none=False)
+ periodic_mode_properties = ModelType(
+ PeriodicModeProperties, serialize_when_none=False
+ )
type = StringType(serialize_when_none=False)
@@ -40,7 +60,16 @@ class Capability(Model):
class ConsistencyPolicy(Model):
- default_consistency_level = StringType(choices=('BoundedStaleness', 'ConsistentPrefix', 'Eventual', 'Session', 'Strong'), serialize_when_none=False)
+ default_consistency_level = StringType(
+ choices=(
+ "BoundedStaleness",
+ "ConsistentPrefix",
+ "Eventual",
+ "Session",
+ "Strong",
+ ),
+ serialize_when_none=False,
+ )
max_interval_in_seconds = IntType(serialize_when_none=False)
max_staleness_prefix = IntType(serialize_when_none=False)
@@ -88,7 +117,9 @@ class PrivateEndpointConnection(Model):
name = StringType(serialize_when_none=False)
group_id = StringType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpointProperty, serialize_when_none=False)
- private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionStateProperty, serialize_when_none=False)
+ private_link_service_connection_state = ModelType(
+ PrivateLinkServiceConnectionStateProperty, serialize_when_none=False
+ )
provisioning_state = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -103,7 +134,9 @@ class RestoreMode(Model):
class RestoreParameters(Model):
- databases_to_restore = ListType(ModelType(DatabaseRestoreResource), serialize_when_none=False)
+ databases_to_restore = ListType(
+ ModelType(DatabaseRestoreResource), serialize_when_none=False
+ )
restore_mode = ModelType(RestoreMode, serialize_when_none=False)
restore_source = StringType(serialize_when_none=False)
restore_timestamp_in_utc = UTCDateTimeType(serialize_when_none=False)
@@ -117,10 +150,16 @@ class VirtualNetworkRule(Model):
class SystemData(Model):
created_at = DateTimeType(serialize_when_none=False)
created_by = StringType(serialize_when_none=False)
- created_by_type = StringType(choices=('Application', 'Key', 'ManagedIdentity', 'User'), serialize_when_none=False)
+ created_by_type = StringType(
+ choices=("Application", "Key", "ManagedIdentity", "User"),
+ serialize_when_none=False,
+ )
last_modified_at = DateTimeType(serialize_when_none=False)
last_modified_by = StringType(serialize_when_none=False)
- last_modified_by_type = StringType(choices=('Application', 'Key', 'ManagedIdentity', 'User'), serialize_when_none=False)
+ last_modified_by_type = StringType(
+ choices=("Application", "Key", "ManagedIdentity", "User"),
+ serialize_when_none=False,
+ )
class DatabaseAccountListKeysResult(Model):
@@ -143,8 +182,10 @@ class DatabaseAccountGetResults(AzureCloudService): # Main Class
id = StringType(serialize_when_none=False)
identity = ModelType(ManagedServiceIdentity, serialize_when_none=False)
location = StringType(serialize_when_none=False)
- kind = StringType(choices=('GlobalDocumentDB', 'MongoDB', 'Parse'), serialize_when_none=False)
- name = StringType(default='-', serialize_when_none=False)
+ kind = StringType(
+ choices=("GlobalDocumentDB", "MongoDB", "Parse"), serialize_when_none=False
+ )
+ name = StringType(default="-", serialize_when_none=False)
api_properties = ModelType(ApiProperties, serialize_when_none=False)
backup_policy = ModelType(PeriodicModeBackupPolicy, serialize_when_none=False)
capabilities = ListType(ModelType(Capability), serialize_when_none=False)
@@ -153,7 +194,7 @@ class DatabaseAccountGetResults(AzureCloudService): # Main Class
consistency_policy = ModelType(ConsistencyPolicy, serialize_when_none=False)
cors = ListType(ModelType(CorsPolicy), serialize_when_none=False)
cors_display = ListType(StringType, serialize_when_none=False)
- create_mode = StringType(choices=('Default', 'Restore'), serialize_when_none=False)
+ create_mode = StringType(choices=("Default", "Restore"), serialize_when_none=False)
database_account_offer_type = StringType(serialize_when_none=False)
default_identity = StringType(serialize_when_none=False)
disable_key_based_metadata_write_access = BooleanType(serialize_when_none=False)
@@ -170,16 +211,27 @@ class DatabaseAccountGetResults(AzureCloudService): # Main Class
key_vault_key_uri = BooleanType(serialize_when_none=False)
keys = ModelType(DatabaseAccountListKeysResult, serialize_when_none=False)
locations = ListType(ModelType(Location), serialize_when_none=False)
- network_acl_bypass = StringType(choices=('AzureServices', 'None'), serialize_when_none=False)
+ network_acl_bypass = StringType(
+ choices=("AzureServices", "None"), serialize_when_none=False
+ )
network_acl_bypass_resource_ids = ListType(StringType, serialize_when_none=False)
- private_endpoint_connections = ListType(ModelType(PrivateEndpointConnection), serialize_when_none=False)
+ private_endpoint_connections = ListType(
+ ModelType(PrivateEndpointConnection), serialize_when_none=False
+ )
provisioning_state = StringType(serialize_when_none=False)
- public_network_access = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
+ public_network_access = StringType(
+ choices=("Disabled", "Enabled"), serialize_when_none=False
+ )
read_locations = ListType(ModelType(Location), serialize_when_none=False)
restore_parameters = ModelType(RestoreParameters, serialize_when_none=False)
- virtual_network_rules = ListType(ModelType(VirtualNetworkRule), serialize_when_none=False)
+ virtual_network_rules = ListType(
+ ModelType(VirtualNetworkRule), serialize_when_none=False
+ )
virtual_network_display = ListType(StringType, serialize_when_none=False)
- sql_databases = ListType(ModelType(SqlDatabaseGetResults), serialize_when_none=False)
+ sql_databases = ListType(
+ ModelType(SqlDatabaseGetResults), serialize_when_none=False
+ )
+ sql_databases_count_display = IntType(serialize_when_none=False)
write_locations = ListType(ModelType(Location), serialize_when_none=False)
system_data = ModelType(SystemData, serialize_when_none=False)
instance_type = StringType(serialize_when_none=False)
diff --git a/src/spaceone/inventory/model/disks/cloud_service_type.py b/src/spaceone/inventory/model/disks/cloud_service_type.py
index afeef004..b78e27aa 100644
--- a/src/spaceone/inventory/model/disks/cloud_service_type.py
+++ b/src/spaceone/inventory/model/disks/cloud_service_type.py
@@ -1,10 +1,22 @@
import os
from spaceone.inventory.libs.utils import *
-from spaceone.inventory.libs.schema.metadata.dynamic_widget import CardWidget, ChartWidget
-from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, SearchField, DateTimeDyField, ListDyField, \
- EnumDyField, SizeField
-from spaceone.inventory.libs.schema.cloud_service_type import CloudServiceTypeResource, CloudServiceTypeResponse, \
- CloudServiceTypeMeta
+from spaceone.inventory.libs.schema.metadata.dynamic_widget import (
+ CardWidget,
+ ChartWidget,
+)
+from spaceone.inventory.libs.schema.metadata.dynamic_field import (
+ TextDyField,
+ SearchField,
+ DateTimeDyField,
+ ListDyField,
+ EnumDyField,
+ SizeField,
+)
+from spaceone.inventory.libs.schema.cloud_service_type import (
+ CloudServiceTypeResource,
+ CloudServiceTypeResponse,
+ CloudServiceTypeMeta,
+)
from spaceone.inventory.conf.cloud_service_conf import ASSET_URL
current_dir = os.path.abspath(os.path.dirname(__file__))
@@ -12,78 +24,112 @@
"""
DISK
"""
-disks_count_by_account_conf = os.path.join(current_dir, 'widget/disks_count_by_account.yaml')
-disks_count_by_region_conf = os.path.join(current_dir, 'widget/disks_count_by_region.yaml')
-disks_count_by_resource_group_conf = os.path.join(current_dir, 'widget/disks_count_by_resource_group.yaml')
-disks_size_by_region_conf = os.path.join(current_dir, 'widget/disks_size_by_region.yaml')
-disks_size_by_status_conf = os.path.join(current_dir, 'widget/disks_size_by_status.yaml')
-disks_size_by_subscription_conf = os.path.join(current_dir, 'widget/disks_size_by_subscription.yaml')
-disks_size_by_type_conf = os.path.join(current_dir, 'widget/disks_size_by_type.yaml')
-disks_total_size_conf = os.path.join(current_dir, 'widget/disks_total_size.yaml')
-disks_total_count_conf = os.path.join(current_dir, 'widget/disks_total_count.yaml')
+disks_count_by_account_conf = os.path.join(
+ current_dir, "widget/disks_count_by_account.yaml"
+)
+disks_count_by_region_conf = os.path.join(
+ current_dir, "widget/disks_count_by_region.yaml"
+)
+disks_count_by_resource_group_conf = os.path.join(
+ current_dir, "widget/disks_count_by_resource_group.yaml"
+)
+disks_size_by_region_conf = os.path.join(
+ current_dir, "widget/disks_size_by_region.yaml"
+)
+disks_size_by_status_conf = os.path.join(
+ current_dir, "widget/disks_size_by_status.yaml"
+)
+disks_size_by_subscription_conf = os.path.join(
+ current_dir, "widget/disks_size_by_subscription.yaml"
+)
+disks_size_by_type_conf = os.path.join(current_dir, "widget/disks_size_by_type.yaml")
+disks_total_size_conf = os.path.join(current_dir, "widget/disks_total_size.yaml")
+disks_total_count_conf = os.path.join(current_dir, "widget/disks_total_count.yaml")
cst_disks = CloudServiceTypeResource()
-cst_disks.group = 'Disks'
-cst_disks.name = 'Disk'
-cst_disks.provider = 'azure'
-cst_disks.labels = ['Compute', 'Storage']
-cst_disks.service_code = 'Microsoft.Compute/disks'
+cst_disks.group = "Disks"
+cst_disks.name = "Disk"
+cst_disks.provider = "azure"
+cst_disks.labels = ["Compute", "Storage"]
+cst_disks.service_code = "Microsoft.Compute/disks"
cst_disks.is_major = True
cst_disks.is_primary = True
cst_disks.tags = {
- 'spaceone:icon': f'{ASSET_URL}/azure-disk.svg',
- 'spaceone:display_name': 'Disk'
+ "spaceone:icon": f"{ASSET_URL}/azure-disk.svg",
+ "spaceone:display_name": "Disk",
}
cst_disks._metadata = CloudServiceTypeMeta.set_meta(
fields=[
- TextDyField.data_source('Storage Account Type', 'data.sku.name'),
- SizeField.data_source('Size', 'data.size'),
- EnumDyField.data_source('Disk State', 'data.disk_state', default_state={
- 'safe': ['ActiveSAS', 'ActiveUpload', 'Attached', 'Reserved'],
- 'warning': ['ReadyToUpload'],
- 'available': ['Unattached']
- }),
- TextDyField.data_source('Owner', 'data.managed_by'),
- TextDyField.data_source('Resource Group', 'data.resource_group'),
- TextDyField.data_source('Location', 'data.location'),
- TextDyField.data_source('Subscription Name', 'data.subscription_name'),
-
+ TextDyField.data_source("Storage Account Type", "data.sku.name"),
+ SizeField.data_source("Size", "data.size"),
+ EnumDyField.data_source(
+ "Disk State",
+ "data.disk_state",
+ default_state={
+ "safe": ["ActiveSAS", "ActiveUpload", "Attached", "Reserved"],
+ "warning": ["ReadyToUpload"],
+ "available": ["Unattached"],
+ },
+ ),
+ TextDyField.data_source("Owner", "data.managed_by"),
+ TextDyField.data_source("Resource Group", "data.resource_group"),
+ TextDyField.data_source("Location", "data.location"),
+ TextDyField.data_source("Subscription Name", "data.subscription_name"),
# is_optional - Default
- TextDyField.data_source('Subscription ID', 'data.subscription_id', options={
- 'is_optional': True
- }),
- ListDyField.data_source('Zones', 'data.zones', options={
-            'delimiter': '<br>',
- 'is_optional': True
- }),
- TextDyField.data_source('Encryption Type', 'data.encryption.type', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Networking', 'data.network_access_policy_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Max Shares', 'data.max_shares', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Time Created', 'data.time_created', options={
- 'is_optional': True
- }),
+ TextDyField.data_source(
+ "Subscription ID", "data.subscription_id", options={"is_optional": True}
+ ),
+ ListDyField.data_source(
+            "Zone", "data.zones", options={"delimiter": "<br>", "is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Encryption Type", "data.encryption.type", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Networking",
+ "data.network_access_policy_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Max Shares", "data.max_shares", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Time Created", "data.time_created", options={"is_optional": True}
+ ),
],
search=[
- SearchField.set(name='Tier', key='data.tier', data_type='string'),
- SearchField.set(name='Subscription ID', key='data.subscription_id', data_type='string'),
- SearchField.set(name='Subscription Name', key='data.subscription_name', data_type='string'),
- SearchField.set(name='Resource Group', key='data.resource_group', data_type='string'),
- SearchField.set(name='Location', key='data.location', data_type='string'),
- SearchField.set(name='Zone', key='data.zones', data_type='string'),
- SearchField.set(name='Storage Account Type', key='data.sku.name', data_type='string'),
- SearchField.set(name='Disk Size (Bytes)', key='data.disk_size_bytes', data_type='integer'),
- SearchField.set(name='Disk Size (GB)', key='data.disk_size_gb', data_type='integer'),
- SearchField.set(name='Disk IOPS', key='data.disk_iops_read_write', data_type='integer'),
- SearchField.set(name='OS Type', key='data.os_type', data_type='string'),
- SearchField.set(name='Provisioning State', key='data.provisioning_state', data_type='string'),
- SearchField.set(name='Time Created', key='data.time_created', data_type='datetime'),
+ SearchField.set(name="Tier", key="data.tier", data_type="string"),
+ SearchField.set(
+ name="Subscription ID", key="data.subscription_id", data_type="string"
+ ),
+ SearchField.set(
+ name="Subscription Name", key="data.subscription_name", data_type="string"
+ ),
+ SearchField.set(
+ name="Resource Group", key="data.resource_group", data_type="string"
+ ),
+ SearchField.set(name="Location", key="data.location", data_type="string"),
+ SearchField.set(name="Zone", key="data.zones", data_type="string"),
+ SearchField.set(
+ name="Storage Account Type", key="data.sku.name", data_type="string"
+ ),
+ SearchField.set(
+ name="Disk Size (Bytes)", key="data.disk_size_bytes", data_type="integer"
+ ),
+ SearchField.set(
+ name="Disk Size (GB)", key="data.disk_size_gb", data_type="integer"
+ ),
+ SearchField.set(
+ name="Disk IOPS", key="data.disk_iops_read_write", data_type="integer"
+ ),
+ SearchField.set(name="OS Type", key="data.os_type", data_type="string"),
+ SearchField.set(
+ name="Provisioning State", key="data.provisioning_state", data_type="string"
+ ),
+ SearchField.set(
+ name="Time Created", key="data.time_created", data_type="datetime"
+ ),
],
widget=[
CardWidget.set(**get_data_from_yaml(disks_total_count_conf)),
@@ -94,11 +140,8 @@
ChartWidget.set(**get_data_from_yaml(disks_size_by_region_conf)),
ChartWidget.set(**get_data_from_yaml(disks_size_by_status_conf)),
ChartWidget.set(**get_data_from_yaml(disks_size_by_subscription_conf)),
- ChartWidget.set(**get_data_from_yaml(disks_size_by_type_conf))
- ]
+ ChartWidget.set(**get_data_from_yaml(disks_size_by_type_conf)),
+ ],
)
-
-CLOUD_SERVICE_TYPES = [
- CloudServiceTypeResponse({'resource': cst_disks})
-]
+CLOUD_SERVICE_TYPES = [CloudServiceTypeResponse({"resource": cst_disks})]
diff --git a/src/spaceone/inventory/model/job_model.py b/src/spaceone/inventory/model/job_model.py
new file mode 100644
index 00000000..9ed2ccb7
--- /dev/null
+++ b/src/spaceone/inventory/model/job_model.py
@@ -0,0 +1,18 @@
+from schematics.models import Model
+from schematics.types import ListType, StringType
+from schematics.types.compound import ModelType
+
+__all__ = ["Tasks"]
+
+
+class TaskOptions(Model):
+ resource_type = StringType(serialize_when_none=False)
+ cloud_service_types = ListType(StringType, serialize_when_none=False)
+
+
+class Task(Model):
+ task_options = ModelType(TaskOptions, required=True)
+
+
+class Tasks(Model):
+ tasks = ListType(ModelType(Task), required=True)
diff --git a/src/spaceone/inventory/model/key_vaults/cloud_service.py b/src/spaceone/inventory/model/key_vaults/cloud_service.py
index b75c09e3..56add802 100644
--- a/src/spaceone/inventory/model/key_vaults/cloud_service.py
+++ b/src/spaceone/inventory/model/key_vaults/cloud_service.py
@@ -1,93 +1,176 @@
-from schematics.types import ModelType, StringType, PolyModelType, FloatType, DateTimeType
-
-from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, DateTimeDyField, EnumDyField, \
- ListDyField, SizeField, StateItemDyField
-from spaceone.inventory.libs.schema.metadata.dynamic_layout import ItemDynamicLayout, TableDynamicLayout, \
- ListDynamicLayout, SimpleTableDynamicLayout
-from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse, CloudServiceMeta
+from schematics.types import (
+ ModelType,
+ StringType,
+ PolyModelType,
+ FloatType,
+ DateTimeType,
+)
+
+from spaceone.inventory.libs.schema.metadata.dynamic_field import (
+ TextDyField,
+ DateTimeDyField,
+ EnumDyField,
+ ListDyField,
+ SizeField,
+ StateItemDyField,
+)
+from spaceone.inventory.libs.schema.metadata.dynamic_layout import (
+ ItemDynamicLayout,
+ TableDynamicLayout,
+ ListDynamicLayout,
+ SimpleTableDynamicLayout,
+ QuerySearchTableDynamicLayout,
+)
+from spaceone.inventory.libs.schema.cloud_service import (
+ CloudServiceResource,
+ CloudServiceResponse,
+ CloudServiceMeta,
+)
from spaceone.inventory.model.key_vaults.data import KeyVault
-'''
+"""
KEY_VAULT
-'''
+"""
# TAB - Default
-key_vault_info_meta = ItemDynamicLayout.set_fields('Key Vault', fields=[
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Resource ID', 'data.id'),
- TextDyField.data_source('Resource Group', 'data.resource_group'),
- TextDyField.data_source('Location', 'data.location'),
- TextDyField.data_source('Subscription', 'data.subscription_name'),
- TextDyField.data_source('Subscription ID', 'account'),
- TextDyField.data_source('Vault URI', 'data.properties.vault_uri'),
- TextDyField.data_source('Sku (Pricing Tier)', 'instance_type'),
- TextDyField.data_source('Directory ID', 'data.properties.tenant_id'),
- # TextDyField.data_source('Directory Name', 'data.'),
- TextDyField.data_source('Soft-delete', 'data.properties.enable_soft_delete'),
- TextDyField.data_source('Purge Protection', 'data.properties.enable_purge_protection_str'),
-])
-
+key_vault_info_meta = ItemDynamicLayout.set_fields(
+ "Key Vault",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Resource ID", "data.id"),
+ TextDyField.data_source("Resource Group", "data.resource_group"),
+ TextDyField.data_source("Location", "data.location"),
+ TextDyField.data_source("Subscription", "data.subscription_name"),
+ TextDyField.data_source("Subscription ID", "account"),
+ TextDyField.data_source("Vault URI", "data.properties.vault_uri"),
+ TextDyField.data_source("Sku (Pricing Tier)", "instance_type"),
+ TextDyField.data_source("Directory ID", "data.properties.tenant_id"),
+ # TextDyField.data_source('Directory Name', 'data.'),
+ TextDyField.data_source("Soft-delete", "data.properties.enable_soft_delete"),
+ TextDyField.data_source(
+ "Purge Protection", "data.properties.enable_purge_protection_str"
+ ),
+ TextDyField.data_source(
+ "Total Credentials Count", "data.total_credentials_count"
+ ),
+ TextDyField.data_source("Keys Count", "data.key_count"),
+ TextDyField.data_source("Secrets Count", "data.secret_count"),
+ TextDyField.data_source("Certificates Count", "data.certificate_count"),
+ ],
+)
+
+# TAB - KeyVaults Permissions
+key_vault_permissions = ItemDynamicLayout.set_fields(
+ "Permissions description",
+ fields=[
+ TextDyField.data_source("Keys", "data.keys_permissions_description_display"),
+ TextDyField.data_source(
+ "Secrets", "data.secrets_permissions_description_display"
+ ),
+ TextDyField.data_source(
+ "Certificates", "data.certificates_permissions_description_display"
+ ),
+ ],
+)
# TAB - Keys
-key_vault_keys = SimpleTableDynamicLayout.set_fields('Keys', 'data.keys', fields=[
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Type', 'instance_type'),
- TextDyField.data_source('Location', 'location'),
- TextDyField.data_source('Status', 'attributes.enabled'),
- DateTimeDyField.data_source('Expiration Date', 'attributes.expires'),
- DateTimeDyField.data_source('Creation Date', 'attributes.created'),
- TextDyField.data_source('Key URI', 'key_uri')
-])
-
+key_vault_keys = QuerySearchTableDynamicLayout.set_fields(
+ "Keys",
+ root_path="data.keys",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Type", "instance_type"),
+ TextDyField.data_source("Location", "location"),
+ TextDyField.data_source("Status", "attributes.enabled"),
+ DateTimeDyField.data_source("Expiration Date", "attributes.expires"),
+ DateTimeDyField.data_source("Creation Date", "attributes.created"),
+ TextDyField.data_source("Key URI", "key_uri"),
+ ],
+)
# TAB - Secrets
-key_vault_secrets = SimpleTableDynamicLayout.set_fields('Secrets', 'data.secrets', fields=[
- TextDyField.data_source('ID', '_id'),
- TextDyField.data_source('Type', '_content_type'),
- TextDyField.data_source('Status', '_attributes.enabled'),
- DateTimeDyField.data_source('Updated Date', '_attributes.updated'),
- DateTimeDyField.data_source('Creation Date', '_attributes.created'),
- TextDyField.data_source('Recoverable Days', '_attributes.recoverable_days')
-])
-
+key_vault_secrets = QuerySearchTableDynamicLayout.set_fields(
+ "Secrets",
+ root_path="data.secrets",
+ fields=[
+ TextDyField.data_source("ID", "_id"),
+ TextDyField.data_source("Type", "_content_type"),
+ TextDyField.data_source("Status", "_attributes.enabled"),
+ DateTimeDyField.data_source("Updated Date", "_attributes.updated"),
+ DateTimeDyField.data_source("Creation Date", "_attributes.created"),
+ TextDyField.data_source("Recoverable Days", "_attributes.recoverable_days"),
+ ],
+)
# TAB - Certificates
-key_vault_certificates = SimpleTableDynamicLayout.set_fields('Certificates', 'data.certificates', fields=[
- TextDyField.data_source('ID', '_id'),
- TextDyField.data_source('Status', '_attributes.enabled'),
- DateTimeDyField.data_source('Updated Date', '_attributes.updated'),
- DateTimeDyField.data_source('Creation Date', '_attributes.created'),
- TextDyField.data_source('Recoverable Days', '_attributes.recoverable_days')
-
-])
+key_vault_certificates = QuerySearchTableDynamicLayout.set_fields(
+ "Certificates",
+ root_path="data.certificates",
+ fields=[
+ TextDyField.data_source("ID", "_id"),
+ TextDyField.data_source("Status", "_attributes.enabled"),
+ DateTimeDyField.data_source("Updated Date", "_attributes.updated"),
+ DateTimeDyField.data_source("Creation Date", "_attributes.created"),
+ TextDyField.data_source("Recoverable Days", "_attributes.recoverable_days"),
+ ],
+)
# TAB - Access Policies
-key_vault_access_policies = ItemDynamicLayout.set_fields('Access Policies', fields=[
- TextDyField.data_source('Enable for Azure VM Deployment', 'data.properties.enabled_for_deployment'),
- TextDyField.data_source('Enable for Disk Encryption', 'data.properties.enabled_for_disk_encryption'),
- TextDyField.data_source('Enable for Template Deployment', 'data.properties.enabled_for_template_deployment'),
- TextDyField.data_source('Enable RBAC Authorization', 'data.properties.enable_rbac_authorization')
-])
-
+key_vault_access_policies = ItemDynamicLayout.set_fields(
+ "Access Policies",
+ fields=[
+ TextDyField.data_source(
+ "Enable for Azure VM Deployment", "data.properties.enabled_for_deployment"
+ ),
+ TextDyField.data_source(
+ "Enable for Disk Encryption", "data.properties.enabled_for_disk_encryption"
+ ),
+ TextDyField.data_source(
+ "Enable for Template Deployment",
+ "data.properties.enabled_for_template_deployment",
+ ),
+ TextDyField.data_source(
+ "Enable RBAC Authorization", "data.properties.enable_rbac_authorization"
+ ),
+ ],
+)
# TAB - Networking
-key_vault_networking = SimpleTableDynamicLayout.set_fields('Private Endpoint Connections', 'data.properties.private_endpoint_connections', fields=[
- TextDyField.data_source('Connection Name', 'name'),
- TextDyField.data_source('Connection State', 'private_link_service_connection_state.status'),
- EnumDyField.data_source('Provisioning State', 'provisioning_state', default_state={
- 'safe': ['Succeeded', 'RegisteringDns']
- }),
- TextDyField.data_source('Private Endpoint', 'private_endpoint.id'),
-])
+key_vault_networking = QuerySearchTableDynamicLayout.set_fields(
+ "Private Endpoint Connections",
+ root_path="data.properties.private_endpoint_connections",
+ fields=[
+ TextDyField.data_source("Connection Name", "name"),
+ TextDyField.data_source(
+ "Connection State", "private_link_service_connection_state.status"
+ ),
+ EnumDyField.data_source(
+ "Provisioning State",
+ "provisioning_state",
+ default_state={"safe": ["Succeeded", "RegisteringDns"]},
+ ),
+ TextDyField.data_source("Private Endpoint", "private_endpoint.id"),
+ ],
+)
key_vault_meta = CloudServiceMeta.set_layouts(
- [key_vault_info_meta, key_vault_keys, key_vault_secrets, key_vault_certificates, key_vault_access_policies,
- key_vault_networking])
+ [
+ key_vault_info_meta,
+ key_vault_permissions,
+ key_vault_keys,
+ key_vault_secrets,
+ key_vault_certificates,
+ key_vault_access_policies,
+ key_vault_networking,
+ ]
+)
class KeyVaultResource(CloudServiceResource):
- cloud_service_group = StringType(default='KeyVaults')
- cloud_service_type = StringType(default='Instance')
+ cloud_service_group = StringType(default="KeyVaults")
+ cloud_service_type = StringType(default="Instance")
data = ModelType(KeyVault)
- _metadata = ModelType(CloudServiceMeta, default=key_vault_meta, serialized_name='metadata')
+ _metadata = ModelType(
+ CloudServiceMeta, default=key_vault_meta, serialized_name="metadata"
+ )
name = StringType()
account = StringType(serialize_when_none=False)
instance_type = StringType(serialize_when_none=False)
diff --git a/src/spaceone/inventory/model/key_vaults/data.py b/src/spaceone/inventory/model/key_vaults/data.py
index e2df38d2..cdddfc1d 100644
--- a/src/spaceone/inventory/model/key_vaults/data.py
+++ b/src/spaceone/inventory/model/key_vaults/data.py
@@ -1,5 +1,13 @@
from schematics import Model
-from schematics.types import ModelType, ListType, StringType, IntType, BooleanType, DateTimeType
+from schematics.types import (
+ ModelType,
+ ListType,
+ StringType,
+ IntType,
+ BooleanType,
+ DateTimeType,
+ DictType,
+)
from spaceone.inventory.libs.schema.resource import AzureCloudService
@@ -36,10 +44,12 @@ class VirtualNetworkRule(Model):
class NetworkRuleSet(Model):
- bypass = StringType(choices=('AzureServices', 'None'), serialize_when_none=False)
- default_action = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
+ bypass = StringType(choices=("AzureServices", "None"), serialize_when_none=False)
+ default_action = StringType(choices=("Allow", "Deny"), serialize_when_none=False)
ip_rules = ListType(ModelType(IPRule), serialize_when_none=False)
- virtual_network_rules = ListType(ModelType(VirtualNetworkRule), serialize_when_none=False)
+ virtual_network_rules = ListType(
+ ModelType(VirtualNetworkRule), serialize_when_none=False
+ )
class PrivateEndpoint(Model):
@@ -49,7 +59,10 @@ class PrivateEndpoint(Model):
class PrivateLinkServiceConnectionState(Model):
actions_required = StringType(serialize_when_none=False)
description = StringType(serialize_when_none=False)
- status = StringType(choices=('Approved', 'Disconnected', 'Pending', 'Rejected'), serialize_when_none=False)
+ status = StringType(
+ choices=("Approved", "Disconnected", "Pending", "Rejected"),
+ serialize_when_none=False,
+ )
class PrivateEndpointConnectionItem(Model):
@@ -57,13 +70,25 @@ class PrivateEndpointConnectionItem(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpoint, serialize_when_none=False)
- private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
- provisioning_state = StringType(choices=('Creating', 'Deleting', 'Disconnected', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_link_service_connection_state = ModelType(
+ PrivateLinkServiceConnectionState, serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=(
+ "Creating",
+ "Deleting",
+ "Disconnected",
+ "Failed",
+ "Succeeded",
+ "Updating",
+ ),
+ serialize_when_none=False,
+ )
class Sku(Model):
family = StringType(serialize_when_none=False)
- name = StringType(choices=('premium', 'standard'), serialize_when_none=False)
+ name = StringType(choices=("premium", "standard"), serialize_when_none=False)
class SecretAttributes(Model):
@@ -72,9 +97,18 @@ class SecretAttributes(Model):
exp = DateTimeType(serialize_when_none=False)
nbf = DateTimeType(serialize_when_none=False)
recoverable_days = IntType(serialize_when_none=False)
- recovery_level = StringType(choices=('CustomizedRecoverable', 'CustomizedRecoverable+ProtectedSubscription',
- 'CustomizedRecoverable+Purgeable', 'Purgeable', 'Recoverable', 'Recoverable+ProtectedSubscription',
- 'Recoverable+Purgeable'), serialize_when_none=False)
+ recovery_level = StringType(
+ choices=(
+ "CustomizedRecoverable",
+ "CustomizedRecoverable+ProtectedSubscription",
+ "CustomizedRecoverable+Purgeable",
+ "Purgeable",
+ "Recoverable",
+ "Recoverable+ProtectedSubscription",
+ "Recoverable+Purgeable",
+ ),
+ serialize_when_none=False,
+ )
updated = DateTimeType(serialize_when_none=False)
@@ -104,10 +138,18 @@ class CertificateAttributes(Model):
exp = DateTimeType(serialize_when_none=False)
nbf = DateTimeType(serialize_when_none=False)
recoverable_days = IntType(serialize_when_none=False)
- recovery_level = StringType(choices=('CustomizedRecoverable', 'CustomizedRecoverable+ProtectedSubscription',
- 'CustomizedRecoverable+Purgeable', 'Purgeable', 'Recoverable',
- 'Recoverable+ProtectedSubscription',
- 'Recoverable+Purgeable'), serialize_when_none=False)
+ recovery_level = StringType(
+ choices=(
+ "CustomizedRecoverable",
+ "CustomizedRecoverable+ProtectedSubscription",
+ "CustomizedRecoverable+Purgeable",
+ "Purgeable",
+ "Recoverable",
+ "Recoverable+ProtectedSubscription",
+ "Recoverable+Purgeable",
+ ),
+ serialize_when_none=False,
+ )
updated = DateTimeType(serialize_when_none=False)
@@ -121,9 +163,11 @@ class CertificateItem(Model):
class VaultProperties(Model):
access_policies = ListType(ModelType(AccessPolicyEntry), serialize_when_none=False)
- create_mode = StringType(choices=('default', 'recover'), serialize_when_none=False)
+ create_mode = StringType(choices=("default", "recover"), serialize_when_none=False)
enable_purge_protection = BooleanType(default=False, serialize_when_none=False)
- enable_purge_protection_str = StringType(serialize_when_none=False, default='Disabled')
+ enable_purge_protection_str = StringType(
+ serialize_when_none=False, default="Disabled"
+ )
enable_rbac_authorization = BooleanType(serialize_when_none=False)
enable_soft_delete = BooleanType(serialize_when_none=False)
enabled_for_deployment = BooleanType(serialize_when_none=False)
@@ -131,8 +175,12 @@ class VaultProperties(Model):
enabled_for_template_deployment = BooleanType(serialize_when_none=False)
hsm_pool_resource_id = StringType(serialize_when_none=False)
network_acls = ModelType(NetworkRuleSet, serialize_when_none=False)
- private_endpoint_connections = ListType(ModelType(PrivateEndpointConnectionItem), serialize_when_none=False)
- provisioning_state = StringType(choices=('RegisteringDns', 'Succeeded'), serialize_when_none=False)
+ private_endpoint_connections = ListType(
+ ModelType(PrivateEndpointConnectionItem), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("RegisteringDns", "Succeeded"), serialize_when_none=False
+ )
sku = ModelType(Sku, serialize_when_none=False)
soft_delete_retention_in_days = IntType(serialize_when_none=False)
tenant_id = StringType(serialize_when_none=False)
@@ -145,10 +193,18 @@ class KeyAttributes(Model):
exp = DateTimeType(serialize_when_none=False)
nbf = DateTimeType(serialize_when_none=False)
recoverable_days = IntType(serialize_when_none=False)
- recovery_level = StringType(choices=('CustomizedRecoverable', 'CustomizedRecoverable+ProtectedSubscription',
- 'CustomizedRecoverable+Purgeable', 'Purgeable', 'Recoverable',
- 'Recoverable+ProtectedSubscription',
- 'Recoverable+Purgeable'), serialize_when_none=False)
+ recovery_level = StringType(
+ choices=(
+ "CustomizedRecoverable",
+ "CustomizedRecoverable+ProtectedSubscription",
+ "CustomizedRecoverable+Purgeable",
+ "Purgeable",
+ "Recoverable",
+ "Recoverable+ProtectedSubscription",
+ "Recoverable+Purgeable",
+ ),
+ serialize_when_none=False,
+ )
updated = DateTimeType(serialize_when_none=False)
@@ -164,9 +220,19 @@ class KeyItem(Model):
tags = ModelType(Tags, serialize_when_none=False)
+class SystemData(Model):
+ created_at = DateTimeType(serialize_when_none=False)
+ created_by = StringType(serialize_when_none=False)
+ created_by_type = StringType(serialize_when_none=False)
+ last_modified_at = DateTimeType(serialize_when_none=False)
+ last_modified_by = StringType(serialize_when_none=False)
+ last_modified_by_type = StringType(serialize_when_none=False)
+
+
class KeyVault(AzureCloudService): # Main class
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
+ sku = ModelType(Sku, serialize_when_none=False)
name = StringType(serialize_when_none=False)
location = StringType(serialize_when_none=False)
properties = ModelType(VaultProperties, serialize_when_none=False)
@@ -174,8 +240,15 @@ class KeyVault(AzureCloudService): # Main class
secrets = ListType(ModelType(SecretItem), serialize_when_none=False)
certificates = ListType(ModelType(CertificateItem), serialize_when_none=False)
key_count = IntType(serialize_when_none=False)
+ secret_count = IntType(serialize_when_none=False)
+ certificate_count = IntType(serialize_when_none=False)
+ total_credentials_count = IntType(serialize_when_none=False)
+ system_data = ModelType(SystemData, serialize_when_none=False)
type = StringType(serialize_when_none=False)
launched_at = DateTimeType(serialize_when_none=False)
+ keys_permissions_description_display = StringType(serialize_when_none=False)
+ secrets_permissions_description_display = StringType(serialize_when_none=False)
+ certificates_permissions_description_display = StringType(serialize_when_none=False)
def reference(self):
return {
diff --git a/src/spaceone/inventory/model/nat_gateways/data.py b/src/spaceone/inventory/model/nat_gateways/data.py
index 785295ce..8cb55dcd 100644
--- a/src/spaceone/inventory/model/nat_gateways/data.py
+++ b/src/spaceone/inventory/model/nat_gateways/data.py
@@ -12,9 +12,11 @@ class SubResource(Model):
id = StringType()
-'''
+"""
START OF REF CLASSES
-'''
+"""
+
+
class NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties(Model):
fqdns = ListType(StringType, serialize_when_none=False)
group_id = StringType(serialize_when_none=False)
@@ -31,34 +33,55 @@ class ApplicationSecurityGroupRef(Model):
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
-class NetworkInterfaceIPConfigurationRef(Model): # ip configuration in a network interface
+class NetworkInterfaceIPConfigurationRef(
+ Model
+): # ip configuration in a network interface
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- application_security_groups = ListType(ModelType(ApplicationSecurityGroupRef), serialize_when_none=False)
+ application_security_groups = ListType(
+ ModelType(ApplicationSecurityGroupRef), serialize_when_none=False
+ )
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- private_link_connection_properties = ModelType(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
- serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_address = StringType(default='', serialize_when_none=False) # Change Public IP Address to id
- subnet = StringType(default='', serialize_when_none=False) # Change Subnet to id
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ private_link_connection_properties = ModelType(
+ NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
+ serialize_when_none=False,
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_address = StringType(
+ default="", serialize_when_none=False
+ ) # Change Public IP Address to id
+ subnet = StringType(default="", serialize_when_none=False) # Change Subnet to id
virtual_network_taps = ListType(ModelType(SubResource), serialize_when_none=False)
-'''
+
+"""
END OF REF CLASSES
-'''
+"""
+
+
class AddressSpace(Model):
address_count = IntType(serialize_when_none=False, default=0)
- address_prefixes = ListType(StringType, default=['-'])
+ address_prefixes = ListType(StringType, default=["-"])
class AutoApproval(Model):
@@ -69,12 +92,17 @@ class ApplicationGatewayIPConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = ModelType(SubResource, serialize_when_none=False)
type = StringType(serialize_when_none=False)
-class ConnectedDevice(Model): # Customized class, model for connected device lists attached to this virtual network
+class ConnectedDevice(
+ Model
+): # Customized class, model for connected device lists attached to this virtual network
name = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
device = StringType(serialize_when_none=False)
@@ -88,19 +116,24 @@ class CustomDnsConfigPropertiesFormat(Model):
class DdosSettings(Model):
ddos_custom_policy = ModelType(SubResource, serialize_when_none=False)
protected_ip = BooleanType(serialize_when_none=False)
- protection_coverage = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
+ protection_coverage = StringType(
+ choices=("Basic", "Standard"), serialize_when_none=False
+ )
class DhcpOptions(Model):
- dns_servers = ListType(StringType, default=['Azure provided DNS service'])
+ dns_servers = ListType(StringType, default=["Azure provided DNS service"])
class Delegation(Model):
etag = StringType(serialize_when_none=False)
id = StringType()
- name = StringType(default='-', serialize_when_none=False)
+ name = StringType(default="-", serialize_when_none=False)
actions = ListType(StringType, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
service_name = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -125,8 +158,11 @@ class InboundNatPool(Model):
frontend_port_range_end = IntType(serialize_when_none=False)
frontend_port_range_start = IntType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(choices=("All", "Tcp", "Udp"), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -134,7 +170,9 @@ class InboundNatRule(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- backend_ip_configurations = ListType(ModelType(NetworkInterfaceIPConfigurationRef), serialize_when_none=False)
+ backend_ip_configurations = ListType(
+ ModelType(NetworkInterfaceIPConfigurationRef), serialize_when_none=False
+ )
target_virtual_machine = ListType(StringType, serialize_when_none=False)
backend_port = IntType(serialize_when_none=False)
enable_floating_ip = BooleanType(serialize_when_none=False)
@@ -144,8 +182,11 @@ class InboundNatRule(Model):
frontend_port = IntType(serialize_when_none=False)
port_mapping_display = StringType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(choices=("All", "Tcp", "Udp"), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -163,11 +204,16 @@ class LoadBalancingRule(Model):
frontend_ip_configuration_display = StringType(serialize_when_none=False)
frontend_port = IntType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- load_distribution = StringType(choices=('Default', 'SourceIP', 'SourceIPProtocol'), serialize_when_none=False)
+ load_distribution = StringType(
+ choices=("Default", "SourceIP", "SourceIPProtocol"), serialize_when_none=False
+ )
load_distribution_display = StringType(serialize_when_none=False)
probe = ModelType(SubResource, serialize_when_none=False)
- protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(choices=("All", "Tcp", "Udp"), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -178,10 +224,15 @@ class OutboundRule(Model):
allocated_outbound_ports = IntType(serialize_when_none=False)
backend_address_pool = ModelType(SubResource, serialize_when_none=False)
enable_tcp_reset = BooleanType(serialize_when_none=False)
- frontend_ip_configurations = ListType(ModelType(SubResource), serialize_when_none=False)
+ frontend_ip_configurations = ListType(
+ ModelType(SubResource), serialize_when_none=False
+ )
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(choices=("All", "Tcp", "Udp"), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -197,7 +248,7 @@ class IpTagRef(Model):
class NatGatewaySkuRef(Model):
- name = StringType(choices=('Standard', None), serialize_when_none=False)
+ name = StringType(choices=("Standard", None), serialize_when_none=False)
class NatGatewayRef(Model):
@@ -206,7 +257,10 @@ class NatGatewayRef(Model):
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
public_ip_addresses = ListType(ModelType(SubResource), serialize_when_none=False)
public_ip_prefixes = ListType(ModelType(SubResource), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
@@ -218,8 +272,8 @@ class NatGatewayRef(Model):
class PublicIPAddressSkuRef(Model):
- name = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
- tier = StringType(choices=('Global', 'Regional'), serialize_when_none=False)
+ name = StringType(choices=("Basic", "Standard"), serialize_when_none=False)
+ tier = StringType(choices=("Global", "Regional"), serialize_when_none=False)
class PublicIPAddressRef(Model):
@@ -232,14 +286,26 @@ class PublicIPAddressRef(Model):
dns_settings = ModelType(PublicIPAddressDnsSettingsRef, serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
ip_address = StringType(serialize_when_none=False)
- ip_configuration = StringType(serialize_when_none=False) # Change to IP Configuration id
+ ip_configuration = StringType(
+ serialize_when_none=False
+ ) # Change to IP Configuration id
ip_tags = ListType(ModelType(IpTagRef), serialize_when_none=False)
# linked_public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
- migration_phase = StringType(choices=('Abort', 'Commit', 'Committed', 'None', 'Prepare'), serialize_when_none=False)
+ migration_phase = StringType(
+ choices=("Abort", "Commit", "Committed", "None", "Prepare"),
+ serialize_when_none=False,
+ )
nat_gateway = ModelType(NatGatewayRef, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- public_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ public_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
sku = ModelType(PublicIPAddressSkuRef, serialize_when_none=False)
@@ -254,12 +320,21 @@ class FrontendIPConfiguration(Model):
name = StringType(serialize_when_none=False)
inbound_nat_pools = ListType(ModelType(InboundNatPool), serialize_when_none=False)
inbound_nat_rules = ListType(ModelType(InboundNatRule), serialize_when_none=False)
- load_balancing_rules = ListType(ModelType(LoadBalancingRule), serialize_when_none=False)
+ load_balancing_rules = ListType(
+ ModelType(LoadBalancingRule), serialize_when_none=False
+ )
outbound_rules = ListType(ModelType(OutboundRule), serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
public_ip_address = ModelType(PublicIPAddressRef, serialize_when_none=False)
public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
@@ -281,8 +356,9 @@ class TrafficAnalyticsConfigurationProperties(Model):
class TrafficAnalyticsProperties(Model):
- network_watcher_flow_analytics_configuration = ModelType(TrafficAnalyticsConfigurationProperties,
- serialize_when_none=False)
+ network_watcher_flow_analytics_configuration = ModelType(
+ TrafficAnalyticsConfigurationProperties, serialize_when_none=False
+ )
class FlowLog(Model):
@@ -291,9 +367,14 @@ class FlowLog(Model):
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
enable = BooleanType(serialize_when_none=False)
- flow_analytics_configuration = ModelType(TrafficAnalyticsProperties, serialize_when_none=False)
+ flow_analytics_configuration = ModelType(
+ TrafficAnalyticsProperties, serialize_when_none=False
+ )
format = ModelType(FlowLogFormatParameters, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
retention_policy = ModelType(RetentionPolicyParameters, serialize_when_none=False)
storage_id = StringType(serialize_when_none=False)
target_resource_guid = StringType(serialize_when_none=False)
@@ -306,7 +387,10 @@ class IPConfigurationProfile(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
type = StringType(serialize_when_none=False)
@@ -316,9 +400,16 @@ class IPConfiguration(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- public_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_address = StringType(serialize_when_none=False) # Change to PublicIPAddress ID
+ public_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_address = StringType(
+ serialize_when_none=False
+ ) # Change to PublicIPAddress ID
subnet = StringType(serialize_when_none=False)
@@ -335,7 +426,10 @@ class ServiceAssociationLink(Model):
link = StringType(serialize_when_none=False)
linked_resource_type = StringType(serialize_when_none=False)
locations = ListType(ModelType(ExtendedLocation), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -344,7 +438,10 @@ class ServiceEndpointPolicyDefinition(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
description = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
service = StringType(serialize_when_none=False)
service_resources = ListType(StringType)
@@ -354,10 +451,14 @@ class ServiceEndpointPolicy(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
- service_endpoint_policy_definitions = ListType(ModelType(ServiceEndpointPolicyDefinition),
- serialize_when_none=False)
+ service_endpoint_policy_definitions = ListType(
+ ModelType(ServiceEndpointPolicyDefinition), serialize_when_none=False
+ )
subnets = ListType(StringType, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -365,7 +466,10 @@ class ServiceEndpointPolicy(Model):
class ServiceEndpointPropertiesFormat(Model):
locations = ListType(StringType, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
service = StringType(serialize_when_none=False)
subnet = StringType(serialize_when_none=False)
@@ -376,7 +480,10 @@ class ResourceNavigationLink(Model):
name = StringType(serialize_when_none=False)
link = StringType(serialize_when_none=False)
linked_resource_type = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -386,9 +493,20 @@ class Route(Model):
name = StringType(serialize_when_none=False)
address_prefix = StringType(serialize_when_none=False)
next_hop_ip_address = StringType(serialize_when_none=False)
- next_hop_type = StringType(choices=('Internet', 'None', 'VirtualAppliance', 'VirtualNetworkGateway', 'VnetLocal'),
- serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ next_hop_type = StringType(
+ choices=(
+ "Internet",
+ "None",
+ "VirtualAppliance",
+ "VirtualNetworkGateway",
+ "VnetLocal",
+ ),
+ serialize_when_none=False,
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
class RouteTable(Model):
@@ -397,7 +515,10 @@ class RouteTable(Model):
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
disable_bgp_route_propagation = BooleanType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
routes = ListType(ModelType(Route), serialize_when_none=False)
subnets = ListType(StringType, default=[], serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
@@ -405,7 +526,8 @@ class RouteTable(Model):
class NatGatewaySku(Model):
- name = StringType(choices=('Standard', None), serialize_when_none=False)
+ name = StringType(choices=("Standard", None), serialize_when_none=False)
+ tier = StringType(serialize_when_none=False)
class PrivateLinkServiceConnectionState(Model):
@@ -419,9 +541,14 @@ class PrivateLinkServiceConnection(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
group_ids = ListType(StringType, serialize_when_none=False)
- private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
+ private_link_service_connection_state = ModelType(
+ PrivateLinkServiceConnectionState, serialize_when_none=False
+ )
private_link_service_id = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
request_message = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -432,9 +559,16 @@ class PrivateLinkServiceIpConfiguration(Model):
name = StringType(serialize_when_none=False)
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
type = StringType(serialize_when_none=False)
@@ -449,12 +583,22 @@ class PrivateEndpointRef(Model):
location = ModelType(ExtendedLocation, serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- custom_dns_configs = ListType(ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False)
- manual_private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection),
- serialize_when_none=False)
- network_interfaces = ListType(StringType(), serialize_when_none=False) # Change to network interfaces id
- private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ custom_dns_configs = ListType(
+ ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False
+ )
+ manual_private_link_service_connections = ListType(
+ ModelType(PrivateLinkServiceConnection), serialize_when_none=False
+ )
+ network_interfaces = ListType(
+ StringType(), serialize_when_none=False
+ ) # Change to network interfaces id
+ private_link_service_connections = ListType(
+ ModelType(PrivateLinkServiceConnection), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = StringType(serialize_when_none=False) # Change to subnet ID
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -466,8 +610,13 @@ class PrivateEndpointConnection(Model):
name = StringType(serialize_when_none=False)
link_identifier = StringType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpointRef)
- private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_link_service_connection_state = ModelType(
+ PrivateLinkServiceConnectionState, serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -480,11 +629,22 @@ class PrivateLinkService(Model):
auto_approval = ModelType(AutoApproval, serialize_when_none=False)
enable_proxy_protocol = BooleanType(serialize_when_none=False)
fqdns = ListType(StringType, serialize_when_none=False)
- ip_configurations = ListType(ModelType(PrivateLinkServiceIpConfiguration), serialize_when_none=False)
- loadBalancer_frontend_ip_configurations = ListType(ModelType(FrontendIPConfiguration), serialize_when_none=False)
- network_interfaces = ListType(StringType, serialize_when_none=False) # Change to network interfaces' id
- private_endpoint_connections = ListType(ModelType(PrivateEndpointConnection), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ ip_configurations = ListType(
+ ModelType(PrivateLinkServiceIpConfiguration), serialize_when_none=False
+ )
+ loadBalancer_frontend_ip_configurations = ListType(
+ ModelType(FrontendIPConfiguration), serialize_when_none=False
+ )
+ network_interfaces = ListType(
+ StringType, serialize_when_none=False
+ ) # Change to network interfaces' id
+ private_endpoint_connections = ListType(
+ ModelType(PrivateEndpointConnection), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
visibility = ModelType(Visibility, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -497,8 +657,8 @@ class PublicIPAddressDnsSettings(Model):
class PublicIPAddressSku(Model):
- name = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
- tier = StringType(choices=('Global', 'Regional'), serialize_when_none=False)
+ name = StringType(choices=("Basic", "Standard"), serialize_when_none=False)
+ tier = StringType(choices=("Global", "Regional"), serialize_when_none=False)
class ReferencedPublicIPAddress(Model):
@@ -515,12 +675,21 @@ class PublicIPPrefix(Model):
ip_prefix = StringType(serialize_when_none=False)
ip_tags = ListType(ModelType(IpTag), serialize_when_none=False)
load_balancer_frontend_ip_configuration = ModelType(SubResource)
- nat_gateway = StringType(serialize_when_none=False), # Change to NAT id
- prefix_length = IntType(serialize_when_none=False),
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- public_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- public_ip_addresses = ListType(ModelType(ReferencedPublicIPAddress), serialize_when_none=False)
+ nat_gateway = StringType(serialize_when_none=False)  # Change to NAT id
+ prefix_length = IntType(serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ public_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ public_ip_addresses = ListType(
+ ModelType(ReferencedPublicIPAddress), serialize_when_none=False
+ )
resource_guid = StringType(serialize_when_none=False)
sku = ModelType(PublicIPAddressSku, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
@@ -530,7 +699,7 @@ class PublicIPPrefix(Model):
class PublicIPPrefixSku(Model):
name = StringType(serialize_when_none=False)
- tier = StringType(choices=('Global', 'Regional'), serialize_when_none=False)
+ tier = StringType(choices=("Global", "Regional"), serialize_when_none=False)
class PublicIPAddress(Model):
@@ -546,10 +715,20 @@ class PublicIPAddress(Model):
ip_configuration = ModelType(IPConfiguration, serialize_when_none=False)
ip_tags = ListType(ModelType(IpTag), serialize_when_none=False)
# linked_public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
- migration_phase = StringType(choices=('Abort', 'Commit', 'Committed', 'None', 'Prepare'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- public_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
+ migration_phase = StringType(
+ choices=("Abort", "Commit", "Committed", "None", "Prepare"),
+ serialize_when_none=False,
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ public_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
sku = ModelType(PublicIPPrefixSku, serialize_when_none=False)
@@ -563,7 +742,10 @@ class ApplicationSecurityGroup(Model):
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -573,20 +755,29 @@ class SecurityRule(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- access = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
+ access = StringType(choices=("Allow", "Deny"), serialize_when_none=False)
description = StringType(serialize_when_none=False)
destination_address_prefix = StringType(serialize_when_none=False)
destination_address_prefixes = ListType(StringType, serialize_when_none=False)
- destination_application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
+ destination_application_security_groups = ListType(
+ ModelType(ApplicationSecurityGroup), serialize_when_none=False
+ )
destination_port_range = StringType(serialize_when_none=False)
destination_port_ranges = ListType(StringType, serialize_when_none=False)
- direction = StringType(choices=('Inbound', 'Outbound'), serialize_when_none=False)
+ direction = StringType(choices=("Inbound", "Outbound"), serialize_when_none=False)
priority = IntType(serialize_when_none=False)
- protocol = StringType(choices=('*', 'Ah', 'Esp', 'Icmp', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(
+ choices=("*", "Ah", "Esp", "Icmp", "Tcp", "Udp"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
source_address_prefix = StringType(serialize_when_none=False)
source_address_prefixes = ListType(StringType, serialize_when_none=False)
- source_application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
+ source_application_security_groups = ListType(
+ ModelType(ApplicationSecurityGroup), serialize_when_none=False
+ )
source_port_range = StringType(serialize_when_none=False)
source_port_ranges = ListType(StringType, serialize_when_none=False)
@@ -603,14 +794,25 @@ class NetworkInterfaceIPConfiguration(Model): # ip configuration in a network i
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
+ application_security_groups = ListType(
+ ModelType(ApplicationSecurityGroup), serialize_when_none=False
+ )
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- private_link_connection_properties = ModelType(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
- serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ private_link_connection_properties = ModelType(
+ NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
+ serialize_when_none=False,
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
virtual_network_taps = ListType(ModelType(SubResource), serialize_when_none=False)
@@ -620,7 +822,10 @@ class NetworkInterfaceTapConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -628,11 +833,18 @@ class NetworkSecurityGroup(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
- name = StringType(default='-', serialize_when_none=False)
- default_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
+ name = StringType(default="-", serialize_when_none=False)
+ default_security_rules = ListType(
+ ModelType(SecurityRule), serialize_when_none=False
+ )
flow_logs = ListType(ModelType(FlowLog), serialize_when_none=False)
- network_interfaces = StringType(serialize_when_none=False) # Change to Network interfaces' Id
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ network_interfaces = StringType(
+ serialize_when_none=False
+ ) # Change to Network interfaces' Id
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
subnets = ListType(StringType, serialize_when_none=False) # Change to Subnet IDs
@@ -651,17 +863,27 @@ class NetworkInterface(Model):
enable_accelerated_networking = BooleanType(serialize_when_none=False)
enable_ip_forwarding = BooleanType(serialize_when_none=False)
hosted_workloads = ListType(StringType, serialize_when_none=False)
- ip_configurations = ListType(ModelType(NetworkInterfaceIPConfiguration), serialize_when_none=False)
+ ip_configurations = ListType(
+ ModelType(NetworkInterfaceIPConfiguration), serialize_when_none=False
+ )
mac_address = StringType(serialize_when_none=False)
- migration_phase = StringType(choices=('Abort', 'Commit', 'Committed', 'None', 'Prepare'), serialize_when_none=False)
- nic_type = StringType(choices=('Elastic', 'Standard'), serialize_when_none=False)
+ migration_phase = StringType(
+ choices=("Abort", "Commit", "Committed", "None", "Prepare"),
+ serialize_when_none=False,
+ )
+ nic_type = StringType(choices=("Elastic", "Standard"), serialize_when_none=False)
network_security_group = ModelType(NetworkSecurityGroup, serialize_when_none=False)
primary = BooleanType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpointRef, serialize_when_none=False)
private_link_service = ModelType(PrivateLinkService, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
- tap_configurations = ListType(ModelType(NetworkInterfaceTapConfiguration), serialize_when_none=False)
+ tap_configurations = ListType(
+ ModelType(NetworkInterfaceTapConfiguration), serialize_when_none=False
+ )
virtual_machine = ModelType(SubResource, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -673,12 +895,22 @@ class PrivateEndpoint(Model):
location = ModelType(ExtendedLocation, serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- custom_dns_configs = ListType(ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False)
- manual_private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection),
- serialize_when_none=False)
- network_interfaces = ListType(ModelType(NetworkInterface), serialize_when_none=False)
- private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ custom_dns_configs = ListType(
+ ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False
+ )
+ manual_private_link_service_connections = ListType(
+ ModelType(PrivateLinkServiceConnection), serialize_when_none=False
+ )
+ network_interfaces = ListType(
+ ModelType(NetworkInterface), serialize_when_none=False
+ )
+ private_link_service_connections = ListType(
+ ModelType(PrivateLinkServiceConnection), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = StringType(serialize_when_none=False)
resource_group = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
@@ -687,19 +919,23 @@ class PrivateEndpoint(Model):
###### Firewall Classes ######
class AzureFirewallRCAction(Model):
- type = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
+ type = StringType(choices=("Allow", "Deny"), serialize_when_none=False)
class AzureFirewallApplicationRuleProtocol(Model):
port = IntType(serialize_when_none=False)
- protocol_type = StringType(choices=('Http', 'Https', 'Mssql'), serialize_when_none=False)
+ protocol_type = StringType(
+ choices=("Http", "Https", "Mssql"), serialize_when_none=False
+ )
class AzureFirewallApplicationRule(Model):
description = StringType(serialize_when_none=False)
fqdn_tags = ListType(StringType, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- protocols = ListType(ModelType(AzureFirewallApplicationRuleProtocol), serialize_when_none=False)
+ protocols = ListType(
+ ModelType(AzureFirewallApplicationRuleProtocol), serialize_when_none=False
+ )
source_addresses = ListType(StringType, serialize_when_none=False)
source_ip_groups = ListType(StringType, serialize_when_none=False)
target_fqdns = ListType(StringType, serialize_when_none=False)
@@ -711,7 +947,10 @@ class AzureFirewallApplicationRuleCollection(Model):
name = StringType(serialize_when_none=False)
action = ModelType(AzureFirewallRCAction, serialize_when_none=False)
priority = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
rules = ListType(ModelType(AzureFirewallApplicationRule), serialize_when_none=False)
@@ -720,7 +959,10 @@ class AzureFirewallIPConfiguration(Model):
id = StringType()
name = StringType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
public_ip_address = ModelType(SubResource, serialize_when_none=False)
subnet = ModelType(SubResource, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -731,7 +973,9 @@ class AzureFirewallPublicIPAddress(Model):
class HubPublicIPAddresses(Model):
- address = ListType(ModelType(AzureFirewallPublicIPAddress), serialize_when_none=False)
+ address = ListType(
+ ModelType(AzureFirewallPublicIPAddress), serialize_when_none=False
+ )
count = IntType(serialize_when_none=False)
@@ -777,9 +1021,12 @@ class AzureFirewallNatRuleCollection(Model):
etag = StringType()
id = StringType()
name = StringType(serialize_when_none=False)
- action = StringType(choices=('Dnat', 'Snat'), serialize_when_none=False)
+ action = StringType(choices=("Dnat", "Snat"), serialize_when_none=False)
priority = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
rules = ListType(ModelType(AzureFirewallNatRule), serialize_when_none=False)
@@ -789,13 +1036,16 @@ class AzureFirewallNetworkRuleCollection(Model):
name = StringType(serialize_when_none=False)
action = ModelType(AzureFirewallRCAction, serialize_when_none=False)
priority = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
rules = ListType(ModelType(AzureFirewallNetworkRule), serialize_when_none=False)
class AzureFirewallSku(Model):
- name = StringType(choices=('AZFW_Hub', 'AZFW_VNet'), serialize_when_none=False)
- tier = StringType(choices=('Premium', 'Standard'), serialize_when_none=False)
+ name = StringType(choices=("AZFW_Hub", "AZFW_VNet"), serialize_when_none=False)
+ tier = StringType(choices=("Premium", "Standard"), serialize_when_none=False)
class AzureFirewall(Model):
@@ -804,17 +1054,32 @@ class AzureFirewall(Model):
location = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
subnet = StringType(serialize_when_none=False)
- application_rule_collections = ListType(ModelType(AzureFirewallApplicationRuleCollection), serialize_when_none=False)
+ application_rule_collections = ListType(
+ ModelType(AzureFirewallApplicationRuleCollection), serialize_when_none=False
+ )
firewall_policy = ModelType(SubResource, serialize_when_none=False)
hub_ip_addresses = ModelType(HubIPAddresses, serialize_when_none=False)
- ip_configurations = ListType(ModelType(AzureFirewallIPConfiguration), serialize_when_none=False)
+ ip_configurations = ListType(
+ ModelType(AzureFirewallIPConfiguration), serialize_when_none=False
+ )
ip_groups = ListType(ModelType(AzureFirewallIpGroups), serialize_when_none=False)
- management_ip_configuration = ModelType(AzureFirewallIPConfiguration, serialize_when_none=False)
- nat_rule_collections = ListType(ModelType(AzureFirewallNatRuleCollection), serialize_when_none=False)
- network_rule_collections = ListType(ModelType(AzureFirewallNetworkRuleCollection), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ management_ip_configuration = ModelType(
+ AzureFirewallIPConfiguration, serialize_when_none=False
+ )
+ nat_rule_collections = ListType(
+ ModelType(AzureFirewallNatRuleCollection), serialize_when_none=False
+ )
+ network_rule_collections = ListType(
+ ModelType(AzureFirewallNetworkRuleCollection), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
sku = ModelType(AzureFirewallSku, serialize_when_none=False)
- threat_intel_mode = StringType(choices=('Alert', 'Deny', 'Off'), serialize_when_none=False)
+ threat_intel_mode = StringType(
+ choices=("Alert", "Deny", "Off"), serialize_when_none=False
+ )
virtual_hub = ModelType(SubResource, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -828,24 +1093,43 @@ class Subnet(Model):
virtual_network = StringType(serialize_when_none=False)
address_prefix = StringType(serialize_when_none=False)
address_prefixes = ListType(StringType, serialize_when_none=False)
- application_gateway_ip_configurations = ModelType(ApplicationGatewayIPConfiguration, serialize_when_none=False)
+ application_gateway_ip_configurations = ModelType(
+ ApplicationGatewayIPConfiguration, serialize_when_none=False
+ )
delegations = ListType(ModelType(Delegation), serialize_when_none=False)
ip_allocations = ListType(ModelType(SubResource), serialize_when_none=False)
- ip_configuration_profiles = ListType(ModelType(IPConfigurationProfile), serialize_when_none=False)
+ ip_configuration_profiles = ListType(
+ ModelType(IPConfigurationProfile), serialize_when_none=False
+ )
ip_configurations = ListType(ModelType(IPConfiguration), serialize_when_none=False)
azure_firewall = ListType(ModelType(AzureFirewall), serialize_when_none=False)
nat_gateway = ModelType(SubResource, serialize_when_none=False)
network_security_group = ModelType(NetworkSecurityGroup, serialize_when_none=False)
- private_endpoint_network_policies = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
+ private_endpoint_network_policies = StringType(
+ choices=("Disabled", "Enabled"), serialize_when_none=False
+ )
private_endpoints = ListType(ModelType(PrivateEndpoint), serialize_when_none=False)
- private_link_service_network_policies = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_link_service_network_policies = StringType(
+ choices=("Disabled", "Enabled"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
purpose = StringType(serialize_when_none=False)
- resource_navigation_links = ListType(ModelType(ResourceNavigationLink, serialize_when_none=False))
+ resource_navigation_links = ListType(
+ ModelType(ResourceNavigationLink, serialize_when_none=False)
+ )
route_table = ModelType(RouteTable, serialize_when_none=False)
- service_association_links = ListType(ModelType(ServiceAssociationLink), serialize_when_none=False)
- service_endpoint_policies = ListType(ModelType(ServiceEndpointPolicy), serialize_when_none=False)
- service_endpoints = ListType(ModelType(ServiceEndpointPropertiesFormat), serialize_when_none=False)
+ service_association_links = ListType(
+ ModelType(ServiceAssociationLink), serialize_when_none=False
+ )
+ service_endpoint_policies = ListType(
+ ModelType(ServiceEndpointPolicy), serialize_when_none=False
+ )
+ service_endpoints = ListType(
+ ModelType(ServiceEndpointPropertiesFormat), serialize_when_none=False
+ )
type = StringType(serialize_when_none=False)
@@ -862,10 +1146,17 @@ class VirtualNetworkPeering(Model):
allow_gateway_transit = BooleanType(serialize_when_none=False)
allow_virtual_network_access = BooleanType(serialize_when_none=False)
do_not_verify_remote_gateways = BooleanType(serialize_when_none=False)
- peering_state = StringType(choices=('Connected', 'Disconnected', 'Initiated'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ peering_state = StringType(
+ choices=("Connected", "Disconnected", "Initiated"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
remote_address_space = ModelType(AddressSpace, serialize_when_none=False)
- remote_bgp_communities = ModelType(VirtualNetworkBgpCommunities, serialize_when_none=False)
+ remote_bgp_communities = ModelType(
+ VirtualNetworkBgpCommunities, serialize_when_none=False
+ )
remote_virtual_network = ModelType(SubResource, serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
use_remote_gateways = BooleanType(serialize_when_none=False)
@@ -878,8 +1169,13 @@ class NatGateway(AzureCloudService): # Main class
name = StringType(serialize_when_none=False)
location = StringType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_addresses = ListType(ModelType(PublicIPAddress), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_addresses = ListType(
+ ModelType(PublicIPAddress), serialize_when_none=False
+ )
public_ip_addresses_count = IntType(default=0)
public_ip_prefixes = ListType(ModelType(PublicIPPrefix), serialize_when_none=False)
public_ip_prefixes_count = IntType(default=0)
diff --git a/src/spaceone/inventory/model/network_security_groups/data.py b/src/spaceone/inventory/model/network_security_groups/data.py
index d6760648..792b1841 100644
--- a/src/spaceone/inventory/model/network_security_groups/data.py
+++ b/src/spaceone/inventory/model/network_security_groups/data.py
@@ -22,7 +22,10 @@ class ApplicationSecurityGroup(Model):
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -32,20 +35,29 @@ class SecurityRule(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- access = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
+ access = StringType(choices=("Allow", "Deny"), serialize_when_none=False)
description = StringType(serialize_when_none=False)
destination_address_prefix = StringType(serialize_when_none=False)
destination_address_prefixes = ListType(StringType, serialize_when_none=False)
- destination_application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
+ destination_application_security_groups = ListType(
+ ModelType(ApplicationSecurityGroup), serialize_when_none=False
+ )
destination_port_range = StringType(serialize_when_none=False)
destination_port_ranges = ListType(StringType, serialize_when_none=False)
- direction = StringType(choices=('Inbound', 'Outbound'), serialize_when_none=False)
+ direction = StringType(choices=("Inbound", "Outbound"), serialize_when_none=False)
priority = IntType(serialize_when_none=False)
- protocol = StringType(choices=('*', 'Ah', 'Esp', 'Icmp', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(
+ choices=("*", "Ah", "Esp", "Icmp", "Tcp", "Udp"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
source_address_prefix = StringType(serialize_when_none=False)
source_address_prefixes = ListType(StringType, serialize_when_none=False)
- source_application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
+ source_application_security_groups = ListType(
+ ModelType(ApplicationSecurityGroup), serialize_when_none=False
+ )
source_port_range = StringType(serialize_when_none=False)
source_port_ranges = ListType(StringType, serialize_when_none=False)
@@ -59,8 +71,9 @@ class TrafficAnalyticsConfigurationProperties(Model):
class TrafficAnalyticsProperties(Model):
- network_watcher_flow_analytics_configuration = ModelType(TrafficAnalyticsConfigurationProperties,
- serialize_when_none=False)
+ network_watcher_flow_analytics_configuration = ModelType(
+ TrafficAnalyticsConfigurationProperties, serialize_when_none=False
+ )
class FlowLogFormatType(Model):
@@ -83,9 +96,14 @@ class FlowLog(Model):
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
enable = BooleanType(serialize_when_none=False)
- flow_analytics_configuration = ModelType(TrafficAnalyticsProperties, serialize_when_none=False)
+ flow_analytics_configuration = ModelType(
+ TrafficAnalyticsProperties, serialize_when_none=False
+ )
format = ModelType(FlowLogFormatParameters, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
retention_policy = ModelType(RetentionPolicyParameters, serialize_when_none=False)
storage_id = StringType(serialize_when_none=False)
target_resource_guid = StringType(serialize_when_none=False)
@@ -109,8 +127,8 @@ class NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties(Model):
class PublicIPAddressSku(Model):
- name = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
- tier = StringType(choices=('Global', 'Regional'), serialize_when_none=False)
+ name = StringType(choices=("Basic", "Standard"), serialize_when_none=False)
+ tier = StringType(choices=("Global", "Regional"), serialize_when_none=False)
class IpTag(Model):
@@ -121,7 +139,9 @@ class IpTag(Model):
class DdosSettings(Model):
ddos_custom_policy = ModelType(SubResource, serialize_when_none=False)
protected_ip = BooleanType(serialize_when_none=False)
- protection_coverage = StringType(choices=('Basic', 'Standard'), serialize_when_none=False)
+ protection_coverage = StringType(
+ choices=("Basic", "Standard"), serialize_when_none=False
+ )
class PublicIPAddressDnsSettings(Model):
@@ -135,14 +155,21 @@ class IPConfiguration(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_address = StringType(serialize_when_none=False) # Change to Public IP Address's ID
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_address = StringType(
+ serialize_when_none=False
+ ) # Change to Public IP Address's ID
subnet = StringType(serialize_when_none=False)
class NatGatewaySku(Model):
- name = StringType(choices=('Standard', None), serialize_when_none=False)
+ name = StringType(choices=("Standard", None), serialize_when_none=False)
class NatGateway(Model):
@@ -151,7 +178,10 @@ class NatGateway(Model):
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
public_ip_addresses = ListType(ModelType(SubResource), serialize_when_none=False)
public_ip_prefixes = ListType(ModelType(SubResource), serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
@@ -175,11 +205,21 @@ class PublicIPAddress(Model):
ip_configuration = ModelType(IPConfiguration, serialize_when_none=False)
ip_tags = ListType(ModelType(IpTag), serialize_when_none=False)
# linked_public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
- migration_phase = StringType(choices=('Abort', 'Commit', 'Committed', 'None', 'Prepare'), serialize_when_none=False)
+ migration_phase = StringType(
+ choices=("Abort", "Commit", "Committed", "None", "Prepare"),
+ serialize_when_none=False,
+ )
nat_gateway = ModelType(NatGateway, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- public_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ public_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
resource_guid = StringType(serialize_when_none=False)
sku = ModelType(PublicIPAddressSku, serialize_when_none=False)
@@ -192,14 +232,25 @@ class NetworkInterfaceIPConfiguration(Model): # ip configuration in a network i
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- application_security_groups = ListType(ModelType(ApplicationSecurityGroup), serialize_when_none=False)
+ application_security_groups = ListType(
+ ModelType(ApplicationSecurityGroup), serialize_when_none=False
+ )
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- private_link_connection_properties = ModelType(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
- serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ private_link_connection_properties = ModelType(
+ NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
+ serialize_when_none=False,
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
public_ip_address = ModelType(PublicIPAddress, serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
virtual_network_taps = ListType(ModelType(SubResource), serialize_when_none=False)
@@ -209,11 +260,18 @@ class NetworkSecurityGroupRef(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
- name = StringType(default='-', serialize_when_none=False)
- default_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
+ name = StringType(default="-", serialize_when_none=False)
+ default_security_rules = ListType(
+ ModelType(SecurityRule), serialize_when_none=False
+ )
flow_logs = ListType(ModelType(FlowLog), serialize_when_none=False)
- network_interfaces = StringType(serialize_when_none=False) # Change to Network interfaces' Id
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ network_interfaces = StringType(
+ serialize_when_none=False
+ ) # Change to Network interfaces' Id
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
subnets = ListType(StringType, serialize_when_none=False) # Change to Subnet IDs
@@ -232,9 +290,14 @@ class PrivateLinkServiceConnection(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
group_ids = ListType(StringType, serialize_when_none=False)
- private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
+ private_link_service_connection_state = ModelType(
+ PrivateLinkServiceConnectionState, serialize_when_none=False
+ )
private_link_service_id = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
request_message = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -250,12 +313,22 @@ class PrivateEndpointRef(Model):
location = ModelType(ExtendedLocation, serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- custom_dns_configs = ListType(ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False)
- manual_private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection),
- serialize_when_none=False)
- network_interfaces = ListType(StringType(), serialize_when_none=False) # Change to network interfaces id
- private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ custom_dns_configs = ListType(
+ ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False
+ )
+ manual_private_link_service_connections = ListType(
+ ModelType(PrivateLinkServiceConnection), serialize_when_none=False
+ )
+ network_interfaces = ListType(
+ StringType(), serialize_when_none=False
+ ) # Change to network interfaces id
+ private_link_service_connections = ListType(
+ ModelType(PrivateLinkServiceConnection), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = StringType(serialize_when_none=False) # Change to subnet ID
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -271,9 +344,16 @@ class PrivateLinkServiceIpConfiguration(Model):
name = StringType(serialize_when_none=False)
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
type = StringType(serialize_when_none=False)
@@ -289,8 +369,11 @@ class InboundNatPool(Model):
frontend_port_range_end = IntType(serialize_when_none=False)
frontend_port_range_start = IntType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(choices=("All", "Tcp", "Udp"), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -299,26 +382,44 @@ class ApplicationSecurityGroupRef(Model):
id = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
-class NetworkInterfaceIPConfigurationRef(Model): # ip configuration in a network interface
+class NetworkInterfaceIPConfigurationRef(
+ Model
+): # ip configuration in a network interface
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- application_security_groups = ListType(ModelType(ApplicationSecurityGroupRef), serialize_when_none=False)
+ application_security_groups = ListType(
+ ModelType(ApplicationSecurityGroupRef), serialize_when_none=False
+ )
primary = BooleanType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- private_link_connection_properties = ModelType(NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
- serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
- public_ip_address = StringType(default='', serialize_when_none=False) # Change Public IP Address to id
- subnet = StringType(default='', serialize_when_none=False) # Change Subnet to id
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ private_link_connection_properties = ModelType(
+ NetworkInterfaceIPConfigurationPrivateLinkConnectionProperties,
+ serialize_when_none=False,
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
+ public_ip_address = StringType(
+ default="", serialize_when_none=False
+ ) # Change Public IP Address to id
+ subnet = StringType(default="", serialize_when_none=False) # Change Subnet to id
virtual_network_taps = ListType(ModelType(SubResource), serialize_when_none=False)
@@ -326,7 +427,9 @@ class InboundNatRule(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- backend_ip_configurations = ListType(ModelType(NetworkInterfaceIPConfigurationRef), serialize_when_none=False)
+ backend_ip_configurations = ListType(
+ ModelType(NetworkInterfaceIPConfigurationRef), serialize_when_none=False
+ )
target_virtual_machine = ListType(StringType, serialize_when_none=False)
backend_port = IntType(serialize_when_none=False)
enable_floating_ip = BooleanType(serialize_when_none=False)
@@ -336,8 +439,11 @@ class InboundNatRule(Model):
frontend_port = IntType(serialize_when_none=False)
port_mapping_display = StringType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(choices=("All", "Tcp", "Udp"), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -355,11 +461,16 @@ class LoadBalancingRule(Model):
frontend_ip_configuration_display = StringType(serialize_when_none=False)
frontend_port = IntType(serialize_when_none=False)
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- load_distribution = StringType(choices=('Default', 'SourceIP', 'SourceIPProtocol'), serialize_when_none=False)
+ load_distribution = StringType(
+ choices=("Default", "SourceIP", "SourceIPProtocol"), serialize_when_none=False
+ )
load_distribution_display = StringType(serialize_when_none=False)
probe = ModelType(SubResource, serialize_when_none=False)
- protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(choices=("All", "Tcp", "Udp"), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -370,10 +481,15 @@ class OutboundRule(Model):
allocated_outbound_ports = IntType(serialize_when_none=False)
backend_address_pool = ModelType(SubResource, serialize_when_none=False)
enable_tcp_reset = BooleanType(serialize_when_none=False)
- frontend_ip_configurations = ListType(ModelType(SubResource), serialize_when_none=False)
+ frontend_ip_configurations = ListType(
+ ModelType(SubResource), serialize_when_none=False
+ )
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- protocol = StringType(choices=('All', 'Tcp', 'Udp'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ protocol = StringType(choices=("All", "Tcp", "Udp"), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -383,12 +499,21 @@ class FrontendIPConfiguration(Model):
name = StringType(serialize_when_none=False)
inbound_nat_pools = ListType(ModelType(InboundNatPool), serialize_when_none=False)
inbound_nat_rules = ListType(ModelType(InboundNatRule), serialize_when_none=False)
- load_balancing_rules = ListType(ModelType(LoadBalancingRule), serialize_when_none=False)
+ load_balancing_rules = ListType(
+ ModelType(LoadBalancingRule), serialize_when_none=False
+ )
outbound_rules = ListType(ModelType(OutboundRule), serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), serialize_when_none=False)
- private_ip_allocation_method = StringType(choices=('Dynamic', 'Static'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), serialize_when_none=False
+ )
+ private_ip_allocation_method = StringType(
+ choices=("Dynamic", "Static"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
public_ip_address = StringType(serialize_when_none=False)
public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
@@ -402,8 +527,13 @@ class PrivateEndpointConnection(Model):
name = StringType(serialize_when_none=False)
link_identifier = StringType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpointRef)
- private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionState, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_link_service_connection_state = ModelType(
+ PrivateLinkServiceConnectionState, serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -420,11 +550,22 @@ class PrivateLinkService(Model):
auto_approval = ModelType(AutoApproval, serialize_when_none=False)
enable_proxy_protocol = BooleanType(serialize_when_none=False)
fqdns = ListType(StringType, serialize_when_none=False)
- ip_configurations = ListType(ModelType(PrivateLinkServiceIpConfiguration), serialize_when_none=False)
- loadBalancer_frontend_ip_configurations = ListType(ModelType(FrontendIPConfiguration), serialize_when_none=False)
- network_interfaces = ListType(StringType, serialize_when_none=False) # Change to network interfaces' id
- private_endpoint_connections = ListType(ModelType(PrivateEndpointConnection), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ ip_configurations = ListType(
+ ModelType(PrivateLinkServiceIpConfiguration), serialize_when_none=False
+ )
+ loadBalancer_frontend_ip_configurations = ListType(
+ ModelType(FrontendIPConfiguration), serialize_when_none=False
+ )
+ network_interfaces = ListType(
+ StringType, serialize_when_none=False
+ ) # Change to network interfaces' id
+ private_endpoint_connections = ListType(
+ ModelType(PrivateEndpointConnection), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
visibility = ModelType(Visibility, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -434,7 +575,10 @@ class NetworkInterfaceTapConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -449,21 +593,33 @@ class NetworkInterface(Model):
enable_accelerated_networking = BooleanType(serialize_when_none=False)
enable_ip_forwarding = BooleanType(serialize_when_none=False)
hosted_workloads = ListType(StringType, serialize_when_none=False)
- ip_configurations = ListType(ModelType(NetworkInterfaceIPConfiguration), serialize_when_none=False)
+ ip_configurations = ListType(
+ ModelType(NetworkInterfaceIPConfiguration), serialize_when_none=False
+ )
private_ip_address = StringType(serialize_when_none=False)
public_ip_address = StringType(serialize_when_none=False)
mac_address = StringType(serialize_when_none=False)
- migration_phase = StringType(choices=('Abort', 'Commit', 'Committed', 'None', 'Prepare'), serialize_when_none=False)
- nic_type = StringType(choices=('Elastic', 'Standard'), serialize_when_none=False)
- network_security_group = ModelType(NetworkSecurityGroupRef, serialize_when_none=False)
+ migration_phase = StringType(
+ choices=("Abort", "Commit", "Committed", "None", "Prepare"),
+ serialize_when_none=False,
+ )
+ nic_type = StringType(choices=("Elastic", "Standard"), serialize_when_none=False)
+ network_security_group = ModelType(
+ NetworkSecurityGroupRef, serialize_when_none=False
+ )
primary = BooleanType(serialize_when_none=False)
private_endpoint = ModelType(PrivateEndpointRef, serialize_when_none=False)
private_link_service = ModelType(PrivateLinkService, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
- tap_configurations = ListType(ModelType(NetworkInterfaceTapConfiguration), serialize_when_none=False)
+ tap_configurations = ListType(
+ ModelType(NetworkInterfaceTapConfiguration), serialize_when_none=False
+ )
virtual_machine = ModelType(SubResource, serialize_when_none=False)
- virtual_machine_display = StringType(default='-')
+ virtual_machine_display = StringType(default="-")
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -471,7 +627,10 @@ class NetworkInterface(Model):
### Subnet Class ###
class ServiceEndpointPropertiesFormat(Model):
locations = ListType(StringType, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
service = StringType(serialize_when_none=False)
subnet = StringType(serialize_when_none=False)
@@ -480,7 +639,10 @@ class ApplicationGatewayIPConfiguration(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = ModelType(SubResource, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -488,9 +650,12 @@ class ApplicationGatewayIPConfiguration(Model):
class Delegation(Model):
etag = StringType(serialize_when_none=False)
id = StringType()
- name = StringType(default='-', serialize_when_none=False)
+ name = StringType(default="-", serialize_when_none=False)
actions = ListType(StringType, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
service_name = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -499,25 +664,32 @@ class IPConfigurationProfile(Model):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = StringType(serialize_when_none=False) # Change to Subnet ID
type = StringType(serialize_when_none=False)
class AzureFirewallRCAction(Model):
- type = StringType(choices=('Allow', 'Deny'), serialize_when_none=False)
+ type = StringType(choices=("Allow", "Deny"), serialize_when_none=False)
class AzureFirewallApplicationRuleProtocol(Model):
port = IntType(serialize_when_none=False)
- protocol_type = StringType(choices=('Http', 'Https', 'Mssql'), serialize_when_none=False)
+ protocol_type = StringType(
+ choices=("Http", "Https", "Mssql"), serialize_when_none=False
+ )
class AzureFirewallApplicationRule(Model):
description = StringType(serialize_when_none=False)
fqdn_tags = ListType(StringType, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- protocols = ListType(ModelType(AzureFirewallApplicationRuleProtocol), serialize_when_none=False)
+ protocols = ListType(
+ ModelType(AzureFirewallApplicationRuleProtocol), serialize_when_none=False
+ )
source_addresses = ListType(StringType, serialize_when_none=False)
source_ip_groups = ListType(StringType, serialize_when_none=False)
target_fqdns = ListType(StringType, serialize_when_none=False)
@@ -529,7 +701,10 @@ class AzureFirewallApplicationRuleCollection(Model):
name = StringType(serialize_when_none=False)
action = ModelType(AzureFirewallRCAction, serialize_when_none=False)
priority = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
rules = ListType(ModelType(AzureFirewallApplicationRule), serialize_when_none=False)
@@ -538,7 +713,9 @@ class AzureFirewallPublicIPAddress(Model):
class HubPublicIPAddresses(Model):
- address = ListType(ModelType(AzureFirewallPublicIPAddress), serialize_when_none=False)
+ address = ListType(
+ ModelType(AzureFirewallPublicIPAddress), serialize_when_none=False
+ )
count = IntType(serialize_when_none=False)
@@ -552,7 +729,10 @@ class AzureFirewallIPConfiguration(Model):
id = StringType()
name = StringType(serialize_when_none=False)
private_ip_address = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
public_ip_address = ModelType(SubResource, serialize_when_none=False)
subnet = ModelType(SubResource, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -580,9 +760,12 @@ class AzureFirewallNatRuleCollection(Model):
etag = StringType()
id = StringType()
name = StringType(serialize_when_none=False)
- action = StringType(choices=('Dnat', 'Snat'), serialize_when_none=False)
+ action = StringType(choices=("Dnat", "Snat"), serialize_when_none=False)
priority = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
rules = ListType(ModelType(AzureFirewallNatRule), serialize_when_none=False)
@@ -607,13 +790,16 @@ class AzureFirewallNetworkRuleCollection(Model):
name = StringType(serialize_when_none=False)
action = ModelType(AzureFirewallRCAction, serialize_when_none=False)
priority = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
rules = ListType(ModelType(AzureFirewallNetworkRule), serialize_when_none=False)
class AzureFirewallSku(Model):
- name = StringType(choices=('AZFW_Hub', 'AZFW_VNet'), serialize_when_none=False)
- tier = StringType(choices=('Premium', 'Standard'), serialize_when_none=False)
+ name = StringType(choices=("AZFW_Hub", "AZFW_VNet"), serialize_when_none=False)
+ tier = StringType(choices=("Premium", "Standard"), serialize_when_none=False)
class AzureFirewall(Model):
@@ -622,17 +808,32 @@ class AzureFirewall(Model):
location = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
subnet = StringType(serialize_when_none=False)
- application_rule_collections = ListType(ModelType(AzureFirewallApplicationRuleCollection), serialize_when_none=False)
+ application_rule_collections = ListType(
+ ModelType(AzureFirewallApplicationRuleCollection), serialize_when_none=False
+ )
firewall_policy = ModelType(SubResource, serialize_when_none=False)
hub_ip_addresses = ModelType(HubIPAddresses, serialize_when_none=False)
- ip_configurations = ListType(ModelType(AzureFirewallIPConfiguration), serialize_when_none=False)
+ ip_configurations = ListType(
+ ModelType(AzureFirewallIPConfiguration), serialize_when_none=False
+ )
ip_groups = ListType(ModelType(AzureFirewallIpGroups), serialize_when_none=False)
- management_ip_configuration = ModelType(AzureFirewallIPConfiguration, serialize_when_none=False)
- nat_rule_collections = ListType(ModelType(AzureFirewallNatRuleCollection), serialize_when_none=False)
- network_rule_collections = ListType(ModelType(AzureFirewallNetworkRuleCollection), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ management_ip_configuration = ModelType(
+ AzureFirewallIPConfiguration, serialize_when_none=False
+ )
+ nat_rule_collections = ListType(
+ ModelType(AzureFirewallNatRuleCollection), serialize_when_none=False
+ )
+ network_rule_collections = ListType(
+ ModelType(AzureFirewallNetworkRuleCollection), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
sku = ModelType(AzureFirewallSku, serialize_when_none=False)
- threat_intel_mode = StringType(choices=('Alert', 'Deny', 'Off'), serialize_when_none=False)
+ threat_intel_mode = StringType(
+ choices=("Alert", "Deny", "Off"), serialize_when_none=False
+ )
virtual_hub = ModelType(SubResource, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -645,7 +846,10 @@ class ResourceNavigationLink(Model):
name = StringType(serialize_when_none=False)
link = StringType(serialize_when_none=False)
linked_resource_type = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -655,9 +859,20 @@ class Route(Model):
name = StringType(serialize_when_none=False)
address_prefix = StringType(serialize_when_none=False)
next_hop_ip_address = StringType(serialize_when_none=False)
- next_hop_type = StringType(choices=('Internet', 'None', 'VirtualAppliance', 'VirtualNetworkGateway', 'VnetLocal'),
- serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ next_hop_type = StringType(
+ choices=(
+ "Internet",
+ "None",
+ "VirtualAppliance",
+ "VirtualNetworkGateway",
+ "VnetLocal",
+ ),
+ serialize_when_none=False,
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
class RouteTable(Model):
@@ -666,7 +881,10 @@ class RouteTable(Model):
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
disable_bgp_route_propagation = BooleanType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
routes = ListType(ModelType(Route), serialize_when_none=False)
subnets = ListType(StringType, default=[], serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
@@ -679,12 +897,22 @@ class PrivateEndpoint(Model):
location = ModelType(ExtendedLocation, serialize_when_none=False)
extended_location = ModelType(ExtendedLocation, serialize_when_none=False)
name = StringType(serialize_when_none=False)
- custom_dns_configs = ListType(ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False)
- manual_private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection),
- serialize_when_none=False)
- network_interfaces = ListType(ModelType(NetworkInterface), serialize_when_none=False)
- private_link_service_connections = ListType(ModelType(PrivateLinkServiceConnection), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ custom_dns_configs = ListType(
+ ModelType(CustomDnsConfigPropertiesFormat), serialize_when_none=False
+ )
+ manual_private_link_service_connections = ListType(
+ ModelType(PrivateLinkServiceConnection), serialize_when_none=False
+ )
+ network_interfaces = ListType(
+ ModelType(NetworkInterface), serialize_when_none=False
+ )
+ private_link_service_connections = ListType(
+ ModelType(PrivateLinkServiceConnection), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
subnet = StringType(serialize_when_none=False)
resource_group = StringType(serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
@@ -699,7 +927,10 @@ class ServiceAssociationLink(Model):
link = StringType(serialize_when_none=False)
linked_resource_type = StringType(serialize_when_none=False)
locations = ListType(ModelType(ExtendedLocation), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -708,7 +939,10 @@ class ServiceEndpointPolicyDefinition(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
description = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
service = StringType(serialize_when_none=False)
service_resources = ListType(StringType)
@@ -718,10 +952,14 @@ class ServiceEndpointPolicy(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
location = ModelType(ExtendedLocation, serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
- service_endpoint_policy_definitions = ListType(ModelType(ServiceEndpointPolicyDefinition),
- serialize_when_none=False)
+ service_endpoint_policy_definitions = ListType(
+ ModelType(ServiceEndpointPolicyDefinition), serialize_when_none=False
+ )
subnets = ListType(StringType, serialize_when_none=False)
tags = ModelType(Tags, serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -734,24 +972,45 @@ class Subnet(Model):
name = StringType(serialize_when_none=False)
address_prefix = StringType(serialize_when_none=False)
address_prefixes = ListType(StringType, serialize_when_none=False)
- application_gateway_ip_configurations = ListType(ModelType(ApplicationGatewayIPConfiguration, serialize_when_none=False))
+ application_gateway_ip_configurations = ListType(
+ ModelType(ApplicationGatewayIPConfiguration, serialize_when_none=False)
+ )
delegations = ListType(ModelType(Delegation), serialize_when_none=False)
ip_allocations = ListType(ModelType(SubResource), serialize_when_none=False)
- ip_configuration_profiles = ListType(ModelType(IPConfigurationProfile), serialize_when_none=False)
+ ip_configuration_profiles = ListType(
+ ModelType(IPConfigurationProfile), serialize_when_none=False
+ )
ip_configurations = ListType(ModelType(IPConfiguration), serialize_when_none=False)
azure_firewall = ListType(ModelType(AzureFirewall), serialize_when_none=False)
nat_gateway = ModelType(SubResource, serialize_when_none=False)
- network_security_group = ModelType(NetworkSecurityGroupRef, serialize_when_none=False)
- private_endpoint_network_policies = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
+ network_security_group = ModelType(
+ NetworkSecurityGroupRef, serialize_when_none=False
+ )
+ private_endpoint_network_policies = StringType(
+ choices=("Disabled", "Enabled"), serialize_when_none=False
+ )
private_endpoints = ListType(ModelType(PrivateEndpoint), serialize_when_none=False)
- private_link_service_network_policies = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ private_link_service_network_policies = StringType(
+ choices=("Disabled", "Enabled"), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
purpose = StringType(serialize_when_none=False)
- resource_navigation_links = ListType(ModelType(ResourceNavigationLink, serialize_when_none=False))
+ resource_navigation_links = ListType(
+ ModelType(ResourceNavigationLink, serialize_when_none=False)
+ )
route_table = ModelType(RouteTable, serialize_when_none=False)
- service_association_links = ListType(ModelType(ServiceAssociationLink), serialize_when_none=False)
- service_endpoint_policies = ListType(ModelType(ServiceEndpointPolicy), serialize_when_none=False)
- service_endpoints = ListType(ModelType(ServiceEndpointPropertiesFormat), serialize_when_none=False)
+ service_association_links = ListType(
+ ModelType(ServiceAssociationLink), serialize_when_none=False
+ )
+ service_endpoint_policies = ListType(
+ ModelType(ServiceEndpointPolicy), serialize_when_none=False
+ )
+ service_endpoints = ListType(
+ ModelType(ServiceEndpointPropertiesFormat), serialize_when_none=False
+ )
type = StringType(serialize_when_none=False)
@@ -759,13 +1018,24 @@ class NetworkSecurityGroup(AzureCloudService):
etag = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
location = StringType(serialize_when_none=False)
- name = StringType(default='-', serialize_when_none=False)
- default_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
- inbound_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
- outbound_security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
+ name = StringType(default="-", serialize_when_none=False)
+ default_security_rules = ListType(
+ ModelType(SecurityRule), serialize_when_none=False
+ )
+ inbound_security_rules = ListType(
+ ModelType(SecurityRule), serialize_when_none=False
+ )
+ outbound_security_rules = ListType(
+ ModelType(SecurityRule), serialize_when_none=False
+ )
flow_logs = ListType(ModelType(FlowLog), serialize_when_none=False)
- network_interfaces = ListType(ModelType(NetworkInterface), serialize_when_none=False)
- provisioning_state = StringType(choices=('Deleting', 'Failed', 'Succeeded', 'Updating'), serialize_when_none=False)
+ network_interfaces = ListType(
+ ModelType(NetworkInterface), serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Deleting", "Failed", "Succeeded", "Updating"),
+ serialize_when_none=False,
+ )
resource_guid = StringType(serialize_when_none=False)
security_rules = ListType(ModelType(SecurityRule), serialize_when_none=False)
subnets = ListType(ModelType(Subnet), serialize_when_none=False)
diff --git a/src/spaceone/inventory/model/postgresql_servers/cloud_service_type.py b/src/spaceone/inventory/model/postgresql_servers/cloud_service_type.py
index 78fa10ed..f9dc2a08 100644
--- a/src/spaceone/inventory/model/postgresql_servers/cloud_service_type.py
+++ b/src/spaceone/inventory/model/postgresql_servers/cloud_service_type.py
@@ -1,155 +1,216 @@
import os
from spaceone.inventory.libs.utils import *
-from spaceone.inventory.libs.schema.metadata.dynamic_widget import CardWidget, ChartWidget
-from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, SearchField, DateTimeDyField, ListDyField, \
- EnumDyField, SizeField
-from spaceone.inventory.libs.schema.cloud_service_type import CloudServiceTypeResource, CloudServiceTypeResponse, \
- CloudServiceTypeMeta
+from spaceone.inventory.libs.schema.metadata.dynamic_widget import (
+ CardWidget,
+ ChartWidget,
+)
+from spaceone.inventory.libs.schema.metadata.dynamic_field import (
+ TextDyField,
+ SearchField,
+ DateTimeDyField,
+ ListDyField,
+ EnumDyField,
+ SizeField,
+)
+from spaceone.inventory.libs.schema.cloud_service_type import (
+ CloudServiceTypeResource,
+ CloudServiceTypeResponse,
+ CloudServiceTypeMeta,
+)
from spaceone.inventory.conf.cloud_service_conf import ASSET_URL
current_dir = os.path.abspath(os.path.dirname(__file__))
-postgresql_count_by_account_conf = os.path.join(current_dir, 'widget/postgresql_count_by_account.yaml')
-postgresql_count_by_region_conf = os.path.join(current_dir, 'widget/postgresql_count_by_region.yaml')
-postgresql_count_by_subscription_conf = os.path.join(current_dir, 'widget/postgresql_count_by_subscription.yaml')
-postgresql_count_by_tier_conf = os.path.join(current_dir, 'widget/postgresql_count_by_tier.yaml')
-postgresql_total_count_conf = os.path.join(current_dir, 'widget/postgresql_total_count.yaml')
-
+postgresql_count_by_account_conf = os.path.join(
+ current_dir, "widget/postgresql_count_by_account.yaml"
+)
+postgresql_count_by_region_conf = os.path.join(
+ current_dir, "widget/postgresql_count_by_region.yaml"
+)
+postgresql_count_by_subscription_conf = os.path.join(
+ current_dir, "widget/postgresql_count_by_subscription.yaml"
+)
+postgresql_count_by_tier_conf = os.path.join(
+ current_dir, "widget/postgresql_count_by_tier.yaml"
+)
+postgresql_total_count_conf = os.path.join(
+ current_dir, "widget/postgresql_total_count.yaml"
+)
cst_postgre_sql_servers = CloudServiceTypeResource()
-cst_postgre_sql_servers.name = 'Server'
-cst_postgre_sql_servers.group = 'PostgreSQLServers'
-cst_postgre_sql_servers.service_code = 'Microsoft.DBforPostgreSQL/servers'
-cst_postgre_sql_servers.labels = ['Database']
+cst_postgre_sql_servers.name = "Server"
+cst_postgre_sql_servers.group = "PostgreSQLServers"
+cst_postgre_sql_servers.service_code = "Microsoft.DBforPostgreSQL/servers"
+cst_postgre_sql_servers.labels = ["Database"]
cst_postgre_sql_servers.is_primary = True
cst_postgre_sql_servers.is_major = True
cst_postgre_sql_servers.tags = {
- 'spaceone:icon': f'{ASSET_URL}/azure-sql-postgresql-server.svg',
+ "spaceone:icon": f"{ASSET_URL}/azure-sql-postgresql-server.svg",
}
cst_postgre_sql_servers._metadata = CloudServiceTypeMeta.set_meta(
fields=[
- TextDyField.data_source('Type', 'instance_type'),
- EnumDyField.data_source('Status', 'data.user_visible_state', default_state={
- 'safe': ['Ready'],
- 'warning': ['Disabled', 'Dropping', 'Inaccessible']
- }),
- TextDyField.data_source('Resource Group', 'data.resource_group', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Location', 'data.location', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Subscription Name', 'data.subscription_name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Subscription ID', 'account', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Server Name', 'data.fully_qualified_domain_name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Admin Username', 'data.administrator_login', options={
- 'is_optional': True
- }),
- TextDyField.data_source('PostgreSQL Version', 'data.version', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Performance Configuration Tier', 'instance_type', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Performance Configuration Name', 'data.sku.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Performance Configuration Capacity', 'data.sku.capacity', options={
- 'is_optional': True
- }),
- TextDyField.data_source('SSL Enforce Status', 'data.ssl_enforcement', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Public Network Access', 'data.public_network_access', options={
- 'is_optional': True
- }),
- TextDyField.data_source('SSL Enforcement', 'data.ssl_enforcement', options={
- 'is_optional': True
- }),
- TextDyField.data_source('TLS Setting', 'data.minimal_tls_version', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Firewall Rule Name', 'data.firewall_rules.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Start IP', 'data.firewall_rules.start_ip_address', options={
- 'is_optional': True
- }),
- TextDyField.data_source('End IP', 'data.firewall_rules.end_ip_address', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source("Type", "instance_type"),
+ EnumDyField.data_source(
+ "Status",
+ "data.user_visible_state",
+ default_state={
+ "safe": ["Ready"],
+ "warning": ["Disabled", "Dropping", "Inaccessible"],
+ },
+ ),
+ TextDyField.data_source(
+ "Resource Group", "data.resource_group", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Location", "data.location", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Subscription Name", "data.subscription_name", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Subscription ID", "account", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Server Name",
+ "data.fully_qualified_domain_name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Admin Username", "data.administrator_login", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "PostgreSQL Version", "data.version", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Performance Configuration Tier",
+ "instance_type",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Performance Configuration Name",
+ "data.sku.name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Performance Configuration Capacity",
+ "data.sku.capacity",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "SSL Enforce Status", "data.ssl_enforcement", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Public Network Access",
+ "data.public_network_access",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "SSL Enforcement", "data.ssl_enforcement", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "TLS Setting", "data.minimal_tls_version", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Firewall Rule Name",
+ "data.firewall_rules.name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Start IP",
+ "data.firewall_rules.start_ip_address",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "End IP",
+ "data.firewall_rules.end_ip_address",
+ options={"is_optional": True},
+ ),
# VNet Rules
- TextDyField.data_source('Virtual Network Rule Name', 'data.virtual_network_rules.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Virtual Network', 'data.virtual_network_rules.virtual_network_name_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Subnet', 'data.virtual_network_rules.subnet_name', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Virtual Network Rule Name",
+ "data.virtual_network_rules.name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Virtual Network",
+ "data.virtual_network_rules.virtual_network_name_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Subnet",
+ "data.virtual_network_rules.subnet_name",
+ options={"is_optional": True},
+ ),
# Replicas
- TextDyField.data_source('Replicas Name', 'data.replicas.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Replicas Location', 'data.replicas.location', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Replicas Master Server Name', 'data.replicas.master_server_name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Active Directory Name', 'data.server_administrators.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Login', 'data.server_administrators.login', options={
- 'is_optional': True
- }),
- TextDyField.data_source('SID', 'data.server_administrators.sid', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Tenant ID', 'data.server_administrators.tenant_id', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Compute Generation', 'data.sku.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Compute Tier', 'instance_type', options={
- 'is_optional': True
- }),
- TextDyField.data_source('vCore', 'data.sku.capacity', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Backup Retention Period (Days)', 'data.storage_profile.backup_retention_days', options={
- 'is_optional': True
- })
+ TextDyField.data_source(
+ "Replicas Name", "data.replicas.name", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Replicas Location", "data.replicas.location", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Replicas Master Server Name",
+ "data.replicas.master_server_name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Active Directory Name",
+ "data.server_administrators.name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Login", "data.server_administrators.login", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "SID", "data.server_administrators.sid", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Tenant ID",
+ "data.server_administrators.tenant_id",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Compute Generation", "data.sku.name", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Compute Tier", "instance_type", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "vCore", "data.sku.capacity", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Backup Retention Period (Days)",
+ "data.storage_profile.backup_retention_days",
+ options={"is_optional": True},
+ ),
],
search=[
- SearchField.set(name='Subscription ID', key='account'),
- SearchField.set(name='Subscription Name', key='data.subscription_name'),
- SearchField.set(name='Resource Group', key='data.resource_group'),
- SearchField.set(name='Location', key='data.location'),
- SearchField.set(name='Server Name', key='data.fully_qualified_domain_name'),
- SearchField.set(name='Admin Username', key='data.administrator_login'),
- SearchField.set(name='PostgreSQL Version', key='data.version'),
- SearchField.set(name='Performance Configuration Tier', key='instance_type'),
- SearchField.set(name='Performance Configuration Name', key='data.sku.name'),
- SearchField.set(name='Performance Configuration Capacity', key='data.sku.capacity', data_type='integer'),
- SearchField.set(name='SSL Enforce Status', key='data.ssl_enforcement'),
- SearchField.set(name='SSL Enforcement', key='data.ssl_enforcement'),
- SearchField.set(name='Public Network Access', key='data.public_network_access'),
- SearchField.set(name='TLS Setting', key='data.minimal_tls_version'),
- SearchField.set(name='Firewall Rule Name', key='data.firewall_rules.name'),
- SearchField.set(name='Firewall Rule Start IP', key='data.firewall_rules.start_ip_address'),
- SearchField.set(name='Firewall Rule End IP', key='data.firewall_rules.end_ip_address'),
+ SearchField.set(name="Subscription ID", key="account"),
+ SearchField.set(name="Subscription Name", key="data.subscription_name"),
+ SearchField.set(name="Resource Group", key="data.resource_group"),
+ SearchField.set(name="Location", key="data.location"),
+ SearchField.set(name="Server Name", key="data.fully_qualified_domain_name"),
+ SearchField.set(name="Admin Username", key="data.administrator_login"),
+ SearchField.set(name="PostgreSQL Version", key="data.version"),
+ SearchField.set(name="Performance Configuration Tier", key="instance_type"),
+ SearchField.set(name="Performance Configuration Name", key="data.sku.name"),
+ SearchField.set(
+ name="Performance Configuration Capacity",
+ key="data.sku.capacity",
+ data_type="integer",
+ ),
+ SearchField.set(name="SSL Enforce Status", key="data.ssl_enforcement"),
+ SearchField.set(name="SSL Enforcement", key="data.ssl_enforcement"),
+ SearchField.set(name="Public Network Access", key="data.public_network_access"),
+ SearchField.set(name="TLS Setting", key="data.minimal_tls_version"),
+ SearchField.set(name="Firewall Rule Name", key="data.firewall_rules.name"),
+ SearchField.set(
+ name="Firewall Rule Start IP", key="data.firewall_rules.start_ip_address"
+ ),
+ SearchField.set(
+ name="Firewall Rule End IP", key="data.firewall_rules.end_ip_address"
+ ),
],
widget=[
ChartWidget.set(**get_data_from_yaml(postgresql_count_by_account_conf)),
@@ -157,9 +218,9 @@
ChartWidget.set(**get_data_from_yaml(postgresql_count_by_subscription_conf)),
ChartWidget.set(**get_data_from_yaml(postgresql_count_by_tier_conf)),
CardWidget.set(**get_data_from_yaml(postgresql_total_count_conf)),
- ]
+ ],
)
CLOUD_SERVICE_TYPES = [
- CloudServiceTypeResponse({'resource': cst_postgre_sql_servers}),
+ CloudServiceTypeResponse({"resource": cst_postgre_sql_servers}),
]
diff --git a/src/spaceone/inventory/model/snapshots/data.py b/src/spaceone/inventory/model/snapshots/data.py
index 5c8ca276..a8d352cc 100644
--- a/src/spaceone/inventory/model/snapshots/data.py
+++ b/src/spaceone/inventory/model/snapshots/data.py
@@ -1,12 +1,21 @@
from schematics import Model
-from schematics.types import ModelType, ListType, StringType, DateTimeType, IntType, BooleanType
+from schematics.types import (
+ ModelType,
+ ListType,
+ StringType,
+ DateTimeType,
+ IntType,
+ BooleanType,
+)
from spaceone.inventory.libs.schema.resource import AzureCloudService
class Sku(Model):
- name = StringType(choices=('Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS'),
- serialize_when_none=False)
- tier = StringType(choices=('Premium', 'Standard'), serialize_when_none=False)
+ name = StringType(
+ choices=("Standard_LRS", "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS"),
+ serialize_when_none=False,
+ )
+ tier = StringType(choices=("Premium", "Standard"), serialize_when_none=False)
class ImageDiskReference(Model):
@@ -15,8 +24,10 @@ class ImageDiskReference(Model):
class CreationData(Model):
- creation_option = StringType(choices=('Attach', 'Copy', 'Empty', 'FromImage', 'Import', 'Restore', 'Upload'),
- serialize_when_none=False)
+ creation_option = StringType(
+ choices=("Attach", "Copy", "Empty", "FromImage", "Import", "Restore", "Upload"),
+ serialize_when_none=False,
+ )
image_reference = ModelType(ImageDiskReference, serialize_when_none=False)
gallery_image_reference = ModelType(ImageDiskReference, serialize_when_none=False)
logical_sector_size = IntType(serialize_when_none=False)
@@ -47,10 +58,16 @@ class EncryptionSettingsCollection(Model):
class Encryption(Model):
- disk_encryption_set_id = StringType(default='', serialize_when_none=False)
- type = StringType(choices=('EncryptionAtRestWithCustomerKey', 'EncryptionAtRestWithPlatformAndCustomerKeys',
- 'EncryptionAtRestWithPlatformKey'),
- default='EncryptionAtRestWithPlatformKey', serialize_when_none=False)
+ disk_encryption_set_id = StringType(default="", serialize_when_none=False)
+ type = StringType(
+ choices=(
+ "EncryptionAtRestWithCustomerKey",
+ "EncryptionAtRestWithPlatformAndCustomerKeys",
+ "EncryptionAtRestWithPlatformKey",
+ ),
+ default="EncryptionAtRestWithPlatformKey",
+ serialize_when_none=False,
+ )
type_display = StringType()
@@ -58,6 +75,10 @@ class ShareInfoElement(Model):
vm_uri = StringType(serialize_when_none=False)
+class SupportedCapabilities(Model):
+ architecture = StringType(serialize_when_none=False)
+
+
class Snapshot(AzureCloudService):
id = StringType()
location = StringType()
@@ -69,23 +90,38 @@ class Snapshot(AzureCloudService):
disk_m_bps_read_write = StringType(serialize_when_none=False)
disk_size_bytes = IntType()
disk_size_gb = IntType()
- disk_state = StringType(choices=('ActiveSAS', 'ActiveUpload', 'Attached', 'ReadyToUpload',
- 'Reserved', 'Unattached'))
+ disk_state = StringType(
+ choices=(
+ "ActiveSAS",
+ "ActiveUpload",
+ "Attached",
+ "ReadyToUpload",
+ "Reserved",
+ "Unattached",
+ )
+ )
encryption = ModelType(Encryption)
- encryption_settings_collection = ModelType(EncryptionSettingsCollection, serialize_when_none=False)
+ encryption_settings_collection = ModelType(
+ EncryptionSettingsCollection, serialize_when_none=False
+ )
hyper_v_generation = StringType(serialize_when_none=False)
incremental = BooleanType()
- incremental_display = StringType(default='Full', serialize_when_none=False)
- network_access_policy = StringType(choices=('AllowAll', 'AllowPrivate', 'DenyAll'), serialize_when_none=False)
+ incremental_display = StringType(default="Full", serialize_when_none=False)
+ network_access_policy = StringType(
+ choices=("AllowAll", "AllowPrivate", "DenyAll"), serialize_when_none=False
+ )
network_access_policy_display = StringType(serialize_when_none=False)
os_type = StringType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Failed', 'Succeeded'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Failed", "Succeeded"), serialize_when_none=False
+ )
time_created = DateTimeType(serialize_when_none=False)
unique_id = StringType()
size = IntType()
sku = ModelType(Sku)
source_disk_name = StringType()
- tier_display = StringType(default='')
+ supported_capabilities = ModelType(SupportedCapabilities, serialize_when_none=False)
+ tier_display = StringType(default="")
type = StringType(serialize_when_none=False)
def reference(self):
diff --git a/src/spaceone/inventory/model/sql_servers/cloud_service.py b/src/spaceone/inventory/model/sql_servers/cloud_service.py
index af86a324..6d17ac8b 100644
--- a/src/spaceone/inventory/model/sql_servers/cloud_service.py
+++ b/src/spaceone/inventory/model/sql_servers/cloud_service.py
@@ -1,243 +1,372 @@
-from schematics.types import ModelType, StringType, PolyModelType, FloatType, DateTimeType
+from schematics.types import (
+ ModelType,
+ StringType,
+ PolyModelType,
+ FloatType,
+ DateTimeType,
+)
from spaceone.inventory.model.sql_servers.data import SQLServer
-from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, DateTimeDyField, EnumDyField, \
- ListDyField
-from spaceone.inventory.libs.schema.metadata.dynamic_layout import ItemDynamicLayout, TableDynamicLayout, \
- ListDynamicLayout, SimpleTableDynamicLayout
-from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse, CloudServiceMeta
+from spaceone.inventory.libs.schema.metadata.dynamic_field import (
+ TextDyField,
+ DateTimeDyField,
+ EnumDyField,
+ ListDyField,
+)
+from spaceone.inventory.libs.schema.metadata.dynamic_layout import (
+ ItemDynamicLayout,
+ TableDynamicLayout,
+ ListDynamicLayout,
+ SimpleTableDynamicLayout,
+)
+from spaceone.inventory.libs.schema.cloud_service import (
+ CloudServiceResource,
+ CloudServiceResponse,
+ CloudServiceMeta,
+)
-'''
+"""
SQL SERVERS
-'''
+"""
# TAB - Default
# Resource Group, Status, Location, Subscription, Subscription ID, Server Admin, Firewalls, Active Directory admin, Server name
-sql_servers_info_meta = ItemDynamicLayout.set_fields('SQL Servers', fields=[
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Resource Group', 'data.resource_group'),
- TextDyField.data_source('Resource ID', 'data.id'),
- EnumDyField.data_source('Status', 'data.state', default_state={
- 'safe': ['Ready'],
- 'warning': ['Disabled']
- }),
- TextDyField.data_source('Location', 'data.location'),
- TextDyField.data_source('Subscription', 'data.subscription_name'),
- TextDyField.data_source('Subscription ID', 'account'),
- TextDyField.data_source('Server Admin', 'data.administrator_login'),
- TextDyField.data_source('Active Directory Admin', 'data.azure_ad_admin_name'),
- TextDyField.data_source('Server Name', 'data.fully_qualified_domain_name')
-])
+sql_servers_info_meta = ItemDynamicLayout.set_fields(
+ "SQL Servers",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Resource Group", "data.resource_group"),
+ TextDyField.data_source("Resource ID", "data.id"),
+ EnumDyField.data_source(
+ "Status",
+ "data.state",
+ default_state={"safe": ["Ready"], "warning": ["Disabled"]},
+ ),
+ TextDyField.data_source("Location", "data.location"),
+ TextDyField.data_source("Subscription", "data.subscription_name"),
+ TextDyField.data_source("Subscription ID", "account"),
+ TextDyField.data_source("Server Admin", "data.administrator_login"),
+ TextDyField.data_source("Active Directory Admin", "data.azure_ad_admin_name"),
+ TextDyField.data_source("Server Name", "data.fully_qualified_domain_name"),
+ ],
+)
# TAB - Failover Groups
# Name, Primary Server, Secondary Server, Read/Write Failover Policy, Grace Period (minutes), Database count
-sql_server_failover_group = TableDynamicLayout.set_fields('Failover Groups', 'data.failover_groups', fields=[
- TextDyField.data_source('ID', 'id'),
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Primary Server', 'primary_server'),
- TextDyField.data_source('Secondary Server', 'secondary_server'),
- TextDyField.data_source('Read/Write Failover Policy', 'failover_policy_display'),
- TextDyField.data_source('Grace Period (minutes)', 'grace_period_display'),
- # TextDyField.data_source('Database count', ''),
-])
+sql_server_failover_group = TableDynamicLayout.set_fields(
+ "Failover Groups",
+ "data.failover_groups",
+ fields=[
+ TextDyField.data_source("ID", "id"),
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Primary Server", "primary_server"),
+ TextDyField.data_source("Secondary Server", "secondary_server"),
+ TextDyField.data_source(
+ "Read/Write Failover Policy", "failover_policy_display"
+ ),
+ TextDyField.data_source("Grace Period (minutes)", "grace_period_display"),
+ # TextDyField.data_source('Database count', ''),
+ ],
+)
# TAB - Backups
# Database, Earliest PITR restore point (UTC), Available LTR backups
-sql_server_backups = TableDynamicLayout.set_fields('Backups', 'data.databases', fields=[
- TextDyField.data_source('Database', 'name'),
- TextDyField.data_source('Earliest PITR Restore Point (UTC)', 'earliest_restore_date'),
- TextDyField.data_source('Available LTR backups', 'long_term_retention_backup_resource_id'),
-])
+sql_server_backups = TableDynamicLayout.set_fields(
+ "Backups",
+ "data.databases",
+ fields=[
+ TextDyField.data_source("Database", "name"),
+ TextDyField.data_source(
+ "Earliest PITR Restore Point (UTC)", "earliest_restore_date"
+ ),
+ TextDyField.data_source(
+ "Available LTR backups", "long_term_retention_backup_resource_id"
+ ),
+ ],
+)
# TAB - Active Directory Admin
# Active Directory Admin
-sql_servers_active_directory_admin = ItemDynamicLayout.set_fields('Active Directory Admin', fields=[
- TextDyField.data_source('Active Directory Admin', 'data.azure_ad_admin_name')
-])
+sql_servers_active_directory_admin = ItemDynamicLayout.set_fields(
+ "Active Directory Admin",
+ fields=[
+ TextDyField.data_source("Active Directory Admin", "data.azure_ad_admin_name")
+ ],
+)
# TAB - SQL Databases - Default
-sql_servers_databases = TableDynamicLayout.set_fields('Databases', 'data.databases', fields=[
- TextDyField.data_source('Database', 'name'),
- TextDyField.data_source('Resource ID', 'id'),
- EnumDyField.data_source('Status', 'status', default_state={
- 'safe': ['Online', 'Creating', 'Copying', 'Creating', 'OnlineChangingDwPerformanceTiers', 'Restoring', 'Resuming', 'Scaling', 'Standby'],
- 'warning': ['AutoClosed', 'Inaccessible', 'Offline', 'OfflineChangingDwPerformanceTiers', 'OfflineSecondary', 'Pausing', 'Recovering', 'RecoveryPending', 'Suspect'],
- 'disable':['Disabled', 'Paused', 'Shutdown'],
- 'alert': ['EmergencyMode']
- }),
- TextDyField.data_source('Resource Group', 'resource_group'),
- TextDyField.data_source('Subscription ID', 'subscription_id'),
- TextDyField.data_source('Location', 'location'),
- TextDyField.data_source('Server Name', 'server_name'),
- TextDyField.data_source('Elastic Pool', ''),
- # TextDyField.data_source('Connection Strings', ''),
- TextDyField.data_source('Pricing Tier', 'pricing_tier_display'),
- TextDyField.data_source('Earliest Restore Point', 'earliest_restore_date'),
-])
+sql_servers_databases = TableDynamicLayout.set_fields(
+ "Databases",
+ "data.databases",
+ fields=[
+ TextDyField.data_source("Database", "name"),
+ TextDyField.data_source("Resource ID", "id"),
+ EnumDyField.data_source(
+ "Status",
+ "status",
+ default_state={
+ "safe": [
+ "Online",
+ "Creating",
+ "Copying",
+ "Creating",
+ "OnlineChangingDwPerformanceTiers",
+ "Restoring",
+ "Resuming",
+ "Scaling",
+ "Standby",
+ ],
+ "warning": [
+ "AutoClosed",
+ "Inaccessible",
+ "Offline",
+ "OfflineChangingDwPerformanceTiers",
+ "OfflineSecondary",
+ "Pausing",
+ "Recovering",
+ "RecoveryPending",
+ "Suspect",
+ ],
+ "disable": ["Disabled", "Paused", "Shutdown"],
+ "alert": ["EmergencyMode"],
+ },
+ ),
+ TextDyField.data_source("Resource Group", "resource_group"),
+ TextDyField.data_source("Subscription ID", "subscription_id"),
+ TextDyField.data_source("Location", "location"),
+ TextDyField.data_source("Server Name", "server_name"),
+ TextDyField.data_source("Elastic Pool", ""),
+ # TextDyField.data_source('Connection Strings', ''),
+ TextDyField.data_source("Pricing Tier", "pricing_tier_display"),
+ TextDyField.data_source("Earliest Restore Point", "earliest_restore_date"),
+ ],
+)
# TAB - SQL Databases - Configure
-sql_servers_databases_configure = TableDynamicLayout.set_fields('Databases Configure', 'data.databases', fields=[
- TextDyField.data_source('Service Tier', 'service_tier_display'),
- TextDyField.data_source('Compute Tier', 'compute_tier'),
- TextDyField.data_source('Compute Hardware', 'sku.family'),
- TextDyField.data_source('License Type', 'license_type'),
- TextDyField.data_source('vCores', 'sku.capacity'),
- TextDyField.data_source('Data Max Size', 'max_size_gb'),
- TextDyField.data_source('Zone Redundant', 'zone_redundant'),
- ListDyField.data_source('Sync Groups', 'sync_group_display'),
- ListDyField.data_source('Sync Agents', 'sync_agent_display'),
- TextDyField.data_source('Collation', 'collation'),
- DateTimeDyField.data_source('Creation Date', 'creation_date'),
- # TextDyField.data_source('Server Admin Login', '') # Remove: DB is already under the specific server
- # TextDyField.data_source('Active Directory Login', ''), # Remove: DB is already under the specific server
-
-])
+sql_servers_databases_configure = TableDynamicLayout.set_fields(
+ "Databases Configure",
+ "data.databases",
+ fields=[
+ TextDyField.data_source("Service Tier", "service_tier_display"),
+ TextDyField.data_source("Compute Tier", "compute_tier"),
+ TextDyField.data_source("Compute Hardware", "sku.family"),
+ TextDyField.data_source("License Type", "license_type"),
+ TextDyField.data_source("vCores", "sku.capacity"),
+ TextDyField.data_source("Data Max Size", "max_size_gb"),
+ TextDyField.data_source("Zone Redundant", "zone_redundant"),
+ ListDyField.data_source("Sync Groups", "sync_group_display"),
+ ListDyField.data_source("Sync Agents", "sync_agent_display"),
+ TextDyField.data_source("Collation", "collation"),
+ DateTimeDyField.data_source("Creation Date", "creation_date"),
+ # TextDyField.data_source('Server Admin Login', '') # Remove: DB is already under the specific server
+ # TextDyField.data_source('Active Directory Login', ''), # Remove: DB is already under the specific server
+ ],
+)
# TAB - SQL Databases - tags
-sql_databases_info_tags = TableDynamicLayout.set_fields('Tags', 'data.tags', fields=[
- TextDyField.data_source('Key', 'key'),
- TextDyField.data_source('Value', 'value')
-])
+sql_databases_info_tags = TableDynamicLayout.set_fields(
+ "Tags",
+ "data.tags",
+ fields=[
+ TextDyField.data_source("Key", "key"),
+ TextDyField.data_source("Value", "value"),
+ ],
+)
# TAB - Dynamic Data Masking : "Masking rules: + Tab "Recommended fields to mask" # TODO: confirm!!
-sql_servers_databases_info = ListDynamicLayout.set_layouts('SQL Databases',
- layouts=[sql_servers_databases,
- sql_servers_databases_configure,
- sql_databases_info_tags])
+sql_servers_databases_info = ListDynamicLayout.set_layouts(
+ "SQL Databases",
+ layouts=[
+ sql_servers_databases,
+ sql_servers_databases_configure,
+ sql_databases_info_tags,
+ ],
+)
# TAB - Elastic Pools
# Name, Pricing tier, Per DB settings, of DBs, Storage, unit, avg, peak, average utilization over past hour
-sql_servers_elastic_pools = TableDynamicLayout.set_fields('Elastic Pools', 'data.elastic_pools', fields=[
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Resource Group', 'resource_group_display'),
- TextDyField.data_source('Per DB Settings', 'per_db_settings_display'),
- TextDyField.data_source('Pricing Tier', 'pricing_tier_display'),
- TextDyField.data_source('# of DBs', 'number_of_databases'),
- TextDyField.data_source('Unit', 'unit_display'),
- EnumDyField.data_source('Status', 'state', default_state={
- 'safe': ['Ready', 'Creating'],
- 'warning': ['Disabled']
- }),
- # TextDyField.data_source('Storage[%]', ''),
- # TextDyField.data_source('Avg[%]', ''),
- # TextDyField.data_source('Peak[%]', ''),
- # TextDyField.data_source('Utilization Over Past Hour[%]', ''),
- # TextDyField.data_source('Utilization Over Past Hour[%]', ''),
- TextDyField.data_source('Server Name', 'server_name_display'),
- TextDyField.data_source('Resource Configuration', 'pricing_tier_display'),
- TextDyField.data_source('Maximum Storage Size', 'max_size_gb'),
- ListDyField.data_source('Tags', 'tags')
-
-])
+sql_servers_elastic_pools = TableDynamicLayout.set_fields(
+ "Elastic Pools",
+ "data.elastic_pools",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Resource Group", "resource_group_display"),
+ TextDyField.data_source("Per DB Settings", "per_db_settings_display"),
+ TextDyField.data_source("Pricing Tier", "pricing_tier_display"),
+ TextDyField.data_source("# of DBs", "number_of_databases"),
+ TextDyField.data_source("Unit", "unit_display"),
+ EnumDyField.data_source(
+ "Status",
+ "state",
+ default_state={"safe": ["Ready", "Creating"], "warning": ["Disabled"]},
+ ),
+ # TextDyField.data_source('Storage[%]', ''),
+ # TextDyField.data_source('Avg[%]', ''),
+ # TextDyField.data_source('Peak[%]', ''),
+ # TextDyField.data_source('Utilization Over Past Hour[%]', ''),
+ # TextDyField.data_source('Utilization Over Past Hour[%]', ''),
+ TextDyField.data_source("Server Name", "server_name_display"),
+ TextDyField.data_source("Resource Configuration", "pricing_tier_display"),
+ TextDyField.data_source("Maximum Storage Size", "max_size_gb"),
+ ListDyField.data_source("Tags", "tags"),
+ ],
+)
# TAB - Deleted Databases
-sql_servers_deleted_databases = TableDynamicLayout.set_fields('Deleted Databases', 'data.deleted_databases', fields=[
- TextDyField.data_source('Database', 'database_name'),
- DateTimeDyField.data_source('Deletion Time (UTC)', 'deletion_date'),
- DateTimeDyField.data_source('Creation Time (UTC)', 'creation_date'),
- TextDyField.data_source('Edition Time (UTC)', 'edition')
-])
+sql_servers_deleted_databases = TableDynamicLayout.set_fields(
+ "Deleted Databases",
+ "data.deleted_databases",
+ fields=[
+ TextDyField.data_source("Database", "database_name"),
+ DateTimeDyField.data_source("Deletion Time (UTC)", "deletion_date"),
+ DateTimeDyField.data_source("Creation Time (UTC)", "creation_date"),
+ TextDyField.data_source("Edition Time (UTC)", "edition"),
+ ],
+)
# TAB - Auditing
-sql_servers_auditing = ItemDynamicLayout.set_fields('Auditing', 'data.server_auditing_settings', fields=[
- EnumDyField.data_source('Enable SQL Auditing', 'state', default_state={
- 'safe': ['Enabled'],
- 'warning': ['Disabled']
- }),
- TextDyField.data_source('Audit Log Destination', 'storage_endpoint'),
- TextDyField.data_source('Storage Account ID', 'storage_account_subscription_id'),
-])
+sql_servers_auditing = ItemDynamicLayout.set_fields(
+ "Auditing",
+ "data.server_auditing_settings",
+ fields=[
+ EnumDyField.data_source(
+ "Enable SQL Auditing",
+ "state",
+ default_state={"safe": ["Enabled"], "warning": ["Disabled"]},
+ ),
+ TextDyField.data_source("Audit Log Destination", "storage_endpoint"),
+ TextDyField.data_source(
+ "Storage Account ID", "storage_account_subscription_id"
+ ),
+ ],
+)
# TAB - Firewalls and Virtual Networks
-sql_servers_network = ItemDynamicLayout.set_fields('Network', fields=[
- EnumDyField.data_source('Public Network access', 'data.public_network_access', default_state={
- 'safe': ['Enabled'],
- 'warning': ['Disabled']
- }),
- TextDyField.data_source('Minimum TLS Version', 'data.minimal_tls_version'),
- TextDyField.data_source('Connection Policy', 'data.server_auditing_settings.name'),
- TextDyField.data_source('Allow Azure Services and Resources to Access this server',
- 'data.server_auditing_settings.is_azure_monitor_target_enabled')
-
-])
-sql_servers_firewall_rules = TableDynamicLayout.set_fields('Firewall Rules', 'data.firewall_rules', fields=[
- TextDyField.data_source('Rule Name', 'name'),
- TextDyField.data_source('Start IP', 'start_ip_address'),
- TextDyField.data_source('End IP', 'end_ip_address')
-])
-
-sql_servers_virtual_network_rules = TableDynamicLayout.set_fields('Virtual Network Rules', 'data.virtual_network_rules',
- fields=[
- TextDyField.data_source('Rule Name', 'name'),
- TextDyField.data_source('Virtual Network',
- 'virtual_network_name_display'),
- TextDyField.data_source('Subnet ID',
- 'virtual_network_subnet_id'),
- # TextDyField.data_source('Address Range', ''),
- # TextDyField.data_source('Endpoint Status', ''),
- TextDyField.data_source('Resource Group',
- 'resource_group'),
- TextDyField.data_source('Subscription',
- 'subscription_id'),
- EnumDyField.data_source('State', 'state',
- default_state={
- 'safe': ['Ready',
- 'InProgress',
- 'Initializing'],
- 'warning': [
- 'Deleting',
- 'Unknown']
- })
- ])
-
-sql_servers_firewalls_and_vn = ListDynamicLayout.set_layouts('Firewalls and Network',
- layouts=[sql_servers_network, sql_servers_firewall_rules,
- sql_servers_virtual_network_rules])
+sql_servers_network = ItemDynamicLayout.set_fields(
+ "Network",
+ fields=[
+ EnumDyField.data_source(
+ "Public Network access",
+ "data.public_network_access",
+ default_state={"safe": ["Enabled"], "warning": ["Disabled"]},
+ ),
+ TextDyField.data_source("Minimum TLS Version", "data.minimal_tls_version"),
+ TextDyField.data_source(
+ "Connection Policy", "data.server_auditing_settings.name"
+ ),
+ TextDyField.data_source(
+ "Allow Azure Services and Resources to Access this server",
+ "data.server_auditing_settings.is_azure_monitor_target_enabled",
+ ),
+ ],
+)
+sql_servers_firewall_rules = TableDynamicLayout.set_fields(
+ "Firewall Rules",
+ "data.firewall_rules",
+ fields=[
+ TextDyField.data_source("Rule Name", "name"),
+ TextDyField.data_source("Start IP", "start_ip_address"),
+ TextDyField.data_source("End IP", "end_ip_address"),
+ ],
+)
+
+sql_servers_virtual_network_rules = TableDynamicLayout.set_fields(
+ "Virtual Network Rules",
+ "data.virtual_network_rules",
+ fields=[
+ TextDyField.data_source("Rule Name", "name"),
+ TextDyField.data_source("Virtual Network", "virtual_network_name_display"),
+ TextDyField.data_source("Subnet ID", "virtual_network_subnet_id"),
+ # TextDyField.data_source('Address Range', ''),
+ # TextDyField.data_source('Endpoint Status', ''),
+ TextDyField.data_source("Resource Group", "resource_group"),
+ TextDyField.data_source("Subscription", "subscription_id"),
+ EnumDyField.data_source(
+ "State",
+ "state",
+ default_state={
+ "safe": ["Ready", "InProgress", "Initializing"],
+ "warning": ["Deleting", "Unknown"],
+ },
+ ),
+ ],
+)
+
+sql_servers_firewalls_and_vn = ListDynamicLayout.set_layouts(
+ "Firewalls and Network",
+ layouts=[
+ sql_servers_network,
+ sql_servers_firewall_rules,
+ sql_servers_virtual_network_rules,
+ ],
+)
# TAB - Private Endpoint Connections
-sql_servers_private_endpoint_connections = TableDynamicLayout.set_fields('Private Endpoint Connections',
- 'data.private_endpoint_connections', fields=[
- TextDyField.data_source('Connection ID', 'connection_id'),
- TextDyField.data_source('State', 'status'),
- TextDyField.data_source('Private Endpoint Name', 'private_endpoint_name'),
- TextDyField.data_source('Request / Response Message', 'description')
-])
+sql_servers_private_endpoint_connections = TableDynamicLayout.set_fields(
+ "Private Endpoint Connections",
+ "data.private_endpoint_connections",
+ fields=[
+ TextDyField.data_source("Connection ID", "connection_id"),
+ TextDyField.data_source("State", "status"),
+ TextDyField.data_source("Private Endpoint Name", "private_endpoint_name"),
+ TextDyField.data_source("Request / Response Message", "description"),
+ ],
+)
# TAB - Transparent Data Encryption
-sql_servers_transparent_data_encryption = TableDynamicLayout.set_fields('Transparent Data Encryption',
- 'data.encryption_protectors', fields=[
- TextDyField.data_source('Transparent Data Encryption', 'kind'),
- TextDyField.data_source('Key', 'server_key_name'),
- TextDyField.data_source('Key Type', 'server_key_type'),
- TextDyField.data_source('Uri', 'uri')
-])
+sql_servers_transparent_data_encryption = TableDynamicLayout.set_fields(
+ "Transparent Data Encryption",
+ "data.encryption_protectors",
+ fields=[
+ TextDyField.data_source("Transparent Data Encryption", "kind"),
+ TextDyField.data_source("Key", "server_key_name"),
+ TextDyField.data_source("Key Type", "server_key_type"),
+ TextDyField.data_source("Uri", "uri"),
+ ],
+)
# TAB - Automatic Tuning
-sql_servers_automatic_tuning_options = TableDynamicLayout.set_fields('Tuning Options',
- 'data.server_automatic_tuning.options', fields=[
- TextDyField.data_source('Tuning Type', 'tuning_type'),
- TextDyField.data_source('Desired State', 'desired_state'),
- TextDyField.data_source('Current State', 'actual_state'),
-])
-
+sql_servers_automatic_tuning_options = TableDynamicLayout.set_fields(
+ "Tuning Options",
+ "data.server_automatic_tuning.options",
+ fields=[
+ TextDyField.data_source("Tuning Type", "tuning_type"),
+ TextDyField.data_source("Desired State", "desired_state"),
+ TextDyField.data_source("Current State", "actual_state"),
+ ],
+)
# TAB - SQL Databases
sql_servers_meta = CloudServiceMeta.set_layouts(
- [sql_servers_info_meta, sql_server_failover_group, sql_server_backups, sql_servers_active_directory_admin,
- sql_servers_databases_info, sql_servers_elastic_pools, sql_servers_deleted_databases, sql_servers_auditing,
- sql_servers_network, sql_servers_transparent_data_encryption, sql_servers_automatic_tuning_options,
- sql_servers_firewalls_and_vn, sql_servers_private_endpoint_connections])
+ [
+ sql_servers_info_meta,
+ sql_server_failover_group,
+ sql_server_backups,
+ sql_servers_active_directory_admin,
+ sql_servers_databases_info,
+ sql_servers_elastic_pools,
+ sql_servers_deleted_databases,
+ sql_servers_auditing,
+ sql_servers_network,
+ sql_servers_transparent_data_encryption,
+ sql_servers_automatic_tuning_options,
+ sql_servers_firewalls_and_vn,
+ sql_servers_private_endpoint_connections,
+ ]
+)
class DatabaseResource(CloudServiceResource):
- cloud_service_group = StringType(default='SQLServers')
+ cloud_service_group = StringType(default="SQLServers")
class SQLServerResource(DatabaseResource):
- cloud_service_type = StringType(default='Server')
+ cloud_service_type = StringType(default="Server")
data = ModelType(SQLServer)
- _metadata = ModelType(CloudServiceMeta, default=sql_servers_meta, serialized_name='metadata')
+ _metadata = ModelType(
+ CloudServiceMeta, default=sql_servers_meta, serialized_name="metadata"
+ )
name = StringType()
account = StringType(serialize_when_none=False)
instance_type = StringType(serialize_when_none=False)
diff --git a/src/spaceone/inventory/model/sql_servers/cloud_service_type.py b/src/spaceone/inventory/model/sql_servers/cloud_service_type.py
index 77d9db9d..e195a090 100644
--- a/src/spaceone/inventory/model/sql_servers/cloud_service_type.py
+++ b/src/spaceone/inventory/model/sql_servers/cloud_service_type.py
@@ -1,272 +1,477 @@
import os
from spaceone.inventory.libs.utils import *
-from spaceone.inventory.libs.schema.metadata.dynamic_widget import CardWidget, ChartWidget
-from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, SearchField, DateTimeDyField, ListDyField, \
- EnumDyField, SizeField
-from spaceone.inventory.libs.schema.cloud_service_type import CloudServiceTypeResource, CloudServiceTypeResponse, \
- CloudServiceTypeMeta
+from spaceone.inventory.libs.schema.metadata.dynamic_widget import (
+ CardWidget,
+ ChartWidget,
+)
+from spaceone.inventory.libs.schema.metadata.dynamic_field import (
+ TextDyField,
+ SearchField,
+ DateTimeDyField,
+ ListDyField,
+ EnumDyField,
+ SizeField,
+)
+from spaceone.inventory.libs.schema.cloud_service_type import (
+ CloudServiceTypeResource,
+ CloudServiceTypeResponse,
+ CloudServiceTypeMeta,
+)
from spaceone.inventory.conf.cloud_service_conf import ASSET_URL
current_dir = os.path.abspath(os.path.dirname(__file__))
-sql_databases_count_by_server_conf = os.path.join(current_dir, 'widget/sql_databases_count_by_server.yaml')
-sql_databases_count_by_subscription_conf = os.path.join(current_dir, 'widget/sql_databases_count_by_subscription.yaml')
-sql_databases_count_by_tier_conf = os.path.join(current_dir, 'widget/sql_databases_count_by_tier.yaml')
-sql_servers_count_by_account_conf = os.path.join(current_dir, 'widget/sql_servers_count_by_account.yaml')
-sql_servers_count_by_region_conf = os.path.join(current_dir, 'widget/sql_servers_count_by_region.yaml')
-sql_servers_count_by_subscription_conf = os.path.join(current_dir, 'widget/sql_servers_count_by_subscription.yaml')
-sql_servers_failover_count_by_region_conf = os.path.join(current_dir, 'widget/sql_servers_failover_count_by_region.yaml')
-sql_servers_failover_count_by_server_conf = os.path.join(current_dir, 'widget/sql_servers_failover_count_by_server.yaml')
-sql_servers_total_count_conf = os.path.join(current_dir, 'widget/sql_servers_total_count.yaml')
-
+sql_databases_count_by_server_conf = os.path.join(
+ current_dir, "widget/sql_databases_count_by_server.yaml"
+)
+sql_databases_count_by_subscription_conf = os.path.join(
+ current_dir, "widget/sql_databases_count_by_subscription.yaml"
+)
+sql_databases_count_by_tier_conf = os.path.join(
+ current_dir, "widget/sql_databases_count_by_tier.yaml"
+)
+sql_servers_count_by_account_conf = os.path.join(
+ current_dir, "widget/sql_servers_count_by_account.yaml"
+)
+sql_servers_count_by_region_conf = os.path.join(
+ current_dir, "widget/sql_servers_count_by_region.yaml"
+)
+sql_servers_count_by_subscription_conf = os.path.join(
+ current_dir, "widget/sql_servers_count_by_subscription.yaml"
+)
+sql_servers_failover_count_by_region_conf = os.path.join(
+ current_dir, "widget/sql_servers_failover_count_by_region.yaml"
+)
+sql_servers_failover_count_by_server_conf = os.path.join(
+ current_dir, "widget/sql_servers_failover_count_by_server.yaml"
+)
+sql_servers_total_count_conf = os.path.join(
+ current_dir, "widget/sql_servers_total_count.yaml"
+)
cst_sql_servers = CloudServiceTypeResource()
-cst_sql_servers.name = 'Server'
-cst_sql_servers.group = 'SQLServers'
-cst_sql_servers.service_code = 'Microsoft.Sql/servers'
-cst_sql_servers.labels = ['Database']
+cst_sql_servers.name = "Server"
+cst_sql_servers.group = "SQLServers"
+cst_sql_servers.service_code = "Microsoft.Sql/servers"
+cst_sql_servers.labels = ["Database"]
cst_sql_servers.is_primary = True
cst_sql_servers.is_major = True
cst_sql_servers.tags = {
- 'spaceone:icon': f'{ASSET_URL}/azure-sql-servers.svg',
+ "spaceone:icon": f"{ASSET_URL}/azure-sql-servers.svg",
}
cst_sql_servers._metadata = CloudServiceTypeMeta.set_meta(
fields=[
- EnumDyField.data_source('Status', 'data.state', default_state={
- 'safe': ['Ready'],
- 'warning': ['Disabled']
- }),
- TextDyField.data_source('Resource Group', 'data.resource_group'),
- TextDyField.data_source('Location', 'data.location'),
- TextDyField.data_source('Subscription Name', 'data.subscription_name'),
-
+ EnumDyField.data_source(
+ "Status",
+ "data.state",
+ default_state={"safe": ["Ready"], "warning": ["Disabled"]},
+ ),
+ TextDyField.data_source("Resource Group", "data.resource_group"),
+ TextDyField.data_source("Location", "data.location"),
+ TextDyField.data_source("Subscription Name", "data.subscription_name"),
# is_optional fields - Default
- TextDyField.data_source('Subscription ID', 'account', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Server Admin', 'data.administrator_login', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Active Directory Admin', 'data.azure_ad_admin_name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Server Name', 'data.fully_qualified_domain_name', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Subscription ID", "account", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Server Admin", "data.administrator_login", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Active Directory Admin",
+ "data.azure_ad_admin_name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Server Name",
+ "data.fully_qualified_domain_name",
+ options={"is_optional": True},
+ ),
# is_optional fields - Failover Groups
- TextDyField.data_source('Failover Group ID', 'data.failover_groups.id', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Failover Group Name', 'data.failover_groups.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Failover Groups Primary Server', 'data.failover_groups.primary_server', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Failover Groups Secondary Server', 'data.failover_groups.secondary_server', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Read/Write Failover Policy', 'data.failover_groups.failover_policy_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Grace Period (minutes)', 'data.failover_groups.grace_period_display', options={
- 'is_optional': True
- }),
+ TextDyField.data_source(
+ "Failover Group ID",
+ "data.failover_groups.id",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Failover Group Name",
+ "data.failover_groups.name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Failover Groups Primary Server",
+ "data.failover_groups.primary_server",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Failover Groups Secondary Server",
+ "data.failover_groups.secondary_server",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Read/Write Failover Policy",
+ "data.failover_groups.failover_policy_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Grace Period (minutes)",
+ "data.failover_groups.grace_period_display",
+ options={"is_optional": True},
+ ),
# is_optional fields - Backups
- TextDyField.data_source('Backup Database', 'data.databases.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Backup Earliest PITR Restore Point (UTC)', 'data.databases.earliest_restore_date', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Backup Available LTR backups', 'data.databases.long_term_retention_backup_resource_id', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Backup Database", "data.databases.name", options={"is_optional": True}
+ ),
+ TextDyField.data_source(
+ "Backup Earliest PITR Restore Point (UTC)",
+ "data.databases.earliest_restore_date",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Backup Available LTR backups",
+ "data.databases.long_term_retention_backup_resource_id",
+ options={"is_optional": True},
+ ),
# is_optional fields - Active Directory Admin
- TextDyField.data_source('Active Directory Admin', 'data.azure_ad_admin_name', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Active Directory Admin",
+ "data.azure_ad_admin_name",
+ options={"is_optional": True},
+ ),
# is_optional fields - Elastic Pools
- TextDyField.data_source('Elastic Pool Name', 'data.elastic_pools.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Elastic Pool Resource Group', 'data.elastic_pools.resource_group_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Per DB Settings', 'data.elastic_pools.per_db_settings_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Pricing Tier', 'data.elastic_pools.pricing_tier_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('# of DBs', 'data.elastic_pools.number_of_databases', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Elastic Pool Unit', 'data.elastic_pools.unit_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Elastic Pool Server Name', 'data.elastic_pools.server_name_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Elastic Pool Resource Configuration', 'data.elastic_pools.pricing_tier_display', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Elastic Pool Maximum Storage Size', 'data.elastic_pools.max_size_gb', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Elastic Pool Name",
+ "data.elastic_pools.name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Elastic Pool Resource Group",
+ "data.elastic_pools.resource_group_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Per DB Settings",
+ "data.elastic_pools.per_db_settings_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Pricing Tier",
+ "data.elastic_pools.pricing_tier_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "# of DBs",
+ "data.elastic_pools.number_of_databases",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Elastic Pool Unit",
+ "data.elastic_pools.unit_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Elastic Pool Server Name",
+ "data.elastic_pools.server_name_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Elastic Pool Resource Configuration",
+ "data.elastic_pools.pricing_tier_display",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Elastic Pool Maximum Storage Size",
+ "data.elastic_pools.max_size_gb",
+ options={"is_optional": True},
+ ),
# is_optional fields - Deleted Databases
- TextDyField.data_source('Deleted Database', 'data.deleted_databases.database_name', options={
- 'is_optional': True
- }),
- DateTimeDyField.data_source('Deletion Time (UTC)', 'data.deleted_databases.deletion_date', options={
- 'is_optional': True
- }),
- DateTimeDyField.data_source('Deleted Databases Creation Time (UTC)', 'data.deleted_databases.creation_date', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Deleted Databases Edition Time (UTC)', 'data.deleted_databases.edition', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Deleted Database",
+ "data.deleted_databases.database_name",
+ options={"is_optional": True},
+ ),
+ DateTimeDyField.data_source(
+ "Deletion Time (UTC)",
+ "data.deleted_databases.deletion_date",
+ options={"is_optional": True},
+ ),
+ DateTimeDyField.data_source(
+ "Deleted Databases Creation Time (UTC)",
+ "data.deleted_databases.creation_date",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Deleted Databases Edition Time (UTC)",
+ "data.deleted_databases.edition",
+ options={"is_optional": True},
+ ),
# is_optional fields - Auditing
- TextDyField.data_source('Audit Log Destination', 'data.server_auditing_settings.storage_endpoint', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Audit Storage Account ID', 'data.server_auditing_settings.storage_account_subscription_id', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Audit Log Destination",
+ "data.server_auditing_settings.storage_endpoint",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Audit Storage Account ID",
+ "data.server_auditing_settings.storage_account_subscription_id",
+ options={"is_optional": True},
+ ),
# is_optional fields - Firewalls and Vnets
- TextDyField.data_source('Minimum TLS Version', 'data.minimal_tls_version', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Connection Policy', 'data.server_auditing_settings.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Allow Azure Services and Resources to Access this server',
- 'data.server_auditing_settings.is_azure_monitor_target_enabled', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Minimum TLS Version",
+ "data.minimal_tls_version",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Connection Policy",
+ "data.server_auditing_settings.name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Allow Azure Services and Resources to Access this server",
+ "data.server_auditing_settings.is_azure_monitor_target_enabled",
+ options={"is_optional": True},
+ ),
# is_optional fields - Firewall Rules
- TextDyField.data_source('Firewall Rule Name', 'data.firewall_rules.name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Firewall Start IP', 'data.firewall_rules.start_ip_address', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Firewall End IP', 'data.firewall_rules.end_ip_address', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Firewall Rule Name",
+ "data.firewall_rules.name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Firewall Start IP",
+ "data.firewall_rules.start_ip_address",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Firewall End IP",
+ "data.firewall_rules.end_ip_address",
+ options={"is_optional": True},
+ ),
# is_optional fields - Private Endpoint Connections
- TextDyField.data_source('Private Endpoint Connection ID', 'data.private_endpoint_connections.connection_id', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Private Endpoint State', 'data.private_endpoint_connections.status', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Private Endpoint Name', 'data.private_endpoint_connections.private_endpoint_name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Request / Response Message', 'data.private_endpoint_connections.description', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Private Endpoint Connection ID",
+ "data.private_endpoint_connections.connection_id",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Private Endpoint State",
+ "data.private_endpoint_connections.status",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Private Endpoint Name",
+ "data.private_endpoint_connections.private_endpoint_name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Request / Response Message",
+ "data.private_endpoint_connections.description",
+ options={"is_optional": True},
+ ),
# is_optional fields - Transparent Data Encryption
- TextDyField.data_source('Transparent Data Encryption', 'data.encryption_protectors.kind', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Encryption Key', 'data.encryption_protectors.server_key_name', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Encryption Key Type', 'data.encryption_protectors.server_key_type', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Encryption URI', 'data.encryption_protectors.uri', options={
- 'is_optional': True
- }),
-
+ TextDyField.data_source(
+ "Transparent Data Encryption",
+ "data.encryption_protectors.kind",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Encryption Key",
+ "data.encryption_protectors.server_key_name",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Encryption Key Type",
+ "data.encryption_protectors.server_key_type",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Encryption URI",
+ "data.encryption_protectors.uri",
+ options={"is_optional": True},
+ ),
# is_optional fields - Automatic Tuning
- TextDyField.data_source('Tuning Type', 'data.server_automatic_tuning.options.tuning_type', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Tuning Desired State', 'data.server_automatic_tuning.options.desired_state', options={
- 'is_optional': True
- }),
- TextDyField.data_source('Tuning Current State', 'data.server_automatic_tuning.options.actual_state', options={
- 'is_optional': True
- })
+ TextDyField.data_source(
+ "Tuning Type",
+ "data.server_automatic_tuning.options.tuning_type",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Tuning Desired State",
+ "data.server_automatic_tuning.options.desired_state",
+ options={"is_optional": True},
+ ),
+ TextDyField.data_source(
+ "Tuning Current State",
+ "data.server_automatic_tuning.options.actual_state",
+ options={"is_optional": True},
+ ),
],
search=[
- SearchField.set(name='Subscription ID', key='account'),
- SearchField.set(name='Subscription Name', key='data.subscription_name'),
- SearchField.set(name='Resource Group', key='data.resource_group'),
- SearchField.set(name='Location', key='data.location'),
- SearchField.set(name='Server Admin', key='data.administrator_login'),
- SearchField.set(name='Active Directory Admin', key='data.azure_ad_admin_name'),
- SearchField.set(name='Server Name', key='data.fully_qualified_domain_name'),
- SearchField.set(name='Failover Group ID', key='data.failover_groups.id'),
- SearchField.set(name='Failover Group Name', key='data.failover_groups.name'),
- SearchField.set(name='Failover Groups Primary Server', key='data.failover_groups.primary_server'),
- SearchField.set(name='Failover Groups Secondary Server', key='data.failover_groups.secondary_server'),
- SearchField.set(name='Read/Write Failover Policy', key='data.failover_groups.failover_policy_display'),
- SearchField.set(name='Grace Period (minutes)', key='data.failover_groups.grace_period_display', data_type='integer'),
- SearchField.set(name='Backup Database', key='data.databases.name'),
- SearchField.set(name='Backup Earliest PITR Restore Point (UTC)', key='data.databases.earliest_restore_date', data_type='datetime'),
- SearchField.set(name='Backup Available LTR backups', key='data.databases.long_term_retention_backup_resource_id'),
- SearchField.set(name='Active Directory Admin', key='data.azure_ad_admin_name'),
- SearchField.set(name='Elastic Pool Name', key='data.elastic_pools.name'),
- SearchField.set(name='Elastic Pool Resource Group', key='data.elastic_pools.resource_group_display'),
- SearchField.set(name='Per DB Settings', key='data.elastic_pools.per_db_settings_display'),
- SearchField.set(name='Pricing Tier', key='data.elastic_pools.pricing_tier_display'),
- SearchField.set(name='Number of DBs', key='data.elastic_pools.number_of_databases', data_type='integer'),
- SearchField.set(name='Elastic Pool Unit', key='data.elastic_pools.unit_display'),
- SearchField.set(name='Elastic Pool Server Name', key='data.elastic_pools.server_name_display'),
- SearchField.set(name='Elastic Pool Resource Configuration', key='data.elastic_pools.pricing_tier_display'),
- SearchField.set(name='Elastic Pool Maximum Storage Size', key='data.elastic_pools.max_size_gb'),
- SearchField.set(name='Deleted Database', key='data.deleted_databases.database_name'),
- SearchField.set(name='Deletion Time (UTC)', key='data.deleted_databases.deletion_date', data_type='datetime'),
- SearchField.set(name='Deleted Databases Creation Time (UTC)', key='data.deleted_databases.creation_date', data_type='datetime'),
- SearchField.set(name='Deleted Databases Edition Time (UTC)', key='data.deleted_databases.edition', data_type='datetime'),
- SearchField.set(name='Audit Log Destination', key='data.server_auditing_settings.storage_endpoint'),
- SearchField.set(name='Audit Storage Account ID', key='data.server_auditing_settings.storage_account_subscription_id'),
- SearchField.set(name='Minimum TLS Version', key='data.minimal_tls_version'),
- SearchField.set(name='Connection Policy', key='data.server_auditing_settings.name'),
- SearchField.set(name='Allow Azure Services and Resources to Access this server', key='data.server_auditing_settings.is_azure_monitor_target_enabled'),
- SearchField.set(name='Firewall Rule Name', key='data.firewall_rules.name'),
- SearchField.set(name='Firewall Start IP', key='data.firewall_rules.start_ip_address'),
- SearchField.set(name='Firewall End IP', key='data.firewall_rules.end_ip_address'),
- SearchField.set(name='Private Endpoint Connection ID', key='data.private_endpoint_connections.connection_id'),
- SearchField.set(name='Private Endpoint State', key='data.private_endpoint_connections.status'),
- SearchField.set(name='Private Endpoint Name', key='data.private_endpoint_connections.private_endpoint_name'),
- SearchField.set(name='Request / Response Message', key='data.private_endpoint_connections.description'),
- SearchField.set(name='Transparent Data Encryption', key='data.encryption_protectors.kind'),
- SearchField.set(name='Encryption Key', key='data.encryption_protectors.server_key_name'),
- SearchField.set(name='Encryption Key Type', key='data.encryption_protectors.server_key_type'),
- SearchField.set(name='Encryption URI', key='data.encryption_protectors.uri'),
- SearchField.set(name='Tuning Type', key='data.server_automatic_tuning.options.tuning_type'),
- SearchField.set(name='Tuning Desired State', key='data.server_automatic_tuning.options.desired_state'),
- SearchField.set(name='Tuning Current State', key='data.server_automatic_tuning.options.actual_state'),
+ SearchField.set(name="Subscription ID", key="account"),
+ SearchField.set(name="Subscription Name", key="data.subscription_name"),
+ SearchField.set(name="Resource Group", key="data.resource_group"),
+ SearchField.set(name="Location", key="data.location"),
+ SearchField.set(name="Server Admin", key="data.administrator_login"),
+ SearchField.set(name="Active Directory Admin", key="data.azure_ad_admin_name"),
+ SearchField.set(name="Server Name", key="data.fully_qualified_domain_name"),
+ SearchField.set(name="Failover Group ID", key="data.failover_groups.id"),
+ SearchField.set(name="Failover Group Name", key="data.failover_groups.name"),
+ SearchField.set(
+ name="Failover Groups Primary Server",
+ key="data.failover_groups.primary_server",
+ ),
+ SearchField.set(
+ name="Failover Groups Secondary Server",
+ key="data.failover_groups.secondary_server",
+ ),
+ SearchField.set(
+ name="Read/Write Failover Policy",
+ key="data.failover_groups.failover_policy_display",
+ ),
+ SearchField.set(
+ name="Grace Period (minutes)",
+ key="data.failover_groups.grace_period_display",
+ data_type="integer",
+ ),
+ SearchField.set(name="Backup Database", key="data.databases.name"),
+ SearchField.set(
+ name="Backup Earliest PITR Restore Point (UTC)",
+ key="data.databases.earliest_restore_date",
+ data_type="datetime",
+ ),
+ SearchField.set(
+ name="Backup Available LTR backups",
+ key="data.databases.long_term_retention_backup_resource_id",
+ ),
+ SearchField.set(name="Active Directory Admin", key="data.azure_ad_admin_name"),
+ SearchField.set(name="Elastic Pool Name", key="data.elastic_pools.name"),
+ SearchField.set(
+ name="Elastic Pool Resource Group",
+ key="data.elastic_pools.resource_group_display",
+ ),
+ SearchField.set(
+ name="Per DB Settings", key="data.elastic_pools.per_db_settings_display"
+ ),
+ SearchField.set(
+ name="Pricing Tier", key="data.elastic_pools.pricing_tier_display"
+ ),
+ SearchField.set(
+ name="Number of DBs",
+ key="data.elastic_pools.number_of_databases",
+ data_type="integer",
+ ),
+ SearchField.set(
+ name="Elastic Pool Unit", key="data.elastic_pools.unit_display"
+ ),
+ SearchField.set(
+ name="Elastic Pool Server Name",
+ key="data.elastic_pools.server_name_display",
+ ),
+ SearchField.set(
+ name="Elastic Pool Resource Configuration",
+ key="data.elastic_pools.pricing_tier_display",
+ ),
+ SearchField.set(
+ name="Elastic Pool Maximum Storage Size",
+ key="data.elastic_pools.max_size_gb",
+ ),
+ SearchField.set(
+ name="Deleted Database", key="data.deleted_databases.database_name"
+ ),
+ SearchField.set(
+ name="Deletion Time (UTC)",
+ key="data.deleted_databases.deletion_date",
+ data_type="datetime",
+ ),
+ SearchField.set(
+ name="Deleted Databases Creation Time (UTC)",
+ key="data.deleted_databases.creation_date",
+ data_type="datetime",
+ ),
+ SearchField.set(
+ name="Deleted Databases Edition Time (UTC)",
+ key="data.deleted_databases.edition",
+ data_type="datetime",
+ ),
+ SearchField.set(
+ name="Audit Log Destination",
+ key="data.server_auditing_settings.storage_endpoint",
+ ),
+ SearchField.set(
+ name="Audit Storage Account ID",
+ key="data.server_auditing_settings.storage_account_subscription_id",
+ ),
+ SearchField.set(name="Minimum TLS Version", key="data.minimal_tls_version"),
+ SearchField.set(
+ name="Connection Policy", key="data.server_auditing_settings.name"
+ ),
+ SearchField.set(
+ name="Allow Azure Services and Resources to Access this server",
+ key="data.server_auditing_settings.is_azure_monitor_target_enabled",
+ ),
+ SearchField.set(name="Firewall Rule Name", key="data.firewall_rules.name"),
+ SearchField.set(
+ name="Firewall Start IP", key="data.firewall_rules.start_ip_address"
+ ),
+ SearchField.set(
+ name="Firewall End IP", key="data.firewall_rules.end_ip_address"
+ ),
+ SearchField.set(
+ name="Private Endpoint Connection ID",
+ key="data.private_endpoint_connections.connection_id",
+ ),
+ SearchField.set(
+ name="Private Endpoint State",
+ key="data.private_endpoint_connections.status",
+ ),
+ SearchField.set(
+ name="Private Endpoint Name",
+ key="data.private_endpoint_connections.private_endpoint_name",
+ ),
+ SearchField.set(
+ name="Request / Response Message",
+ key="data.private_endpoint_connections.description",
+ ),
+ SearchField.set(
+ name="Transparent Data Encryption", key="data.encryption_protectors.kind"
+ ),
+ SearchField.set(
+ name="Encryption Key", key="data.encryption_protectors.server_key_name"
+ ),
+ SearchField.set(
+ name="Encryption Key Type", key="data.encryption_protectors.server_key_type"
+ ),
+ SearchField.set(name="Encryption URI", key="data.encryption_protectors.uri"),
+ SearchField.set(
+ name="Tuning Type", key="data.server_automatic_tuning.options.tuning_type"
+ ),
+ SearchField.set(
+ name="Tuning Desired State",
+ key="data.server_automatic_tuning.options.desired_state",
+ ),
+ SearchField.set(
+ name="Tuning Current State",
+ key="data.server_automatic_tuning.options.actual_state",
+ ),
],
widget=[
ChartWidget.set(**get_data_from_yaml(sql_servers_count_by_account_conf)),
ChartWidget.set(**get_data_from_yaml(sql_servers_count_by_region_conf)),
ChartWidget.set(**get_data_from_yaml(sql_servers_count_by_subscription_conf)),
- ChartWidget.set(**get_data_from_yaml(sql_servers_failover_count_by_region_conf)),
- ChartWidget.set(**get_data_from_yaml(sql_servers_failover_count_by_server_conf)),
+ ChartWidget.set(
+ **get_data_from_yaml(sql_servers_failover_count_by_region_conf)
+ ),
+ ChartWidget.set(
+ **get_data_from_yaml(sql_servers_failover_count_by_server_conf)
+ ),
ChartWidget.set(**get_data_from_yaml(sql_databases_count_by_server_conf)),
ChartWidget.set(**get_data_from_yaml(sql_databases_count_by_subscription_conf)),
ChartWidget.set(**get_data_from_yaml(sql_databases_count_by_tier_conf)),
- CardWidget.set(**get_data_from_yaml(sql_servers_total_count_conf))
- ]
+ CardWidget.set(**get_data_from_yaml(sql_servers_total_count_conf)),
+ ],
)
CLOUD_SERVICE_TYPES = [
- CloudServiceTypeResponse({'resource': cst_sql_servers}),
+ CloudServiceTypeResponse({"resource": cst_sql_servers}),
]
diff --git a/src/spaceone/inventory/model/sql_servers/data.py b/src/spaceone/inventory/model/sql_servers/data.py
index 038ab542..c674077a 100644
--- a/src/spaceone/inventory/model/sql_servers/data.py
+++ b/src/spaceone/inventory/model/sql_servers/data.py
@@ -1,12 +1,20 @@
from schematics import Model
-from schematics.types import ModelType, ListType, StringType, IntType, BooleanType, FloatType, DateTimeType
+from schematics.types import (
+ ModelType,
+ ListType,
+ StringType,
+ IntType,
+ BooleanType,
+ FloatType,
+ DateTimeType,
+)
from spaceone.inventory.libs.schema.resource import AzureCloudService, AzureTags
class ResourceIdentity(Model):
principal_id = StringType(serialize_when_none=False)
tenant_id = StringType(serialize_when_none=False)
- type = StringType(choices=('None', 'SystemAssigned', 'UserAssigned'))
+ type = StringType(choices=("None", "SystemAssigned", "UserAssigned"))
class PrivateEndpointProperty(Model):
@@ -14,16 +22,22 @@ class PrivateEndpointProperty(Model):
class PrivateLinkServiceConnectionStateProperty(Model):
- actions_required = StringType(choices=('None', ''), serialize_when_none=False)
+ actions_required = StringType(choices=("None", ""), serialize_when_none=False)
description = StringType(serialize_when_none=False)
- status = StringType(choices=('Approved', 'Disconnected', 'Pending', 'Rejected'), serialize_when_none=False)
+ status = StringType(
+ choices=("Approved", "Disconnected", "Pending", "Rejected"),
+ serialize_when_none=False,
+ )
class PrivateEndpointConnectionProperties(Model):
private_endpoint = ModelType(PrivateEndpointProperty, serialize_when_none=False)
- private_link_service_connection_state = ModelType(PrivateLinkServiceConnectionStateProperty,
- serialize_when_none=False)
- provisioning_state = StringType(choices=('Approving', 'Dropping', 'Failed', 'Ready', 'Rejecting'))
+ private_link_service_connection_state = ModelType(
+ PrivateLinkServiceConnectionStateProperty, serialize_when_none=False
+ )
+ provisioning_state = StringType(
+ choices=("Approving", "Dropping", "Failed", "Ready", "Rejecting")
+ )
class ServerPrivateEndpointConnection(Model):
@@ -38,26 +52,41 @@ class ServerPrivateEndpointConnection(Model):
class ServerAzureADAdministrator(Model):
id = StringType()
name = StringType(serialize_when_none=False)
- administrator_type = StringType(choices=('ActiveDirectory', ''), serialize_when_none=False)
+ administrator_type = StringType(
+ choices=("ActiveDirectory", ""), serialize_when_none=False
+ )
login = StringType(serialize_when_none=False)
sid = StringType(serialize_when_none=False)
tenant_id = StringType(serialize_when_none=False)
class AutomaticTuningServerOptions(Model):
- actual_state = StringType(choices=('Off', 'On'), serialize_when_none=False)
- desired_state = StringType(choices=('Default', 'Off', 'On'), serialize_when_none=False)
+ actual_state = StringType(choices=("Off", "On"), serialize_when_none=False)
+ desired_state = StringType(
+ choices=("Default", "Off", "On"), serialize_when_none=False
+ )
reason_code = IntType(serialize_when_none=False)
- reason_desc = StringType(choices=('AutoConfigured', 'Default', 'Disabled'), serialize_when_none=False)
- tuning_type = StringType(choices=('createIndex', 'dropIndex', 'forceLastGoodPlan'), serialize_when_none=False)
+ reason_desc = StringType(
+ choices=("AutoConfigured", "Default", "Disabled"), serialize_when_none=False
+ )
+ tuning_type = StringType(
+ choices=("createIndex", "dropIndex", "forceLastGoodPlan"),
+ serialize_when_none=False,
+ )
class ServerAutomaticTuning(Model):
name = StringType()
id = StringType()
- actual_state = StringType(choices=('Auto', 'Custom', 'Unspecified'), serialize_when_none=False)
- desired_state = StringType(choices=('Default', 'Off', 'On'), serialize_when_none=False)
- options = ListType(ModelType(AutomaticTuningServerOptions, serialize_when_none=False))
+ actual_state = StringType(
+ choices=("Auto", "Custom", "Unspecified"), serialize_when_none=False
+ )
+ desired_state = StringType(
+ choices=("Default", "Off", "On"), serialize_when_none=False
+ )
+ options = ListType(
+ ModelType(AutomaticTuningServerOptions, serialize_when_none=False)
+ )
type = StringType(serialize_when_none=False)
@@ -69,25 +98,31 @@ class ServerBlobAuditingPolicy(Model):
is_storage_secondary_key_in_use = BooleanType(serialize_when_none=False)
queue_delay_ms = IntType(serialize_when_none=False)
retention_days = IntType(serialize_when_none=False)
- state = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
+ state = StringType(choices=("Disabled", "Enabled"), serialize_when_none=False)
storage_account_access_key = StringType(serialize_when_none=False)
storage_account_subscription_id = StringType(serialize_when_none=False)
- storage_endpoint = StringType(default='-')
+ storage_endpoint = StringType(default="-")
type = StringType(serialize_when_none=False)
class PartnerInfo(Model):
id = StringType()
location = StringType()
- replication_role = StringType(choices=('Primary', 'Secondary'), serialize_when_none=False)
+ replication_role = StringType(
+ choices=("Primary", "Secondary"), serialize_when_none=False
+ )
class FailoverGroupReadOnlyEndpoint(Model):
- failover_policy = StringType(choices=('Disabled', 'Enabled'), serialize_when_none=False)
+ failover_policy = StringType(
+ choices=("Disabled", "Enabled"), serialize_when_none=False
+ )
class FailoverGroupReadWriteEndpoint(Model):
- failover_policy = StringType(choices=('Automatic', 'Manual'), serialize_when_none=False)
+ failover_policy = StringType(
+ choices=("Automatic", "Manual"), serialize_when_none=False
+ )
failover_with_data_loss_grace_period_minutes = IntType(serialize_when_none=False)
@@ -99,9 +134,15 @@ class FailoverGroup(Model):
partner_servers = ListType(ModelType(PartnerInfo), serialize_when_none=False)
primary_server = StringType(serialize_when_none=False)
secondary_server = StringType(serialize_when_none=False)
- read_only_endpoint = ModelType(FailoverGroupReadOnlyEndpoint, serialize_when_none=False)
- read_write_endpoint = ModelType(FailoverGroupReadWriteEndpoint, serialize_when_none=False)
- replication_role = StringType(choices=('Primary', 'Secondary'), serialize_when_none=False)
+ read_only_endpoint = ModelType(
+ FailoverGroupReadOnlyEndpoint, serialize_when_none=False
+ )
+ read_write_endpoint = ModelType(
+ FailoverGroupReadWriteEndpoint, serialize_when_none=False
+ )
+ replication_role = StringType(
+ choices=("Primary", "Secondary"), serialize_when_none=False
+ )
replication_state = StringType(serialize_when_none=False)
failover_policy_display = StringType(serialize_when_none=False)
grace_period_display = StringType(serialize_when_none=False)
@@ -128,14 +169,19 @@ class SyncGroupSchema(Model):
class SyncGroup(Model):
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- conflict_resolution_policy = StringType(choices=('HubWin', 'MemberWin'), serialize_when_none=False)
+ conflict_resolution_policy = StringType(
+ choices=("HubWin", "MemberWin"), serialize_when_none=False
+ )
hub_database_password = StringType(serialize_when_none=False)
hub_database_user_name = StringType(serialize_when_none=False)
interval = IntType(serialize_when_none=False)
last_sync_time = DateTimeType(serialize_when_none=False)
schema = ModelType(SyncGroupSchema, serialize_when_none=False)
sync_database_id = StringType(serialize_when_none=False)
- sync_state = StringType(choices=('Error', 'Good', 'NotReady', 'Progressing', 'Warning'), serialize_when_none=False)
+ sync_state = StringType(
+ choices=("Error", "Good", "NotReady", "Progressing", "Warning"),
+ serialize_when_none=False,
+ )
type = StringType(serialize_when_none=False)
@@ -146,7 +192,9 @@ class SyncAgent(Model):
is_up_to_date = BooleanType(serialize_when_none=False)
last_alive_time = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- state = StringType(choices=('NeverConnected', 'Offline', 'Online'), serialize_when_none=False)
+ state = StringType(
+ choices=("NeverConnected", "Offline", "Online"), serialize_when_none=False
+ )
sync_database_id = StringType(serialize_when_none=False)
version = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -191,14 +239,21 @@ class ReplicationLink(Model):
is_termination_allowed = BooleanType(serialize_when_none=False)
partner_database = StringType(serialize_when_none=False)
partner_location = StringType(serialize_when_none=False)
- partner_role = StringType(choices=('Copy', 'NonReadableSecondary', 'Primary', 'Secondary', 'Source'),
- serialize_when_none=False)
- partner_server = StringType(default='-')
+ partner_role = StringType(
+ choices=("Copy", "NonReadableSecondary", "Primary", "Secondary", "Source"),
+ serialize_when_none=False,
+ )
+ partner_server = StringType(default="-")
percent_complete = IntType(serialize_when_none=False)
replication_mode = StringType(serialize_when_none=False)
- replication_state = StringType(choices=('CATCH_UP', 'PENDING', 'SEEDING', 'SUSPENDED'), serialize_when_none=False)
- role = StringType(choices=('Copy', 'NonReadableSecondary', 'Primary', 'Secondary', 'Source'),
- serialize_when_none=False)
+ replication_state = StringType(
+ choices=("CATCH_UP", "PENDING", "SEEDING", "SUSPENDED"),
+ serialize_when_none=False,
+ )
+ role = StringType(
+ choices=("Copy", "NonReadableSecondary", "Primary", "Secondary", "Source"),
+ serialize_when_none=False,
+ )
start_time = DateTimeType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -221,14 +276,28 @@ class Database(Model):
subscription_id = StringType(serialize_when_none=False)
resource_group = StringType(serialize_when_none=False)
- administrator_login = StringType(default='-')
+ administrator_login = StringType(default="-")
auto_pause_delay = IntType(serialize_when_none=False)
- catalog_collation = StringType(choices=('DATABASE_DEFAULT', 'SQL_Latin1_General_CP1_CI_AS'),
- serialize_when_none=False)
+ catalog_collation = StringType(
+ choices=("DATABASE_DEFAULT", "SQL_Latin1_General_CP1_CI_AS"),
+ serialize_when_none=False,
+ )
collation = StringType(serialize_when_none=False)
- create_mode = StringType(choices=(
- 'Copy', 'Default', 'OnlineSecondary', 'PointInTimeRestore', 'Recovery', 'Restore', 'RestoreExternalBackup',
- 'RestoreExternalBackupSecondary', 'RestoreLongTermRetentionBackup', 'Secondary'), serialize_when_none=False)
+ create_mode = StringType(
+ choices=(
+ "Copy",
+ "Default",
+ "OnlineSecondary",
+ "PointInTimeRestore",
+ "Recovery",
+ "Restore",
+ "RestoreExternalBackup",
+ "RestoreExternalBackupSecondary",
+ "RestoreLongTermRetentionBackup",
+ "Secondary",
+ ),
+ serialize_when_none=False,
+ )
creation_date = DateTimeType(serialize_when_none=False)
current_service_objective_name = StringType(serialize_when_none=False)
current_sku = ModelType(Sku, serialize_when_none=False)
@@ -238,42 +307,71 @@ class Database(Model):
elastic_pool_id = StringType(serialize_when_none=False)
failover_group_id = StringType(serialize_when_none=False)
high_availability_replica_count = IntType(serialize_when_none=False)
- license_type = StringType(choices=('BasePrice', 'LicenseIncluded'), serialize_when_none=False)
- long_term_retention_backup_resource_id = StringType(default='-')
+ license_type = StringType(
+ choices=("BasePrice", "LicenseIncluded"), serialize_when_none=False
+ )
+ long_term_retention_backup_resource_id = StringType(default="-")
maintenance_configuration_id = StringType(serialize_when_none=False)
max_log_size_bytes = IntType(serialize_when_none=False)
max_size_bytes = IntType(serialize_when_none=False)
max_size_gb = FloatType(serialize_when_none=False)
min_capacity = FloatType(serialize_when_none=False)
paused_date = DateTimeType(serialize_when_none=False)
- read_scale = StringType(choices=('Disabled', 'Enabled'), default='Disabled')
+ read_scale = StringType(choices=("Disabled", "Enabled"), default="Disabled")
recoverable_database_id = StringType(serialize_when_none=False)
recovery_services_recovery_point_id = StringType(serialize_when_none=False)
requested_service_objective_name = StringType(serialize_when_none=False)
restorable_dropped_database_id = StringType(serialize_when_none=False)
restore_point_in_time = StringType(serialize_when_none=False)
resumed_date = DateTimeType(serialize_when_none=False)
- sample_name = StringType(choices=('AdventureWorksLT', 'WideWorldImportersFull', 'WideWorldImportersStd'),
- serialize_when_none=False)
- secondary_type = StringType(choices=('Geo', 'Named'), serialize_when_none=False)
+ sample_name = StringType(
+ choices=("AdventureWorksLT", "WideWorldImportersFull", "WideWorldImportersStd"),
+ serialize_when_none=False,
+ )
+ secondary_type = StringType(choices=("Geo", "Named"), serialize_when_none=False)
source_database_deletion_date = StringType(serialize_when_none=False)
source_database_id = StringType(serialize_when_none=False)
- status = StringType(choices=(
- 'AutoClosed', 'Copying', 'Creating', 'Disabled', 'EmergencyMode', 'Inaccessible', 'Offline',
- 'OfflineChangingDwPerformanceTiers', 'OfflineSecondary', 'Online',
- 'OnlineChangingDwPerformanceTiers', 'Paused', 'Pausing', 'Recovering', 'RecoveryPending', 'Restoring',
- 'Resuming', 'Scaling', 'Shutdown', 'Standby', 'Suspect'), serialize_when_none=False)
- storage_account_type = StringType(choices=('GRS', 'LRS', 'ZRS'), serialize_when_none=False)
+ status = StringType(
+ choices=(
+ "AutoClosed",
+ "Copying",
+ "Creating",
+ "Disabled",
+ "EmergencyMode",
+ "Inaccessible",
+ "Offline",
+ "OfflineChangingDwPerformanceTiers",
+ "OfflineSecondary",
+ "Online",
+ "OnlineChangingDwPerformanceTiers",
+ "Paused",
+ "Pausing",
+ "Recovering",
+ "RecoveryPending",
+ "Restoring",
+ "Resuming",
+ "Scaling",
+ "Shutdown",
+ "Standby",
+ "Suspect",
+ ),
+ serialize_when_none=False,
+ )
+ storage_account_type = StringType(
+ choices=("GRS", "LRS", "ZRS"), serialize_when_none=False
+ )
zone_redundant = BooleanType(serialize_when_none=False)
- diagnostic_settings_resource = ListType(ModelType(DiagnosticSettingsResource), serialize_when_none=False)
+ diagnostic_settings_resource = ListType(
+ ModelType(DiagnosticSettingsResource), serialize_when_none=False
+ )
replication_link = ListType(ModelType(ReplicationLink), serialize_when_none=False)
sync_group = ListType(ModelType(SyncGroup), serialize_when_none=False)
sync_agent = ListType(ModelType(SyncAgent), serialize_when_none=False)
sync_group_display = ListType(StringType, serialize_when_none=False)
sync_agent_display = ListType(StringType, serialize_when_none=False)
sku = ModelType(Sku, serialize_when_none=False)
- pricing_tier_display = StringType(default='-')
- service_tier_display = StringType(default='-')
+ pricing_tier_display = StringType(default="-")
+ service_tier_display = StringType(default="-")
compute_tier = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -289,19 +387,25 @@ class ElasticPool(Model):
kind = StringType(serialize_when_none=False)
location = StringType()
creation_date = DateTimeType(serialize_when_none=False)
- license_type = StringType(choices=('BasePrice', 'LicenseIncluded'), default='BasePrice')
+ license_type = StringType(
+ choices=("BasePrice", "LicenseIncluded"), default="BasePrice"
+ )
maintenance_configuration_id = StringType(serialize_when_none=False)
max_size_bytes = IntType(serialize_when_none=False)
max_size_gb = FloatType(serialize_when_none=False, default=0)
- per_database_settings = ModelType(ElasticPoolPerDatabaseSettings, serialize_when_none=False)
- state = StringType(choices=('Creating', 'Disabled', 'Ready'), serialize_when_none=False)
+ per_database_settings = ModelType(
+ ElasticPoolPerDatabaseSettings, serialize_when_none=False
+ )
+ state = StringType(
+ choices=("Creating", "Disabled", "Ready"), serialize_when_none=False
+ )
zone_redundant = BooleanType(serialize_when_none=False)
sku = ModelType(Sku)
per_db_settings_display = StringType(serialize_when_none=False)
pricing_tier_display = StringType(serialize_when_none=False)
databases = ListType(ModelType(Database))
number_of_databases = IntType(serialize_when_none=False, default=0)
- unit_display = StringType(serialize_when_none=False),
+ unit_display = (StringType(serialize_when_none=False),)
server_name_display = StringType(serialize_when_none=False)
resource_group_display = StringType(serialize_when_none=False)
tags = ModelType(AzureTags)
@@ -314,7 +418,9 @@ class EncryptionProtector(Model):
location = StringType()
name = StringType()
server_key_name = StringType(serialize_when_none=False)
- server_key_type = StringType(choices=('AzureKeyVault', 'ServiceManaged'), default='ServiceManaged')
+ server_key_type = StringType(
+ choices=("AzureKeyVault", "ServiceManaged"), default="ServiceManaged"
+ )
subregion = StringType(serialize_when_none=False)
thumbprint = StringType(serialize_when_none=False)
uri = StringType(serialize_when_none=False)
@@ -330,7 +436,7 @@ class RestorableDroppedDatabase(Model):
deletion_date = DateTimeType(serialize_when_none=False)
earliest_restore_date = DateTimeType(serialize_when_none=False)
edition = StringType(serialize_when_none=False)
- elastic_pool_name = StringType(default='-')
+ elastic_pool_name = StringType(default="-")
max_size_bytes = StringType(serialize_when_none=False)
service_level_objective = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -342,8 +448,10 @@ class VirtualNetworkRule(Model):
subscription_id = StringType(serialize_when_none=False)
resource_group = StringType(serialize_when_none=False)
ignore_missing_vnet_service_endpoint = BooleanType(serialize_when_none=False)
- state = StringType(choices=('Deleting', 'InProgress', 'Initializing', 'Ready', 'Unknown'),
- serialize_when_none=False)
+ state = StringType(
+ choices=("Deleting", "InProgress", "Initializing", "Ready", "Unknown"),
+ serialize_when_none=False,
+ )
virtual_network_subnet_id = StringType(serialize_when_none=False)
virtual_network_name_display = StringType(serialize_when_none=False)
type = StringType(serialize_when_none=False)
@@ -367,24 +475,39 @@ class SQLServer(AzureCloudService):
location = StringType()
type = StringType()
administrator_login = StringType(serialize_when_none=False)
- azure_ad_admin_name = StringType(default='Not configured')
+ azure_ad_admin_name = StringType(default="Not configured")
administrator_login_password = StringType(serialize_when_none=False)
- encryption_protectors = ListType(ModelType(EncryptionProtector), serialize_when_none=False)
+ encryption_protectors = ListType(
+ ModelType(EncryptionProtector), serialize_when_none=False
+ )
fully_qualified_domain_name = StringType(serialize_when_none=False)
- minimal_tls_version = StringType(choices=('1.0', '1.1', '1.2'), serialize_when_none=False)
+ minimal_tls_version = StringType(
+ choices=("1.0", "1.1", "1.2"), serialize_when_none=False
+ )
private_endpoint_connections = ListType(ModelType(ServerPrivateEndpointConnection))
- public_network_access = StringType(choices=('Disabled', 'Enabled'))
+ public_network_access = StringType(choices=("Disabled", "Enabled"))
state = StringType(serialize_when_none=False)
version = StringType(serialize_when_none=False)
- azure_ad_administrator = ModelType(ServerAzureADAdministrator, serialize_when_none=False)
- server_automatic_tuning = ModelType(ServerAutomaticTuning, serialize_when_none=False)
+ administrators = ModelType(ServerAzureADAdministrator, serialize_when_none=False)
+ azure_ad_administrators = ListType(
+ ModelType(ServerAzureADAdministrator, serialize_when_none=False)
+ )
+ server_automatic_tuning = ModelType(
+ ServerAutomaticTuning, serialize_when_none=False
+ )
server_automatic_tuning_display = BooleanType(serialize_when_none=False)
- server_auditing_settings = ModelType(ServerBlobAuditingPolicy, serialize_when_none=False)
+ server_auditing_settings = ModelType(
+ ServerBlobAuditingPolicy, serialize_when_none=False
+ )
failover_groups = ListType(ModelType(FailoverGroup), serialize_when_none=False)
databases = ListType(ModelType(Database), serialize_when_none=False)
elastic_pools = ListType(ModelType(ElasticPool), serialize_when_none=False)
- deleted_databases = ListType(ModelType(RestorableDroppedDatabase), serialize_when_none=False)
- virtual_network_rules = ListType(ModelType(VirtualNetworkRule), serialize_when_none=False)
+ deleted_databases = ListType(
+ ModelType(RestorableDroppedDatabase), serialize_when_none=False
+ )
+ virtual_network_rules = ListType(
+ ModelType(VirtualNetworkRule), serialize_when_none=False
+ )
firewall_rules = ListType(ModelType(FirewallRule), serialize_when_none=False)
def reference(self):
diff --git a/src/spaceone/inventory/model/virtual_machines/cloud_service.py b/src/spaceone/inventory/model/virtual_machines/cloud_service.py
index 853ee2cf..d37ea67e 100644
--- a/src/spaceone/inventory/model/virtual_machines/cloud_service.py
+++ b/src/spaceone/inventory/model/virtual_machines/cloud_service.py
@@ -1,169 +1,286 @@
-from schematics.types import ModelType, StringType, PolyModelType, FloatType, DateTimeType, DictType, ListType
-from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, DateTimeDyField, EnumDyField, SizeField, \
- ListDyField
-from spaceone.inventory.libs.schema.metadata.dynamic_layout import ItemDynamicLayout, TableDynamicLayout, \
- ListDynamicLayout
-from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse, CloudServiceMeta, Tags
+from schematics.types import (
+ ModelType,
+ StringType,
+ PolyModelType,
+ FloatType,
+ DateTimeType,
+ DictType,
+ ListType,
+)
+from spaceone.inventory.libs.schema.metadata.dynamic_field import (
+ TextDyField,
+ DateTimeDyField,
+ EnumDyField,
+ SizeField,
+ ListDyField,
+)
+from spaceone.inventory.libs.schema.metadata.dynamic_layout import (
+ ItemDynamicLayout,
+ TableDynamicLayout,
+ ListDynamicLayout,
+)
+from spaceone.inventory.libs.schema.cloud_service import (
+ CloudServiceResource,
+ CloudServiceResponse,
+ CloudServiceMeta,
+ Tags,
+)
from spaceone.inventory.model.virtual_machines.data import VirtualMachine
-'''
+"""
Virtual Machine
-'''
+"""
# TAB Default
-# instance
-virtual_machine = ItemDynamicLayout.set_fields('Virtual Machine', fields=[
- TextDyField.data_source('Resource ID', 'data.compute.instance_id'),
- TextDyField.data_source('VM ID', 'data.compute.tags.vm_id'),
- EnumDyField.data_source('VM State', 'data.compute.instance_state', default_state={
- 'safe': ['RUNNING'],
- 'warning': ['STARTING', 'DEALLOCATING', 'STOPPING', 'DEALLOCATING'],
- 'disable': ['DEALLOCATED'],
- 'alert': ['STOPPED']
- }),
- TextDyField.data_source('Instance Type', 'data.compute.instance_type'),
- TextDyField.data_source('Image', 'data.compute.image'),
- EnumDyField.data_source('Azure Priority', 'data.azure.priority', default_badge={
- 'indigo.500': ['Regular'], 'coral.600': ['Low'], 'peacock.600': ['Spot']
- }),
- TextDyField.data_source('Region', 'region_code'),
- TextDyField.data_source('Availability Zone', 'data.compute.az'),
- TextDyField.data_source('Key Pair', 'data.compute.keypair'),
- EnumDyField.data_source('Ultra SSD Enabled', 'data.azure.ultra_ssd_enabled', default_badge={
- 'indigo.500': ['true'], 'coral.600': ['false'],
- }),
- EnumDyField.data_source('Write Accelerator Enabled', 'data.azure.write_accelerator_enabled', default_badge={
- 'indigo.500': ['true'], 'coral.600': ['false']
- }),
- EnumDyField.data_source('Boot Diagnostics', 'data.azure.boot_diagnostics', default_badge={
- 'indigo.500': ['true'], 'coral.600': ['false']
- }),
- ListDyField.data_source('Public IP', 'data.nics', options={
- 'sub_key': 'public_ip_address',
-        'delimiter': '\n'
- }),
- ListDyField.data_source('Security Groups', 'data.compute.security_groups', options={
- 'sub_key': 'display',
-        'delimiter': '\n'
- }),
- DateTimeDyField.data_source('Launched At', 'data.compute.launched_at'),
-])
-
-vnet = ItemDynamicLayout.set_fields('Virtual Network', fields=[
- TextDyField.data_source('VNet ID', 'data.vnet.vnet_id'),
- TextDyField.data_source('VNet Name', 'data.vnet.vnet_name'),
- TextDyField.data_source('Subnet ID', 'data.subnet.subnet_id'),
- TextDyField.data_source('Subnet Name', 'data.subnet.subnet_name'),
-])
-
-vm_os = ItemDynamicLayout.set_fields('Operating System', fields=[
- TextDyField.data_source('OS Type', 'data.os.os_type', options={
- 'translation_id': 'PAGE_SCHEMA.OS_TYPE'
- }),
- TextDyField.data_source('OS Distribution', 'data.os.os_distro', options={
- 'translation_id': 'PAGE_SCHEMA.OS_DISTRO',
- }),
- TextDyField.data_source('OS Architecture', 'data.os.os_arch', options={
- 'translation_id': 'PAGE_SCHEMA.OS_ARCH',
- }),
- TextDyField.data_source('OS Version Details', 'data.os.details', options={
- 'translation_id': 'PAGE_SCHEMA.OS_DETAILS',
- }),
- TextDyField.data_source('OS License', 'data.os.os_license', options={
- 'translation_id': 'PAGE_SCHEMA.OS_LICENSE',
- }),
-])
-
-vm_hw = ItemDynamicLayout.set_fields('Hardware', fields=[
- TextDyField.data_source('Core', 'data.hardware.core', options={
- 'translation_id': 'PAGE_SCHEMA.CPU_CORE',
- }),
- TextDyField.data_source('Memory', 'data.hardware.memory', options={
- 'translation_id': 'PAGE_SCHEMA.MEMORY',
- }),
-])
-
-azure_vm = ListDynamicLayout.set_layouts('Azure VM', layouts=[virtual_machine, vm_os, vm_hw, vnet])
+# Instance
+virtual_machine = ItemDynamicLayout.set_fields(
+ "Virtual Machine",
+ fields=[
+ TextDyField.data_source("Resource ID", "data.compute.instance_id"),
+ TextDyField.data_source("VM ID", "data.compute.tags.vm_id"),
+ EnumDyField.data_source(
+ "VM State",
+ "data.compute.instance_state",
+ default_state={
+ "safe": ["RUNNING"],
+ "warning": ["STARTING", "DEALLOCATING", "STOPPING", "DEALLOCATING"],
+ "disable": ["DEALLOCATED"],
+ "alert": ["STOPPED"],
+ },
+ ),
+ TextDyField.data_source("Instance Type", "data.compute.instance_type"),
+ TextDyField.data_source("Image", "data.compute.image"),
+ EnumDyField.data_source(
+ "Azure Priority",
+ "data.azure.priority",
+ default_badge={
+ "indigo.500": ["Regular"],
+ "coral.600": ["Low"],
+ "peacock.600": ["Spot"],
+ },
+ ),
+ TextDyField.data_source("Region", "region_code"),
+ TextDyField.data_source("Availability Zone", "data.compute.az"),
+ TextDyField.data_source("Key Pair", "data.compute.keypair"),
+ EnumDyField.data_source(
+ "Ultra SSD Enabled",
+ "data.azure.ultra_ssd_enabled",
+ default_badge={
+ "indigo.500": ["true"],
+ "coral.600": ["false"],
+ },
+ ),
+ EnumDyField.data_source(
+ "Write Accelerator Enabled",
+ "data.azure.write_accelerator_enabled",
+ default_badge={"indigo.500": ["true"], "coral.600": ["false"]},
+ ),
+ EnumDyField.data_source(
+ "Boot Diagnostics",
+ "data.azure.boot_diagnostics",
+ default_badge={"indigo.500": ["true"], "coral.600": ["false"]},
+ ),
+ ListDyField.data_source(
+ "Public IP",
+ "data.nics",
+            options={"sub_key": "public_ip_address", "delimiter": "\n"},
+ ),
+ ListDyField.data_source(
+ "Security Groups",
+ "data.compute.security_groups",
+            options={"sub_key": "display", "delimiter": "\n"},
+ ),
+ DateTimeDyField.data_source("Launched At", "data.compute.launched_at"),
+ ],
+)
+
+vnet = ItemDynamicLayout.set_fields(
+ "Virtual Network",
+ fields=[
+ TextDyField.data_source("VNet ID", "data.vnet.vnet_id"),
+ TextDyField.data_source("VNet Name", "data.vnet.vnet_name"),
+ TextDyField.data_source("Subnet ID", "data.subnet.subnet_id"),
+ TextDyField.data_source("Subnet Name", "data.subnet.subnet_name"),
+ ],
+)
+
+vm_os = ItemDynamicLayout.set_fields(
+ "Operating System",
+ fields=[
+ TextDyField.data_source(
+ "OS Type",
+ "data.os.os_type",
+ options={"translation_id": "PAGE_SCHEMA.OS_TYPE"},
+ ),
+ TextDyField.data_source(
+ "OS Distribution",
+ "data.os.os_distro",
+ options={
+ "translation_id": "PAGE_SCHEMA.OS_DISTRO",
+ },
+ ),
+ TextDyField.data_source(
+ "OS Architecture",
+ "data.os.os_arch",
+ options={
+ "translation_id": "PAGE_SCHEMA.OS_ARCH",
+ },
+ ),
+ TextDyField.data_source(
+ "OS Version Details",
+ "data.os.details",
+ options={
+ "translation_id": "PAGE_SCHEMA.OS_DETAILS",
+ },
+ ),
+ TextDyField.data_source(
+ "OS License",
+ "data.os.os_license",
+ options={
+ "translation_id": "PAGE_SCHEMA.OS_LICENSE",
+ },
+ ),
+ ],
+)
+
+vm_hw = ItemDynamicLayout.set_fields(
+ "Hardware",
+ fields=[
+ TextDyField.data_source(
+ "Core",
+ "data.hardware.core",
+ options={
+ "translation_id": "PAGE_SCHEMA.CPU_CORE",
+ },
+ ),
+ TextDyField.data_source(
+ "Memory",
+ "data.hardware.memory",
+ options={
+ "translation_id": "PAGE_SCHEMA.MEMORY",
+ },
+ ),
+ ],
+)
+
+azure_vm = ListDynamicLayout.set_layouts(
+ "Azure VM", layouts=[virtual_machine, vm_os, vm_hw, vnet]
+)
# Tab Disk
-disk = TableDynamicLayout.set_fields('Disk', root_path='data.disks', fields=[
- TextDyField.data_source('Index', 'device_index'),
- TextDyField.data_source('Name', 'tags.disk_name'),
- SizeField.data_source('Size', 'size'),
- TextDyField.data_source('Disk ID', 'tags.disk_id'),
- TextDyField.data_source('Storage Account Type', 'tags.storage_Account_type'),
- TextDyField.data_source('IOPS', 'tags.iops'),
- TextDyField.data_source('Throughput (mbps)', 'tags.throughput_mbps'),
- TextDyField.data_source('Encryption Set', 'tags.disk_encryption_set'),
- TextDyField.data_source('Caching', 'tags.caching'),
-])
+disk = TableDynamicLayout.set_fields(
+ "Disk",
+ root_path="data.disks",
+ fields=[
+ TextDyField.data_source("Index", "device_index"),
+ TextDyField.data_source("Name", "tags.disk_name"),
+ SizeField.data_source("Size", "size"),
+ TextDyField.data_source("Disk ID", "tags.disk_id"),
+ TextDyField.data_source("Storage Account Type", "tags.storage_Account_type"),
+ TextDyField.data_source("IOPS", "tags.iops"),
+ TextDyField.data_source("Throughput (mbps)", "tags.throughput_mbps"),
+ TextDyField.data_source("Encryption Set", "tags.disk_encryption_set"),
+ TextDyField.data_source("Caching", "tags.caching"),
+ ],
+)
# Tab - NIC
-nic = TableDynamicLayout.set_fields('NIC', root_path='data.nics', fields=[
- TextDyField.data_source('Index', 'device_index'),
- TextDyField.data_source('Name', 'tags.name'),
-    ListDyField.data_source('IP Addresses', 'ip_addresses', options={'delimiter': '\n'}),
- TextDyField.data_source('Public IP', 'public_ip_address'),
- TextDyField.data_source('MAC Address', 'mac_address'),
- TextDyField.data_source('CIDR', 'cidr'),
- TextDyField.data_source('etag', 'tags.etag'),
- EnumDyField.data_source('Enable Accelerated Networking', 'tags.enable_accelerated_networking',
- default_badge={
- 'indigo.500': ['true'], 'coral.600': ['false']
- }),
- EnumDyField.data_source('Enable IP Forwarding', 'tags.enable_ip_forwarding', default_badge={
- 'indigo.500': ['true'], 'coral.600': ['false']
- }),
-])
+nic = TableDynamicLayout.set_fields(
+ "NIC",
+ root_path="data.nics",
+ fields=[
+ TextDyField.data_source("Index", "device_index"),
+ TextDyField.data_source("Name", "tags.name"),
+ ListDyField.data_source(
+            "IP Addresses", "ip_addresses", options={"delimiter": "\n"}
+ ),
+ TextDyField.data_source("Public IP", "public_ip_address"),
+ TextDyField.data_source("MAC Address", "mac_address"),
+ TextDyField.data_source("CIDR", "cidr"),
+ TextDyField.data_source("etag", "tags.etag"),
+ EnumDyField.data_source(
+ "Enable Accelerated Networking",
+ "tags.enable_accelerated_networking",
+ default_badge={"indigo.500": ["true"], "coral.600": ["false"]},
+ ),
+ EnumDyField.data_source(
+ "Enable IP Forwarding",
+ "tags.enable_ip_forwarding",
+ default_badge={"indigo.500": ["true"], "coral.600": ["false"]},
+ ),
+ ],
+)
# Tab - Security Group
-security_group = TableDynamicLayout.set_fields('Network Security Groups', root_path='data.security_group', fields=[
- EnumDyField.data_source('Direction', 'direction', default_badge={
- 'indigo.500': ['inbound'],
- 'coral.600': ['outbound']
- }),
- TextDyField.data_source('Name', 'security_group_name'),
- EnumDyField.data_source('Protocol', 'protocol', default_outline_badge=['ALL', 'TCP',
- 'UDP',
- 'ICMP']),
- TextDyField.data_source('Port Range', 'port'),
- TextDyField.data_source('Remote', 'remote'),
- TextDyField.data_source('Priority', 'priority'),
- EnumDyField.data_source('Action', 'action', default_badge={
- 'indigo.500': ['allow'], 'coral.600': ['deny']
- }),
- TextDyField.data_source('Description', 'description'),
-])
+security_group = TableDynamicLayout.set_fields(
+ "Network Security Groups",
+ root_path="data.security_group",
+ fields=[
+ EnumDyField.data_source(
+ "Direction",
+ "direction",
+ default_badge={"indigo.500": ["inbound"], "coral.600": ["outbound"]},
+ ),
+ TextDyField.data_source("Name", "security_group_name"),
+ EnumDyField.data_source(
+ "Protocol", "protocol", default_outline_badge=["ALL", "TCP", "UDP", "ICMP"]
+ ),
+ TextDyField.data_source("Port Range", "port"),
+ TextDyField.data_source("Remote", "remote"),
+ TextDyField.data_source("Priority", "priority"),
+ EnumDyField.data_source(
+ "Action",
+ "action",
+ default_badge={"indigo.500": ["allow"], "coral.600": ["deny"]},
+ ),
+ TextDyField.data_source("Description", "description"),
+ ],
+)
# Tab - Load Balancer
-lb = TableDynamicLayout.set_fields('Load Balancer', root_path='data.load_balancer', fields=[
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Endpoint', 'endpoint'),
- EnumDyField.data_source('Type', 'type', default_badge={
- 'indigo.500': ['network'], 'coral.600': ['application']
- }),
-    ListDyField.data_source('Protocol', 'protocol', options={'delimiter': '\n'}),
-    ListDyField.data_source('Port', 'port', options={'delimiter': '\n'}),
- EnumDyField.data_source('Scheme', 'scheme', default_badge={
- 'indigo.500': ['internet-facing'], 'coral.600': ['internal']
- }),
-])
-
-virtual_machine_meta = CloudServiceMeta.set_layouts([azure_vm, disk, nic, security_group, lb])
+lb = TableDynamicLayout.set_fields(
+ "Load Balancer",
+ root_path="data.load_balancer",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Endpoint", "endpoint"),
+ EnumDyField.data_source(
+ "Type",
+ "type",
+ default_badge={"indigo.500": ["network"], "coral.600": ["application"]},
+ ),
+        ListDyField.data_source("Protocol", "protocol", options={"delimiter": "\n"}),
+        ListDyField.data_source("Port", "port", options={"delimiter": "\n"}),
+ EnumDyField.data_source(
+ "Scheme",
+ "scheme",
+ default_badge={
+ "indigo.500": ["internet-facing"],
+ "coral.600": ["internal"],
+ },
+ ),
+ ],
+)
+
+virtual_machine_meta = CloudServiceMeta.set_layouts(
+ [azure_vm, disk, nic, security_group, lb]
+)
class ComputeResource(CloudServiceResource):
- cloud_service_group = StringType(default='VirtualMachines')
+ cloud_service_group = StringType(default="VirtualMachines")
class VirtualMachineResource(ComputeResource):
- cloud_service_type = StringType(default='Instance')
+ cloud_service_type = StringType(default="Instance")
data = ModelType(VirtualMachine)
- _metadata = ModelType(CloudServiceMeta, default=virtual_machine_meta, serialized_name='metadata')
+ _metadata = ModelType(
+ CloudServiceMeta, default=virtual_machine_meta, serialized_name="metadata"
+ )
name = StringType()
account = StringType(serialize_when_none=False)
ip_addresses = ListType(StringType())
- server_type = StringType(default='VM')
+ server_type = StringType(default="VM")
instance_type = StringType(serialize_when_none=False)
instance_size = FloatType(serialize_when_none=False)
launched_at = DateTimeType(serialize_when_none=False)
diff --git a/src/spaceone/inventory/model/virtual_machines/data.py b/src/spaceone/inventory/model/virtual_machines/data.py
index 8797b41e..8e9d88d7 100644
--- a/src/spaceone/inventory/model/virtual_machines/data.py
+++ b/src/spaceone/inventory/model/virtual_machines/data.py
@@ -1,8 +1,18 @@
from schematics import Model
-from schematics.types import ModelType, ListType, StringType, FloatType, DateTimeType, BooleanType, IntType
+from schematics.types import (
+ ModelType,
+ ListType,
+ StringType,
+ FloatType,
+ DateTimeType,
+ BooleanType,
+ IntType,
+)
from spaceone.inventory.libs.schema.resource import AzureCloudService, AzureTags
from spaceone.inventory.libs.schema.region import RegionResource
+from pydantic import BaseModel
+
# Activity Log
class ActivityLog(Model):
@@ -14,7 +24,7 @@ class Azure(Model):
ultra_ssd_enabled = BooleanType(default=False)
write_accelerator_enabled = BooleanType(default=False)
boot_diagnostics = BooleanType(default=True)
- priority = StringType(choices=('Regular', 'Low', 'Spot'), default='Regular')
+ priority = StringType(choices=("Regular", "Low", "Spot"), default="Regular")
tags = ListType(ModelType(AzureTags))
@@ -32,23 +42,34 @@ class ComputeTags(Model):
class Compute(Model):
keypair = StringType()
az = StringType()
- instance_state = StringType(choices=('STARTING', 'RUNNING', 'STOPPING', 'STOPPED', 'DEALLOCATING', 'DEALLOCATED'))
+ instance_state = StringType(
+ choices=(
+ "STARTING",
+ "RUNNING",
+ "STOPPING",
+ "STOPPED",
+ "DEALLOCATING",
+ "DEALLOCATED",
+ )
+ )
instance_type = StringType()
launched_at = DateTimeType()
- instance_id = StringType(default='')
- instance_name = StringType(default='')
+ instance_id = StringType(default="")
+ instance_name = StringType(default="")
security_groups = ListType(ModelType(SecurityGroups))
image = StringType()
- account = StringType(default='')
+ account = StringType(default="")
tags = ModelType(ComputeTags, default={})
# Disk
class DiskTags(Model):
disk_name = StringType()
- caching = StringType(choices=('None', 'ReadOnly', 'ReadWrite'))
- storage_account_type = StringType(choices=('Standard_LRS', 'Premium_LRS', 'StandardSSD_LRS', 'UltraSSD_LRS'))
- disk_encryption_set = StringType(choices=('PMK', 'CMK'), default='PMK')
+ caching = StringType(choices=("None", "ReadOnly", "ReadWrite"))
+ storage_account_type = StringType(
+ choices=("Standard_LRS", "Premium_LRS", "StandardSSD_LRS", "UltraSSD_LRS")
+ )
+ disk_encryption_set = StringType(choices=("PMK", "CMK"), default="PMK")
iops = IntType()
throughput_mbps = IntType()
disk_id = StringType()
@@ -56,8 +77,8 @@ class DiskTags(Model):
class Disk(Model):
device_index = IntType()
- device = StringType(default='')
- disk_type = StringType(choices=('os_disk', 'data_disk'))
+ device = StringType(default="")
+ disk_type = StringType(choices=("os_disk", "data_disk"))
size = FloatType()
tags = ModelType(DiskTags, default={})
@@ -74,12 +95,12 @@ class LoadBalancerTags(Model):
class LoadBalancer(Model):
- type = StringType(choices=('application', 'network'))
+ type = StringType(choices=("application", "network"))
endpoint = StringType()
port = ListType(IntType())
name = StringType()
protocol = ListType(StringType())
- scheme = StringType(choices=('internet-facing', 'internal'))
+ scheme = StringType(choices=("internet-facing", "internal"))
tags = ModelType(LoadBalancerTags, default={})
@@ -105,9 +126,9 @@ class NIC(Model):
# OS
class OS(Model):
os_distro = StringType()
- os_arch = StringType(default='x86_64')
+ os_arch = StringType(default="x86_64")
details = StringType()
- os_type = StringType(choices=('LINUX', 'WINDOWS'))
+ os_type = StringType(choices=("LINUX", "WINDOWS"))
# Resource Group
@@ -175,11 +196,11 @@ class VirtualMachine(AzureCloudService): # Main Class
subnet = ModelType(Subnet)
vmss = ModelType(VMSS, serialize_when_none=False)
activity_log = ModelType(ActivityLog, serialize_when_none=False)
- primary_ip_address = StringType(default='')
+ primary_ip_address = StringType(default="")
disks = ListType(ModelType(Disk))
nics = ListType(ModelType(NIC))
subscription = ModelType(Subscription)
- resource_group = ModelType(ResourceGroup)
+ resource_group = StringType(serialize_when_none=False)
def reference(self):
return {
diff --git a/src/spaceone/inventory/model/vm_scale_sets/cloud_service.py b/src/spaceone/inventory/model/vm_scale_sets/cloud_service.py
index 9bd7a809..d44ed302 100644
--- a/src/spaceone/inventory/model/vm_scale_sets/cloud_service.py
+++ b/src/spaceone/inventory/model/vm_scale_sets/cloud_service.py
@@ -1,175 +1,290 @@
-from schematics.types import ModelType, StringType, PolyModelType, FloatType, DateTimeType
+from schematics.types import (
+ ModelType,
+ StringType,
+ PolyModelType,
+ FloatType,
+ DateTimeType,
+)
from spaceone.inventory.model.vm_scale_sets.data import VirtualMachineScaleSet
-from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, DateTimeDyField, EnumDyField, \
- ListDyField, SizeField, StateItemDyField
-from spaceone.inventory.libs.schema.metadata.dynamic_layout import ItemDynamicLayout, TableDynamicLayout, \
- ListDynamicLayout, SimpleTableDynamicLayout
-from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse, CloudServiceMeta
+from spaceone.inventory.libs.schema.metadata.dynamic_field import (
+ TextDyField,
+ DateTimeDyField,
+ EnumDyField,
+ ListDyField,
+ SizeField,
+ StateItemDyField,
+)
+from spaceone.inventory.libs.schema.metadata.dynamic_layout import (
+ ItemDynamicLayout,
+ TableDynamicLayout,
+ ListDynamicLayout,
+ SimpleTableDynamicLayout,
+)
+from spaceone.inventory.libs.schema.cloud_service import (
+ CloudServiceResource,
+ CloudServiceResponse,
+ CloudServiceMeta,
+)
-'''
+"""
VM_SCALE_SET
-'''
+"""
# TAB - Default
-# instance termination notification(Configuration Tab), over provisioning, proximity placement group, Termination Notification
+# Instance termination notification(Configuration Tab), over provisioning, proximity placement group, Termination Notification
# application health monitoring(Health and repair Tab), Upgrade Policy(Upgrade Policy Tab),
-vm_scale_set_info_meta = ItemDynamicLayout.set_fields('VmScaleSets', fields=[
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Resource ID', 'data.id'),
- TextDyField.data_source('Resource Group', 'data.resource_group'),
- TextDyField.data_source('Location', 'data.location'),
- TextDyField.data_source('Subscription', 'data.subscription_name'),
- TextDyField.data_source('Subscription ID', 'account'),
- TextDyField.data_source('Instances', 'data.instance_count'),
- TextDyField.data_source('Operating System', 'data.virtual_machine_profile.os_profile.operating_system'),
- TextDyField.data_source('Size', 'instance_type'),
- TextDyField.data_source('Virtual network/subnet', 'data.virtual_machine_profile.network_profile.primary_vnet'),
- TextDyField.data_source('Host group', 'data.host_group.id'),
- TextDyField.data_source('Ephemeral OS Disk',
- 'data.virtual_machine_profile.storage_profile.os_disk.diff_disk_settings.option.local'),
- TextDyField.data_source('Azure Spot Eviction Policy', 'data.virtual_machine_profile.eviction_policy'),
- TextDyField.data_source('Azure Spot Max Price', 'data.virtual_machine_profile.billing_profile.max_price'),
- TextDyField.data_source('Termination Notification', 'data.terminate_notification_display'),
- TextDyField.data_source('OverProvisioning', 'data.overprovision'),
- TextDyField.data_source('Proximity Placement Group', 'data.proximity_placement_group_display'),
- TextDyField.data_source('Automatic Repairs', 'data.automatic_repairs_policy.enabled'),
- TextDyField.data_source('Upgrade Policy', 'data.upgrade_policy.mode'),
- TextDyField.data_source('Fault Domains', 'data.platform_fault_domain_count'),
-
-])
+vm_scale_set_info_meta = ItemDynamicLayout.set_fields(
+ "VmScaleSets",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Resource ID", "data.id"),
+ TextDyField.data_source("Resource Group", "data.resource_group"),
+ TextDyField.data_source("Location", "data.location"),
+ TextDyField.data_source("Subscription", "data.subscription_name"),
+ TextDyField.data_source("Subscription ID", "account"),
+ TextDyField.data_source("Instances", "data.instance_count"),
+ TextDyField.data_source(
+ "Operating System",
+ "data.virtual_machine_profile.os_profile.operating_system",
+ ),
+ TextDyField.data_source("Size", "instance_type"),
+ TextDyField.data_source(
+ "Virtual network/subnet",
+ "data.virtual_machine_profile.network_profile.primary_vnet",
+ ),
+ TextDyField.data_source("Host group", "data.host_group.id"),
+ TextDyField.data_source(
+ "Ephemeral OS Disk",
+ "data.virtual_machine_profile.storage_profile.os_disk.diff_disk_settings.option.local",
+ ),
+ TextDyField.data_source(
+ "Azure Spot Eviction Policy", "data.virtual_machine_profile.eviction_policy"
+ ),
+ TextDyField.data_source(
+ "Azure Spot Max Price",
+ "data.virtual_machine_profile.billing_profile.max_price",
+ ),
+ TextDyField.data_source(
+ "Termination Notification", "data.terminate_notification_display"
+ ),
+ TextDyField.data_source("OverProvisioning", "data.overprovision"),
+ TextDyField.data_source(
+ "Proximity Placement Group", "data.proximity_placement_group_display"
+ ),
+ TextDyField.data_source(
+ "Automatic Repairs", "data.automatic_repairs_policy.enabled"
+ ),
+ TextDyField.data_source("Upgrade Policy", "data.upgrade_policy.mode"),
+ TextDyField.data_source("Fault Domains", "data.platform_fault_domain_count"),
+ ],
+)
# TAB - Instances
# name, computer name, location, status, provisioning state, fault domain,
# protection policy, and latest model
-vm_scale_set_instance = TableDynamicLayout.set_fields('Instances', 'data.vm_instances', fields=[
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Computer Name', 'os_profile.computer_name'),
- TextDyField.data_source('Location', 'location'),
- EnumDyField.data_source('Status', 'vm_instance_status_profile.statuses.code', default_state={
- 'safe': ['PowerState/running', 'PowerState/starting'],
- 'warning':['PowerState/deallocated', 'PowerState/deallocating', 'PowerState/stopped', 'PowerState/stopping', 'PowerState/unknown']
- }),
- TextDyField.data_source('Provisioning State', 'provisioning_state'),
- TextDyField.data_source('Protection From Scale-in', 'protection_policy.protect_from_scale_in'),
- TextDyField.data_source('Protection From Scale-set Actions', 'protection_policy.protect_from_scale_set_actions'),
- TextDyField.data_source('Latest Model', 'latest_model_applied'),
- TextDyField.data_source('Virtual Network', 'primary_vnet')
-])
+vm_scale_set_instance = TableDynamicLayout.set_fields(
+ "Instances",
+ "data.vm_instances",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Computer Name", "os_profile.computer_name"),
+ TextDyField.data_source("Location", "location"),
+ EnumDyField.data_source(
+ "Status",
+ "vm_instance_status_profile.statuses.code",
+ default_state={
+ "safe": ["PowerState/running", "PowerState/starting"],
+ "warning": [
+ "PowerState/deallocated",
+ "PowerState/deallocating",
+ "PowerState/stopped",
+ "PowerState/stopping",
+ "PowerState/unknown",
+ ],
+ },
+ ),
+ TextDyField.data_source("Provisioning State", "provisioning_state"),
+ TextDyField.data_source(
+ "Protection From Scale-in", "protection_policy.protect_from_scale_in"
+ ),
+ TextDyField.data_source(
+ "Protection From Scale-set Actions",
+ "protection_policy.protect_from_scale_set_actions",
+ ),
+ TextDyField.data_source("Latest Model", "latest_model_applied"),
+ TextDyField.data_source("Virtual Network", "primary_vnet"),
+ ],
+)
# TAB - Networking
# IP Configuration, Network interface, Virtual Network, Accelerated Networking,
# Inbound /Outbound port rules(x) , Load balancing(x)
-vm_scale_set_info_networking = ItemDynamicLayout.set_fields('Networking',
- 'data.virtual_machine_profile.network_profile', fields=[
- TextDyField.data_source('Virtual Network', 'primary_vnet'),
- ])
-
-vm_scale_set_info_network_configuration = SimpleTableDynamicLayout.set_fields('Network Configuration',
- 'data.virtual_machine_profile.network_profile.network_interface_configurations',
- fields=[
- TextDyField.data_source('Name',
- 'name'),
- TextDyField.data_source(
- 'Network interface',
- 'enable_accelerated_networking_display'),
- TextDyField.data_source(
- 'Accelerated Networking',
- 'enable_accelerated_networking_display'),
- TextDyField.data_source('Primary',
- 'primary'),
- ])
-
-vm_scale_set_info_ip_configurations = SimpleTableDynamicLayout.set_fields('IP Configurations',
- 'data.virtual_machine_profile.network_profile.network_interface_configurations.ip_configurations',
- fields=[
-
- TextDyField.data_source(
- 'Public Ip Address Configuration',
- 'public_ip_address_configuration'),
- TextDyField.data_source(
- 'Private IP Address Version',
- 'private_ip_address_version'),
- ])
-
-vm_scale_set_info_network = ListDynamicLayout.set_layouts('Networking', layouts=[vm_scale_set_info_networking,
- vm_scale_set_info_network_configuration,
- vm_scale_set_info_ip_configurations])
+vm_scale_set_info_networking = ItemDynamicLayout.set_fields(
+ "Networking",
+ "data.virtual_machine_profile.network_profile",
+ fields=[
+ TextDyField.data_source("Virtual Network", "primary_vnet"),
+ ],
+)
+
+vm_scale_set_info_network_configuration = SimpleTableDynamicLayout.set_fields(
+ "Network Configuration",
+ "data.virtual_machine_profile.network_profile.network_interface_configurations",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source(
+ "Network interface", "enable_accelerated_networking_display"
+ ),
+ TextDyField.data_source(
+ "Accelerated Networking", "enable_accelerated_networking_display"
+ ),
+ TextDyField.data_source("Primary", "primary"),
+ ],
+)
+
+vm_scale_set_info_ip_configurations = SimpleTableDynamicLayout.set_fields(
+ "IP Configurations",
+ "data.virtual_machine_profile.network_profile.network_interface_configurations.ip_configurations",
+ fields=[
+ TextDyField.data_source(
+ "Public Ip Address Configuration", "public_ip_address_configuration"
+ ),
+ TextDyField.data_source(
+ "Private IP Address Version", "private_ip_address_version"
+ ),
+ ],
+)
+
+vm_scale_set_info_network = ListDynamicLayout.set_layouts(
+ "Networking",
+ layouts=[
+ vm_scale_set_info_networking,
+ vm_scale_set_info_network_configuration,
+ vm_scale_set_info_ip_configurations,
+ ],
+)
# TAB - Scaling
# Instance Count, Scale-in policy
-vm_scale_set_scaling_info = ItemDynamicLayout.set_fields('Scaling', fields=[
- TextDyField.data_source('Instance Count', 'data.instance_count'),
- ListDyField.data_source('Scale-in Policy', 'data.scale_in_policy.rules', options={
- 'delimiter': '
'
- })
-])
-
-vm_scale_set_scaling_rules = SimpleTableDynamicLayout.set_fields('Autoscale Settings', 'data.virtual_machine_scale_set_power_state', fields=[
- TextDyField.data_source('Name', 'name'),
- ListDyField.data_source('Profiles', 'profiles_display', options={
- 'delimiter': '
'
- }),
- TextDyField.data_source('Default', 'profiles.capacity.default'),
- TextDyField.data_source('Max', 'profiles.capacity.maximum'),
- TextDyField.data_source('Min', 'profiles.capacity.minimum'),
-
-])
-vm_scale_set_info_scaling = ListDynamicLayout.set_layouts('Scaling', layouts=[vm_scale_set_scaling_info, vm_scale_set_scaling_rules])
+vm_scale_set_scaling_info = ItemDynamicLayout.set_fields(
+ "Scaling",
+ fields=[
+ TextDyField.data_source("Instance Count", "data.instance_count"),
+ ListDyField.data_source(
+ "Scale-in Policy",
+ "data.scale_in_policy.rules",
+ options={"delimiter": "
"},
+ ),
+ ],
+)
+vm_scale_set_scaling_rules = SimpleTableDynamicLayout.set_fields(
+ "Autoscale Settings",
+ "data.virtual_machine_scale_set_power_state",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ ListDyField.data_source(
+ "Profiles", "profiles_display", options={"delimiter": "
"}
+ ),
+ TextDyField.data_source("Default", "profiles.capacity.default"),
+ TextDyField.data_source("Max", "profiles.capacity.maximum"),
+ TextDyField.data_source("Min", "profiles.capacity.minimum"),
+ ],
+)
+vm_scale_set_info_scaling = ListDynamicLayout.set_layouts(
+ "Scaling", layouts=[vm_scale_set_scaling_info, vm_scale_set_scaling_rules]
+)
# TAB - Disks OS Disks and Data Disks
# Image reference, Storage Type, Size, MAX iops, max throughput, encryption, host caching
# : LUN, Storage Type, Size, MAx iops, max throughput, encryption, host caching
-os_disk = ItemDynamicLayout.set_fields('OS Disk', 'data.virtual_machine_profile.storage_profile', fields=[
- TextDyField.data_source('Image Reference', 'image_reference_display'),
- TextDyField.data_source('Storage Account Type', 'os_disk.managed_disk.storage_account_type'),
- SizeField.data_source('Size', 'os_disk.disk_size_gb', options={
- 'source_unit': 'GB'
- }),
- TextDyField.data_source('Host Caching', 'os_disk.caching')
-
-])
-data_disks = SimpleTableDynamicLayout.set_fields('Data Disks', 'data.virtual_machine_profile.storage_profile.data_disks', fields=[
- TextDyField.data_source('Name', 'name'),
- TextDyField.data_source('Storage Type', 'managed_disk.storage_type'),
- SizeField.data_source('Size', 'disk_size_gb', options={
- 'source_unit': 'GB'
- }),
- TextDyField.data_source('Max IOPS', 'disk_iops_read_write'),
- TextDyField.data_source('MAX Throughput(MBps)', 'disk_m_bps_read_write'),
- TextDyField.data_source('Encryption', 'disk_encryption_set.id'),
- TextDyField.data_source('Host Caching', 'caching'),
- TextDyField.data_source('LUN', 'lun')
-])
-vm_scale_set_info_disk = ListDynamicLayout.set_layouts('Disks', layouts=[os_disk, data_disks])
+os_disk = ItemDynamicLayout.set_fields(
+ "OS Disk",
+ "data.virtual_machine_profile.storage_profile",
+ fields=[
+ TextDyField.data_source("Image Reference", "image_reference_display"),
+ TextDyField.data_source(
+ "Storage Account Type", "os_disk.managed_disk.storage_account_type"
+ ),
+ SizeField.data_source(
+ "Size", "os_disk.disk_size_gb", options={"source_unit": "GB"}
+ ),
+ TextDyField.data_source("Host Caching", "os_disk.caching"),
+ ],
+)
+data_disks = SimpleTableDynamicLayout.set_fields(
+ "Data Disks",
+ "data.virtual_machine_profile.storage_profile.data_disks",
+ fields=[
+ TextDyField.data_source("Name", "name"),
+ TextDyField.data_source("Storage Type", "managed_disk.storage_type"),
+ SizeField.data_source("Size", "disk_size_gb", options={"source_unit": "GB"}),
+ TextDyField.data_source("Max IOPS", "disk_iops_read_write"),
+ TextDyField.data_source("MAX Throughput(MBps)", "disk_m_bps_read_write"),
+ TextDyField.data_source("Encryption", "disk_encryption_set.id"),
+ TextDyField.data_source("Host Caching", "caching"),
+ TextDyField.data_source("LUN", "lun"),
+ ],
+)
+vm_scale_set_info_disk = ListDynamicLayout.set_layouts(
+ "Disks", layouts=[os_disk, data_disks]
+)
# TAB - Operating System
# Operating system, image reference, computer name prefix, administrator username,
# password authentication, vm agent, enable automatic OS upgrades, custom data and cloud init
-vm_scale_set_info_os_profile = ItemDynamicLayout.set_fields('Operating System', fields=[
- TextDyField.data_source('Computer Name Prefix', 'data.virtual_machine_profile.os_profile.computer_name_prefix'),
- TextDyField.data_source('Administrator Username', 'data.virtual_machine_profile.os_profile.admin_username'),
- TextDyField.data_source('Operating System', 'data.virtual_machine_profile.os_profile.operating_system'),
- TextDyField.data_source('VM Agent', 'data.virtual_machine_profile.os_profile.linux_configuration.provision_vm_agent'),
- TextDyField.data_source('Automatic OS Upgrades',
- 'data.upgrade_policy.automatic_os_upgrade_policy.enable_automatic_os_upgrade'),
- TextDyField.data_source('Custom Data', 'data.virtual_machine_profile.os_profile.custom_data')
- ])
+vm_scale_set_info_os_profile = ItemDynamicLayout.set_fields(
+ "Operating System",
+ fields=[
+ TextDyField.data_source(
+ "Computer Name Prefix",
+ "data.virtual_machine_profile.os_profile.computer_name_prefix",
+ ),
+ TextDyField.data_source(
+ "Administrator Username",
+ "data.virtual_machine_profile.os_profile.admin_username",
+ ),
+ TextDyField.data_source(
+ "Operating System",
+ "data.virtual_machine_profile.os_profile.operating_system",
+ ),
+ TextDyField.data_source(
+ "VM Agent",
+ "data.virtual_machine_profile.os_profile.linux_configuration.provision_vm_agent",
+ ),
+ TextDyField.data_source(
+ "Automatic OS Upgrades",
+ "data.upgrade_policy.automatic_os_upgrade_policy.enable_automatic_os_upgrade",
+ ),
+ TextDyField.data_source(
+ "Custom Data", "data.virtual_machine_profile.os_profile.custom_data"
+ ),
+ ],
+)
vm_scale_set_meta = CloudServiceMeta.set_layouts(
- [vm_scale_set_info_meta, vm_scale_set_instance, vm_scale_set_info_network, vm_scale_set_info_scaling,
- vm_scale_set_info_disk, vm_scale_set_info_os_profile])
+ [
+ vm_scale_set_info_meta,
+ vm_scale_set_instance,
+ vm_scale_set_info_network,
+ vm_scale_set_info_scaling,
+ vm_scale_set_info_disk,
+ vm_scale_set_info_os_profile,
+ ]
+)
class ComputeResource(CloudServiceResource):
- cloud_service_group = StringType(default='VMScaleSets')
+ cloud_service_group = StringType(default="VMScaleSets")
class VmScaleSetResource(ComputeResource):
- cloud_service_type = StringType(default='ScaleSet')
+ cloud_service_type = StringType(default="ScaleSet")
data = ModelType(VirtualMachineScaleSet)
- _metadata = ModelType(CloudServiceMeta, default=vm_scale_set_meta, serialized_name='metadata')
+ _metadata = ModelType(
+ CloudServiceMeta, default=vm_scale_set_meta, serialized_name="metadata"
+ )
name = StringType()
account = StringType(serialize_when_none=False)
instance_type = StringType(serialize_when_none=False)
diff --git a/src/spaceone/inventory/model/vm_scale_sets/data.py b/src/spaceone/inventory/model/vm_scale_sets/data.py
index 0b8d3c79..1b2aecff 100644
--- a/src/spaceone/inventory/model/vm_scale_sets/data.py
+++ b/src/spaceone/inventory/model/vm_scale_sets/data.py
@@ -1,6 +1,14 @@
from schematics import Model
-from schematics.types import ModelType, ListType, StringType, IntType, BooleanType, DateTimeType, TimedeltaType, \
- FloatType
+from schematics.types import (
+ ModelType,
+ ListType,
+ StringType,
+ IntType,
+ BooleanType,
+ DateTimeType,
+ TimedeltaType,
+ FloatType,
+)
from spaceone.inventory.libs.schema.resource import AzureCloudService, AzureTags
@@ -40,19 +48,36 @@ class AutoscaleNotification(Model):
class ScaleRuleMetricDimension(Model):
dimension_name = StringType(serialize_when_none=False)
- operator = StringType(choices=('Equals', 'NotEquals'), serialize_when_none=False)
+ operator = StringType(choices=("Equals", "NotEquals"), serialize_when_none=False)
values = ListType(StringType, serialize_when_none=False)
class MetricTrigger(Model):
- dimensions = ListType(ModelType(ScaleRuleMetricDimension), serialize_when_none=False)
+ dimensions = ListType(
+ ModelType(ScaleRuleMetricDimension), serialize_when_none=False
+ )
metric_name = StringType(serialize_when_none=False)
metric_namespace = StringType(serialize_when_none=False)
metric_resource_uri = StringType(serialize_when_none=False)
- operator = StringType(choices=('Equals', 'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual', 'NotEquals'), serialize_when_none=False)
- statistic = StringType(choices=('Average', 'Max', 'Min', 'Sum'), serialize_when_none=False)
+ operator = StringType(
+ choices=(
+ "Equals",
+ "GreaterThan",
+ "GreaterThanOrEqual",
+ "LessThan",
+ "LessThanOrEqual",
+ "NotEquals",
+ ),
+ serialize_when_none=False,
+ )
+ statistic = StringType(
+ choices=("Average", "Max", "Min", "Sum"), serialize_when_none=False
+ )
threshold = IntType(serialize_when_none=False)
- time_aggregation = StringType(choices=('Average', 'Count', 'Last', 'Maximum', 'Minimum', 'Total'), serialize_when_none=False)
+ time_aggregation = StringType(
+ choices=("Average", "Count", "Last", "Maximum", "Minimum", "Total"),
+ serialize_when_none=False,
+ )
time_grain = TimedeltaType(serialize_when_none=False)
time_window = TimedeltaType(serialize_when_none=False)
@@ -77,14 +102,21 @@ class RecurrentSchedule(Model):
class Recurrence(Model):
- frequency = StringType(choices=('Day', 'Hour', 'Minute', 'Month', 'None', 'Second', 'Week', 'Year'),
- serialize_when_none=False)
+ frequency = StringType(
+ choices=("Day", "Hour", "Minute", "Month", "None", "Second", "Week", "Year"),
+ serialize_when_none=False,
+ )
schedule = ModelType(RecurrentSchedule)
class ScaleAction(Model):
- direction = StringType(choices=('Decrease', 'Increase', 'None'), serialize_when_none=False)
- type = StringType(choices=('ChangeCount', 'ExactCount', 'PercentChangeCount'), serialize_when_none=False)
+ direction = StringType(
+ choices=("Decrease", "Increase", "None"), serialize_when_none=False
+ )
+ type = StringType(
+ choices=("ChangeCount", "ExactCount", "PercentChangeCount"),
+ serialize_when_none=False,
+ )
value = StringType(serialize_when_none=False)
cooldown = TimedeltaType(serialize_when_none=False)
@@ -107,7 +139,9 @@ class AutoscaleSettingResource(Model): # belongs to VmScaleSet
location = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
enabled = BooleanType(default=True)
- notifications = ListType(ModelType(AutoscaleNotification), serialize_when_none=False)
+ notifications = ListType(
+ ModelType(AutoscaleNotification), serialize_when_none=False
+ )
profiles = ListType(ModelType(AutoscaleProfile), serialize_when_none=False)
profiles_display = ListType(StringType, serialize_when_none=False)
target_resource_uri = StringType(serialize_when_none=False)
@@ -120,7 +154,9 @@ class AutoscaleSettingResourceCollection(Model):
value = ListType(ModelType(AutoscaleSettingResource), serialize_when_none=False)
-class ApiEntityReference(Model): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
+class ApiEntityReference(
+ Model
+): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
id = StringType(serialize_when_none=False)
@@ -139,11 +175,15 @@ class DiffDiskOptions(Model): # belongs to VmScaleSet >> DiffDiskSettings
class DiffDiskSettings(Model):
- option = ModelType(DiffDiskOptions, serialize_when_none=False)
- placement = StringType(choices=('CacheDisk', 'ResourceDisk'), serialize_when_none=False)
+ option = StringType(serialize_when_none=False)
+ placement = StringType(
+ choices=("CacheDisk", "ResourceDisk"), serialize_when_none=False
+ )
-class DiagnosticsProfile(Model): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
+class DiagnosticsProfile(
+ Model
+): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
boot_diagnostics = ModelType(BootDiagnostics, serialize_when_none=False)
@@ -156,7 +196,9 @@ class ImageReference(Model):
version = StringType(serialize_when_none=False)
-class SshPublicKey(Model): # belongs to VmScaleSet >> LinuxConfiguration >> SshConfiguration
+class SshPublicKey(
+ Model
+): # belongs to VmScaleSet >> LinuxConfiguration >> SshConfiguration
key_data = StringType(serialize_when_none=False)
path = StringType(serialize_when_none=False)
@@ -165,7 +207,9 @@ class SshConfiguration(Model): # belongs to VmScaleSet >> LinuxConfiguration
public_keys = ListType(ModelType(SshPublicKey), serialize_when_none=False)
-class LinuxConfiguration(Model): # belongs to VmScaleSet >> VirtualMachineScaleSetOSProfile
+class LinuxConfiguration(
+ Model
+): # belongs to VmScaleSet >> VirtualMachineScaleSetOSProfile
disable_password_authentication = BooleanType(serialize_when_none=False)
provision_vm_agent = BooleanType(serialize_when_none=False, default=True)
ssh = ModelType(SshConfiguration, serialize_when_none=False)
@@ -181,21 +225,27 @@ class Plan(Model): # belongs to VmScaleSet
class RollingUpgradePolicy(Model): # belongs to VmScaleSet >> UpgradePolicy
max_batch_instance_percent = IntType(default=20, serialize_when_none=False)
max_unhealthy_instance_percent = IntType(default=20, serialize_when_none=False)
- max_unhealthy_upgraded_instance_percent = IntType(default=20, serialize_when_none=False)
- pause_time_between_batches = StringType(default='PT0S', serialize_when_none=False)
+ max_unhealthy_upgraded_instance_percent = IntType(
+ default=20, serialize_when_none=False
+ )
+ pause_time_between_batches = StringType(default="PT0S", serialize_when_none=False)
class ScaleInPolicy(Model): # belongs to VmScaleSet
rules = ListType(StringType, serialize_when_none=False)
-class TerminateNotificationProfile(Model): # belongs to VmScaleSet >> ScheduledEventsProfile
+class TerminateNotificationProfile(
+ Model
+): # belongs to VmScaleSet >> ScheduledEventsProfile
enable = BooleanType(serialize_when_none=False)
- not_before_timeout = StringType(default='PT5M', serialize_when_none=False)
+ not_before_timeout = StringType(default="PT5M", serialize_when_none=False)
class ScheduledEventsProfile(Model): # belongs to VmScaleSet
- terminate_notification_profile = ModelType(TerminateNotificationProfile, serialize_when_none=False)
+ terminate_notification_profile = ModelType(
+ TerminateNotificationProfile, serialize_when_none=False
+ )
class SecurityProfile(Model): # belongs to VmScaleSet
@@ -211,7 +261,11 @@ class Settings(Model):
class Sku(Model): # belongs to VmScaleSet
capacity = IntType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- tier = StringType(choices=('Standard', 'Basic', '', None), default='Standard', serialize_when_none=False)
+ tier = StringType(
+ choices=("Standard", "Basic", "", None),
+ default="Standard",
+ serialize_when_none=False,
+ )
class SubResource(Model): # belongs to VmScaleSet
@@ -219,25 +273,35 @@ class SubResource(Model): # belongs to VmScaleSet
class InGuestPatchMode(Model): # belongs to VmScaleSet >> PatchSettings
- in_guest_patch_mode = StringType(choices=('AutomaticByOS', 'Manual', 'AutomaticByPlatform', '', None), serialize_when_none=False)
+ in_guest_patch_mode = StringType(
+ choices=("AutomaticByOS", "Manual", "AutomaticByPlatform", "", None),
+ serialize_when_none=False,
+ )
class PatchSettings(Model): # belongs to VmScaleSet
# patch_mode = ModelType(InGuestPatchMode, serialize_when_none=False)
- patch_mode = StringType(choices=('AutomaticByOS', 'Manual', 'AutomaticByPlatform'), serialize_when_none=False)
+ patch_mode = StringType(
+ choices=("AutomaticByOS", "Manual", "AutomaticByPlatform"),
+ serialize_when_none=False,
+ )
class AdditionalUnattendedContent(Model): # belongs to VmScaleSet
- component_name = StringType(choices=('Microsoft-Windows-Shell-Setup', ''), serialize_when_none=False)
+ component_name = StringType(
+ choices=("Microsoft-Windows-Shell-Setup", ""), serialize_when_none=False
+ )
content = StringType(serialize_when_none=False)
- pass_name = StringType(choices=('OobeSystem', '', None), serialize_when_none=False)
- setting_name = StringType(choices=('AutoLogon', 'FirstLogonCommands', '', None), serialize_when_none=False)
+ pass_name = StringType(choices=("OobeSystem", "", None), serialize_when_none=False)
+ setting_name = StringType(
+ choices=("AutoLogon", "FirstLogonCommands", "", None), serialize_when_none=False
+ )
class WinRMListener(Model):
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile >> WindowsConfiguration >> WinRMConfiguration
certificate_url = StringType(serialize_when_none=False)
- protocol_types = StringType(choices=('http', 'https'), serialize_when_none=False)
+ protocol_types = StringType(choices=("http", "https"), serialize_when_none=False)
class WinRMConfiguration(Model):
@@ -247,7 +311,9 @@ class WinRMConfiguration(Model):
class WindowsConfiguration(Model):
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
- additional_unattended_content = ListType(ModelType(AdditionalUnattendedContent), serialize_when_none=False)
+ additional_unattended_content = ListType(
+ ModelType(AdditionalUnattendedContent), serialize_when_none=False
+ )
enable_automatic_updates = BooleanType(serialize_when_none=False)
patch_settings = ModelType(PatchSettings, serialize_when_none=False)
provision_vm_agent = BooleanType(serialize_when_none=False)
@@ -256,16 +322,29 @@ class WindowsConfiguration(Model):
class UpgradePolicy(Model): # belongs to VmScaleSet
- automatic_os_upgrade_policy = ModelType(AutomaticOSUpgradePolicy, serialize_when_none=False)
- mode = StringType(choices=('Manual', 'Automatic', 'Rolling', None, ''), serialize_when_none=False)
+ automatic_os_upgrade_policy = ModelType(
+ AutomaticOSUpgradePolicy, serialize_when_none=False
+ )
+ mode = StringType(
+ choices=("Manual", "Automatic", "Rolling", None, ""), serialize_when_none=False
+ )
rolling_upgrade_policy = ModelType(RollingUpgradePolicy, serialize_when_none=False)
class VirtualMachineScaleSetIdentity(Model): # belongs to VmScaleSet
principal_id = StringType(serialize_when_none=False)
tenant_id = StringType(serialize_when_none=False)
- type = StringType(choices=('None', 'SystemAssigned', ' SystemAssigned,UserAssigned', 'UserAssigned', '', None),
- serialize_when_none=False)
+ type = StringType(
+ choices=(
+ "None",
+ "SystemAssigned",
+ " SystemAssigned,UserAssigned",
+ "UserAssigned",
+ "",
+ None,
+ ),
+ serialize_when_none=False,
+ )
class VirtualMachineScaleSetExtension(Model):
@@ -286,7 +365,9 @@ class VirtualMachineScaleSetExtension(Model):
class VirtualMachineScaleSetExtensionProfile(Model):
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
- extensions = ListType(ModelType(VirtualMachineScaleSetExtension), serialize_when_none=False)
+ extensions = ListType(
+ ModelType(VirtualMachineScaleSetExtension), serialize_when_none=False
+ )
extensions_time_budget = StringType(serialize_when_none=False) # ISO 8601 format
@@ -316,10 +397,17 @@ class VirtualMachineScaleSetPublicIPAddressConfiguration(Model):
# >> VirtualMachineScaleSetNetworkProfile >> VirtualMachineScaleSetNetworkConfiguration
# >> VirtualMachineScaleSetIPConfiguration
name = StringType()
- dns_settings = ModelType(VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings, serialize_when_none=False)
+ dns_settings = ModelType(
+ VirtualMachineScaleSetPublicIPAddressConfigurationDnsSettings,
+ serialize_when_none=False,
+ )
idle_timeout_in_minutes = IntType(serialize_when_none=False)
- ip_tags = ListType(ModelType(VirtualMachineScaleSetIpTag), serialize_when_none=False)
- public_ip_address_version = StringType(choices=('IPv4', 'IPv6'), default='IPv4', serialize_when_none=False)
+ ip_tags = ListType(
+ ModelType(VirtualMachineScaleSetIpTag), serialize_when_none=False
+ )
+ public_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), default="IPv4", serialize_when_none=False
+ )
public_ip_prefix = ModelType(SubResource, serialize_when_none=False)
@@ -328,14 +416,25 @@ class VirtualMachineScaleSetIPConfiguration(Model):
# >> VirtualMachineScaleSetNetworkProfile >> VirtualMachineScaleSetNetworkConfiguration
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- application_gateway_backend_address_pools = ListType(ModelType(SubResource), serialize_when_none=False)
- application_security_groups = ListType(ModelType(SubResource), serialize_when_none=False)
- load_balancer_backend_address_pools = ListType(ModelType(SubResource), serialize_when_none=False)
- load_balancer_inbound_nat_pools = ListType(ModelType(SubResource), serialize_when_none=False)
+ application_gateway_backend_address_pools = ListType(
+ ModelType(SubResource), serialize_when_none=False
+ )
+ application_security_groups = ListType(
+ ModelType(SubResource), serialize_when_none=False
+ )
+ load_balancer_backend_address_pools = ListType(
+ ModelType(SubResource), serialize_when_none=False
+ )
+ load_balancer_inbound_nat_pools = ListType(
+ ModelType(SubResource), serialize_when_none=False
+ )
primary = BooleanType(serialize_when_none=False)
- private_ip_address_version = StringType(choices=('IPv4', 'IPv6'), default='IPv4', serialize_when_none=False)
- public_ip_address_configuration = ModelType(VirtualMachineScaleSetPublicIPAddressConfiguration,
- serialize_when_none=False)
+ private_ip_address_version = StringType(
+ choices=("IPv4", "IPv6"), default="IPv4", serialize_when_none=False
+ )
+ public_ip_address_configuration = ModelType(
+ VirtualMachineScaleSetPublicIPAddressConfiguration, serialize_when_none=False
+ )
subnet = ModelType(ApiEntityReference, serialize_when_none=False)
@@ -343,10 +442,14 @@ class VirtualMachineScaleSetNetworkConfiguration(Model):
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile >> VirtualMachineScaleSetNetworkProfile
id = StringType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- dns_settings = ModelType(VirtualMachineScaleSetNetworkConfigurationDNSSettings, serialize_when_none=False)
+ dns_settings = ModelType(
+ VirtualMachineScaleSetNetworkConfigurationDNSSettings, serialize_when_none=False
+ )
enable_accelerated_networking = BooleanType(serialize_when_none=False)
enable_ip_forwarding = BooleanType(serialize_when_none=False)
- ip_configurations = ListType(ModelType(VirtualMachineScaleSetIPConfiguration), serialize_when_none=False)
+ ip_configurations = ListType(
+ ModelType(VirtualMachineScaleSetIPConfiguration), serialize_when_none=False
+ )
network_security_group = ModelType(SubResource, serialize_when_none=False)
primary = BooleanType(serialize_when_none=False)
@@ -357,24 +460,33 @@ class VaultCertificate(Model):
certificate_uri = StringType(serialize_when_none=False)
-class VaultSecretGroup(Model): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
+class VaultSecretGroup(
+ Model
+): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
source_vault = ModelType(SubResource, serialize_when_none=False)
- vault_certificates = ListType(ModelType(VaultCertificate), serialize_when_none=False)
+ vault_certificates = ListType(
+ ModelType(VaultCertificate), serialize_when_none=False
+ )
-class VirtualMachineScaleSetNetworkProfile(Model): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
+class VirtualMachineScaleSetNetworkProfile(
+ Model
+): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
health_probe = ModelType(ApiEntityReference, serialize_when_none=False)
- network_interface_configurations = ListType(ModelType(VirtualMachineScaleSetNetworkConfiguration),
- serialize_when_none=False)
+ network_interface_configurations = ListType(
+ ModelType(VirtualMachineScaleSetNetworkConfiguration), serialize_when_none=False
+ )
primary_vnet = StringType(serialize_when_none=False)
-class VirtualMachineScaleSetOSProfile(Model): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
+class VirtualMachineScaleSetOSProfile(
+ Model
+): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
admin_username = StringType(serialize_when_none=False)
admin_password = StringType(serialize_when_none=False)
computer_name_prefix = StringType(serialize_when_none=False)
- custom_data = StringType(serialize_when_none=False, default='')
+ custom_data = StringType(serialize_when_none=False, default="")
linux_configuration = ModelType(LinuxConfiguration, serialize_when_none=False)
secrets = ListType(ModelType(VaultSecretGroup), serialize_when_none=False)
windows_configuration = ModelType(WindowsConfiguration, serialize_when_none=False)
@@ -388,20 +500,30 @@ class DiskEncryptionSetParameters(Model):
class VirtualMachineScaleSetManagedDiskParameters(Model):
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
# >> VirtualMachineScaleSetStorageProfile >> VirtualMachineScaleSetDataDisk
- disk_encryption_set = ModelType(DiskEncryptionSetParameters, serialize_when_none=False)
+ disk_encryption_set = ModelType(
+ DiskEncryptionSetParameters, serialize_when_none=False
+ )
storage_account_type = StringType(serialize_when_none=False)
class VirtualMachineScaleSetDataDisk(Model):
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile >> VirtualMachineScaleSetStorageProfile
name = StringType(serialize_when_none=False)
- caching = StringType(choices=('None', 'ReadOnly', 'ReadWrite', '', None), serialize_when_none=False)
- create_option = StringType(choices=('Attach', 'Empty', 'FromImage', None, ''), default='Empty', serialize_when_none=False)
+ caching = StringType(
+ choices=("None", "ReadOnly", "ReadWrite", "", None), serialize_when_none=False
+ )
+ create_option = StringType(
+ choices=("Attach", "Empty", "FromImage", None, ""),
+ default="Empty",
+ serialize_when_none=False,
+ )
disk_iops_read_write = IntType(serialize_when_none=False)
disk_m_bps_read_write = IntType(serialize_when_none=False)
disk_size_gb = IntType(serialize_when_none=False)
lun = IntType(serialize_when_none=False)
- managed_disk = ModelType(VirtualMachineScaleSetManagedDiskParameters, serialize_when_none=False)
+ managed_disk = ModelType(
+ VirtualMachineScaleSetManagedDiskParameters, serialize_when_none=False
+ )
write_accelerator_enabled = BooleanType(serialize_when_none=False)
@@ -414,19 +536,35 @@ class VirtualHardDisk(Model):
class VirtualMachineScaleSetOSDisk(Model):
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile >> VirtualMachineScaleSetStorageProfile
name = StringType()
- caching = StringType(choices=('None', 'ReadOnly', 'ReadWrite'), default='None', serialize_when_none=False)
- create_option = StringType(choices=('Attach', 'Empty', 'FromImage'), default='Empty', serialize_when_none=False)
- diff_disk_settings = ModelType(DiffDiskSettings, serialize_when_none=False)
+ caching = StringType(
+ choices=("None", "ReadOnly", "ReadWrite"),
+ default="None",
+ serialize_when_none=False,
+ )
+ create_option = StringType(
+ choices=("Attach", "Empty", "FromImage"),
+ default="Empty",
+ serialize_when_none=False,
+ )
+ diff_disk_settings = ModelType(
+ DiffDiskSettings, default=None, serialize_when_none=False
+ )
disk_size_gb = IntType(serialize_when_none=False)
image = ModelType(VirtualHardDisk, serialize_when_none=False)
- managed_disk = ModelType(VirtualMachineScaleSetManagedDiskParameters, serialize_when_none=False)
- os_type = StringType(choices=('Linux', 'Windows'), serialize_when_none=False)
+ managed_disk = ModelType(
+ VirtualMachineScaleSetManagedDiskParameters, serialize_when_none=False
+ )
+ os_type = StringType(choices=("Linux", "Windows"), serialize_when_none=False)
vhd_containers = ListType(StringType, serialize_when_none=False)
write_accelerator_enabled = BooleanType(serialize_when_none=False)
-class VirtualMachineScaleSetStorageProfile(Model): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
- data_disks = ListType(ModelType(VirtualMachineScaleSetDataDisk), serialize_when_none=False)
+class VirtualMachineScaleSetStorageProfile(
+ Model
+): # belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
+ data_disks = ListType(
+ ModelType(VirtualMachineScaleSetDataDisk), serialize_when_none=False
+ )
image_reference = ModelType(ImageReference, serialize_when_none=False)
os_disk = ModelType(VirtualMachineScaleSetOSDisk, serialize_when_none=False)
image_reference_display = StringType(serialize_when_none=False)
@@ -435,15 +573,30 @@ class VirtualMachineScaleSetStorageProfile(Model): # belongs to VmScaleSet >> V
class VirtualMachineScaleSetVMProfile(Model): # belongs to VmScaleSet
billing_profile = ModelType(BillingProfile, serialize_when_none=False)
diagnostics_profile = ModelType(DiagnosticsProfile, serialize_when_none=False)
- eviction_policy = StringType(choices=('Deallocate', 'Delete', 'None'), default='None')
- extension_profile = ModelType(VirtualMachineScaleSetExtensionProfile, serialize_when_none=False)
- license_type = StringType(choices=('Windows_Client', 'Windows_Server', 'RHEL_BYOS', 'SLES_BYOS', None), serialize_when_none=False)
- network_profile = ModelType(VirtualMachineScaleSetNetworkProfile, serialize_when_none=False)
+ eviction_policy = StringType(
+ choices=("Deallocate", "Delete", "None"), default="None"
+ )
+ extension_profile = ModelType(
+ VirtualMachineScaleSetExtensionProfile, serialize_when_none=False
+ )
+ license_type = StringType(
+ choices=("Windows_Client", "Windows_Server", "RHEL_BYOS", "SLES_BYOS", None),
+ serialize_when_none=False,
+ )
+ network_profile = ModelType(
+ VirtualMachineScaleSetNetworkProfile, serialize_when_none=False
+ )
os_profile = ModelType(VirtualMachineScaleSetOSProfile, serialize_when_none=False)
- priority = StringType(choices=('Low', 'Regular', 'Spot', '', None), serialize_when_none=False)
- scheduled_events_profile = ModelType(ScheduledEventsProfile, serialize_when_none=False)
+ priority = StringType(
+ choices=("Low", "Regular", "Spot", "", None), serialize_when_none=False
+ )
+ scheduled_events_profile = ModelType(
+ ScheduledEventsProfile, serialize_when_none=False
+ )
security_profile = ModelType(SecurityProfile, serialize_when_none=False)
- storage_profile = ModelType(VirtualMachineScaleSetStorageProfile, serialize_when_none=False)
+ storage_profile = ModelType(
+ VirtualMachineScaleSetStorageProfile, serialize_when_none=False
+ )
terminate_notification_display = StringType(serialize_when_none=False)
@@ -451,7 +604,7 @@ class VirtualMachineScaleSetVMProfile(Model): # belongs to VmScaleSet
class InstanceViewStatus(Model):
code = StringType(serialize_when_none=False)
display_status = StringType(serialize_when_none=False)
- level = StringType(choices=('Error', 'Info', 'Warning'), serialize_when_none=False)
+ level = StringType(choices=("Error", "Info", "Warning"), serialize_when_none=False)
message = StringType(serialize_when_none=False)
time = DateTimeType(serialize_when_none=False)
@@ -501,7 +654,9 @@ class DiskEncryptionSettings(Model):
class ManagedDiskParameters(Model):
# belongs to VmScaleSet >> VirtualMachineScaleSetVMProfile
# >> VirtualMachineScaleSetStorageProfile >> VirtualMachineScaleSetDataDisk
- disk_encryption_set = ModelType(DiskEncryptionSetParameters, serialize_when_none=False)
+ disk_encryption_set = ModelType(
+ DiskEncryptionSetParameters, serialize_when_none=False
+ )
storage_account_type = StringType(serialize_when_none=False)
storage_type = StringType(serialize_when_none=False)
id = StringType(serialize_when_none=False)
@@ -509,23 +664,36 @@ class ManagedDiskParameters(Model):
class OSDisk(Model):
name = StringType(serialize_when_none=False)
- caching = StringType(choices=('None', 'ReadOnly', 'ReadWrite'), default='None', serialize_when_none=False)
- create_option = StringType(choices=('Attach', 'Empty', 'FromImage'), default='Empty', serialize_when_none=False)
+ caching = StringType(
+ choices=("None", "ReadOnly", "ReadWrite"),
+ default="None",
+ serialize_when_none=False,
+ )
+ create_option = StringType(
+ choices=("Attach", "Empty", "FromImage"),
+ default="Empty",
+ serialize_when_none=False,
+ )
diff_disk_settings = ModelType(DiffDiskSettings, serialize_when_none=False)
disk_size_gb = IntType(serialize_when_none=False)
encryption_settings = ModelType(DiskEncryptionSettings, serialize_when_none=False)
image = ModelType(VirtualHardDisk, serialize_when_none=False)
managed_disk = ModelType(ManagedDiskParameters, serialize_when_none=False)
- os_type = StringType(choices=('Linux', 'Windows'), serialize_when_none=False)
+ os_type = StringType(choices=("Linux", "Windows"), serialize_when_none=False)
vhd = ModelType(VirtualHardDisk, serialize_when_none=False)
write_accelerator_enabled = BooleanType(serialize_when_none=False)
class DataDisk(Model):
- caching = StringType(choices=('None', 'ReadOnly', 'ReadWrite', None), serialize_when_none=False)
+ caching = StringType(
+ choices=("None", "ReadOnly", "ReadWrite", None), serialize_when_none=False
+ )
name = StringType(serialize_when_none=False)
- create_option = StringType(choices=('Attach', 'Empty', 'FromImage', None), default='Empty',
- serialize_when_none=False)
+ create_option = StringType(
+ choices=("Attach", "Empty", "FromImage", None),
+ default="Empty",
+ serialize_when_none=False,
+ )
disk_iops_read_write = IntType(serialize_when_none=False)
disk_m_bps_read_write = IntType(serialize_when_none=False)
disk_size_gb = IntType(serialize_when_none=False)
@@ -570,11 +738,15 @@ class NetworkInterfaceReference(Model):
class NetworkProfile(Model): # belongs to VirtualMachineScaleSetVM
- network_interfaces = ListType(ModelType(NetworkInterfaceReference), serialize_when_none=False)
+ network_interfaces = ListType(
+ ModelType(NetworkInterfaceReference), serialize_when_none=False
+ )
class VirtualMachineScaleSetVMNetworkProfileConfiguration(Model):
- network_interface_configurations = ListType(ModelType(VirtualMachineScaleSetNetworkConfiguration), serialize_when_none=False)
+ network_interface_configurations = ListType(
+ ModelType(VirtualMachineScaleSetNetworkConfiguration), serialize_when_none=False
+ )
class VirtualMachineAgentInstanceView(Model):
@@ -586,7 +758,9 @@ class VirtualMachineAgentInstanceView(Model):
class VirtualMachineExtensionVMInstanceView(Model):
assigned_host = StringType(serialize_when_none=False)
# boot_diagnostics = ModelType(BootDiagnosticsInstanceView, serialize_when_none=False)
- extensions = ListType(ModelType(VirtualMachineExtensionInstanceView), serialize_when_none=False)
+ extensions = ListType(
+ ModelType(VirtualMachineExtensionInstanceView), serialize_when_none=False
+ )
placement_group_id = StringType(serialize_when_none=False)
statuses = ListType(ModelType(InstanceViewStatus), serialize_when_none=False)
display_status = StringType(serialize_when_none=False)
@@ -595,11 +769,13 @@ class VirtualMachineExtensionVMInstanceView(Model):
class VirtualMachineScaleSetVM(Model): # data model for actual instances
id = StringType()
- instance_id = IntType()
+ instance_id = StringType()
location = StringType()
name = StringType()
plan = ModelType(Plan, serialize_when_none=False)
- additional_capabilities = ModelType(AdditionalCapabilities, serialize_when_none=False)
+ additional_capabilities = ModelType(
+ AdditionalCapabilities, serialize_when_none=False
+ )
available_set = ModelType(SubResource, serialize_when_none=False)
diagnostics_profile = ModelType(DiagnosticsProfile, serialize_when_none=False)
hardware_profile = ModelType(HardwareProfile, serialize_when_none=False)
@@ -607,16 +783,22 @@ class VirtualMachineScaleSetVM(Model): # data model for actual instances
licence_type = StringType(serialize_when_none=False)
model_definition_applied = StringType(serialize_when_none=False)
network_profile = ModelType(NetworkProfile, serialize_when_none=False)
- network_profile_configuration = ModelType(VirtualMachineScaleSetVMNetworkProfileConfiguration, serialize_when_none=False)
+ network_profile_configuration = ModelType(
+ VirtualMachineScaleSetVMNetworkProfileConfiguration, serialize_when_none=False
+ )
primary_vnet = StringType(serialize_when_none=False)
os_profile = ModelType(OSProfile, serialize_when_none=False)
- protection_policy = ModelType(VirtualMachineScaleSetVMProtectionPolicy, serialize_when_none=False)
+ protection_policy = ModelType(
+ VirtualMachineScaleSetVMProtectionPolicy, serialize_when_none=False
+ )
provisioning_state = StringType(serialize_when_none=False)
vm_instance_status_display = StringType(serialize_when_none=False)
security_profile = ModelType(SecurityProfile, serialize_when_none=False)
storage_profile = ModelType(StorageProfile, serialize_when_none=False)
vm_id = StringType(serialize_when_none=False)
- vm_instance_status_profile = ModelType(VirtualMachineExtensionVMInstanceView, serialize_when_none=False)
+ vm_instance_status_profile = ModelType(
+ VirtualMachineExtensionVMInstanceView, serialize_when_none=False
+ )
resources = ListType(ModelType(VirtualMachineExtension), serialize_when_none=False)
sku = ModelType(Sku, serialize_when_none=False)
tags = ModelType(AzureTags, serialize_when_none=False)
@@ -633,40 +815,60 @@ class VirtualMachineScaleSetPowerState(Model):
profiles = ListType(ModelType(AutoscaleProfile), serialize_when_none=False)
enabled = BooleanType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
- notifications = ListType(ModelType(AutoscaleNotification), serialize_when_none=False)
+ notifications = ListType(
+ ModelType(AutoscaleNotification), serialize_when_none=False
+ )
target_resource_uri = StringType(serialize_when_none=False)
tags = ModelType(AzureTags, serialize_when_none=False)
class VirtualMachineScaleSet(AzureCloudService):
id = StringType(serialize_when_none=False)
- autoscale_setting_resource_collection = ModelType(AutoscaleSettingResourceCollection, serialize_when_none=False)
+ autoscale_setting_resource_collection = ModelType(
+ AutoscaleSettingResourceCollection, serialize_when_none=False
+ )
location = StringType(serialize_when_none=False)
identity = ModelType(VirtualMachineScaleSetIdentity, serialize_when_none=False)
instance_count = IntType(serialize_when_none=False)
name = StringType(serialize_when_none=False)
plan = ModelType(Plan, serialize_when_none=False)
- additional_capabilities = ModelType(AdditionalCapabilities, serialize_when_none=False)
- automatic_repairs_policy = ModelType(AutomaticRepairsPolicy, serialize_when_none=False)
- do_not_run_extensions_on_overprovisioned_v_ms = BooleanType(serialize_when_none=False)
+ additional_capabilities = ModelType(
+ AdditionalCapabilities, serialize_when_none=False
+ )
+ automatic_repairs_policy = ModelType(
+ AutomaticRepairsPolicy, serialize_when_none=False
+ )
+ do_not_run_extensions_on_overprovisioned_v_ms = BooleanType(
+ serialize_when_none=False
+ )
host_group = ModelType(SubResource, serialize_when_none=False)
overprovision = BooleanType(default=True, serialize_when_none=False)
platform_fault_domain_count = IntType(serialize_when_none=False)
- provisioning_state = StringType(choices=('Failed', 'Succeeded'), serialize_when_none=False)
+ provisioning_state = StringType(
+ choices=("Failed", "Succeeded"), serialize_when_none=False
+ )
proximity_placement_group = ModelType(SubResource, serialize_when_none=False)
- proximity_placement_group_display = StringType(serialize_when_none=False, default='None')
+ proximity_placement_group_display = StringType(
+ serialize_when_none=False, default="None"
+ )
scale_in_policy = ModelType(ScaleInPolicy, serialize_when_none=False)
single_placement_group = BooleanType(serialize_when_none=False)
unique_id = StringType(serialize_when_none=False)
upgrade_policy = ModelType(UpgradePolicy, serialize_when_none=False)
- virtual_machine_profile = ModelType(VirtualMachineScaleSetVMProfile, serialize_when_none=False)
+ virtual_machine_profile = ModelType(
+ VirtualMachineScaleSetVMProfile, serialize_when_none=False
+ )
terminate_notification_display = StringType(serialize_when_none=False)
- virtual_machine_scale_set_power_state = ListType(ModelType(VirtualMachineScaleSetPowerState))
+ virtual_machine_scale_set_power_state = ListType(
+ ModelType(VirtualMachineScaleSetPowerState)
+ )
zone_balance = BooleanType(serialize_when_none=False)
sku = ModelType(Sku, serialize_when_none=False)
type = StringType(serialize_when_none=False)
zones = ListType(StringType, serialize_when_none=False)
- vm_instances = ListType(ModelType(VirtualMachineScaleSetVM), serialize_when_none=False)
+ vm_instances = ListType(
+ ModelType(VirtualMachineScaleSetVM), serialize_when_none=False
+ )
def reference(self):
return {
diff --git a/src/spaceone/inventory/service/__init__.py b/src/spaceone/inventory/service/__init__.py
index f1c86449..5880d009 100644
--- a/src/spaceone/inventory/service/__init__.py
+++ b/src/spaceone/inventory/service/__init__.py
@@ -1 +1,2 @@
-from spaceone.inventory.service.collector_service import CollectorService
\ No newline at end of file
+from spaceone.inventory.service.collector_service import CollectorService
+from spaceone.inventory.service.job_service import JobService
diff --git a/src/spaceone/inventory/service/collector_service.py b/src/spaceone/inventory/service/collector_service.py
index 31f0d789..24bd263a 100644
--- a/src/spaceone/inventory/service/collector_service.py
+++ b/src/spaceone/inventory/service/collector_service.py
@@ -1,33 +1,43 @@
-import time
-import logging
import concurrent.futures
+import logging
+import time
+import os
+
from spaceone.inventory.libs.manager import AzureManager
-from spaceone.inventory.manager.subscriptions.subscription_manager import SubscriptionsManager
+from spaceone.inventory.manager.subscriptions.subscription_manager import (
+ SubscriptionsManager,
+)
+from spaceone.core import utils
from spaceone.core.service import *
from spaceone.inventory.conf.cloud_service_conf import *
_LOGGER = logging.getLogger(__name__)
+_CURRENT_DIR = os.path.dirname(__file__)
+_BEFORE_CURRENT_DIR = os.path.dirname(_CURRENT_DIR)
+_METRIC_DIR = os.path.join(_BEFORE_CURRENT_DIR, "metrics/")
+
@authentication_handler
class CollectorService(BaseService):
+ resource = "Collector"
+
def __init__(self, metadata):
super().__init__(metadata)
- @check_required(['options'])
+ @check_required(["options"])
def init(self, params):
- """ init plugin by options
- """
+ """init plugin by options"""
capability = {
- 'filter_format': FILTER_FORMAT,
- 'supported_resource_type': SUPPORTED_RESOURCE_TYPE,
- 'supported_features': SUPPORTED_FEATURES,
- 'supported_schedules': SUPPORTED_SCHEDULES
+ "filter_format": FILTER_FORMAT,
+ "supported_resource_type": SUPPORTED_RESOURCE_TYPE,
+ "supported_features": SUPPORTED_FEATURES,
+ "supported_schedules": SUPPORTED_SCHEDULES,
}
- return {'metadata': capability}
+ return {"metadata": capability}
@transaction
- @check_required(['options', 'secret_data'])
+ @check_required(["options", "secret_data"])
def verify(self, params):
"""
Args:
@@ -35,8 +45,8 @@ def verify(self, params):
- options
- secret_data
"""
- options = params['options']
- secret_data = params.get('secret_data', {})
+ options = params["options"]
+ secret_data = params.get("secret_data", {})
if secret_data != {}:
azure_manager = AzureManager()
active = azure_manager.verify({}, secret_data=secret_data)
@@ -44,8 +54,8 @@ def verify(self, params):
return {}
@transaction
- @check_required(['options', 'secret_data', 'filter'])
- def collect(self, params):
+ @check_required(["options", "secret_data", "filter"])
+ def collect(self, params: dict):
"""
Args:
params:
@@ -53,51 +63,72 @@ def collect(self, params):
- schema
- secret_data
- filter
+ - task_options
"""
start_time = time.time()
- secret_data = params.get('secret_data', {})
- params.update({
- 'subscription_info': self.get_subscription_info(params)
- })
+ options = params.get("options", {})
+ task_options = params.get("task_options", {})
+ params.update({"subscription_info": self.get_subscription_info(params)})
_LOGGER.debug("[ EXECUTOR START: Azure Cloud Service ]")
- target_execute_managers = self._get_target_execute_manger(params.get('options', {}))
+ target_execute_managers = self._get_target_execute_manger(options, task_options)
# Thread per cloud services
with concurrent.futures.ThreadPoolExecutor(max_workers=MAX_WORKER) as executor:
future_executors = []
for execute_manager in target_execute_managers:
- _LOGGER.info(f'@@@ {execute_manager} @@@')
+ _LOGGER.info(f"@@@ {execute_manager} @@@")
_manager = self.locator.get_manager(execute_manager)
- future_executors.append(executor.submit(_manager.collect_resources, params))
+ future_executors.append(
+ executor.submit(_manager.collect_resources, params)
+ )
for future in concurrent.futures.as_completed(future_executors):
for result in future.result():
- yield result
+ yield result.to_primitive()
- '''
+ """
for manager in self.execute_managers:
_LOGGER.debug(f'@@@ {manager} @@@')
_manager = self.locator.get_manager(manager)
for resource in _manager.collect_resources(params):
yield resource.to_primitive()
- '''
- _LOGGER.debug(f'TOTAL TIME : {time.time() - start_time} Seconds')
+ """
+
+        if cloud_service_types := options.get("cloud_service_types") or task_options.get("cloud_service_types"):
+ for service in cloud_service_types:
+ for response in self.collect_metrics(service):
+ yield response
+ else:
+ for service in CLOUD_SERVICE_GROUP_MAP.keys():
+ for response in self.collect_metrics(service):
+ yield response
+ _LOGGER.debug(f"TOTAL TIME : {time.time() - start_time} Seconds")
def get_subscription_info(self, params):
- subscription_manager: SubscriptionsManager = self.locator.get_manager('SubscriptionsManager')
+ subscription_manager: SubscriptionsManager = self.locator.get_manager(
+ "SubscriptionsManager"
+ )
return subscription_manager.get_subscription_info(params)
def list_location_info(self, params):
- subscription_manager: SubscriptionsManager = self.locator.get_manager('SubscriptionsManager')
+ subscription_manager: SubscriptionsManager = self.locator.get_manager(
+ "SubscriptionsManager"
+ )
return subscription_manager.list_location_info(params)
- def _get_target_execute_manger(self, options):
- if 'cloud_service_types' in options:
- execute_managers = self._match_execute_manager(options['cloud_service_types'])
+ def _get_target_execute_manger(self, options: dict, task_options: dict) -> list:
+ if "cloud_service_types" in options:
+ execute_managers = self._match_execute_manager(
+ options["cloud_service_types"]
+ )
+ elif "cloud_service_types" in task_options:
+ execute_managers = self._match_execute_manager(
+ task_options["cloud_service_types"]
+ )
else:
execute_managers = list(CLOUD_SERVICE_GROUP_MAP.values())
@@ -105,5 +136,46 @@ def _get_target_execute_manger(self, options):
@staticmethod
def _match_execute_manager(cloud_service_groups):
- return [CLOUD_SERVICE_GROUP_MAP[_cloud_service_group] for _cloud_service_group in cloud_service_groups
- if _cloud_service_group in CLOUD_SERVICE_GROUP_MAP]
+ return [
+ CLOUD_SERVICE_GROUP_MAP[_cloud_service_group]
+ for _cloud_service_group in cloud_service_groups
+ if _cloud_service_group in CLOUD_SERVICE_GROUP_MAP
+ ]
+
+    def collect_metrics(self, service: str):
+ if not os.path.exists(os.path.join(_METRIC_DIR, service)):
+ os.mkdir(os.path.join(_METRIC_DIR, service))
+ for dirname in os.listdir(os.path.join(_METRIC_DIR, service)):
+ for filename in os.listdir(os.path.join(_METRIC_DIR, service, dirname)):
+ if filename.endswith(".yaml"):
+ file_path = os.path.join(_METRIC_DIR, service, dirname, filename)
+ info = utils.load_yaml_from_file(file_path)
+ if filename == "namespace.yaml":
+ yield self.make_namespace_or_metric_response(
+ namespace=info,
+ resource_type="inventory.Namespace",
+ )
+ else:
+ yield self.make_namespace_or_metric_response(
+ metric=info,
+ resource_type="inventory.Metric",
+ )
+
+ @staticmethod
+ def make_namespace_or_metric_response(
+ metric=None,
+ namespace=None,
+ resource_type: str = "inventory.Metric",
+ ) -> dict:
+ response = {
+ "state": "SUCCESS",
+ "resource_type": resource_type,
+ "match_rules": {},
+ }
+
+ if resource_type == "inventory.Metric" and metric is not None:
+ response["resource"] = metric
+ elif resource_type == "inventory.Namespace" and namespace is not None:
+ response["resource"] = namespace
+
+ return response
diff --git a/src/spaceone/inventory/service/job_service.py b/src/spaceone/inventory/service/job_service.py
new file mode 100644
index 00000000..f367b532
--- /dev/null
+++ b/src/spaceone/inventory/service/job_service.py
@@ -0,0 +1,49 @@
+import logging
+
+from spaceone.core.service import *
+from spaceone.inventory.model.job_model import Tasks
+from spaceone.inventory.conf.cloud_service_conf import *
+
+_LOGGER = logging.getLogger(__name__)
+
+
+@authentication_handler
+class JobService(BaseService):
+ resource = "Job"
+
+ def __init__(self, metadata):
+ super().__init__(metadata)
+
+ @transaction
+ @check_required(["options", "secret_data"])
+ def get_tasks(self, params: dict):
+ """
+ Args:
+ params:
+ - options
+ - schema
+ - secret_data
+ - task_filter
+ """
+
+ options = params.get("options", {})
+ secret_data = params.get("secret_data", {})
+
+ tasks = []
+
+ cloud_service_types = options.get(
+ "cloud_service_types", CLOUD_SERVICE_GROUP_MAP.keys()
+ )
+
+ for cloud_service_type in cloud_service_types:
+ tasks.append(
+ {
+ "task_options": {
+ "cloud_service_types": [cloud_service_type],
+ }
+ }
+ )
+ tasks = Tasks({"tasks": tasks})
+ tasks.validate()
+
+ return tasks.to_primitive()
diff --git a/test/api/test_cloud_service_api.py b/test/api/test_cloud_service_api.py
index c0e10343..e9436b9f 100644
--- a/test/api/test_cloud_service_api.py
+++ b/test/api/test_cloud_service_api.py
@@ -11,35 +11,59 @@
class TestCollector(TestCase):
-
@classmethod
def setUpClass(cls):
- azure_cred = os.environ.get('AZURE_CRED')
+ azure_cred = os.environ.get("AZURE_CRED")
test_config = utils.load_yaml_from_file(azure_cred)
- cls.schema = 'azure_client_secret'
- cls.azure_credentials = test_config.get('AZURE_CREDENTIALS', {})
+ cls.schema = "azure_client_secret"
+ cls.azure_credentials = test_config.get("AZURE_CREDENTIALS", {})
super().setUpClass()
def test_init(self):
- v_info = self.inventory.Collector.init({'options': {}})
+ v_info = self.inventory.Collector.init({"options": {}})
print_json(v_info)
def test_verify(self):
+ options = {}
+ v_info = self.inventory.Collector.verify(
+ {"options": options, "secret_data": self.azure_credentials}
+ )
+ print_json(v_info)
+
+ def test_get_tasks(self):
options = {
+ "cloud_service_types": ["KeyVaults"],
+ # 'custom_asset_url': 'https://xxxxx.cloudforet.dev.icon/azure'
}
- v_info = self.inventory.Collector.verify({'options': options, 'secret_data': self.azure_credentials})
+ # options = {}
+ v_info = self.inventory.Job.get_tasks(
+ {
+ "options": options,
+ "secret_data": self.azure_credentials,
+ }
+ )
print_json(v_info)
def test_collect(self):
options = {
- 'cloud_service_types': ['WebPubSubService'],
+ # "cloud_service_types": ["KeyVaults"],
# 'custom_asset_url': 'https://xxxxx.cloudforet.dev.icon/azure'
}
+
# options = {}
+ task_options = {
+ # "cloud_service_types": ["StorageAccounts"],
+ }
filter = {}
- resource_stream = self.inventory.Collector.collect({'options': options, 'secret_data': self.azure_credentials,
- 'filter': filter})
+ resource_stream = self.inventory.Collector.collect(
+ {
+ "options": options,
+ "secret_data": self.azure_credentials,
+ "task_options": task_options,
+ "filter": filter,
+ }
+ )
for res in resource_stream:
print_json(res)