Commit 6390228

Merge pull request #66 from sassoftware/develop

Merge develop branch to main

kevinlinglesas authored Feb 17, 2021
2 parents db0154b + 3dd644a commit 6390228
Showing 17 changed files with 284 additions and 51 deletions.
@@ -40,6 +40,7 @@ class Kubernetes(object):
"""
API_RESOURCES_DICT = "apiResources"
API_VERSIONS_LIST = "apiVersions"
CADENCE_INFO = "cadenceInfo"
DISCOVERED_KINDS_DICT = "discoveredKinds"
INGRESS_CTRL = "ingressController"
NAMESPACE = "namespace"
32 changes: 31 additions & 1 deletion deployment_report/model/test/test_viya_deployment_report.py
@@ -86,9 +86,10 @@ def test_get_kubernetes_details(report: ViyaDeploymentReport) -> None:
kube_details: Dict = report.get_kubernetes_details()

# check for all expected entries
assert len(kube_details) == 7
assert len(kube_details) == 8
assert ReportKeys.Kubernetes.API_RESOURCES_DICT in kube_details
assert ReportKeys.Kubernetes.API_VERSIONS_LIST in kube_details
assert ReportKeys.Kubernetes.CADENCE_INFO in kube_details
assert ReportKeys.Kubernetes.DISCOVERED_KINDS_DICT in kube_details
assert ReportKeys.Kubernetes.INGRESS_CTRL in kube_details
assert ReportKeys.Kubernetes.NAMESPACE in kube_details
@@ -602,3 +603,32 @@ def test_write_report_unpopulated() -> None:
# make sure None is returned
assert data_file is None
assert html_file is None


def test_get_cadence_version(report: ViyaDeploymentReport) -> None:
"""
This test verifies that the provided cadence data is returned when a valid ConfigMap is passed to get_cadence_version().
:param report: The populated ViyaDeploymentReport returned by the report() fixture.
"""
# check for expected attributes

cadence_data = KubectlTest().get_resources("ConfigMaps")
cadence_info: Optional[Text] = None

for c in cadence_data:
cadence_info = report.get_cadence_version(c)
if cadence_info:
break

assert cadence_info == KubectlTest.Values.CADENCEINFO


def test_get_cadence_version_none() -> None:
"""
This test verifies that a None value is returned for the cadence when the report is unpopulated.
"""

# make sure None is returned
assert ViyaDeploymentReport().get_sas_component_resources(KubectlTest.Values.CADENCEINFO,
KubernetesResource.Kinds.CONFIGMAP) is None
@@ -52,11 +52,12 @@ def gathered_resources() -> Dict:
# create a list of resource kinds to gather
# nodes and networking kinds do not typically have owning objects, so these need to be called individually
kinds_list: List = [
KubernetesResource.Kinds.CONFIGMAP,
KubernetesResource.Kinds.INGRESS,
KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE,
KubernetesResource.Kinds.NODE,
KubernetesResource.Kinds.POD,
KubernetesResource.Kinds.SERVICE,
KubernetesResource.Kinds.INGRESS,
KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE]
KubernetesResource.Kinds.SERVICE]

for resource_kind in kinds_list:
ViyaDeploymentReportUtils.gather_resource_details(
36 changes: 36 additions & 0 deletions deployment_report/model/viya_deployment_report.py
@@ -109,6 +109,21 @@ def gather_details(self, kubectl: KubectlInterface,
# create dictionary to store gathered resources #
gathered_resources: Dict = dict()

# start by gathering details about ConfigMap #
cadence_info: Optional[Text] = None
try:
ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources,
k8s_kinds.CONFIGMAP)
for item in gathered_resources[k8s_kinds.CONFIGMAP]['items']:
resource_definition = gathered_resources[k8s_kinds.CONFIGMAP]['items'][item]['resourceDefinition']
cadence_info = self.get_cadence_version(resource_definition)
if cadence_info:
break

except CalledProcessError:
pass

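# reset the collection so the ConfigMap probe does not leak into the main gathering pass #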
gathered_resources = dict()
# start by gathering details about Nodes, if available #
# this information can be reported even if Pods are not listable #
try:
@@ -256,6 +271,8 @@ def gather_details(self, kubectl: KubectlInterface,
k8s_details_dict[Keys.Kubernetes.VERSIONS_DICT]: Dict = kubectl.version()
# create a key to hold the meta information about resources discovered in the cluster: dict #
k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT]: Dict = dict()
# create a key to hold the cadence version information: str|None #
k8s_details_dict[Keys.Kubernetes.CADENCE_INFO]: Optional[Text] = cadence_info

# add the availability and count of all discovered resources #
for kind_name, kind_details in gathered_resources.items():
@@ -504,6 +521,25 @@ def get_sas_component_resources(self, component_name: Text, resource_kind: Text)
except KeyError:
return None

def get_cadence_version(self, resource: KubernetesResource) -> Optional[Text]:
"""
Returns the cadence version information parsed from the 'sas-deployment-metadata' ConfigMap.
:param resource: The ConfigMap KubernetesResource to check for cadence data.
:return: The formatted cadence string, or None if the resource does not contain cadence data.
"""
cadence_info: Optional[Text] = None
try:
if 'sas-deployment-metadata' in resource.get_name():
cadence_data = resource.get_data()
cadence_info = cadence_data['SAS_CADENCE_NAME'].capitalize() + ' ' + \
cadence_data['SAS_CADENCE_VERSION'] + ' (' + \
cadence_data['SAS_CADENCE_RELEASE'] + ')'

return cadence_info
except KeyError:
return None

def write_report(self, output_directory: Text = OUTPUT_DIRECTORY_DEFAULT,
data_file_only: bool = DATA_FILE_ONLY_DEFAULT,
include_resource_definitions: bool = INCLUDE_RESOURCE_DEFINITIONS_DEFAULT,
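For reference, a minimal standalone sketch of the string that get_cadence_version() assembles, using the data keys from the method above (the values shown are hypothetical):

```python
# Sketch of the cadence string assembled by get_cadence_version().
# Key names match the method above; the values are illustrative only.
cadence_data = {
    "SAS_CADENCE_NAME": "stable",
    "SAS_CADENCE_VERSION": "2020.1.3",
    "SAS_CADENCE_RELEASE": "20210115.1610742800731",
}

cadence_info = (cadence_data["SAS_CADENCE_NAME"].capitalize() + " "
                + cadence_data["SAS_CADENCE_VERSION"]
                + " (" + cadence_data["SAS_CADENCE_RELEASE"] + ")")

print(cadence_info)  # Stable 2020.1.3 (20210115.1610742800731)
```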
6 changes: 5 additions & 1 deletion deployment_report/templates/viya_deployment_report.html.j2
@@ -92,6 +92,10 @@
<th>Ingress Controller</th>
<td>{{ report_data.kubernetes.ingressController | default("<em>could not be determined</em>") }}</td>
</tr>
<tr>
<th>Cadence Version</th>
<td>{{ report_data.kubernetes.cadenceInfo | default("<em>could not be determined</em>") }}</td>
</tr>
</table>
{# Cluster Overview: Overview Table #}

@@ -258,4 +262,4 @@
</div>
{# API: Content #}
{% endblock %}
{# BLOCK DEFINITIONS #}
{# BLOCK DEFINITIONS #}
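To see how the `default` filter in the new Cadence Version row behaves when cadenceInfo is absent, here is a hedged sketch (requires the jinja2 package; the cadence value is hypothetical):

```python
# Sketch of the `default` filter used by the new Cadence Version row.
# Requires jinja2; the cadence value is illustrative only.
from jinja2 import Template

row = Template('{{ cadenceInfo | default("could not be determined") }}')

print(row.render(cadenceInfo="Stable 2020.1.3 (20210115.1610742800731)"))
print(row.render())  # falls back to: could not be determined
```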
4 changes: 4 additions & 0 deletions pre_install_report/README.md
@@ -58,6 +58,10 @@ $ export INGRESS_HOST=externalIP=$(kubectl -n <ingress-namespace> get service <n
$ export INGRESS_HTTP_PORT=$(kubectl -n <ingress-namespace> get service <nginx-ingress-controller-name> -o jsonpath='{.spec.ports[?(@.name=="http")].port}')
$ export INGRESS_HTTPS_PORT=$(kubectl -n <ingress-namespace> get service <nginx-ingress-controller-name> -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
```
The command to determine the Ingress Host may differ slightly with Amazon Elastic Kubernetes Service (EKS):
```
$ export INGRESS_HOST=externalIP=$(kubectl -n <ingress-namespace> get service <nginx-ingress-controller-name> -o jsonpath='{.status.loadBalancer.ingress[*].hostname}')
```

Use the values gathered on the command line for http or https as appropriate for your deployment:

18 changes: 16 additions & 2 deletions pre_install_report/library/pre_install_check.py
@@ -73,6 +73,7 @@ def __init__(self, sas_logger: ViyaARKLogger, viya_kubelet_version_min, viya_min
self._viya_min_aggregate_worker_memory: Text = viya_min_aggregate_worker_memory
self._calculated_aggregate_allocatable_memory = None
self._workers = 0
self._aggregate_nodeStatus_failures = 0

def _parse_release_info(self, release_info):
"""
@@ -609,11 +610,15 @@ def _check_kubelet_errors(self, global_data, aggregate_kubelet_failures):
return: updated global data about worker nodes retrieved
"""
aggregate_kubelet_data = {}
aggregate_kubelet_data.update({'aggregate_kubelet_failures': str(aggregate_kubelet_failures)})
node_status_msg = ""
if self._aggregate_nodeStatus_failures > 0:
node_status_msg = " Check Node(s). All Nodes NOT in Ready Status." \
+ ' Issues Found: ' + str(self._aggregate_nodeStatus_failures)
aggregate_kubelet_data.update({'aggregate_kubelet_failures': node_status_msg})
if aggregate_kubelet_failures > 0:
aggregate_kubelet_data.update({'aggregate_kubelet_failures':
'Check Kubelet Version on nodes.' +
' Issues Found: ' + str(aggregate_kubelet_failures)})
' Issues Found: ' + str(aggregate_kubelet_failures) + '.' + node_status_msg})
global_data.append(aggregate_kubelet_data)

return global_data
@@ -735,6 +740,15 @@ def evaluate_nodes(self, nodes_data, global_data, cluster_info, quantity_):
total_memory = total_memory + quantity_(str(node['memory']))
total_allocatable_memory = total_allocatable_memory + quantity_(alloc_memory)

try:
nodeReady = str(node['Ready'])
if nodeReady != "True":
self._aggregate_nodeStatus_failures += 1
except KeyError:
node['Ready'] = viya_constants.KEY_NOT_FOUND

if node['worker']:
total_cpu_cores = total_cpu_cores + alloc_cpu_cores
self.logger.info("worker total_cpu_cores {}".format(str(total_cpu_cores)))
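The readiness tally above can be exercised in isolation; a minimal sketch follows (the node dicts are hypothetical, and KEY_NOT_FOUND stands in for viya_constants.KEY_NOT_FOUND):

```python
# Standalone sketch of the node-readiness tally added to evaluate_nodes().
# Node dicts are hypothetical; KEY_NOT_FOUND stands in for
# viya_constants.KEY_NOT_FOUND.
KEY_NOT_FOUND = "KEY NOT FOUND"

def count_not_ready(nodes_data):
    failures = 0
    for node in nodes_data:
        try:
            if str(node['Ready']) != "True":
                failures += 1
        except KeyError:
            # a missing Ready condition is flagged but not counted as a failure
            node['Ready'] = KEY_NOT_FOUND
    return failures

print(count_not_ready([{'Ready': 'True'}, {'Ready': 'False'}, {}]))  # 1
```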
92 changes: 74 additions & 18 deletions pre_install_report/library/pre_install_check_permissions.py
@@ -65,6 +65,8 @@ def __init__(self, params):
self.ingress_data[viya_constants.INGRESS_CONTROLLER] = self.ingress_controller
self.ingress_file = "hello-ingress.yaml"
self._storage_class_sc: List[KubernetesResource] = None
self._sample_deployment = 0
self._sample_output = ""

def _set_results_cluster_admin(self, resource_key, rc):
"""
@@ -73,7 +75,9 @@ def _set_results_cluster_admin(self, resource_key, rc):
"""
if rc == 1:
self.cluster_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS
self.cluster_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] = viya_constants.INSUFFICIENT_PERMS
self.cluster_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] = \
viya_constants.INSUFFICIENT_PERMS + ". Check Logs."

else:
self.cluster_admin_permission_data[resource_key] = viya_constants.ADEQUATE_PERMS

@@ -82,12 +86,41 @@ def _set_results_namespace_admin(self, resource_key, rc):
Set permissions status for specified resource/verb with namespace admin role
"""
if rc == 1:
self.namespace_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS
self.namespace_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] \
= viya_constants.INSUFFICIENT_PERMS
sample_keys = [viya_constants.PERM_DEPLOYMENT]
deployment_keys = [viya_constants.PERM_DELETE + viya_constants.PERM_DEPLOYMENT,
viya_constants.PERM_SERVICE,
viya_constants.PERM_DELETE + viya_constants.PERM_SERVICE,
viya_constants.PERM_INGRESS,
viya_constants.PERM_DELETE + viya_constants.PERM_INGRESS,
viya_constants.PERM_REPLICASET,
viya_constants.PERM_CREATE + viya_constants.PERM_ROLE,
viya_constants.PERM_CREATE + viya_constants.PERM_ROLEBINDING,
viya_constants.PERM_CREATE + viya_constants.PERM_SA,
viya_constants.PERM_DELETE + viya_constants.PERM_ROLE,
viya_constants.PERM_DELETE + viya_constants.PERM_ROLEBINDING,
viya_constants.PERM_DELETE + viya_constants.PERM_SA
]
if rc != 0:
self.logger.debug("resource_key = {}, sample_deployment = {} ".format(str(resource_key),
str(self._sample_deployment)))
if self._sample_deployment != 0:
if resource_key in deployment_keys:
self.namespace_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS
if resource_key in sample_keys:
self.namespace_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS + \
". Sample Deployment Check failed! " + \
"Ensure Node(s) Status is Ready. " + \
"Check Permissions in specified namespace. " \
+ self._sample_output

else:
self.namespace_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS
self.namespace_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] = \
viya_constants.INSUFFICIENT_PERMS + ". Check Logs."
else:
self.namespace_admin_permission_data[resource_key] = viya_constants.ADEQUATE_PERMS
# self.namespace_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] = \
# viya_constants.ADEQUATE_PERMS

def _get_pvc(self, pvc_name, key):
"""
@@ -284,20 +317,34 @@ def check_sample_application(self):

rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_APPLY,
'hello-application.yaml')
# self._set_results_namespace_admin(viya_constants.PERM_DEPLOYMENT, rc)
# self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc)

if rc == 0:
rc = self.utils.do_cmd(" rollout status deployment.v1.apps/hello-world ")
rc, sample_output = self.utils.do_cmd(" rollout status deployment.v1.apps/hello-world --timeout=180s")
# You can check if a Deployment has completed by using kubectl rollout status.
# If the rollout completed successfully, kubectl rollout status returns a zero exit code.

if rc != 0:
self._sample_deployment = 2
self._sample_output = sample_output
self._set_results_namespace_admin(viya_constants.PERM_DEPLOYMENT, rc)
self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc)
return 2

self._set_results_namespace_admin(viya_constants.PERM_DEPLOYMENT, rc)
self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc)

if rc == 0:
rc = self.utils.do_cmd(" scale --replicas=2 deployment/hello-world ")
rc, sample_output = self.utils.do_cmd(" scale --replicas=2 deployment/hello-world ")
if rc != 0:
self._sample_deployment = 3
self._set_results_namespace_admin(viya_constants.PERM_REPLICASET, rc)
return 3
else:
self._sample_deployment = 1
self._set_results_namespace_admin(viya_constants.PERM_DEPLOYMENT, rc)
self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc)

if rc == 0:
self._set_results_namespace_admin(viya_constants.PERM_REPLICASET, rc)
return 1

def check_sample_service(self):
"""
@@ -396,8 +443,9 @@ def check_delete_sample_application(self):
rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_DELETE,
'hello-application.yaml')
self._set_results_namespace_admin(viya_constants.PERM_DELETE + viya_constants.PERM_DEPLOYMENT, rc)
self._set_results_namespace_admin(viya_constants.PERM_DELETE + viya_constants.PERM_SERVICE, rc)

rc = self.utils.do_cmd(" wait --for=delete pod -l app=hello-world-pod --timeout=12s ")
self.utils.do_cmd(" wait --for=delete pod -l app=hello-world-pod --timeout=12s ")

def check_delete_sample_service(self):
"""
@@ -443,12 +491,13 @@ def check_deploy_crd(self):
def check_rbac_role(self):
"""
Check if RBAC is enabled in specified namespace
Create the Role and Rolebinding for the custome resource access with specified namespace. Set the
Create the Role and Rolebinding for the custom resource access with specified namespace. Set the
permissions status in the namespace_admin_permission_data dict object.
"""
found = self.utils.get_rbac_group_cmd()

self.logger.debug("get_rbace_group_cmd found = {}, sample_deployment = {}"
.format(str(found), str(self._sample_deployment)))
if found:
rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_APPLY,
'viya-role.yaml')
@@ -463,7 +512,14 @@
'viya-rolebinding.yaml')
self._set_results_namespace_admin(viya_constants.PERM_CREATE + viya_constants.PERM_ROLEBINDING, rc)
else:
self.logger.debug("sample_deployment = {}".format(str(self._sample_deployment)))
self.namespace_admin_permission_aggregate["RBAC Checking"] = viya_constants.PERM_SKIPPING
self._set_results_namespace_admin(viya_constants.PERM_CREATE + viya_constants.PERM_ROLE,
int(self._sample_deployment))
self._set_results_namespace_admin(viya_constants.PERM_CREATE + viya_constants.PERM_SA,
int(self._sample_deployment))
self._set_results_namespace_admin(viya_constants.PERM_CREATE + viya_constants.PERM_ROLEBINDING,
int(self._sample_deployment))

def check_rbac_delete_role(self):
"""
@@ -495,14 +551,14 @@ def check_get_custom_resource(self, namespace):
if not allowed:
rc1 = 1

self._set_results_namespace_admin_crd(viya_constants.PERM_CREATE + viya_constants.PERM_CR + " with RBAC "
+ viya_constants.PERM_SA + " resp: = " + str(allowed), rc1)
self._set_results_namespace_admin_crd(viya_constants.PERM_CREATE + viya_constants.PERM_CR_RBAC
+ viya_constants.PERM_SA, rc1)
allowed: bool = self.utils.can_i(' delete viyas.company.com --as=system:serviceaccount:'
+ namespace + ':crreader ')
if allowed:
rc2 = 1
self._set_results_namespace_admin_crd(viya_constants.PERM_DELETE + viya_constants.PERM_CR + " with RBAC "
+ viya_constants.PERM_SA + " resp: = " + str(allowed), rc2)
self._set_results_namespace_admin_crd(viya_constants.PERM_DELETE + viya_constants.PERM_CR_RBAC
+ viya_constants.PERM_SA, rc2)

def check_delete_custom_resource(self):
"""
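The rollout gate in check_sample_application() relies on kubectl's exit code; here is a hedged standalone equivalent using subprocess (assumes kubectl is on PATH and the sample hello-world deployment has been applied):

```python
# Standalone sketch of the rollout gate used in check_sample_application().
# Assumes kubectl is on PATH and the sample hello-world deployment exists.
import subprocess

result = subprocess.run(
    ["kubectl", "rollout", "status",
     "deployment.v1.apps/hello-world", "--timeout=180s"],
    capture_output=True, text=True)

# kubectl rollout status returns a zero exit code only when the rollout
# completed successfully; anything else marks the sample deployment failed.
if result.returncode != 0:
    print("Sample Deployment Check failed:", result.stdout or result.stderr)
```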