diff --git a/deployment_report/model/static/viya_deployment_report_keys.py b/deployment_report/model/static/viya_deployment_report_keys.py
index 34e0fdf..33e9d94 100644
--- a/deployment_report/model/static/viya_deployment_report_keys.py
+++ b/deployment_report/model/static/viya_deployment_report_keys.py
@@ -40,6 +40,7 @@ class Kubernetes(object):
"""
API_RESOURCES_DICT = "apiResources"
API_VERSIONS_LIST = "apiVersions"
+ CADENCE_INFO = "cadenceInfo"
DISCOVERED_KINDS_DICT = "discoveredKinds"
INGRESS_CTRL = "ingressController"
NAMESPACE = "namespace"
diff --git a/deployment_report/model/test/test_viya_deployment_report.py b/deployment_report/model/test/test_viya_deployment_report.py
index 256896c..cca5cd1 100644
--- a/deployment_report/model/test/test_viya_deployment_report.py
+++ b/deployment_report/model/test/test_viya_deployment_report.py
@@ -86,9 +86,10 @@ def test_get_kubernetes_details(report: ViyaDeploymentReport) -> None:
kube_details: Dict = report.get_kubernetes_details()
# check for all expected entries
- assert len(kube_details) == 7
+ assert len(kube_details) == 8
assert ReportKeys.Kubernetes.API_RESOURCES_DICT in kube_details
assert ReportKeys.Kubernetes.API_VERSIONS_LIST in kube_details
+ assert ReportKeys.Kubernetes.CADENCE_INFO in kube_details
assert ReportKeys.Kubernetes.DISCOVERED_KINDS_DICT in kube_details
assert ReportKeys.Kubernetes.INGRESS_CTRL in kube_details
assert ReportKeys.Kubernetes.NAMESPACE in kube_details
@@ -602,3 +603,32 @@ def test_write_report_unpopulated() -> None:
# make sure None is returned
assert data_file is None
assert html_file is None
+
+
+def test_get_cadence_version(report: ViyaDeploymentReport) -> None:
+ """
+ This test verifies that the expected cadence information is returned when a ConfigMap resource is passed to get_cadence_version().
+
+ :param report: The populated ViyaDeploymentReport returned by the report() fixture.
+ """
+ # gather the ConfigMap resources from the test kubectl implementation
+
+ cadence_data = KubectlTest().get_resources("ConfigMaps")
+ cadence_info: Optional[Text] = None
+
+ for c in cadence_data:
+ cadence_info = report.get_cadence_version(c)
+ if cadence_info:
+ break
+
+ assert cadence_info == KubectlTest.Values.CADENCEINFO
+
+
+def test_get_cadence_version_none() -> None:
+ """
+ This test verifies that None is returned when cadence-related resources are requested from an unpopulated report.
+ """
+
+ # make sure None is returned
+ assert ViyaDeploymentReport().get_sas_component_resources(KubectlTest.Values.CADENCEINFO,
+ KubernetesResource.Kinds.CONFIGMAP) is None
diff --git a/deployment_report/model/utils/test/test_viya_deployment_report_utils.py b/deployment_report/model/utils/test/test_viya_deployment_report_utils.py
index 4eb0329..de07e78 100644
--- a/deployment_report/model/utils/test/test_viya_deployment_report_utils.py
+++ b/deployment_report/model/utils/test/test_viya_deployment_report_utils.py
@@ -52,11 +52,12 @@ def gathered_resources() -> Dict:
# create a list of resource kinds to gather
# nodes and networking kinds do not typically have owning objects, so these need to be called individually
kinds_list: List = [
+ KubernetesResource.Kinds.CONFIGMAP,
+ KubernetesResource.Kinds.INGRESS,
+ KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE,
KubernetesResource.Kinds.NODE,
KubernetesResource.Kinds.POD,
- KubernetesResource.Kinds.SERVICE,
- KubernetesResource.Kinds.INGRESS,
- KubernetesResource.Kinds.ISTIO_VIRTUAL_SERVICE]
+ KubernetesResource.Kinds.SERVICE]
for resource_kind in kinds_list:
ViyaDeploymentReportUtils.gather_resource_details(
diff --git a/deployment_report/model/viya_deployment_report.py b/deployment_report/model/viya_deployment_report.py
index 022eaed..38aeaba 100644
--- a/deployment_report/model/viya_deployment_report.py
+++ b/deployment_report/model/viya_deployment_report.py
@@ -109,6 +109,21 @@ def gather_details(self, kubectl: KubectlInterface,
# create dictionary to store gathered resources #
gathered_resources: Dict = dict()
+ # first, gather details about ConfigMaps to determine the cadence version #
+ cadence_info: Optional[Text] = None
+ try:
+ ViyaDeploymentReportUtils.gather_resource_details(kubectl, gathered_resources, api_resources,
+ k8s_kinds.CONFIGMAP)
+ for details in gathered_resources[k8s_kinds.CONFIGMAP]['items'].values():
+ resource_definition = details['resourceDefinition']
+ cadence_info = self.get_cadence_version(resource_definition)
+ if cadence_info:
+ break
+
+ except CalledProcessError:
+ pass
+
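+ # reset the dictionary so the ConfigMaps gathered above are not included in the report #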
+ gathered_resources = dict()
# start by gathering details about Nodes, if available #
# this information can be reported even if Pods are not listable #
try:
@@ -256,6 +271,8 @@ def gather_details(self, kubectl: KubectlInterface,
k8s_details_dict[Keys.Kubernetes.VERSIONS_DICT]: Dict = kubectl.version()
# create a key to hold the meta information about resources discovered in the cluster: dict #
k8s_details_dict[Keys.Kubernetes.DISCOVERED_KINDS_DICT]: Dict = dict()
+ # create a key to hold the cadence version information: str|None #
+ k8s_details_dict[Keys.Kubernetes.CADENCE_INFO]: Optional[Text] = cadence_info
# add the availability and count of all discovered resources #
for kind_name, kind_details in gathered_resources.items():
@@ -504,6 +521,25 @@ def get_sas_component_resources(self, component_name: Text, resource_kind: Text)
except KeyError:
return None
+ def get_cadence_version(self, resource: KubernetesResource) -> Optional[Text]:
+ """
+ Returns the cadence version information assembled from the SAS deployment metadata ConfigMap.
+
+ :param resource: The ConfigMap resource to check for cadence data.
+ :return: The formatted cadence information, or None if the resource doesn't define it.
+ """
+ cadence_info: Optional[Text] = None
+ try:
+ if 'sas-deployment-metadata' in resource.get_name():
+ cadence_data = resource.get_data()
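+ # assemble the cadence string, e.g. "Fast 2020 (20201214.1607958443388)" #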
+ cadence_info = cadence_data['SAS_CADENCE_NAME'].capitalize() + ' ' + \
+ cadence_data['SAS_CADENCE_VERSION'] + ' (' + \
+ cadence_data['SAS_CADENCE_RELEASE'] + ')'
+
+ return cadence_info
+ except KeyError:
+ return None
+
def write_report(self, output_directory: Text = OUTPUT_DIRECTORY_DEFAULT,
data_file_only: bool = DATA_FILE_ONLY_DEFAULT,
include_resource_definitions: bool = INCLUDE_RESOURCE_DEFINITIONS_DEFAULT,
diff --git a/deployment_report/templates/viya_deployment_report.html.j2 b/deployment_report/templates/viya_deployment_report.html.j2
index 4be85a9..f79b9d6 100644
--- a/deployment_report/templates/viya_deployment_report.html.j2
+++ b/deployment_report/templates/viya_deployment_report.html.j2
@@ -92,6 +92,10 @@
Ingress Controller |
{{ report_data.kubernetes.ingressController | default("could not be determined") }} |
+
+ Cadence Version |
+ {{ report_data.kubernetes.cadenceInfo | default("could not be determined") }} |
{# Cluster Overview: Overview Table #}
@@ -258,4 +262,4 @@
{# API: Content #}
{% endblock %}
-{# BLOCK DEFINITIONS #}
\ No newline at end of file
+{# BLOCK DEFINITIONS #}
diff --git a/pre_install_report/README.md b/pre_install_report/README.md
index c8291ef..41b462e 100644
--- a/pre_install_report/README.md
+++ b/pre_install_report/README.md
@@ -58,6 +58,10 @@ $ export INGRESS_HOST=$(kubectl -n <namespace> get service <ingress-service> -o jsonpath='{.status.loadBalancer.ingress[*].ip}')
$ export INGRESS_HTTP_PORT=$(kubectl -n <namespace> get service <ingress-service> -o jsonpath='{.spec.ports[?(@.name=="http")].port}')
$ export INGRESS_HTTPS_PORT=$(kubectl -n <namespace> get service <ingress-service> -o jsonpath='{.spec.ports[?(@.name=="https")].port}')
```
+The command to determine the Ingress Host may differ slightly with Amazon Elastic Kubernetes Service (EKS):
+```
+$ export INGRESS_HOST=$(kubectl -n <namespace> get service <ingress-service> -o jsonpath='{.status.loadBalancer.ingress[*].hostname}')
+```
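+On EKS, the ingress service's load balancer is typically exposed as a DNS hostname rather than an external IP address, so the jsonpath queries the hostname field instead.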
Use the values gathered on the command line for http or https as appropriate for your deployment:
diff --git a/pre_install_report/library/pre_install_check.py b/pre_install_report/library/pre_install_check.py
index 8a492dc..1b5da78 100644
--- a/pre_install_report/library/pre_install_check.py
+++ b/pre_install_report/library/pre_install_check.py
@@ -73,6 +73,7 @@ def __init__(self, sas_logger: ViyaARKLogger, viya_kubelet_version_min, viya_min
self._viya_min_aggregate_worker_memory: Text = viya_min_aggregate_worker_memory
self._calculated_aggregate_allocatable_memory = None
self._workers = 0
+ self._aggregate_nodeStatus_failures = 0
def _parse_release_info(self, release_info):
"""
@@ -609,11 +610,15 @@ def _check_kubelet_errors(self, global_data, aggregate_kubelet_failures):
return: updated global data about worker nodes retrieved
"""
aggregate_kubelet_data = {}
- aggregate_kubelet_data.update({'aggregate_kubelet_failures': str(aggregate_kubelet_failures)})
+ node_status_msg = ""
+ if self._aggregate_nodeStatus_failures > 0:
+ node_status_msg = " Check Node(s). All Nodes NOT in Ready Status." \
+ + ' Issues Found: ' + str(self._aggregate_nodeStatus_failures)
+ aggregate_kubelet_data.update({'aggregate_kubelet_failures': node_status_msg})
if aggregate_kubelet_failures > 0:
aggregate_kubelet_data.update({'aggregate_kubelet_failures':
'Check Kubelet Version on nodes.' +
- ' Issues Found: ' + str(aggregate_kubelet_failures)})
+ ' Issues Found: ' + str(aggregate_kubelet_failures) + '.' + node_status_msg})
global_data.append(aggregate_kubelet_data)
return global_data
@@ -735,6 +740,15 @@ def evaluate_nodes(self, nodes_data, global_data, cluster_info, quantity_):
total_memory = total_memory + quantity_(str(node['memory']))
total_allocatable_memory = total_allocatable_memory + quantity_(alloc_memory)
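+ # flag any node whose Ready condition is not "True"; a missing condition is recorded as KEY_NOT_FOUND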
+ try:
+ if str(node['Ready']) != "True":
+ self._aggregate_nodeStatus_failures += 1
+ except KeyError:
+ node['Ready'] = viya_constants.KEY_NOT_FOUND
+
if node['worker']:
total_cpu_cores = total_cpu_cores + alloc_cpu_cores
self.logger.info("worker total_cpu_cores {}".format(str(total_cpu_cores)))
diff --git a/pre_install_report/library/pre_install_check_permissions.py b/pre_install_report/library/pre_install_check_permissions.py
index 95d0690..83c3454 100644
--- a/pre_install_report/library/pre_install_check_permissions.py
+++ b/pre_install_report/library/pre_install_check_permissions.py
@@ -65,6 +65,8 @@ def __init__(self, params):
self.ingress_data[viya_constants.INGRESS_CONTROLLER] = self.ingress_controller
self.ingress_file = "hello-ingress.yaml"
self._storage_class_sc: List[KubernetesResource] = None
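+ # track sample-deployment failures: 0 = no failure, 1 = apply failed, 2 = rollout status failed, 3 = scale failed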
+ self._sample_deployment = 0
+ self._sample_output = ""
def _set_results_cluster_admin(self, resource_key, rc):
"""
@@ -73,7 +75,9 @@ def _set_results_cluster_admin(self, resource_key, rc):
"""
if rc == 1:
self.cluster_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS
- self.cluster_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] = viya_constants.INSUFFICIENT_PERMS
+ self.cluster_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] = \
+ viya_constants.INSUFFICIENT_PERMS + ". Check Logs."
+
else:
self.cluster_admin_permission_data[resource_key] = viya_constants.ADEQUATE_PERMS
@@ -82,12 +86,41 @@ def _set_results_namespace_admin(self, resource_key, rc):
Set permissions status for specified resource/verb with namespace admin role
"""
- if rc == 1:
- self.namespace_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS
- self.namespace_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] \
- = viya_constants.INSUFFICIENT_PERMS
+ sample_keys = [viya_constants.PERM_DEPLOYMENT]
+ deployment_keys = [viya_constants.PERM_DELETE + viya_constants.PERM_DEPLOYMENT,
+ viya_constants.PERM_SERVICE,
+ viya_constants.PERM_DELETE + viya_constants.PERM_SERVICE,
+ viya_constants.PERM_INGRESS,
+ viya_constants.PERM_DELETE + viya_constants.PERM_INGRESS,
+ viya_constants.PERM_REPLICASET,
+ viya_constants.PERM_CREATE + viya_constants.PERM_ROLE,
+ viya_constants.PERM_CREATE + viya_constants.PERM_ROLEBINDING,
+ viya_constants.PERM_CREATE + viya_constants.PERM_SA,
+ viya_constants.PERM_DELETE + viya_constants.PERM_ROLE,
+ viya_constants.PERM_DELETE + viya_constants.PERM_ROLEBINDING,
+ viya_constants.PERM_DELETE + viya_constants.PERM_SA
+ ]
+ if rc != 0:
+ self.logger.debug("resource_key = {}, sample_deployment = {} ".format(str(resource_key),
+ str(self._sample_deployment)))
+ if self._sample_deployment != 0:
+ if resource_key in deployment_keys:
+ self.namespace_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS
+ if resource_key in sample_keys:
+ self.namespace_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS + \
+ ". Sample Deployment Check failed! " + \
+ "Ensure Node(s) Status is Ready. " + \
+ "Check Permissions in specified namespace. " \
+ + self._sample_output
+
+ else:
+ self.namespace_admin_permission_data[resource_key] = viya_constants.INSUFFICIENT_PERMS
+ self.namespace_admin_permission_aggregate[viya_constants.PERM_PERMISSIONS] = \
+ viya_constants.INSUFFICIENT_PERMS + ". Check Logs."
else:
self.namespace_admin_permission_data[resource_key] = viya_constants.ADEQUATE_PERMS
def _get_pvc(self, pvc_name, key):
"""
@@ -284,20 +317,34 @@ def check_sample_application(self):
rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_APPLY,
'hello-application.yaml')
- # self._set_results_namespace_admin(viya_constants.PERM_DEPLOYMENT, rc)
- # self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc)
if rc == 0:
- rc = self.utils.do_cmd(" rollout status deployment.v1.apps/hello-world ")
+ rc, sample_output = self.utils.do_cmd(" rollout status deployment.v1.apps/hello-world --timeout=180s")
+ # You can check if a Deployment has completed by using kubectl rollout status.
+ # If the rollout completed successfully, kubectl rollout status returns a zero exit code.
+
+ if rc != 0:
+ self._sample_deployment = 2
+ self._sample_output = sample_output
+ self._set_results_namespace_admin(viya_constants.PERM_DEPLOYMENT, rc)
+ self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc)
+ return 2
self._set_results_namespace_admin(viya_constants.PERM_DEPLOYMENT, rc)
self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc)
if rc == 0:
- rc = self.utils.do_cmd(" scale --replicas=2 deployment/hello-world ")
+ rc, sample_output = self.utils.do_cmd(" scale --replicas=2 deployment/hello-world ")
+ if rc != 0:
+ self._sample_deployment = 3
+ self._set_results_namespace_admin(viya_constants.PERM_REPLICASET, rc)
+ return 3
+ else:
+ self._sample_deployment = 1
+ self._set_results_namespace_admin(viya_constants.PERM_DEPLOYMENT, rc)
+ self._set_results_namespace_admin(viya_constants.PERM_SERVICE, rc)
- if rc == 0:
- self._set_results_namespace_admin(viya_constants.PERM_REPLICASET, rc)
+ return 1
def check_sample_service(self):
"""
@@ -396,8 +443,9 @@ def check_delete_sample_application(self):
rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_DELETE,
'hello-application.yaml')
self._set_results_namespace_admin(viya_constants.PERM_DELETE + viya_constants.PERM_DEPLOYMENT, rc)
+ self._set_results_namespace_admin(viya_constants.PERM_DELETE + viya_constants.PERM_SERVICE, rc)
- rc = self.utils.do_cmd(" wait --for=delete pod -l app=hello-world-pod --timeout=12s ")
+ self.utils.do_cmd(" wait --for=delete pod -l app=hello-world-pod --timeout=12s ")
def check_delete_sample_service(self):
"""
@@ -443,12 +491,13 @@ def check_deploy_crd(self):
def check_rbac_role(self):
"""
Check if RBAC is enabled in specified namespace
- Create the Role and Rolebinding for the custome resource access with specified namespace. Set the
+ Create the Role and Rolebinding for the custom resource access with specified namespace. Set the
permissions status in the namespace_admin_permission_data dict object.
"""
found = self.utils.get_rbac_group_cmd()
-
+ self.logger.debug("get_rbace_group_cmd found = {}, sample_deployment = {}"
+ .format(str(found), str(self._sample_deployment)))
if found:
rc = self.utils.deploy_manifest_file(viya_constants.KUBECTL_APPLY,
'viya-role.yaml')
@@ -463,7 +512,14 @@ def check_rbac_role(self):
'viya-rolebinding.yaml')
self._set_results_namespace_admin(viya_constants.PERM_CREATE + viya_constants.PERM_ROLEBINDING, rc)
else:
+ self.logger.debug("sample_deployment = {}".format(str(self._sample_deployment)))
self.namespace_admin_permission_aggregate["RBAC Checking"] = viya_constants.PERM_SKIPPING
+ self._set_results_namespace_admin(viya_constants.PERM_CREATE + viya_constants.PERM_ROLE,
+ int(self._sample_deployment))
+ self._set_results_namespace_admin(viya_constants.PERM_CREATE + viya_constants.PERM_SA,
+ int(self._sample_deployment))
+ self._set_results_namespace_admin(viya_constants.PERM_CREATE + viya_constants.PERM_ROLEBINDING,
+ int(self._sample_deployment))
def check_rbac_delete_role(self):
"""
@@ -495,14 +551,14 @@ def check_get_custom_resource(self, namespace):
if not allowed:
rc1 = 1
- self._set_results_namespace_admin_crd(viya_constants.PERM_CREATE + viya_constants.PERM_CR + " with RBAC "
- + viya_constants.PERM_SA + " resp: = " + str(allowed), rc1)
+ self._set_results_namespace_admin_crd(viya_constants.PERM_CREATE + viya_constants.PERM_CR_RBAC
+ + viya_constants.PERM_SA, rc1)
allowed: bool = self.utils.can_i(' delete viyas.company.com --as=system:serviceaccount:'
+ namespace + ':crreader ')
if allowed:
rc2 = 1
- self._set_results_namespace_admin_crd(viya_constants.PERM_DELETE + viya_constants.PERM_CR + " with RBAC "
- + viya_constants.PERM_SA + " resp: = " + str(allowed), rc2)
+ self._set_results_namespace_admin_crd(viya_constants.PERM_DELETE + viya_constants.PERM_CR_RBAC
+ + viya_constants.PERM_SA, rc2)
def check_delete_custom_resource(self):
"""
diff --git a/pre_install_report/library/pre_install_utils.py b/pre_install_report/library/pre_install_utils.py
index de9dfde..c27718d 100644
--- a/pre_install_report/library/pre_install_utils.py
+++ b/pre_install_report/library/pre_install_utils.py
@@ -77,12 +77,12 @@ def do_cmd(self, test_cmd):
self.logger.info("cmd {} rc = 0".format(test_cmd))
self.logger.debug("cmd {} rc = 0 response {}".format(test_cmd, str(data)))
- return 0
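+ # also return the command output so callers can surface failure details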
+ return 0, str(data)
except CalledProcessError as e:
data = e.output
self.logger.error("do_cmd " + ' rc = ' + str(e.returncode) + test_cmd +
' data = ' + str(data))
- return e.returncode
+ return e.returncode, str(data)
def get_rbac_group_cmd(self):
"""
@@ -91,14 +91,14 @@ def get_rbac_group_cmd(self):
cmd: kubectl command to retrieve api_resources
return: True if both Role and RoleBinding kinds have an api_group
"""
- role = None
- rolebinding = None
+ role: Optional[Text] = None
+ rolebinding: Optional[Text] = None
try:
data: KubernetesApiResources = self._kubectl.api_resources(False)
role = data.get_api_group("Role")
rolebinding = data.get_api_group("RoleBinding")
except CalledProcessError as e:
- self.logger.exception("get_rbac_group_cmd rc {}" + str(e.returncode))
+ self.logger.exception("get_rbac_group_cmd rc {} ".format(str(e.returncode)))
return False
if role is None:
return False
diff --git a/pre_install_report/library/utils/viya_constants.py b/pre_install_report/library/utils/viya_constants.py
index ebe5b9c..6940401 100644
--- a/pre_install_report/library/utils/viya_constants.py
+++ b/pre_install_report/library/utils/viya_constants.py
@@ -50,3 +50,4 @@
PERM_ROLEBINDING = "RoleBinding"
PERM_SA = "Service Account"
PERM_CLASS = "PreInstallUtils"
+PERM_CR_RBAC = "Custom Resource with RBAC "
diff --git a/pre_install_report/templates/report_template_viya_pre_install_check.j2 b/pre_install_report/templates/report_template_viya_pre_install_check.j2
index ecd072d..7d469a0 100644
--- a/pre_install_report/templates/report_template_viya_pre_install_check.j2
+++ b/pre_install_report/templates/report_template_viya_pre_install_check.j2
@@ -189,6 +189,7 @@
Kubelet Version |
Container Runtime |
Kernel Version |
+ Node Status |
Issues |
|
{% for node in nodes_data %}
@@ -197,6 +198,7 @@
{{node.kubeletversion}} |
{{node.containerRuntimeVersion}} |
{{node.kernelVersion}} |
+ {{node.Ready}} |
{{node.error.kubeletversion}} |
{% endfor %}
@@ -355,7 +357,7 @@
- Namespace Admin Permissions: {{ namespace_admin_permission_aggregate }}
+ Namespace Admin Permissions - {{ namespace_admin_permission_aggregate }}
diff --git a/pre_install_report/test/test_data/json_data/azure_nodes_no_master.json b/pre_install_report/test/test_data/json_data/azure_nodes_no_master.json
index 4f0dc08..15f468b 100644
--- a/pre_install_report/test/test_data/json_data/azure_nodes_no_master.json
+++ b/pre_install_report/test/test_data/json_data/azure_nodes_no_master.json
@@ -112,7 +112,7 @@
"lastTransitionTime": "2020-09-02T20:10:38Z",
"message": "kubelet is posting ready status. AppArmor enabled",
"reason": "KubeletReady",
- "status": "True",
+ "status": "Unknown",
"type": "Ready"
}
],
@@ -651,7 +651,7 @@
"lastTransitionTime": "2020-09-02T20:10:08Z",
"message": "kubelet is posting ready status. AppArmor enabled",
"reason": "KubeletReady",
- "status": "True",
+ "status": "Unknown",
"type": "Ready"
}
],
@@ -816,7 +816,7 @@
"lastTransitionTime": "2020-09-02T20:09:34Z",
"message": "kubelet is posting ready status. AppArmor enabled",
"reason": "KubeletReady",
- "status": "True",
+ "status": "Unknown",
"type": "Ready"
}
],
@@ -993,7 +993,7 @@
"lastTransitionTime": "2020-09-02T20:09:38Z",
"message": "kubelet is posting ready status. AppArmor enabled",
"reason": "KubeletReady",
- "status": "True",
+ "status": "Unknown",
"type": "Ready"
}
],
@@ -1175,7 +1175,7 @@
"lastTransitionTime": "2020-09-02T20:09:54Z",
"message": "kubelet is posting ready status. AppArmor enabled",
"reason": "KubeletReady",
- "status": "True",
+ "status": "Unknown",
"type": "Ready"
}
],
@@ -1326,7 +1326,7 @@
"lastTransitionTime": "2020-09-02T20:09:42Z",
"message": "kubelet is posting ready status. AppArmor enabled",
"reason": "KubeletReady",
- "status": "True",
+ "status": "Unknown",
"type": "Ready"
}
],
@@ -1469,7 +1469,7 @@
"lastTransitionTime": "2020-09-02T20:06:29Z",
"message": "kubelet is posting ready status. AppArmor enabled",
"reason": "KubeletReady",
- "status": "True",
+ "status": "Unknown",
"type": "Ready"
}
],
@@ -1612,7 +1612,7 @@
"lastTransitionTime": "2020-09-02T20:06:46Z",
"message": "kubelet is posting ready status. AppArmor enabled",
"reason": "KubeletReady",
- "status": "True",
+ "status": "Unknown",
"type": "Ready"
}
],
diff --git a/pre_install_report/test/test_pre_install_report.py b/pre_install_report/test/test_pre_install_report.py
index 3f57e39..d31055c 100644
--- a/pre_install_report/test/test_pre_install_report.py
+++ b/pre_install_report/test/test_pre_install_report.py
@@ -184,7 +184,7 @@ def test_get_nested_nodes_info():
' Issues Found: 1'
total_allocatable_memoryG = vpc.get_calculated_aggregate_memory() # quantity_("62.3276481628418 Gi").to('G')
assert str(round(total_allocatable_memoryG.to("G"), 2)) == '66.92 G'
- assert global_data[4]['aggregate_kubelet_failures'] in '1, Check Kubelet Version on nodes. Issues Found: 1'
+ assert global_data[4]['aggregate_kubelet_failures'] in 'Check Kubelet Version on nodes. Issues Found: 1.'
template_render(global_data, configs_data, storage_data, 'nested_nodes_info.html')
@@ -227,7 +227,7 @@ def test_get_nested_millicores_nodes_info():
total_allocatable_memoryG = vpc.get_calculated_aggregate_memory()
assert str(round(total_allocatable_memoryG.to("G"), 2)) == '66.92 G'
- assert global_data[4]['aggregate_kubelet_failures'] in '2, Check Kubelet Version on nodes. Issues Found: 2'
+ assert global_data[4]['aggregate_kubelet_failures'] in 'Check Kubelet Version on nodes. Issues Found: 2.'
template_render(global_data, configs_data, storage_data, 'nested_millicores_nodes_info.html')
@@ -456,6 +456,9 @@ def test_azure_multi_get_nested_nodes_info():
global_data = []
cluster_info = "Kubernetes master is running at https://node3:6443\n"
+ for node in nodes_data:
+ assert node['Ready'] == 'True'
+
global_data = vpc.evaluate_nodes(nodes_data, global_data, cluster_info, quantity_)
pprint.pprint(global_data)
for nodes in global_data:
@@ -469,7 +472,7 @@ def test_azure_multi_get_nested_nodes_info():
def test_azure_worker_nodes():
-
+ viya_kubelet_version_min = 'v1.17.0'
vpc = createViyaPreInstallCheck(viya_kubelet_version_min,
viya_min_worker_allocatable_CPU,
viya_min_aggregate_worker_CPU_cores,
@@ -489,7 +492,7 @@ def test_azure_worker_nodes():
global_data = []
cluster_info = "Kubernetes master is running at https://node3:6443\n"
-
+ issues_found = 8
global_data = vpc.evaluate_nodes(nodes_data, global_data, cluster_info, quantity_)
pprint.pprint(global_data)
for nodes in global_data:
@@ -497,7 +500,9 @@ def test_azure_worker_nodes():
'Expected: 12, Calculated: 141.56, Issues Found: 0'
assert global_data[3]['aggregate_memory_failures'] in 'Expected: 56G, Calculated: 727.85 G, ' \
'Issues Found: 0'
- assert global_data[4]['aggregate_kubelet_failures'] in '0, Check Kubelet Version on nodes.'
+ assert global_data[4]['aggregate_kubelet_failures'] in 'Check Kubelet Version on nodes. Issues Found: 10. ' \
+ 'Check Node(s). All Nodes NOT in Ready Status. ' \
+ 'Issues Found: ' + str(issues_found)
template_render(global_data, configs_data, storage_data, 'azure_nodes_no_master.html')
diff --git a/viya_ark_library/k8s/sas_k8s_objects.py b/viya_ark_library/k8s/sas_k8s_objects.py
index 8ed7c32..add7f1e 100644
--- a/viya_ark_library/k8s/sas_k8s_objects.py
+++ b/viya_ark_library/k8s/sas_k8s_objects.py
@@ -347,6 +347,7 @@ class Keys(object):
CONTROLLER_TEMPLATE = "controllerTemplate"
CREATION_TIMESTAMP = "creationTimestamp"
CURRENT_REPLICAS = "currentReplicas"
+ DATA = "data"
DESTINATION = "destination"
ENV = "env"
EXACT = "exact"
@@ -432,6 +433,7 @@ class Kinds(object):
All available kinds can be discovered by calling the Kubectl.get_api_resources() method.
"""
CAS_DEPLOYMENT = "CASDeployment"
+ CONFIGMAP = "ConfigMap"
CRON_JOB = "CronJob"
CRUNCHY_PG_BACKUP = "Pgbackup"
CRUNCHY_PG_CLUSTER = "Pgcluster"
@@ -766,3 +768,11 @@ def get_provisioner(self) -> Optional[AnyStr]:
:return: This Resource's 'provisioner' value.
"""
return self._resource.get(self.Keys.PROVISIONER)
+
+ def get_data(self) -> Optional[Dict]:
+ """
+ Returns the 'data' dictionary for this Resource.
+
+ :return: This Resource's 'data' dictionary.
+ """
+ return self._resource.get(self.Keys.DATA)
diff --git a/viya_ark_library/k8s/sas_kubectl.py b/viya_ark_library/k8s/sas_kubectl.py
index 1eac275..6449d10 100644
--- a/viya_ark_library/k8s/sas_kubectl.py
+++ b/viya_ark_library/k8s/sas_kubectl.py
@@ -22,6 +22,7 @@
_HEADER_NAME_ = "NAME"
_HEADER_SHORTNAME_ = "SHORTNAMES"
_HEADER_APIGROUP_ = "APIGROUP"
+_HEADER_APIVERSION_ = "APIVERSION"
_HEADER_NAMESPACED_ = "NAMESPACED"
_HEADER_KIND_ = "KIND"
_HEADER_VERBS_ = "VERBS"
@@ -140,7 +141,16 @@ def api_resources(self, ignore_errors: bool = False) -> KubernetesApiResources:
# get the index of all expected headers #
name_index: int = api_resource_headers.index(_HEADER_NAME_)
shortname_index: int = api_resource_headers.index(_HEADER_SHORTNAME_)
- apigroup_index: int = api_resource_headers.index(_HEADER_APIGROUP_)
+
+ # the "APIGROUP" header is renamed to "APIVERSION" at kubectl v1.20.0
+ # since group describes the values listed at all versions, this method will
+ # return the value as the "api_group"
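+ # e.g. headers before v1.20: NAME SHORTNAMES APIGROUP NAMESPACED KIND VERBS
+ # e.g. headers at/after v1.20: NAME SHORTNAMES APIVERSION NAMESPACED KIND VERBS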
+ apigroup_index: int = -1
+ if _HEADER_APIGROUP_ in api_resource_headers:
+ apigroup_index = api_resource_headers.index(_HEADER_APIGROUP_)
+ elif _HEADER_APIVERSION_ in api_resource_headers:
+ apigroup_index = api_resource_headers.index(_HEADER_APIVERSION_)
+
namespaced_index: int = api_resource_headers.index(_HEADER_NAMESPACED_)
kind_index: int = api_resource_headers.index(_HEADER_KIND_)
verbs_index: int = api_resource_headers.index(_HEADER_VERBS_)
@@ -176,11 +186,12 @@ def api_resources(self, ignore_errors: bool = False) -> KubernetesApiResources:
break
# get the api group value #
- for char in api_resource_line[apigroup_index:]:
- if char != " ":
- api_group = api_group + char
- else:
- break
+ if apigroup_index != -1:
+ for char in api_resource_line[apigroup_index:]:
+ if char != " ":
+ api_group = api_group + char
+ else:
+ break
# get the namespaced value #
for char in api_resource_line[namespaced_index:]:
@@ -258,7 +269,7 @@ def manage_resource(self, action: Text, file: Text, ignore_errors: bool = False,
# define the command #
cmd: Text = f" {action} -f {file}"
# run the command #
- return (self.do(cmd, ignore_errors))
+ return self.do(cmd, ignore_errors)
def config_view(self, ignore_errors: bool = False) -> Dict:
# get the raw config response
diff --git a/viya_ark_library/k8s/test_impl/response_data/resources_configmaps.json b/viya_ark_library/k8s/test_impl/response_data/resources_configmaps.json
new file mode 100644
index 0000000..0d13b34
--- /dev/null
+++ b/viya_ark_library/k8s/test_impl/response_data/resources_configmaps.json
@@ -0,0 +1,56 @@
+[
+ {
+ "apiVersion": "v1",
+ "data": {
+ "SAS_BUILD_TYPE": "x64_oci_linux_2-docker",
+ "SAS_CADENCE_DISPLAY_NAME": "Fast R/TR",
+ "SAS_CADENCE_DISPLAY_SHORT_NAME": "Fast",
+ "SAS_CADENCE_DISPLAY_VERSION": "R/TR",
+ "SAS_CADENCE_NAME": "fast",
+ "SAS_CADENCE_RELEASE": "20201214.1607958443388",
+ "SAS_CADENCE_VERSION": "2020",
+ "SAS_DEPLOYMENT_TYPE": "default",
+ "SAS_REPOSITORY_WAREHOUSE_URL": "https://ses.sas.download/ses"
+ },
+ "kind": "ConfigMap",
+ "metadata": {
+ "annotations": {
+ "kubectl.kubernetes.io/last-applied-configuration": ""
+ },
+ "creationTimestamp": "2020-12-14T15:29:58Z",
+ "labels": {
+ "sas.com/admin": "namespace",
+ "sas.com/deployment": "sas-viya"
+ },
+ "name": "sas-deployment-metadata-kkd5dm2ch9",
+ "namespace": "test",
+ "resourceVersion": "36621129",
+ "selfLink": "/api/v1/namespaces/test/configmaps/sas-deployment-metadata-kkd5dm2ch9",
+ "uid": "73ce97ad-c682-4725-b8eb-9af2f8a0c48c"
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "data": {
+ "SAS_CONTEXT_PATH": "SASLogon",
+ "SAS_DU_NAME": "sas-logon-app",
+ "SAS_SERVICE_NAME": "sas-logon-app"
+ },
+ "kind": "ConfigMap",
+ "metadata": {
+ "annotations": {
+ "kubectl.kubernetes.io/last-applied-configuration": ""
+ },
+ "creationTimestamp": "2021-01-13T15:25:41Z",
+ "labels": {
+ "sas.com/admin": "cluster-local",
+ "sas.com/deployment": "sas-viya"
+ },
+ "name": "sas-logon-app-parameters-g4hg56gm5b",
+ "namespace": "d24140",
+ "resourceVersion": "53636348",
+ "selfLink": "/api/v1/namespaces/d24140/configmaps/sas-logon-app-parameters-g4hg56gm5b",
+ "uid": "2a38df05-ef72-439b-8155-faa984796d26"
+ }
+ }
+]
diff --git a/viya_ark_library/k8s/test_impl/sas_kubectl_test.py b/viya_ark_library/k8s/test_impl/sas_kubectl_test.py
index e8140bc..6f45cb4 100644
--- a/viya_ark_library/k8s/test_impl/sas_kubectl_test.py
+++ b/viya_ark_library/k8s/test_impl/sas_kubectl_test.py
@@ -76,6 +76,7 @@ class Values(object):
Class providing static references to values returned by the KubectlTest implementation.
"""
NAMESPACE: Text = "test"
+ CADENCEINFO: Text = "Fast 2020 (20201214.1607958443388)"
# Component: prometheus
COMPONENT_PROMETHEUS_DEPLOYMENT_NAME: Text = "pushgateway-test-prometheus-pushgateway"
@@ -182,6 +183,7 @@ class Values(object):
# Resource: all kinds
RESOURCE_KINDS_LIST: List[Text] = [
KubernetesResource.Kinds.CAS_DEPLOYMENT,
+ KubernetesResource.Kinds.CONFIGMAP,
KubernetesResource.Kinds.CRON_JOB,
KubernetesResource.Kinds.DEPLOYMENT,
KubernetesResource.Kinds.INGRESS,