Skip to content

Commit

Permalink
Merge pull request #170 from sassoftware/develop
Browse files Browse the repository at this point in the history
Prepare for Release 1.8.0
  • Loading branch information
lasiva authored Feb 23, 2022
2 parents 7453e6e + 9cfc601 commit 468d343
Show file tree
Hide file tree
Showing 18 changed files with 242 additions and 257 deletions.
1 change: 1 addition & 0 deletions deployment_report/model/utils/ingress_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,6 +82,7 @@ def ignorable_for_controller_if_unavailable(ingress_controller: Text, resource_t
####################
elif ingress_controller == SupportedIngress.Controllers.NGINX and (
resource_type == ResourceTypeValues.CONTOUR_HTTP_PROXIES or
resource_type == ResourceTypeValues.K8S_EXTENSIONS_INGRESSES or
resource_type == ResourceTypeValues.OPENSHIFT_ROUTES or
resource_type == ResourceTypeValues.ISTIO_VIRTUAL_SERVICES
):
Expand Down
30 changes: 19 additions & 11 deletions deployment_report/model/utils/resource_util.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@
# ###
####################################################################
from subprocess import CalledProcessError
from typing import Dict, List, Optional, Text
from typing import AnyStr, Dict, List, Optional, Text

from deployment_report.model.static.viya_deployment_report_keys import \
ITEMS_KEY, \
Expand Down Expand Up @@ -130,19 +130,27 @@ def cache_resources(resource_type: Text, kubectl: KubectlInterface, resource_cac
owner_kind: Text = owner_reference[KubernetesResourceKeys.KIND]
owner_name: Text = owner_reference[KubernetesResourceKeys.NAME]

# lookup the owner's resource type
owner_type: Text = kubectl.api_resources().get_type(kind=owner_kind, api_version=owner_api_version)
# only continue if the owner kind is available
if owner_kind:
# lookup the owner's resource type
owner_type: Optional[AnyStr] = kubectl.api_resources().get_type(kind=owner_kind,
api_version=owner_api_version)

# if the owning resource type isn't in the owning resource types list, add it
if owner_type not in owning_resource_types:
owning_resource_types.append(owner_type)
# make sure the owner_type was found
if not owner_type:
# if not, default to the owner kind
owner_type = owner_kind

# create a relationship for the owning object
relationship: Dict = relationship_util.create_relationship_dict(resource_name=owner_name,
resource_type=owner_type)
# if the owning resource type isn't in the owning resource types list, add it
if owner_type not in owning_resource_types:
owning_resource_types.append(owner_type)

# and add it to the relationship list
resource_relationships.append(relationship)
# create a relationship for the owning object
relationship: Dict = relationship_util.create_relationship_dict(resource_name=owner_name,
resource_type=owner_type)

# and add it to the relationship list
resource_relationships.append(relationship)

# if more resource types have been discovered, gather them as well
for owning_resource_type in owning_resource_types:
Expand Down
7 changes: 4 additions & 3 deletions deployment_report/model/utils/test/test_ingress_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -233,6 +233,10 @@ def test_ignorable_for_controller_if_unavailable_nginx():
assert ingress_util.ignorable_for_controller_if_unavailable(
ingress_controller=SupportedIngress.Controllers.NGINX,
resource_type=ResourceTypeValues.CONTOUR_HTTP_PROXIES)
# Ingress (v1beta1 - deprecated)
assert ingress_util.ignorable_for_controller_if_unavailable(
ingress_controller=SupportedIngress.Controllers.NGINX,
resource_type=ResourceTypeValues.K8S_EXTENSIONS_INGRESSES)
# Route
assert ingress_util.ignorable_for_controller_if_unavailable(
ingress_controller=SupportedIngress.Controllers.NGINX,
Expand All @@ -248,9 +252,6 @@ def test_ignorable_for_controller_if_unavailable_nginx():
ingress_controller=SupportedIngress.Controllers.NGINX,
resource_type=ResourceTypeValues.K8S_CORE_PODS)
# Ingress
assert not ingress_util.ignorable_for_controller_if_unavailable(
ingress_controller=SupportedIngress.Controllers.NGINX,
resource_type=ResourceTypeValues.K8S_EXTENSIONS_INGRESSES)
assert not ingress_util.ignorable_for_controller_if_unavailable(
ingress_controller=SupportedIngress.Controllers.NGINX,
resource_type=ResourceTypeValues.K8S_NETWORKING_INGRESSES)
Expand Down
6 changes: 4 additions & 2 deletions pre_install_report/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,6 +9,10 @@ deployment in a Kubernetes cluster. The tool cannot account for the dynamic res
Kubernetes may orchestrate once Viya is deployed. The report and the information therein must
be considered a snapshot in time.

The Kubernetes cluster for a SAS Viya deployment must meet the requirements documented in [SAS® Viya® Operations](https://go.documentation.sas.com/doc/en/itopscdc/default/itopssr/titlepage.htm).
Ensure that the Kubernetes version is within the documented range for the selected cloud provider.


### Memory and vCPU Check
The tool calculates the aggregate Memory and aggregate vCPUs of your cluster. The aggregate Memory is the sum
of the Memory capacity on all the active and running nodes. The aggregate CPU is calculated similarly.
Expand Down Expand Up @@ -38,11 +42,9 @@ If calculated aggregate vCPUs is less than VIYA_MIN_AGGREGATE_WORKER_CPU_CORES t
SAS recommends using the SAS Viya 4 Infrastructure as Code (IaC) tools to create a cluster.
Refer to the following IaC repositories for [Microsoft Azure](https://github.com/sassoftware/viya4-iac-azure), [AWS](https://github.com/sassoftware/viya4-iac-aws) or [GCP](https://github.com/sassoftware/viya4-iac-gcp).
For OpenShift refer to the documentation in SAS® Viya® Operations [OpenShift](https://go.documentation.sas.com/doc/en/itopscdc/v_019/itopssr/n1ika6zxghgsoqn1mq4bck9dx695.htm#p1c8bxlbu0gzuvn1e75nck1yozcn)

**Example**: Setting for aggregate Memory and vCPU for deployment based on documentation in SAS Viya Operations under System Requirements
in the Hardware and Resource Requirements section. See Sizing Recommendations for Microsoft Azure.


| Offering | CAS Node(s) | System Node | Nodes in User Node Pool(s) |
| ------------------------- |------------- | --------- | -------------|
| SAS Visual Analytics and SAS Data Preparation | Num. of Nodes: 1, CPU: 16, Memory: 128 | Num. of Nodes: 1, CPU: 8, Memory: 64 | Num. of Nodes: 1 per User Node Pool, CPU: 8, Memory: 64 |
Expand Down
136 changes: 100 additions & 36 deletions pre_install_report/library/pre_install_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
# ### Author: SAS Institute Inc. ###
####################################################################
# ###
# Copyright (c) 2021, SAS Institute Inc., Cary, NC, USA. ###
# Copyright (c) 2021-2022, SAS Institute Inc., Cary, NC, USA. ###
# All Rights Reserved. ###
# SPDX-License-Identifier: Apache-2.0 ###
# ###
Expand All @@ -21,9 +21,10 @@
import subprocess
import pint
from subprocess import CalledProcessError
from typing import Text
from typing import Text, Dict

# import pint to add, compare memory
import semantic_version
from pint import UnitRegistry

from pre_install_report.library.utils import viya_messages
Expand Down Expand Up @@ -64,14 +65,14 @@ def __init__(self, sas_logger: ViyaARKLogger, viya_kubelet_version_min,
self._kubectl: KubectlInterface = None
self.sas_logger = sas_logger
self.logger = self.sas_logger.get_logger()
self._min_kubelet_version: tuple = ()
self._viya_kubelet_version_min = viya_kubelet_version_min
self._viya_min_aggregate_worker_CPU_cores: Text = viya_min_aggregate_worker_CPU_cores
self._viya_min_aggregate_worker_memory: Text = viya_min_aggregate_worker_memory
self._calculated_aggregate_memory = None
self._workers = 0
self._aggregate_nodeStatus_failures = 0
self._ingress_controller = None
self._k8s_server_version = None

def _parse_release_info(self, release_info):
"""
Expand All @@ -91,6 +92,65 @@ def _parse_release_info(self, release_info):
print(viya_messages.KUBELET_VERSION_ERROR)
sys.exit(viya_messages.BAD_OPT_RC_)

def _validate_k8s_server_version(self, version):
    """
    Validate the major and minor parts of the Kubernetes server version.

    Only the first two (major.minor) components are checked; the third part
    is not specified in the Kubernetes cluster requirements.

    :param version: version string such as "1.22.3" (leading "v" already removed)
    :return: 0 if the version is well formed; otherwise the process exits
             with viya_messages.INVALID_K8S_VERSION_RC_
    """
    version_parts_to_validate = 2
    version_lst = version.split('.')

    def _invalid():
        # Log, echo to the console, and exit with the dedicated return code.
        self.logger.error(viya_messages.KUBERNETES_VERSION_ERROR.format(version))
        print(viya_messages.KUBERNETES_VERSION_ERROR.format(version))
        sys.exit(viya_messages.INVALID_K8S_VERSION_RC_)

    if len(version_lst) < version_parts_to_validate:
        _invalid()

    for part in version_lst[:version_parts_to_validate]:
        # Reject leading zeros (e.g. "01"), negatives, and non-numeric parts.
        # A bare "0" component is also rejected, matching the original check.
        try:
            if part.startswith("0") or float(part) < 0:
                _invalid()
        except ValueError:
            # e.g. "1.alpha" -- previously float() raised an uncaught
            # exception here; route it through the same error path instead.
            _invalid()
    return 0

def _retrieve_k8s_server_version(self, utils):
    """
    Retrieve the Kubernetes server version and validate the git version.

    :param utils: PreCheckUtils instance used to run the kubectl version query
    :return: validated gitVersion string with any leading "v" removed;
             exits with RUNTIME_ERROR_RC_ if the kubectl command fails, or
             with INVALID_K8S_VERSION_RC_ if the version is malformed
    """
    try:
        versions: Dict = utils.get_k8s_version()
        server_version = versions.get('serverVersion')
        git_version = str(server_version.get('gitVersion'))
        self.logger.debug("git_version {} ".format(git_version))

        # gitVersion is conventionally prefixed with "v" (e.g. "v1.22.3");
        # strip it so the numeric validation sees "1.22.3".
        if git_version.startswith("v"):
            git_version = git_version[1:]

        # Validate unconditionally. Previously an empty string or a value
        # without the "v" prefix skipped validation and was silently
        # returned; a malformed version must not reach the report.
        self._validate_k8s_server_version(git_version)

        self.logger.info('Kubernetes Server version = {}'.format(git_version))
        return git_version
    except CalledProcessError as cpe:
        self.logger.exception('kubectl version command failed. Return code = {}'.format(str(cpe.returncode)))
        sys.exit(viya_messages.RUNTIME_ERROR_RC_)

def _k8s_server_version_min(self) -> bool:
    """
    Check whether the cluster's Kubernetes server version meets the minimum
    supported release.

    Uses the semantic_version package (2.8.5 was the initial version used;
    see https://pypi.org/project/semantic_version/).

    :return: True if the server version is supported, False otherwise.
             Exits with RUNTIME_ERROR_RC_ if the stored version string
             cannot be parsed as a semantic version.
    """
    try:
        curr_version = semantic_version.Version(str(self._k8s_server_version))

        # NOTE(review): MIN_K8S_SERVER_VERSION is evidently an *exclusion*
        # spec (e.g. "<1.x") -- membership means the version is too old,
        # hence the error log + False branch. Confirm against viya_constants.
        if(curr_version in semantic_version.SimpleSpec(viya_constants.MIN_K8S_SERVER_VERSION)):
            self.logger.error("This release of Kubernetes is not supported {}.{}.x"
                              .format(str(curr_version.major),
                                      str(curr_version.minor)))
            return False
        else:
            return True
    except ValueError as cpe:
        # An unparseable server version is a hard failure for the check.
        self.logger.exception(viya_messages.EXCEPTION_MESSAGE.format(str(cpe)))
        sys.exit(viya_messages.RUNTIME_ERROR_RC_)

def check_details(self, kubectl, ingress_port, ingress_host, ingress_controller,
output_dir):
self._ingress_controller = ingress_controller
Expand All @@ -110,6 +170,8 @@ def check_details(self, kubectl, ingress_port, ingress_host, ingress_controller,
pre_check_utils_params["logger"] = self.sas_logger
utils = PreCheckUtils(pre_check_utils_params)

self._k8s_server_version = self._retrieve_k8s_server_version(utils)

configs_data = self.get_config_info()
cluster_info = self._get_master_json()
master_data = self._check_master(cluster_info)
Expand All @@ -130,11 +192,22 @@ def check_details(self, kubectl, ingress_port, ingress_host, ingress_controller,
params[viya_constants.INGRESS_HOST] = str(ingress_host)
params[viya_constants.INGRESS_PORT] = str(ingress_port)
params[viya_constants.PERM_CLASS] = utils
params[viya_constants.SERVER_K8S_VERSION] = self._k8s_server_version
params['logger'] = self.sas_logger

permissions_check = PreCheckPermissions(params)
self._check_permissions(permissions_check)

test_list = [viya_constants.INSUFFICIENT_PERMS, viya_constants.PERM_SKIPPING]

# Log Summary of Permissions Issues found
if(any(ele in str(permissions_check.get_cluster_admin_permission_aggregate()) for ele in test_list)):
self.logger.warn("WARN: Review Cluster Aggregate Report")
if(any(ele in str(permissions_check.get_namespace_admin_permission_aggregate()) for ele in test_list)):
self.logger.warn("WARN: Review Namespace Aggregate Report")
if(any(ele in str(permissions_check.get_namespace_admin_permission_data()) for ele in test_list)):
self.logger.warn("WARN: Review Namespace Permissions")

self.generate_report(global_data, master_data, configs_data, storage_data, namespace_data,
permissions_check.get_cluster_admin_permission_data(),
permissions_check.get_namespace_admin_permission_data(),
Expand Down Expand Up @@ -389,8 +462,6 @@ def _check_permissions(self, permissions_check: PreCheckPermissions):
permissions_check: instance of PreCheckPermissions class
"""
namespace = self._kubectl.get_namespace()
permissions_check.get_server_git_version()
permissions_check.set_ingress_manifest_file()
permissions_check.get_sc_resources()

permissions_check.manage_pvc(viya_constants.KUBECTL_APPLY, False)
Expand Down Expand Up @@ -520,6 +591,20 @@ def _set_time(self, global_data):
self.logger.debug("global data{} time{}".format(pprint.pformat(global_data), time_string))
return global_data

def _update_k8s_version(self, global_data, git_version):
    """
    Append the cluster Kubernetes version to the global data list for the report.

    :param global_data: list of report entries to be updated
    :param git_version: Kubernetes server version string to record
    :return: the same list with a {'k8sVersion': ...} entry appended
    """
    version_entry = {'k8sVersion': str(git_version)}
    global_data.append(version_entry)

    self.logger.debug("global data{} Kubernetes Version {}".format(pprint.pformat(global_data), git_version))
    return global_data

def _check_cpu_errors(self, global_data, total_capacity_cpu_cores: float, aggregate_cpu_failures):
"""
Check if the aggregate CPUs across all worker nodes meets SAS total cpu requirements
Expand Down Expand Up @@ -708,8 +793,6 @@ def evaluate_nodes(self, nodes_data, global_data, cluster_info, quantity_):
global_data: list of dictionary object with global data for nodes
return: return ist of dictionary objects with updated information and status
"""
self._min_kubelet_version = self._parse_release_info(self._viya_kubelet_version_min)

aggregate_cpu_failures = int(0)
aggregate_memory_failures = int(0)
aggregate_kubelet_failures = int(0)
Expand Down Expand Up @@ -749,15 +832,18 @@ def evaluate_nodes(self, nodes_data, global_data, cluster_info, quantity_):
self._set_status(0, node, 'capacityMemory')
node['error']['capacityMemory'] = "See below."

if self._release_in_range(kubeletversion):
if (self._k8s_server_version_min()):
self._set_status(0, node, 'kubeletversion')
self.logger.debug("node kubeletversion status 0 {} ".format(pprint.pformat(node)))
else:
self._set_status(1, node, 'kubeletversion')
node['error']['kubeletversion'] = viya_constants.SET + ': ' + kubeletversion + ', ' + \
str(viya_constants.EXPECTED) + ': ' + self._viya_kubelet_version_min + ' or later '
str(viya_constants.EXPECTED) + ': ' + viya_constants.MIN_K8S_SERVER_VERSION[1:] + ' or later '

aggregate_kubelet_failures += 1
self.logger.debug("node {} ".format(pprint.pformat(node)))
self.logger.debug("aggregate_kubelet_failures {} ".format(str(aggregate_kubelet_failures)))
self.logger.debug("node kubeletversion{} ".format(pprint.pformat(node)))

global_data = self._check_workers(global_data, nodes_data)
global_data = self._set_time(global_data)
global_data = self._check_cpu_errors(global_data, total_cpu_cores, aggregate_cpu_failures)
Expand All @@ -767,6 +853,7 @@ def evaluate_nodes(self, nodes_data, global_data, cluster_info, quantity_):
global_data = self._check_kubelet_errors(global_data, aggregate_kubelet_failures)

global_data.append(nodes_data)
global_data = self._update_k8s_version(global_data, self._k8s_server_version)
self.logger.debug("nodes_data {}".format(pprint.pformat(nodes_data)))
return global_data

Expand Down Expand Up @@ -904,32 +991,6 @@ def _get_config_json(self):
self.logger.debug("config view JSON{} return_code{}".format(str(config_json), str(return_code)))
return config_json, return_code

def _release_in_range(self, kubeletversion):
    """
    Check if the current kubelet version retrieved from the cluster nodes is equal to or greater
    than the major/minor version specified in the viya_cluster_settings file.

    :param kubeletversion: current version string from a cluster node
                           (e.g. "v1.21.4" -- assumed to carry a leading "v",
                           which the [1:] slices below strip; TODO confirm)
    :return: True if the version is within range, False if it is below the
             minimum. Exits with BAD_OPT_RC_ if either version string is
             not numeric after the prefix.
    """

    try:
        # Split into components, e.g. "v1.21.4" -> ("v1", "21", "4").
        current = tuple(kubeletversion.split("."))

        # current[0][1:] drops the "v" prefix before the integer compare;
        # the same slicing is applied to the configured minimum, so both
        # are assumed to share that format.
        if int(current[0][1:]) > int(self._min_kubelet_version[0][1:]):
            return True
        if int(current[0][1:]) < int(self._min_kubelet_version[0][1:]):
            return False
        # Equal major: the minor component decides.
        if int(current[0][1:]) == int(self._min_kubelet_version[0][1:]) and \
                (int(current[1]) >= int(self._min_kubelet_version[1])):
            return True
        return False
    except ValueError:
        # Non-numeric component: report the configured minimum and exit.
        print(viya_messages.LIMIT_ERROR.format("VIYA_KUBELET_VERSION_MIN", str(self._min_kubelet_version)))
        self.logger.exception(viya_messages.LIMIT_ERROR.format("VIYA_KUBELET_VERSION_MIN",
                                                               str(self._min_kubelet_version)))
        sys.exit(viya_messages.BAD_OPT_RC_)

def _get_memory(self, limit, key, quantity_):
"""
Check that the memory specified in the viya_cluster_settings file is valid. Exit if
Expand Down Expand Up @@ -991,6 +1052,9 @@ def get_config_info(self):
def get_calculated_aggregate_memory(self):
    """Return the aggregate memory previously calculated across worker nodes."""
    return self._calculated_aggregate_memory

def set_k8s_version(self, version: Text):
    """
    Record the Kubernetes server version used when generating the report.

    :param version: Kubernetes server version string
    """
    self._k8s_server_version = version

def generate_report(self,
global_data,
master_data,
Expand Down
Loading

0 comments on commit 468d343

Please sign in to comment.