From 82035edafb5a55f118c9c9792b06222d04c1a42a Mon Sep 17 00:00:00 2001 From: Roey Prat Date: Thu, 22 Aug 2019 16:03:06 +0300 Subject: [PATCH] RED-32089 Removing globals from python code. This makes the flow of data more predictable and readable. (cherry picked from commit 6a57b4ec11aecb94d4368019aaf33d29f8239d7f) --- log_collector.py | 91 ++++++++++++++++++------------------------------ 1 file changed, 34 insertions(+), 57 deletions(-) diff --git a/log_collector.py b/log_collector.py index 070fa9c..3613c6b 100755 --- a/log_collector.py +++ b/log_collector.py @@ -19,8 +19,6 @@ logger = logging.getLogger("log collector") -output_dir = "" -namespace = "" TIME_FORMAT = time.strftime("%Y%m%d-%H%M%S") dir_name = "redis_enterprise_k8s_debug_info_{}".format(TIME_FORMAT) @@ -37,6 +35,7 @@ def make_dir(directory): if not os.path.exists(directory): + # noinspection PyBroadException try: os.mkdir(directory) except: @@ -44,48 +43,37 @@ def make_dir(directory): sys.exit() -def run(configured_namespace, configured_output_path): - global output_dir, namespace - - namespace = "" - if configured_namespace: - namespace = configured_namespace - else: +def run(namespace, output_dir): + if not namespace: namespace = get_namespace_from_config() - global TIME_FORMAT - global dir_name - - if configured_output_path: - output_dir = os.path.join(configured_output_path, dir_name) - else: + if not output_dir: output_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), dir_name) make_dir(output_dir) - get_redis_enterprise_debug_info() - collect_cluster_info() - collect_resources_list() - collect_events() - collect_api_resources() - collect_pods_logs() - archive_files() + get_redis_enterprise_debug_info(namespace, output_dir) + collect_cluster_info(output_dir) + collect_resources_list(output_dir) + collect_events(namespace, output_dir) + collect_api_resources(namespace, output_dir) + collect_pods_logs(namespace, output_dir) + archive_files(output_dir) logger.info("Finished Redis Enterprise log collector") -def get_redis_enterprise_debug_info(): +def get_redis_enterprise_debug_info(namespace, output_dir): """ Connects to an RS cluster node, creates and copies debug info package from the pod """ - pod_names = get_pod_names(selector='redis.io/role=node') + pod_names = get_pod_names(namespace, selector='redis.io/role=node') if not pod_names: logger.warning("Cannot find redis enterprise pod") return pod_name = pod_names[0] - cmd = "kubectl {} exec {} /opt/redislabs/bin/rladmin cluster debug_info path /tmp".format( - get_namespace_argument(), pod_name) + cmd = "kubectl -n {} exec {} /opt/redislabs/bin/rladmin cluster debug_info path /tmp".format(namespace, pod_name) rc, out = run_shell_command(cmd) if "Downloading complete" not in out: logger.warning("Failed running rladmin command in pod: {}".format(out)) @@ -103,7 +91,7 @@ def get_redis_enterprise_debug_info(): return # copy package from RS pod - cmd = "kubectl {} cp {}:{} {}".format(get_namespace_argument(), pod_name, debug_file, output_dir) + cmd = "kubectl -n {} cp {}:{} {}".format(namespace, pod_name, debug_file, output_dir) rc, out = run_shell_command(cmd) if rc: logger.warning( @@ -113,43 +101,42 @@ def get_redis_enterprise_debug_info(): logger.info("Collected Redis Enterprise cluster debug package") -def collect_resources_list(): +def collect_resources_list(output_dir): """ Prints the output of kubectl get all to a file """ - collect_helper(cmd="kubectl get all", file_name="resources_list", resource_name="resources list") + collect_helper(output_dir, 
cmd="kubectl get all", file_name="resources_list", resource_name="resources list") -def collect_cluster_info(): +def collect_cluster_info(output_dir): """ Prints the output of kubectl cluster-info to a file """ - collect_helper(cmd="kubectl cluster-info", file_name="cluster_info", resource_name="cluster-info") + collect_helper(output_dir, cmd="kubectl cluster-info", file_name="cluster_info", resource_name="cluster-info") -def collect_events(): +def collect_events(namespace, output_dir): """ Prints the output of kubectl cluster-info to a file """ - global output_dir # events need -n parameter in kubectl if not namespace: logger.warning("Cannot collect events without namespace - skipping events collection") return - cmd = "kubectl get events {}".format(get_namespace_argument()) - collect_helper(cmd=cmd, file_name="events", resource_name="events") + cmd = "kubectl get events -n {}".format(namespace) + collect_helper(output_dir, cmd=cmd, file_name="events", resource_name="events") -def collect_api_resources(): +def collect_api_resources(namespace, output_dir): """ Creates file for each of the API resources with the output of kubectl get -o yaml """ logger.info("Collecting API resources:") resources_out = OrderedDict() for resource in api_resources: - output = run_kubectl_get(resource) + output = run_kubectl_get(namespace, resource) if output: - resources_out[resource] = run_kubectl_get(resource) + resources_out[resource] = run_kubectl_get(namespace, resource) logger.info(" + {}".format(resource)) for entry, out in resources_out.iteritems(): @@ -157,22 +144,21 @@ def collect_api_resources(): fp.write(out) -def collect_pods_logs(): +def collect_pods_logs(namespace, output_dir): """ Collects all the pods logs from given namespace """ - global output_dir logger.info("Collecting pods' logs:") logs_dir = os.path.join(output_dir, "pods") make_dir(logs_dir) - pods = get_pod_names() + pods = get_pod_names(namespace) if not pods: logger.warning("Could not get pods list - skipping pods logs collection") return for pod in pods: - cmd = "kubectl logs {} {}".format(get_namespace_argument(), pod) + cmd = "kubectl logs -n {} {}".format(namespace, pod) with open(os.path.join(logs_dir, "{}.log".format(pod)), "w+") as fp: p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) while True: @@ -185,8 +171,7 @@ def collect_pods_logs(): logger.info(" + {}".format(pod)) -def archive_files(): - global dir_name +def archive_files(output_dir): file_name = output_dir + ".tar.gz" with tarfile.open(file_name, "w|gz") as tar: @@ -199,13 +184,13 @@ def archive_files(): logger.warning("Failed to delete directory after archiving: %s", e) -def get_pod_names(selector=""): +def get_pod_names(namespace, selector=""): """ Returns list of pods names """ if selector: selector = '--selector="{}"'.format(selector) - cmd = 'kubectl get pod {} {} -o json '.format(get_namespace_argument(), selector) + cmd = 'kubectl get pod -n {} {} -o json '.format(namespace, selector) rc, out = run_shell_command(cmd) if rc: logger.warning("Failed to get pod names: {}".format(out)) @@ -215,13 +200,6 @@ def get_pod_names(selector=""): return [pod['metadata']['name'] for pod in pods_json['items']] -def get_namespace_argument(): - global namespace - if namespace: - return "-n {}".format(namespace) - return "" - - def get_namespace_from_config(): """ Returns the namespace from current context if one is set OW None @@ -244,11 +222,10 @@ def get_namespace_from_config(): break -def collect_helper(cmd, file_name, resource_name): 
+def collect_helper(output_dir, cmd, file_name, resource_name):
     """
         Runs a command, writes its output to file_name and logs the resource_name
     """
-    global output_dir
     rc, out = run_shell_command(cmd)
     if rc:
         logger.warning("Error when running {}: {}".format(cmd, out))
@@ -279,11 +256,11 @@ def run_shell_command(cmd):
     return 0, native_string(output)


-def run_kubectl_get(resource_type):
+def run_kubectl_get(namespace, resource_type):
     """
         Runs kubectl get command
     """
-    cmd = "kubectl get {} {} -o yaml".format(resource_type, get_namespace_argument())
+    cmd = "kubectl get -n {} {} -o yaml".format(namespace, resource_type)
     rc, out = run_shell_command(cmd)
     if rc == 0:
         return out
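
Note: the sketch below is not part of the patch. It is a minimal, self-contained illustration of the pattern this commit applies: state that previously lived in module-level globals (namespace, output_dir) is resolved once at the entry point and then passed explicitly to every helper. The helper bodies and defaults here are placeholders, not code from log_collector.py.

# Illustrative sketch only (simplified; not taken from log_collector.py).
import os


def get_namespace_from_config():
    # Placeholder for the real kubectl-config lookup.
    return "default"


def collect_events(namespace, output_dir):
    # Each helper receives the state it needs as arguments, so its
    # behaviour is determined entirely by its inputs.
    cmd = "kubectl get events -n {}".format(namespace)
    print("would run {!r} and write the output under {!r}".format(cmd, output_dir))


def run(namespace, output_dir):
    # Defaults are resolved once, at the entry point, instead of
    # being assigned to module-level globals.
    if not namespace:
        namespace = get_namespace_from_config()
    if not output_dir:
        output_dir = os.getcwd()
    collect_events(namespace, output_dir)


if __name__ == "__main__":
    run("", "")

Threading the two values through call signatures keeps run() as the single place where defaults are decided and makes each helper independently testable, which is what the commit message means by making the flow of data more predictable.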