diff --git a/README.md b/README.md index f63b0832a29..d6f9a5e25a6 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ A convenient alternative is to let the SONiC build system configure a build envi 2. Build the sonic-utilities Python wheel package inside the Bullseye slave container, and tell the build system to keep the container alive when finished ``` - make NOSTRETCH=1 NOBUSTER=1 KEEP_SLAVE_ON=yes target/python-wheels/bullseye/sonic_utilities-1.2-py3-none-any.whl + make -f Makefile.work BLDENV=bookworm KEEP_SLAVE_ON=yes target/python-wheels/bookworm/sonic_utilities-1.2-py3-none-any.whl ``` 3. When the build finishes, your prompt will change to indicate you are inside the slave container. Change into the `src/sonic-utilities/` directory @@ -66,6 +66,7 @@ A convenient alternative is to let the SONiC build system configure a build envi ``` python3 setup.py bdist_wheel ``` +Note: This command by default will not update the wheel package in target/. To specify the destination location of wheel package, use "-d" option. #### To run unit tests @@ -73,6 +74,12 @@ python3 setup.py bdist_wheel python3 setup.py test ``` +#### To install the package on a SONiC machine +``` +sudo pip uninstall sonic-utilities +sudo pip install YOUR_WHEEL_PACKAGE +``` +Note: Don't use "--force-reinstall". ### sonic-utilities-data diff --git a/azure-pipelines.yml b/azure-pipelines.yml index dec731eea4d..8cb6586a9b9 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -27,7 +27,7 @@ stages: displayName: "Static Analysis" timeoutInMinutes: 10 continueOnError: true - pool: ubuntu-20.04 + pool: sonic-ubuntu-1c steps: - template: .azure-pipelines/pre-commit-check.yml @@ -46,6 +46,13 @@ stages: image: sonicdev-microsoft.azurecr.io:443/sonic-slave-bullseye:$(BUILD_BRANCH) steps: + - script: | + set -ex + sudo apt-get update + sudo apt-get install -y python3-pip + sudo pip3 install requests==2.31.0 + displayName: "Install dependencies" + - script: | sourceBranch=$(Build.SourceBranchName) if [[ "$(Build.Reason)" == "PullRequest" ]];then diff --git a/config/aaa.py b/config/aaa.py index 3c76187126f..fdb784dc4ae 100644 --- a/config/aaa.py +++ b/config/aaa.py @@ -114,9 +114,9 @@ def trace(option): @click.command() -@click.argument('auth_protocol', nargs=-1, type=click.Choice(["radius", "tacacs+", "local", "default"])) +@click.argument('auth_protocol', nargs=-1, type=click.Choice(["ldap", "radius", "tacacs+", "local", "default"])) def login(auth_protocol): - """Switch login authentication [ {radius, tacacs+, local} | default ]""" + """Switch login authentication [ {ldap, radius, tacacs+, local} | default ]""" if len(auth_protocol) is 0: click.echo('Argument "auth_protocol" is required') return @@ -135,9 +135,9 @@ def login(auth_protocol): val2 = auth_protocol[1] good_ap = False if val == 'local': - if val2 == 'radius' or val2 == 'tacacs+': + if val2 == 'radius' or val2 == 'tacacs+' or val2 == 'ldap': good_ap = True - elif val == 'radius' or val == 'tacacs+': + elif val == 'radius' or val == 'tacacs+' or val == 'ldap': if val2 == 'local': good_ap = True if good_ap == True: diff --git a/config/bgp_cli.py b/config/bgp_cli.py new file mode 100644 index 00000000000..a5a565359a9 --- /dev/null +++ b/config/bgp_cli.py @@ -0,0 +1,192 @@ +import click +import utilities_common.cli as clicommon + +from sonic_py_common import logger +from utilities_common.bgp import ( + CFG_BGP_DEVICE_GLOBAL, + BGP_DEVICE_GLOBAL_KEY, + SYSLOG_IDENTIFIER, + to_str, +) + + +log = logger.Logger(SYSLOG_IDENTIFIER) +log.set_min_log_priority_info() + 
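+#
+# The command groups below are registered under "config bgp" (see config/main.py,
+# which calls config.commands['bgp'].add_command(bgp_cli.DEVICE_GLOBAL)), so the
+# resulting CLI surface is expected to look like:
+#
+#   config bgp device-global tsa enabled|disabled
+#   config bgp device-global w-ecmp enabled|disabled
+#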
+ +# +# BGP DB interface ---------------------------------------------------------------------------------------------------- +# + + +def update_entry_validated(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector object. + table (str): Table name to add new entry to. + key (Union[str, Tuple]): Key name in the table. + data (Dict): Entry data. + create_if_not_exists (bool): + In case entry does not exists already a new entry + is not created if this flag is set to False and + creates a new entry if flag is set to True. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if not data: + raise click.ClickException(f"No field/values to update {key}") + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise click.ClickException(f"{key} does not exist") + + entry_changed = False + for attr, value in data.items(): + if value == cfg[table][key].get(attr): + continue + entry_changed = True + if value is None: + cfg[table][key].pop(attr, None) + else: + cfg[table][key][attr] = value + + if not entry_changed: + return + + db.set_entry(table, key, cfg[table][key]) + + +# +# BGP handlers -------------------------------------------------------------------------------------------------------- +# + + +def tsa_handler(ctx, db, state): + """ Handle config updates for Traffic-Shift-Away (TSA) feature """ + + table = CFG_BGP_DEVICE_GLOBAL + key = BGP_DEVICE_GLOBAL_KEY + data = { + "tsa_enabled": state, + } + + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + log.log_notice("Configured TSA state: {}".format(to_str(state))) + except Exception as e: + log.log_error("Failed to configure TSA state: {}".format(str(e))) + ctx.fail(str(e)) + + +def wcmp_handler(ctx, db, state): + """ Handle config updates for Weighted-Cost Multi-Path (W-ECMP) feature """ + + table = CFG_BGP_DEVICE_GLOBAL + key = BGP_DEVICE_GLOBAL_KEY + data = { + "wcmp_enabled": state, + } + + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + log.log_notice("Configured W-ECMP state: {}".format(to_str(state))) + except Exception as e: + log.log_error("Failed to configure W-ECMP state: {}".format(str(e))) + ctx.fail(str(e)) + + +# +# BGP device-global --------------------------------------------------------------------------------------------------- +# + + +@click.group( + name="device-global", + cls=clicommon.AliasedGroup +) +def DEVICE_GLOBAL(): + """ Configure BGP device global state """ + + pass + + +# +# BGP device-global tsa ----------------------------------------------------------------------------------------------- +# + + +@DEVICE_GLOBAL.group( + name="tsa", + cls=clicommon.AliasedGroup +) +def DEVICE_GLOBAL_TSA(): + """ Configure Traffic-Shift-Away (TSA) feature """ + + pass + + +@DEVICE_GLOBAL_TSA.command( + name="enabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_TSA_ENABLED(ctx, db): + """ Enable Traffic-Shift-Away (TSA) feature """ + + tsa_handler(ctx, db, "true") + + +@DEVICE_GLOBAL_TSA.command( + name="disabled" +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL_TSA_DISABLED(ctx, db): + """ Disable Traffic-Shift-Away (TSA) feature """ + + tsa_handler(ctx, db, "false") + + +# +# BGP device-global w-ecmp 
--------------------------------------------------------------------------------------------
+#
+
+
+@DEVICE_GLOBAL.group(
+    name="w-ecmp",
+    cls=clicommon.AliasedGroup
+)
+def DEVICE_GLOBAL_WCMP():
+    """ Configure Weighted-Cost Multi-Path (W-ECMP) feature """
+
+    pass
+
+
+@DEVICE_GLOBAL_WCMP.command(
+    name="enabled"
+)
+@clicommon.pass_db
+@click.pass_context
+def DEVICE_GLOBAL_WCMP_ENABLED(ctx, db):
+    """ Enable Weighted-Cost Multi-Path (W-ECMP) feature """
+
+    wcmp_handler(ctx, db, "true")
+
+
+@DEVICE_GLOBAL_WCMP.command(
+    name="disabled"
+)
+@clicommon.pass_db
+@click.pass_context
+def DEVICE_GLOBAL_WCMP_DISABLED(ctx, db):
+    """ Disable Weighted-Cost Multi-Path (W-ECMP) feature """
+
+    wcmp_handler(ctx, db, "false")
diff --git a/config/chassis_modules.py b/config/chassis_modules.py
old mode 100644
new mode 100755
index e640779d160..4e7fd8096bd
--- a/config/chassis_modules.py
+++ b/config/chassis_modules.py
@@ -1,9 +1,14 @@
 #!/usr/sbin/env python
 
 import click
-
+import time
+import re
+import subprocess
 import utilities_common.cli as clicommon
 
+TIMEOUT_SECS = 10
+
+
 #
 # 'chassis_modules' group ('config chassis_modules ...')
 #
@@ -17,6 +22,81 @@ def modules():
     """Configure chassis modules"""
     pass
 
+
+def get_config_module_state(db, chassis_module_name):
+    config_db = db.cfgdb
+    fvs = config_db.get_entry('CHASSIS_MODULE', chassis_module_name)
+    if not fvs:
+        return 'up'
+    else:
+        return fvs['admin_status']
+
+
+#
+# Name: check_config_module_state_with_timeout
+# return: True on timeout, False otherwise
+#
+def check_config_module_state_with_timeout(ctx, db, chassis_module_name, state):
+    counter = 0
+    while get_config_module_state(db, chassis_module_name) != state:
+        time.sleep(1)
+        counter += 1
+        if counter >= TIMEOUT_SECS:
+            ctx.fail("get_config_module_state {} timeout".format(chassis_module_name))
+            return True
+    return False
+
+
+def get_asic_list_from_db(chassisdb, chassis_module_name):
+    asic_list = []
+    asics_keys_list = chassisdb.keys("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE*")
+    for asic_key in asics_keys_list:
+        name = chassisdb.get("CHASSIS_STATE_DB", asic_key, "name")
+        if name == chassis_module_name:
+            asic_id = int(re.search(r"(\d+)$", asic_key).group())
+            asic_list.append(asic_id)
+    return asic_list
+
+
+#
+# Syntax: fabric_module_set_admin_status <'up'/'down'>
+#
+def fabric_module_set_admin_status(db, chassis_module_name, state):
+    chassisdb = db.db
+    chassisdb.connect("CHASSIS_STATE_DB")
+    asic_list = get_asic_list_from_db(chassisdb, chassis_module_name)
+
+    if len(asic_list) == 0:
+        return
+
+    if state == "down":
+        for asic in asic_list:
+            click.echo("Stop swss@{} and peer services".format(asic))
+            clicommon.run_command('sudo systemctl stop swss@{}.service'.format(asic))
+
+            is_active = subprocess.call(["systemctl", "is-active", "--quiet", "swss@{}.service".format(asic)])
+
+            if is_active == 0:  # exit code 0 means still active; non-zero means inactive
+                click.echo("Stop swss@{} and peer services failed".format(asic))
+                return
+
+        click.echo("Delete related CHASSIS_FABRIC_ASIC_TABLE entries")
+
+        for asic in asic_list:
+            chassisdb.delete("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic" + str(asic))
+
+        # Start the services again in case the user just issued "systemctl stop swss@/syncd@"
+        # without bringing down the hardware
+        for asic in asic_list:
+            # Reset the failure count to avoid hitting the systemd service restart limit
+            clicommon.run_command('sudo systemctl reset-failed swss@{}.service'.format(asic))
+            click.echo("Start swss@{} and peer services".format(asic))
+            
clicommon.run_command('sudo systemctl start swss@{}.service'.format(asic)) + elif state == "up": + for asic in asic_list: + click.echo("Start swss@{} and peer services".format(asic)) + clicommon.run_command('sudo systemctl start swss@{}.service'.format(asic)) + # # 'shutdown' subcommand ('config chassis_modules shutdown ...') # @@ -33,8 +113,17 @@ def shutdown_chassis_module(db, chassis_module_name): not chassis_module_name.startswith("FABRIC-CARD"): ctx.fail("'module_name' has to begin with 'SUPERVISOR', 'LINE-CARD' or 'FABRIC-CARD'") + # To avoid duplicate operation + if get_config_module_state(db, chassis_module_name) == 'down': + click.echo("Module {} is already in down state".format(chassis_module_name)) + return + + click.echo("Shutting down chassis module {}".format(chassis_module_name)) fvs = {'admin_status': 'down'} config_db.set_entry('CHASSIS_MODULE', chassis_module_name, fvs) + if chassis_module_name.startswith("FABRIC-CARD"): + if not check_config_module_state_with_timeout(ctx, db, chassis_module_name, 'down'): + fabric_module_set_admin_status(db, chassis_module_name, 'down') # # 'startup' subcommand ('config chassis_modules startup ...') @@ -45,5 +134,15 @@ def shutdown_chassis_module(db, chassis_module_name): def startup_chassis_module(db, chassis_module_name): """Chassis-module startup of module""" config_db = db.cfgdb + ctx = click.get_current_context() + + # To avoid duplicate operation + if get_config_module_state(db, chassis_module_name) == 'up': + click.echo("Module {} is already set to up state".format(chassis_module_name)) + return + click.echo("Starting up chassis module {}".format(chassis_module_name)) config_db.set_entry('CHASSIS_MODULE', chassis_module_name, None) + if chassis_module_name.startswith("FABRIC-CARD"): + if not check_config_module_state_with_timeout(ctx, db, chassis_module_name, 'up'): + fabric_module_set_admin_status(db, chassis_module_name, 'up') diff --git a/config/main.py b/config/main.py index 6474a181f81..0ee61b0f069 100644 --- a/config/main.py +++ b/config/main.py @@ -1,6 +1,8 @@ #!/usr/sbin/env python +import threading import click +import concurrent.futures import datetime import ipaddress import json @@ -20,6 +22,7 @@ from jsonpointer import JsonPointerException from collections import OrderedDict from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat, extract_scope +from generic_config_updater.gu_common import HOST_NAMESPACE, GenericConfigUpdaterError from minigraph import parse_device_desc_xml, minigraph_encoder from natsort import natsorted from portconfig import get_child_ports @@ -27,9 +30,10 @@ from sonic_py_common import device_info, multi_asic from sonic_py_common.general import getstatusoutput_noshell from sonic_py_common.interface import get_interface_table_name, get_port_table_name, get_intf_longname +from sonic_yang_cfg_generator import SonicYangCfgDbGenerator from utilities_common import util_base from swsscommon import swsscommon -from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, ConfigDBPipeConnector from utilities_common.db import Db from utilities_common.intf_filter import parse_interface_in_filter from utilities_common import bgp_util @@ -59,6 +63,7 @@ from . import syslog from . import switchport from . import dns +from . 
import bgp_cli
 
 
 # mock masic APIs for unit test
@@ -246,7 +251,7 @@ def breakout_Ports(cm, delPorts=list(), portJson=dict(), force=False, \
         click.echo("*** Printing dependencies ***")
         for dep in deps:
             click.echo(dep)
-        sys.exit(0)
+        sys.exit(1)
     else:
         click.echo("[ERROR] Port breakout Failed!!! Opting Out")
         raise click.Abort()
@@ -895,10 +900,47 @@ def _reset_failed_services():
     for service in _get_sonic_services():
         clicommon.run_command(['systemctl', 'reset-failed', str(service)])
 
+
+def get_service_finish_timestamp(service):
+    out, _ = clicommon.run_command(['sudo',
+                                    'systemctl',
+                                    'show',
+                                    '--no-pager',
+                                    service,
+                                    '-p',
+                                    'ExecMainExitTimestamp',
+                                    '--value'],
+                                   return_cmd=True)
+    return out.strip(' \t\n\r')
+
+
+def wait_service_restart_finish(service, last_timestamp, timeout=30):
+    start_time = time.time()
+    elapsed_time = 0
+    while elapsed_time < timeout:
+        current_timestamp = get_service_finish_timestamp(service)
+        if current_timestamp and (current_timestamp != last_timestamp):
+            return
+
+        time.sleep(1)
+        elapsed_time = time.time() - start_time
+
+    log.log_warning("Service: {} did not restart within {} seconds, stop waiting".format(service, timeout))
+
+
 def _restart_services():
+    last_interface_config_timestamp = get_service_finish_timestamp('interfaces-config')
+    last_networking_timestamp = get_service_finish_timestamp('networking')
+
     click.echo("Restarting SONiC target ...")
     clicommon.run_command(['sudo', 'systemctl', 'restart', 'sonic.target'])
 
+    # These services restart eth0, which makes the device lose network connectivity for about 10 seconds.
+    # When TACACS is enabled, every remote user command is authorized by the TACACS service over the network,
+    # so if load_minigraph exits before eth0 is back up, commands issued right after load_minigraph may fail.
+    wait_service_restart_finish('interfaces-config', last_interface_config_timestamp)
+    wait_service_restart_finish('networking', last_networking_timestamp)
+
     try:
         subprocess.check_call(['sudo', 'monit', 'status'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
         click.echo("Enabling container monitoring ...")
@@ -1155,25 +1197,173 @@ def validate_gre_type(ctx, _, value):
         return gre_type_value
     except ValueError:
         raise click.UsageError("{} is not a valid GRE type".format(value))
-
+
+
+def multiasic_save_to_singlefile(db, filename):
+    """Save the config of all ASICs into a single file.
+    """
+    all_current_config = {}
+    cfgdb_clients = db.cfgdb_clients
+
+    for ns, config_db in cfgdb_clients.items():
+        current_config = config_db.get_config()
+        sonic_cfggen.FormatConverter.to_serialized(current_config)
+        asic_name = "localhost" if ns == DEFAULT_NAMESPACE else ns
+        all_current_config[asic_name] = sort_dict(current_config)
+    click.echo("Integrate each ASIC's config into a single JSON file {}.".format(filename))
+    with open(filename, 'w') as file:
+        json.dump(all_current_config, file, indent=4)
+
+
+def apply_patch_wrapper(args):
+    return apply_patch_for_scope(*args)
+
+
+# Function to apply patch for a single ASIC.
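+# The scope is an ASIC namespace (e.g. "asic0") or the host (HOST_NAMESPACE, mapped
+# to the default namespace). Each call records its outcome under a distinct key of
+# the shared results dict, which is what keeps the parallel apply-patch path below
+# safe without extra locking.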
def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path): scope, changes = scope_changes # Replace localhost to DEFAULT_NAMESPACE which is db definition of Host - if scope.lower() == "localhost" or scope == "": + if scope.lower() == HOST_NAMESPACE or scope == "": scope = multi_asic.DEFAULT_NAMESPACE - - scope_for_log = scope if scope else "localhost" + + scope_for_log = scope if scope else HOST_NAMESPACE + thread_id = threading.get_ident() + log.log_notice(f"apply_patch_for_scope started for {scope_for_log} by {changes} in thread:{thread_id}") + try: # Call apply_patch with the ASIC-specific changes and predefined parameters - GenericUpdater(namespace=scope).apply_patch(jsonpatch.JsonPatch(changes), config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + GenericUpdater(scope=scope).apply_patch(jsonpatch.JsonPatch(changes), + config_format, + verbose, + dry_run, + ignore_non_yang_tables, + ignore_path) results[scope_for_log] = {"success": True, "message": "Success"} - log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes}") + log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes} in thread:{thread_id}") except Exception as e: results[scope_for_log] = {"success": False, "message": str(e)} log.log_error(f"'apply-patch' executed failed for {scope_for_log} by {changes} due to {str(e)}") +def validate_patch(patch): + try: + command = ["show", "runningconfiguration", "all"] + proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) + all_running_config, returncode = proc.communicate() + if returncode: + log.log_notice(f"Fetch all runningconfiguration failed as output:{all_running_config}") + return False + + # Structure validation and simulate apply patch. + all_target_config = patch.apply(json.loads(all_running_config)) + + # Verify target config by YANG models + target_config = all_target_config.pop(HOST_NAMESPACE) if multi_asic.is_multi_asic() else all_target_config + target_config.pop("bgpraw", None) + if not SonicYangCfgDbGenerator().validate_config_db_json(target_config): + return False + + if multi_asic.is_multi_asic(): + for asic in multi_asic.get_namespace_list(): + target_config = all_target_config.pop(asic) + target_config.pop("bgpraw", None) + if not SonicYangCfgDbGenerator().validate_config_db_json(target_config): + return False + + return True + except Exception as e: + raise GenericConfigUpdaterError(f"Validate json patch: {patch} failed due to:{e}") + + +def multiasic_validate_single_file(filename): + ns_list = [DEFAULT_NAMESPACE, *multi_asic.get_namespace_list()] + file_input = read_json_file(filename) + file_ns_list = [DEFAULT_NAMESPACE if key == HOST_NAMESPACE else key for key in file_input] + if set(ns_list) != set(file_ns_list): + click.echo( + "Input file {} must contain all asics config. 
ns_list: {} file ns_list: {}".format( + filename, ns_list, file_ns_list) + ) + raise click.Abort() + + +def load_sysinfo_if_missing(asic_config): + device_metadata = asic_config.get('DEVICE_METADATA', {}) + platform = device_metadata.get("localhost", {}).get("platform") + mac = device_metadata.get("localhost", {}).get("mac") + if not platform: + log.log_warning("platform is missing from Input file") + return True + elif not mac: + log.log_warning("mac is missing from Input file") + return True + else: + return False + + +def flush_configdb(namespace=DEFAULT_NAMESPACE): + if namespace is DEFAULT_NAMESPACE: + config_db = ConfigDBConnector() + else: + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + + config_db.connect() + client = config_db.get_redis_client(config_db.CONFIG_DB) + client.flushdb() + return client, config_db + + +def migrate_db_to_lastest(namespace=DEFAULT_NAMESPACE): + # Migrate DB contents to latest version + db_migrator = '/usr/local/bin/db_migrator.py' + if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): + if namespace is DEFAULT_NAMESPACE: + command = [db_migrator, '-o', 'migrate'] + else: + command = [db_migrator, '-o', 'migrate', '-n', namespace] + clicommon.run_command(command, display_cmd=True) + + +def multiasic_write_to_db(filename, load_sysinfo): + file_input = read_json_file(filename) + for ns in [DEFAULT_NAMESPACE, *multi_asic.get_namespace_list()]: + asic_name = HOST_NAMESPACE if ns == DEFAULT_NAMESPACE else ns + asic_config = file_input[asic_name] + + asic_load_sysinfo = True if load_sysinfo else False + if not asic_load_sysinfo: + asic_load_sysinfo = load_sysinfo_if_missing(asic_config) + + if asic_load_sysinfo: + cfg_hwsku = asic_config.get("DEVICE_METADATA", {}).\ + get("localhost", {}).get("hwsku") + if not cfg_hwsku: + click.secho("Could not get the HWSKU from config file, Exiting!", fg='magenta') + sys.exit(1) + + client, _ = flush_configdb(ns) + + if asic_load_sysinfo: + if ns is DEFAULT_NAMESPACE: + command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + else: + command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(ns), '--write-to-db'] + clicommon.run_command(command, display_cmd=True) + + if ns is DEFAULT_NAMESPACE: + config_db = ConfigDBPipeConnector(use_unix_socket_path=True) + else: + config_db = ConfigDBPipeConnector(use_unix_socket_path=True, namespace=ns) + + config_db.connect(False) + sonic_cfggen.FormatConverter.to_deserialized(asic_config) + data = sonic_cfggen.FormatConverter.output_to_db(asic_config) + config_db.mod_config(sonic_cfggen.FormatConverter.output_to_db(data)) + client.set(config_db.INIT_INDICATOR, 1) + + migrate_db_to_lastest(ns) + + # This is our main entrypoint - the main 'config' command @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS) @click.pass_context @@ -1241,7 +1431,8 @@ def config(ctx): @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, prompt='Existing files will be overwritten, continue?') @click.argument('filename', required=False) -def save(filename): +@clicommon.pass_db +def save(db, filename): """Export current config DB to a file on disk.\n : Names of configuration file(s) to save, separated by comma with no spaces in between """ @@ -1256,7 +1447,13 @@ def save(filename): if filename is not None: cfg_files = filename.split(',') - if len(cfg_files) != num_cfg_file: + # If only one filename is provided in multi-ASIC mode, + # save all ASIC configurations 
to that single file. + if len(cfg_files) == 1 and multi_asic.is_multi_asic(): + filename = cfg_files[0] + multiasic_save_to_singlefile(db, filename) + return + elif len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return @@ -1362,11 +1559,12 @@ def print_dry_run_message(dry_run): help='format of config of the patch is either ConfigDb(ABNF) or SonicYang', show_default=True) @click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state') +@click.option('-p', '--parallel', is_flag=True, default=False, help='applying the change to all ASICs parallelly') @click.option('-n', '--ignore-non-yang-tables', is_flag=True, default=False, help='ignore validation for tables without YANG models', hidden=True) @click.option('-i', '--ignore-path', multiple=True, help='ignore validation for config specified by given path which is a JsonPointer', hidden=True) @click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing') @click.pass_context -def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, ignore_path, verbose): +def apply_patch(ctx, patch_file_path, format, dry_run, parallel, ignore_non_yang_tables, ignore_path, verbose): """Apply given patch of updates to Config. A patch is a JsonPatch which follows rfc6902. This command can be used do partial updates to the config with minimum disruption to running processes. It allows addition as well as deletion of configs. The patch file represents a diff of ConfigDb(ABNF) @@ -1381,6 +1579,9 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i patch_as_json = json.loads(text) patch = jsonpatch.JsonPatch(patch_as_json) + if not validate_patch(patch): + raise GenericConfigUpdaterError(f"Failed validating patch:{patch}") + results = {} config_format = ConfigFormat[format.upper()] # Initialize a dictionary to hold changes categorized by scope @@ -1403,20 +1604,39 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i # Empty case to force validate YANG model. 
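+    # (each scope still gets an empty change list, so apply_patch_for_scope runs
+    # and the current config of every scope is validated against the YANG models)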
if not changes_by_scope: asic_list = [multi_asic.DEFAULT_NAMESPACE] - asic_list.extend(multi_asic.get_namespace_list()) + if multi_asic.is_multi_asic(): + asic_list.extend(multi_asic.get_namespace_list()) for asic in asic_list: changes_by_scope[asic] = [] # Apply changes for each scope - for scope_changes in changes_by_scope.items(): - apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path) + if parallel: + with concurrent.futures.ThreadPoolExecutor() as executor: + # Prepare the argument tuples + arguments = [(scope_changes, results, config_format, + verbose, dry_run, ignore_non_yang_tables, ignore_path) + for scope_changes in changes_by_scope.items()] + + # Submit all tasks and wait for them to complete + futures = [executor.submit(apply_patch_wrapper, args) for args in arguments] + + # Wait for all tasks to complete + concurrent.futures.wait(futures) + else: + for scope_changes in changes_by_scope.items(): + apply_patch_for_scope(scope_changes, + results, + config_format, + verbose, dry_run, + ignore_non_yang_tables, + ignore_path) # Check if any updates failed failures = [scope for scope, result in results.items() if not result['success']] if failures: failure_messages = '\n'.join([f"- {failed_scope}: {results[failed_scope]['message']}" for failed_scope in failures]) - raise Exception(f"Failed to apply patch on the following scopes:\n{failure_messages}") + raise GenericConfigUpdaterError(f"Failed to apply patch on the following scopes:\n{failure_messages}") log.log_notice(f"Patch applied successfully for {patch}.") click.secho("Patch applied successfully.", fg="cyan", underline=True) @@ -1568,11 +1788,15 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form if multi_asic.is_multi_asic() and file_format == 'config_db': num_cfg_file += num_asic + multiasic_single_file_mode = False # If the user give the filename[s], extract the file names. if filename is not None: cfg_files = filename.split(',') - if len(cfg_files) != num_cfg_file: + if len(cfg_files) == 1 and multi_asic.is_multi_asic(): + multiasic_validate_single_file(cfg_files[0]) + multiasic_single_file_mode = True + elif len(cfg_files) != num_cfg_file: click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file)) return @@ -1581,127 +1805,109 @@ def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_form log.log_notice("'reload' stopping services...") _stop_services() - # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB - # service running in the host + DB services running in each ASIC namespace created per ASIC. 
- # In the below logic, we get all namespaces in this platform and add an empty namespace '' - # denoting the current namespace which we are in ( the linux host ) - for inst in range(-1, num_cfg_file-1): - # Get the namespace name, for linux host it is None - if inst == -1: - namespace = None - else: - namespace = "{}{}".format(NAMESPACE_PREFIX, inst) - - # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json - if cfg_files: - file = cfg_files[inst+1] - # Save to tmpfile in case of stdin input which can only be read once - if file == "/dev/stdin": - file_input = read_json_file(file) - (_, tmpfname) = tempfile.mkstemp(dir="/tmp", suffix="_configReloadStdin") - write_json_file(file_input, tmpfname) - file = tmpfname - else: - if file_format == 'config_db': - if namespace is None: - file = DEFAULT_CONFIG_DB_FILE - else: - file = "/etc/sonic/config_db{}.json".format(inst) + if multiasic_single_file_mode: + multiasic_write_to_db(cfg_files[0], load_sysinfo) + else: + # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB + # service running in the host + DB services running in each ASIC namespace created per ASIC. + # In the below logic, we get all namespaces in this platform and add an empty namespace '' + # denoting the current namespace which we are in ( the linux host ) + for inst in range(-1, num_cfg_file-1): + # Get the namespace name, for linux host it is DEFAULT_NAMESPACE + if inst == -1: + namespace = DEFAULT_NAMESPACE else: - file = DEFAULT_CONFIG_YANG_FILE - - - # Check the file exists before proceeding. - if not os.path.exists(file): - click.echo("The config file {} doesn't exist".format(file)) - continue - - if file_format == 'config_db': - file_input = read_json_file(file) + namespace = "{}{}".format(NAMESPACE_PREFIX, inst) + + # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json + if cfg_files: + file = cfg_files[inst+1] + # Save to tmpfile in case of stdin input which can only be read once + if file == "/dev/stdin": + file_input = read_json_file(file) + (_, tmpfname) = tempfile.mkstemp(dir="/tmp", suffix="_configReloadStdin") + write_json_file(file_input, tmpfname) + file = tmpfname + else: + if file_format == 'config_db': + if namespace is DEFAULT_NAMESPACE: + file = DEFAULT_CONFIG_DB_FILE + else: + file = "/etc/sonic/config_db{}.json".format(inst) + else: + file = DEFAULT_CONFIG_YANG_FILE - platform = file_input.get("DEVICE_METADATA", {}).\ - get("localhost", {}).get("platform") - mac = file_input.get("DEVICE_METADATA", {}).\ - get("localhost", {}).get("mac") + # Check the file exists before proceeding. + if not os.path.exists(file): + click.echo("The config file {} doesn't exist".format(file)) + continue - if not platform or not mac: - log.log_warning("Input file does't have platform or mac. 
platform: {}, mac: {}" - .format(None if platform is None else platform, None if mac is None else mac)) - load_sysinfo = True + if file_format == 'config_db': + file_input = read_json_file(file) + if not load_sysinfo: + load_sysinfo = load_sysinfo_if_missing(file_input) + + if load_sysinfo: + try: + command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"] + proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) + output, err = proc.communicate() + + except FileNotFoundError as e: + click.echo("{}".format(str(e)), err=True) + raise click.Abort() + except Exception as e: + click.echo("{}\n{}".format(type(e), str(e)), err=True) + raise click.Abort() + + if not output: + click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta') + sys.exit(1) - if load_sysinfo: - try: - command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"] - proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE) - output, err = proc.communicate() + cfg_hwsku = output.strip() - except FileNotFoundError as e: - click.echo("{}".format(str(e)), err=True) - raise click.Abort() - except Exception as e: - click.echo("{}\n{}".format(type(e), str(e)), err=True) - raise click.Abort() + client, config_db = flush_configdb(namespace) - if not output: - click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta') - sys.exit(1) + if load_sysinfo: + if namespace is DEFAULT_NAMESPACE: + command = [ + str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + else: + command = [ + str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db'] + clicommon.run_command(command, display_cmd=True) - cfg_hwsku = output.strip() + # For the database service running in linux host we use the file user gives as input + # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace, + # the default config_db.json format is used. - if namespace is None: - config_db = ConfigDBConnector() - else: - config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + config_gen_opts = [] - config_db.connect() - client = config_db.get_redis_client(config_db.CONFIG_DB) - client.flushdb() + if os.path.isfile(INIT_CFG_FILE): + config_gen_opts += ['-j', str(INIT_CFG_FILE)] - if load_sysinfo: - if namespace is None: - command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db'] + if file_format == 'config_db': + config_gen_opts += ['-j', str(file)] else: - command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db'] - clicommon.run_command(command, display_cmd=True) - - # For the database service running in linux host we use the file user gives as input - # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace, - # the default config_db.json format is used. 
- + config_gen_opts += ['-Y', str(file)] - config_gen_opts = [] + if namespace is not DEFAULT_NAMESPACE: + config_gen_opts += ['-n', str(namespace)] - if os.path.isfile(INIT_CFG_FILE): - config_gen_opts += ['-j', str(INIT_CFG_FILE)] + command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db'] - if file_format == 'config_db': - config_gen_opts += ['-j', str(file)] - else: - config_gen_opts += ['-Y', str(file)] - - if namespace is not None: - config_gen_opts += ['-n', str(namespace)] - - command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db'] - - clicommon.run_command(command, display_cmd=True) - client.set(config_db.INIT_INDICATOR, 1) + clicommon.run_command(command, display_cmd=True) + client.set(config_db.INIT_INDICATOR, 1) - if os.path.exists(file) and file.endswith("_configReloadStdin"): - # Remove tmpfile - try: - os.remove(file) - except OSError as e: - click.echo("An error occurred while removing the temporary file: {}".format(str(e)), err=True) + if os.path.exists(file) and file.endswith("_configReloadStdin"): + # Remove tmpfile + try: + os.remove(file) + except OSError as e: + click.echo("An error occurred while removing the temporary file: {}".format(str(e)), err=True) - # Migrate DB contents to latest version - db_migrator='/usr/local/bin/db_migrator.py' - if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK): - if namespace is None: - command = [db_migrator, '-o', 'migrate'] - else: - command = [db_migrator, '-o', 'migrate', '-n', str(namespace)] - clicommon.run_command(command, display_cmd=True) + # Migrate DB contents to latest version + migrate_db_to_lastest(namespace) # Re-generate the environment variable in case config_db.json was edited update_sonic_environment() @@ -1777,6 +1983,14 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, fg='magenta') raise click.Abort() + # Dependency check golden config json + config_to_check = read_json_file(golden_config_path) + if multi_asic.is_multi_asic(): + host_config = config_to_check.get('localhost', {}) + else: + host_config = config_to_check + table_hard_dependency_check(host_config) + #Stop services before config push if not no_service_restart: log.log_notice("'load_minigraph' stopping services...") @@ -1995,8 +2209,8 @@ def override_config_table(db, input_config_db, dry_run): if multi_asic.is_multi_asic() and len(config_input): # Golden Config will use "localhost" to represent host name if ns == DEFAULT_NAMESPACE: - if "localhost" in config_input.keys(): - ns_config_input = config_input["localhost"] + if HOST_NAMESPACE in config_input.keys(): + ns_config_input = config_input[HOST_NAMESPACE] else: click.secho("Wrong config format! 'localhost' not found in host config! cannot override.. abort") sys.exit(1) @@ -2151,18 +2365,6 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. systemctl restart swss""" % sync_mode) -# -# 'suppress-fib-pending' command ('config suppress-fib-pending ...') -# -@config.command('suppress-fib-pending') -@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) -@clicommon.pass_db -def suppress_pending_fib(db, state): - ''' Enable or disable pending FIB suppression. 
Once enabled, BGP will not advertise routes that are not yet installed in the hardware ''' - - config_db = db.cfgdb - config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"suppress-fib-pending" : state}) - # # 'yang_config_validation' command ('config yang_config_validation ...') # @@ -3296,7 +3498,10 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): """Add the SNMP agent listening IP:Port%Vrf configuration""" #Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|| - if not clicommon.is_ipaddress(agentip): + # Link local IP address should be provided along with zone id + # % for ex fe80::1%eth0 + agent_ip_addr = agentip.split('%')[0] + if not clicommon.is_ipaddress(agent_ip_addr): click.echo("Invalid IP address") return False config_db = ctx.obj['db'] @@ -3306,7 +3511,7 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): click.echo("ManagementVRF is Enabled. Provide vrf.") return False found = 0 - ip = ipaddress.ip_address(agentip) + ip = ipaddress.ip_address(agent_ip_addr) for intf in netifaces.interfaces(): ipaddresses = netifaces.ifaddresses(intf) if ip_family[ip.version] in ipaddresses: @@ -3984,6 +4189,11 @@ def bgp(): """BGP-related configuration tasks""" pass + + +# BGP module extensions +config.commands['bgp'].add_command(bgp_cli.DEVICE_GLOBAL) + # # 'shutdown' subgroup ('config bgp shutdown ...') # @@ -4519,7 +4729,7 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load except Exception as e: click.secho("Failed to break out Port. Error: {}".format(str(e)), fg='magenta') - sys.exit(0) + sys.exit(1) def _get_all_mgmtinterface_keys(): """Returns list of strings containing mgmt interface keys @@ -4630,12 +4840,14 @@ def validate_vlan_exists(db,text): # 'add' subcommand # -@ip.command() + +@ip.command('add') @click.argument('interface_name', metavar='', required=True) @click.argument("ip_addr", metavar="", required=True) @click.argument('gw', metavar='', required=False) +@click.option('--secondary', "-s", is_flag=True, default=False) @click.pass_context -def add(ctx, interface_name, ip_addr, gw): +def add_interface_ip(ctx, interface_name, ip_addr, gw, secondary): """Add an IP address towards the interface""" # Get the config_db connector config_db = ValidatedConfigDBConnector(ctx.obj['config_db']) @@ -4644,6 +4856,14 @@ def add(ctx, interface_name, ip_addr, gw): interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") + # Add a validation to check this interface is not a member in vlan before + # changing it to a router port mode + vlan_member_table = config_db.get_table('VLAN_MEMBER') + + if (interface_is_in_vlan(vlan_member_table, interface_name)): + click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) + return + portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') @@ -4720,7 +4940,25 @@ def add(ctx, interface_name, ip_addr, gw): config_db.set_entry(table_name, interface_name, {"admin_status": "up"}) else: config_db.set_entry(table_name, interface_name, {"NULL": "NULL"}) - config_db.set_entry(table_name, (interface_name, str(ip_address)), {"NULL": "NULL"}) + + if secondary: + # We update the secondary flag only in case of VLAN Interface. 
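+            # A "secondary" address is only accepted once the VLAN interface
+            # already has a primary address (an entry without the "secondary"
+            # flag); otherwise the command fails below.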
+ if table_name == "VLAN_INTERFACE": + vlan_interface_table = config_db.get_table(table_name) + contains_primary = False + for key, value in vlan_interface_table.items(): + if not isinstance(key, tuple): + continue + name, prefix = key + if name == interface_name and "secondary" not in value: + contains_primary = True + if contains_primary: + config_db.set_entry(table_name, (interface_name, str(ip_address)), {"secondary": "true"}) + else: + ctx.fail("Primary for the interface {} is not set, so skipping adding the interface" + .format(interface_name)) + else: + config_db.set_entry(table_name, (interface_name, str(ip_address)), {"NULL": "NULL"}) # # 'del' subcommand diff --git a/config/plugins/mlnx.py b/config/plugins/mlnx.py index accf944ce62..f61335d4f48 100644 --- a/config/plugins/mlnx.py +++ b/config/plugins/mlnx.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. +# Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. # Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); diff --git a/config/plugins/sonic-system-ldap_yang.py b/config/plugins/sonic-system-ldap_yang.py new file mode 100644 index 00000000000..cc211cdb90a --- /dev/null +++ b/config/plugins/sonic-system-ldap_yang.py @@ -0,0 +1,393 @@ +""" +Autogenerated config CLI plugin. + + +""" + +import copy +import click +import utilities_common.cli as clicommon +import utilities_common.general as general +from config import config_mgmt + +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = general.load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + + +def exit_with_error(*args, **kwargs): + """ Print a message with click.secho and abort CLI. + + Args: + args: Positional arguments to pass to click.secho + kwargs: Keyword arguments to pass to click.secho + """ + + click.secho(*args, **kwargs) + raise click.Abort() + + +def validate_config_or_raise(cfg): + """ Validate config db data using ConfigMgmt. + + Args: + cfg (Dict): Config DB data to validate. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + try: + cfg = sonic_cfggen.FormatConverter.to_serialized(copy.deepcopy(cfg)) + config_mgmt.ConfigMgmt().loadData(cfg) + except Exception as err: + raise Exception('Failed to validate configuration: {}'.format(err)) + + +def add_entry_validated(db, table, key, data): + """ Add new entry in table and validate configuration. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector obect. + table (str): Table name to add new entry to. + key (Union[str, Tuple]): Key name in the table. + data (Dict): Entry data. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key in cfg[table]: + raise Exception(f"{key} already exists") + + cfg[table][key] = data + + validate_config_or_raise(cfg) + db.set_entry(table, key, data) + + +def update_entry_validated(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector obect. + table (str): Table name to add new entry to. + key (Union[str, Tuple]): Key name in the table. + data (Dict): Entry data. + create_if_not_exists (bool): + In case entry does not exists already a new entry + is not created if this flag is set to False and + creates a new entry if flag is set to True. 
+ Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if not data: + raise Exception(f"No field/values to update {key}") + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + entry_changed = False + for attr, value in data.items(): + if value == cfg[table][key].get(attr): + continue + entry_changed = True + if value is None: + cfg[table][key].pop(attr, None) + else: + cfg[table][key][attr] = value + + if not entry_changed: + return + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_entry_validated(db, table, key): + """ Delete entry in table and validate configuration. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector obect. + table (str): Table name to add new entry to. + key (Union[str, Tuple]): Key name in the table. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + cfg[table].pop(key) + + validate_config_or_raise(cfg) + db.set_entry(table, key, None) + + +@click.group(name="ldap-server", + cls=clicommon.AliasedGroup) +def LDAP_SERVER(): + """ """ + + pass + + +@LDAP_SERVER.command(name="add") +@click.argument( + "hostname", + nargs=1, + required=True, +) +@click.option( + "--priority", + help="Server priority", +) +@clicommon.pass_db +def LDAP_SERVER_add(db, hostname, priority): + """ Add object in LDAP_SERVER. """ + + table = "LDAP_SERVER" + key = hostname + data = {} + if priority is not None: + data["priority"] = priority + + try: + add_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_SERVER.command(name="update") +@click.argument( + "hostname", + nargs=1, + required=True, +) +@click.option( + "--priority", + help="Server priority", +) +@clicommon.pass_db +def LDAP_SERVER_update(db, hostname, priority): + """ Add object in LDAP_SERVER. """ + + table = "LDAP_SERVER" + key = hostname + data = {} + if priority is not None: + data["priority"] = priority + + try: + update_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_SERVER.command(name="delete") +@click.argument( + "hostname", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_SERVER_delete(db, hostname): + """ Delete object in LDAP_SERVER. 
""" + + table = "LDAP_SERVER" + key = hostname + try: + del_entry_validated(db.cfgdb, table, key) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@click.group(name="ldap", + cls=clicommon.AliasedGroup) +def LDAP(): + """ """ + + pass + + +@LDAP.group(name="global", cls=clicommon.AliasedGroup) +@clicommon.pass_db +def LDAP_global(db): + """ """ + + pass + + +@LDAP_global.command(name="bind-dn") +@click.argument( + "bind-dn", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_bind_dn(db, bind_dn): + """ LDAP global bind dn """ + + table = "LDAP" + key = "global" + data = { + "bind_dn": bind_dn, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="bind-password") +@click.argument( + "bind-password", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_bind_password(db, bind_password): + """ Shared secret used for encrypting the communication """ + + table = "LDAP" + key = "global" + data = { + "bind_password": bind_password, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="bind-timeout") +@click.argument( + "bind-timeout", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_bind_timeout(db, bind_timeout): + """ Ldap bind timeout """ + + table = "LDAP" + key = "global" + data = { + "bind_timeout": bind_timeout, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="version") +@click.argument( + "version", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_version(db, version): + """ Ldap version """ + + table = "LDAP" + key = "global" + data = { + "version": version, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="base-dn") +@click.argument( + "base-dn", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_base_dn(db, base_dn): + """ Ldap user base dn """ + + table = "LDAP" + key = "global" + data = { + "base_dn": base_dn, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="port") +@click.argument( + "port", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_port(db, port): + """ TCP port to communicate with LDAP server """ + + table = "LDAP" + key = "global" + data = { + "port": port, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="timeout") +@click.argument( + "timeout", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_timeout(db, timeout): + """ Ldap timeout duration in sec """ + + table = "LDAP" + key = "global" + data = { + "timeout": timeout, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +def register(cli): + """ Register new CLI nodes in root CLI. 
+ + Args: + cli: Root CLI node. + Raises: + Exception: when root CLI already has a command + we are trying to register. + """ + cli_node = LDAP_SERVER + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(LDAP_SERVER) + cli_node = LDAP + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(LDAP) diff --git a/consutil/lib.py b/consutil/lib.py index 1d7f967bd37..e597e3b6437 100644 --- a/consutil/lib.py +++ b/consutil/lib.py @@ -277,7 +277,7 @@ def init_device_prefix(): @staticmethod def list_console_ttys(): """Lists all console tty devices""" - cmd = ["ls", SysInfoProvider.DEVICE_PREFIX + "*"] + cmd = ["bash", "-c", "ls " + SysInfoProvider.DEVICE_PREFIX + "*"] output, _ = SysInfoProvider.run_command(cmd, abort=False) ttys = output.split('\n') ttys = list([dev for dev in ttys if re.match(SysInfoProvider.DEVICE_PREFIX + r"\d+", dev) != None]) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 766e432f482..e1d65981a6e 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -47,6 +47,8 @@ * [CMIS firmware version show commands](#cmis-firmware-version-show-commands) * [CMIS firmware upgrade commands](#cmis-firmware-upgrade-commands) * [CMIS firmware target mode commands](#cmis-firmware-target-mode-commands) +* [CMIS debug](#cmis-debug) +* [CMIS debug loopback](#cmis-debug-loopback) * [DHCP Relay](#dhcp-relay) * [DHCP Relay show commands](#dhcp-relay-show-commands) * [DHCP Relay clear commands](#dhcp-relay-clear-commands) @@ -96,6 +98,11 @@ * [Linux Kernel Dump](#linux-kernel-dump) * [Linux Kernel Dump show commands](#Linux-Kernel-Dump-show-commands) * [Linux Kernel Dump config commands](#Linux-Kernel-Dump-config-commands) +* [LDAP](#LDAP) + * [show LDAP global commands](#LDAP-global-show-commands) + * [LDAP global config commands](#LDAP-global-config-commands) + * [show LDAP server commands](#LDAP-server-show-commands) + * [LDAP server config commands](#LDAP-server-config-commands) * [LLDP](#lldp) * [LLDP show commands](#lldp-show-commands) * [Loading, Reloading And Saving Configuration](#loading-reloading-and-saving-configuration) @@ -2607,24 +2614,24 @@ This command displays the routing policy that takes precedence over the other ro Exit routemap ``` -**show suppress-fib-pending** +**show bgp device-global** -This command is used to show the status of suppress pending FIB feature. -When enabled, BGP will not advertise routes which aren't yet offloaded. +This command displays BGP device global configuration. - Usage: - ``` - show suppress-fib-pending + ```bash + show bgp device-global ``` -- Examples: - ``` - admin@sonic:~$ show suppress-fib-pending - Enabled - ``` - ``` - admin@sonic:~$ show suppress-fib-pending - Disabled +- Options: + - _-j,--json_: display in JSON format + +- Example: + ```bash + admin@sonic:~$ show bgp device-global + TSA W-ECMP + ------- ------- + enabled enabled ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) @@ -2719,22 +2726,24 @@ This command is used to remove particular IPv4 or IPv6 BGP neighbor configuratio admin@sonic:~$ sudo config bgp remove neighbor SONIC02SPINE ``` -**config suppress-fib-pending** +**config bgp device-global tsa/w-ecmp** + +This command is used to manage BGP device global configuration. -This command is used to enable or disable announcements of routes not yet installed in the HW. -Once enabled, BGP will not advertise routes which aren't yet offloaded. 
+Feature list: +1. TSA - Traffic-Shift-Away +2. W-ECMP - Weighted-Cost Multi-Path - Usage: - ``` - config suppress-fib-pending + ```bash + config bgp device-global tsa + config bgp device-global w-ecmp ``` - Examples: - ``` - admin@sonic:~$ sudo config suppress-fib-pending enabled - ``` - ``` - admin@sonic:~$ sudo config suppress-fib-pending disabled + ```bash + admin@sonic:~$ config bgp device-global tsa enabled + admin@sonic:~$ config bgp device-global w-ecmp enabled ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp) @@ -3089,6 +3098,31 @@ Example of the module supporting target mode Target Mode set to 1 ``` +## CMIS debug + +### CMIS debug loopback + +This command is the standard CMIS diagnostic control used for troubleshooting link and performance issues between the host switch and transceiver module. + +**sfputil debug loopback** + +- Usage: + ``` + sfputil debug loopback PORT_NAME LOOPBACK_MODE + + Set the loopback mode + host-side-input: host side input loopback mode + host-side-output: host side output loopback mode + media-side-input: media side input loopback mode + media-side-output: media side output loopback mode + none: disable loopback mode + ``` + +- Example: + ``` + admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input + ``` + ## DHCP Relay ### DHCP Relay show commands @@ -6300,6 +6334,86 @@ This command displays the kubernetes server status. ``` Go Back To [Beginning of the document](#) or [Beginning of this section](#Kubernetes) +## LDAP + +### show LDAP global commands + +This command displays the global LDAP configuration that includes the following parameters: base_dn, bind_password, bind_timeout, version, port, timeout. + +- Usage: + ``` + show ldap global + ``` +- Example: + + ``` + admin@sonic:~$ show ldap global + base-dn Ldap user base dn + bind-dn LDAP global bind dn + bind-password Shared secret used for encrypting the communication + bind-timeout Ldap bind timeout <0-120> + port TCP port to communicate with LDAP server <1-65535> + timeout Ldap timeout duration in sec <1-60> + version Ldap version <1-3> + + ``` + +### LDAP global config commands + +These commands are used to configure the LDAP global parameters + + - Usage: + ``` + config ldap global + ``` +- Example: + ``` + admin@sonic:~$ config ldap global + + host
--prio <1 - 8>
+ base-dn Ldap user base dn
+ bind-dn LDAP global bind dn
+ bind-password Shared secret used for encrypting the communication
+ bind-timeout Ldap bind timeout <0-120>
+ port TCP port to communicate with LDAP server <1-65535>
+ timeout Ldap timeout duration in sec <1-60>
+ version Ldap version <1-3>
+ ```
+
+### show LDAP server commands
+
+This command displays the configured LDAP servers, which includes the following parameters: hostname, priority.
+
+- Usage:
+ ```
+ show ldap-server
+ ```
+- Example:
+
+ ```
+ admin@sonic:~$ show ldap-server
+ hostname Ldap hostname or IP of the configured LDAP server
+ priority priority for the relevant LDAP server <1-8>
+ ```
+
+### LDAP server config commands
+
+These commands are used to manage the LDAP servers in the system; they are configured in correspondence with the global config parameters mentioned earlier.
+
+ - Usage:
+ ```
+ config ldap-server
+ ```
+- Example:
+ ```
+ admin@sonic:~$ config ldap-server
+
+ add Add a new LDAP server --priority <1-8>
+ delete Delete an existing LDAP server from the list --priority <1-8>
+ update Update an existing LDAP server
+ ```
+
+Go Back To [Beginning of the document](#) or [Beginning of this section](#LDAP)
+
 ## Linux Kernel Dump
 
 This section demonstrates the show commands and configuration commands of Linux kernel dump mechanism in SONiC.
diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py
index 32a356bf9ae..8d8d23f87a1 100644
--- a/generic_config_updater/change_applier.py
+++ b/generic_config_updater/change_applier.py
@@ -16,6 +16,7 @@
 print_to_console = False
 
+
 def set_verbose(verbose=False):
     global print_to_console, logger
 
@@ -34,11 +35,12 @@ def log_error(m):
     logger.log(logger.LOG_PRIORITY_ERROR, m, print_to_console)
 
 
-def get_config_db(namespace=multi_asic.DEFAULT_NAMESPACE):
-    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
+def get_config_db(scope=multi_asic.DEFAULT_NAMESPACE):
+    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=scope)
     config_db.connect()
     return config_db
 
+
 def set_config(config_db, tbl, key, data):
     config_db.set_entry(tbl, key, data)
 
@@ -61,11 +63,9 @@ class DryRunChangeApplier:
     def __init__(self, config_wrapper):
         self.config_wrapper = config_wrapper
 
-
     def apply(self, change):
         self.config_wrapper.apply_change_to_config_db(change)
 
-
     def remove_backend_tables_from_config(self, data):
         return data
 
@@ -74,9 +74,9 @@ class ChangeApplier:
 
     updater_conf = None
 
-    def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE):
-        self.namespace = namespace
-        self.config_db = get_config_db(self.namespace)
+    def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE):
+        self.scope = scope
+        self.config_db = get_config_db(self.scope)
         self.backend_tables = [
             "BUFFER_PG",
             "BUFFER_PROFILE",
@@ -86,7 +86,6 @@ def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE):
         with open(UPDATER_CONF_FILE, "r") as s:
             ChangeApplier.updater_conf = json.load(s)
 
-
     def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys):
         # cmd is in the format as <package/module name>.<method name>
+
+Go Back To [Beginning of the document](#) or [Beginning of this section](#LDAP)
+
 ## Linux Kernel Dump
 
 This section demonstrates the show commands and configuration commands of the Linux kernel dump mechanism in SONiC.
diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py
index 32a356bf9ae..8d8d23f87a1 100644
--- a/generic_config_updater/change_applier.py
+++ b/generic_config_updater/change_applier.py
@@ -16,6 +16,7 @@
 print_to_console = False
 
+
 def set_verbose(verbose=False):
     global print_to_console, logger
 
@@ -34,11 +35,12 @@ def log_error(m):
     logger.log(logger.LOG_PRIORITY_ERROR, m, print_to_console)
 
 
-def get_config_db(namespace=multi_asic.DEFAULT_NAMESPACE):
-    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
+def get_config_db(scope=multi_asic.DEFAULT_NAMESPACE):
+    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=scope)
     config_db.connect()
     return config_db
 
+
 def set_config(config_db, tbl, key, data):
     config_db.set_entry(tbl, key, data)
 
@@ -61,11 +63,9 @@ class DryRunChangeApplier:
     def __init__(self, config_wrapper):
         self.config_wrapper = config_wrapper
 
-
     def apply(self, change):
         self.config_wrapper.apply_change_to_config_db(change)
 
-
     def remove_backend_tables_from_config(self, data):
         return data
 
@@ -74,9 +74,9 @@ class ChangeApplier:
 
     updater_conf = None
 
-    def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE):
-        self.namespace = namespace
-        self.config_db = get_config_db(self.namespace)
+    def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE):
+        self.scope = scope
+        self.config_db = get_config_db(self.scope)
         self.backend_tables = [
             "BUFFER_PG",
             "BUFFER_PROFILE",
@@ -86,7 +86,6 @@ def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE):
             with open(UPDATER_CONF_FILE, "r") as s:
                 ChangeApplier.updater_conf = json.load(s)
 
-
     def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys):
         # cmd is in the format as .
         #
@@ -98,7 +97,6 @@ def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys):
 
         return method_to_call(old_cfg, upd_cfg, keys)
 
-
     def _services_validate(self, old_cfg, upd_cfg, keys):
         lst_svcs = set()
         lst_cmds = set()
@@ -124,7 +122,6 @@ def _services_validate(self, old_cfg, upd_cfg, keys):
             log_debug("service invoked: {}".format(cmd))
         return 0
 
-
     def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys):
         for key in set(run_tbl.keys()).union(set(upd_tbl.keys())):
             run_data = run_tbl.get(key, None)
@@ -135,20 +132,17 @@ def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys):
                 upd_keys[tbl][key] = {}
                 log_debug("Patch affected tbl={} key={}".format(tbl, key))
 
-
     def _report_mismatch(self, run_data, upd_data):
         log_error("run_data vs expected_data: {}".format(
             str(jsondiff.diff(run_data, upd_data))[0:40]))
 
-
     def apply(self, change):
         run_data = self._get_running_config()
         upd_data = prune_empty_table(change.apply(copy.deepcopy(run_data)))
         upd_keys = defaultdict(dict)
 
         for tbl in sorted(set(run_data.keys()).union(set(upd_data.keys()))):
-            self._upd_data(tbl, run_data.get(tbl, {}),
-                    upd_data.get(tbl, {}), upd_keys)
+            self._upd_data(tbl, run_data.get(tbl, {}), upd_data.get(tbl, {}), upd_keys)
 
         ret = self._services_validate(run_data, upd_data, upd_keys)
         if not ret:
@@ -168,9 +162,9 @@ def remove_backend_tables_from_config(self, data):
 
     def _get_running_config(self):
         _, fname = tempfile.mkstemp(suffix="_changeApplier")
-
-        if self.namespace:
-            cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace]
+
+        if self.scope:
+            cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.scope]
         else:
             cmd = ['sonic-cfggen', '-d', '--print-data']
 
@@ -181,7 +175,9 @@ def _get_running_config(self):
         return_code = result.returncode
         if return_code:
             os.remove(fname)
-            raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}")
+            raise GenericConfigUpdaterError(
+                f"Failed to get running config for scope: {self.scope}, "
+                f"Return code: {return_code}, Error: {err}")
 
         run_data = {}
         try:
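`_invoke_cmd` above dispatches a dotted `module.method` string to a Python callable (the resolution lines fall outside the hunk context shown). A generic, self-contained sketch of that dispatch pattern:

```python
import importlib

def invoke(cmd, *args):
    """Resolve a dotted 'package.module.function' string at runtime and call it."""
    module_name, method_name = cmd.rsplit('.', 1)
    module = importlib.import_module(module_name)
    method_to_call = getattr(module, method_name)
    return method_to_call(*args)

# e.g. invoke("os.path.join", "a", "b") returns "a/b"
```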
"Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", + "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "ACS-SN5400" ] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index b75939749ce..8ce27455bbe 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -1,43 +1,74 @@ import json import jsonpointer import os +import subprocess + from enum import Enum -from .gu_common import GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ - DryRunConfigWrapper, PatchWrapper, genericUpdaterLogging +from .gu_common import HOST_NAMESPACE, GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ + DryRunConfigWrapper, PatchWrapper, genericUpdaterLogging from .patch_sorter import StrictPatchSorter, NonStrictPatchSorter, ConfigSplitter, \ - TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter + TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter from .change_applier import ChangeApplier, DryRunChangeApplier from sonic_py_common import multi_asic CHECKPOINTS_DIR = "/etc/sonic/checkpoints" CHECKPOINT_EXT = ".cp.json" + def extract_scope(path): if not path: raise Exception("Wrong patch with empty path.") - - try: - pointer = jsonpointer.JsonPointer(path) - parts = pointer.parts - except Exception as e: - raise Exception(f"Error resolving path: '{path}' due to {e}") - + pointer = jsonpointer.JsonPointer(path) + parts = pointer.parts if not parts: - raise Exception("Wrong patch with empty path.") + raise GenericConfigUpdaterError("Wrong patch with empty path.") if parts[0].startswith("asic"): if not parts[0][len("asic"):].isnumeric(): - raise Exception(f"Error resolving path: '{path}' due to incorrect ASIC number.") + raise GenericConfigUpdaterError(f"Error resolving path: '{path}' due to incorrect ASIC number.") scope = parts[0] remainder = "/" + "/".join(parts[1:]) - elif parts[0] == "localhost": - scope = "localhost" + elif parts[0] == HOST_NAMESPACE: + scope = HOST_NAMESPACE remainder = "/" + "/".join(parts[1:]) else: + if multi_asic.is_multi_asic(): + raise GenericConfigUpdaterError(f"Multi ASIC must have namespace prefix in path: '{path}'.") + scope = "" remainder = path - return scope, remainder + +def get_cmd_output(cmd): + proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE) + return proc.communicate()[0], proc.returncode + + +def get_config_json(): + scope_list = [multi_asic.DEFAULT_NAMESPACE] + all_running_config = {} + if multi_asic.is_multi_asic(): + scope_list.extend(multi_asic.get_namespace_list()) + for scope in scope_list: + command = ["sonic-cfggen", "-d", "--print-data"] + if scope != multi_asic.DEFAULT_NAMESPACE: + command += ["-n", scope] + + running_config_text, returncode = get_cmd_output(command) + if returncode: + raise GenericConfigUpdaterError( + f"Fetch all runningconfiguration failed as output:{running_config_text}") + running_config = json.loads(running_config_text) + + if multi_asic.is_multi_asic(): + if scope == multi_asic.DEFAULT_NAMESPACE: + scope = HOST_NAMESPACE + all_running_config[scope] = running_config + else: + all_running_config = 
 class ConfigLock:
     def acquire_lock(self):
         # TODO: Implement ConfigLock
@@ -52,22 +83,23 @@ class ConfigFormat(Enum):
     CONFIGDB = 1
     SONICYANG = 2
 
+
 class PatchApplier:
     def __init__(self,
                  patchsorter=None,
                  changeapplier=None,
                  config_wrapper=None,
                  patch_wrapper=None,
-                 namespace=multi_asic.DEFAULT_NAMESPACE):
-        self.namespace = namespace
+                 scope=multi_asic.DEFAULT_NAMESPACE):
+        self.scope = scope
         self.logger = genericUpdaterLogging.get_logger(title="Patch Applier", print_all_to_console=True)
-        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace)
-        self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace)
+        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope)
+        self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(scope=self.scope)
         self.patchsorter = patchsorter if patchsorter is not None else StrictPatchSorter(self.config_wrapper, self.patch_wrapper)
-        self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(namespace=self.namespace)
+        self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(scope=self.scope)
 
     def apply(self, patch, sort=True):
-        scope = self.namespace if self.namespace else 'localhost'
+        scope = self.scope if self.scope else HOST_NAMESPACE
         self.logger.log_notice(f"{scope}: Patch application starting.")
         self.logger.log_notice(f"{scope}: Patch: {patch}")
 
@@ -84,15 +116,14 @@ def apply(self, patch, sort=True):
         self.config_wrapper.validate_field_operation(old_config, target_config)
 
         # Validate target config does not have empty tables since they do not show up in ConfigDb
-        self.logger.log_notice(f"{scope}: alidating target config does not have empty tables, " \
-                               "since they do not show up in ConfigDb.")
+        self.logger.log_notice(f"{scope}: validating target config does not have empty tables, "
+                               "since they do not show up in ConfigDb.")
         empty_tables = self.config_wrapper.get_empty_tables(target_config)
-        if empty_tables: # if there are empty tables
+        if empty_tables:  # if there are empty tables
             empty_tables_txt = ", ".join(empty_tables)
-            raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables " \
-                                  "which is not allowed in ConfigDb. " \
-                                  f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}")
-
+            raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables "
+                                  "which is not allowed in ConfigDb. "
+                                  f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}")
         # Generate list of changes to apply
         if sort:
             self.logger.log_notice(f"{scope}: sorting patch updates.")
@@ -105,9 +136,6 @@ def apply(self, patch, sort=True):
         self.logger.log_notice(f"The {scope} patch was converted into {changes_len} " \
                                f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}")
 
-        for change in changes:
-            self.logger.log_notice(f"  * {change}")
-
         # Apply changes in order
         self.logger.log_notice(f"{scope}: applying {changes_len} change{'s' if changes_len != 1 else ''} " \
                                f"in order{':' if changes_len > 0 else '.'}")
@@ -120,19 +148,19 @@ def apply(self, patch, sort=True):
         new_config = self.config_wrapper.get_config_db_as_json()
         self.changeapplier.remove_backend_tables_from_config(target_config)
         self.changeapplier.remove_backend_tables_from_config(new_config)
-        if not(self.patch_wrapper.verify_same_json(target_config, new_config)):
+        if not (self.patch_wrapper.verify_same_json(target_config, new_config)):
             raise GenericConfigUpdaterError(f"{scope}: after applying patch to config, there are still some parts not updated")
 
         self.logger.log_notice(f"{scope} patch application completed.")
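To illustrate the empty-table guard in the hunk above: a patch that deletes the last entry of a table produces a table ConfigDB cannot represent, so `apply()` fails fast. A hypothetical single-entry table:

```python
# Starting config with one VLAN; the patch removes the only entry.
old_config = {"VLAN": {"Vlan100": {"vlanid": "100"}}}
patch_ops = [{"op": "remove", "path": "/VLAN/Vlan100"}]
# Applying the patch yields {"VLAN": {}}; get_empty_tables() would report
# ["VLAN"], and apply() raises EmptyTableError before touching ConfigDB.
```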
 
 
 class ConfigReplacer:
-    def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE):
-        self.namespace = namespace
+    def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, scope=multi_asic.DEFAULT_NAMESPACE):
+        self.scope = scope
         self.logger = genericUpdaterLogging.get_logger(title="Config Replacer", print_all_to_console=True)
-        self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(namespace=self.namespace)
-        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace)
-        self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace)
+        self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(scope=self.scope)
+        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope)
+        self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(scope=self.scope)
 
     def replace(self, target_config):
         self.logger.log_notice("Config replacement starting.")
@@ -150,7 +178,7 @@ def replace(self, target_config):
         self.logger.log_notice("Verifying config replacement is reflected on ConfigDB.")
         new_config = self.config_wrapper.get_config_db_as_json()
-        if not(self.patch_wrapper.verify_same_json(target_config, new_config)):
+        if not (self.patch_wrapper.verify_same_json(target_config, new_config)):
             raise GenericConfigUpdaterError(f"After replacing config, there is still some parts not updated")
 
         self.logger.log_notice("Config replacement completed.")
@@ -161,23 +189,24 @@ def __init__(self,
                  checkpoints_dir=CHECKPOINTS_DIR,
                  config_replacer=None,
                  config_wrapper=None,
-                 namespace=multi_asic.DEFAULT_NAMESPACE):
-        self.namespace = namespace
+                 scope=multi_asic.DEFAULT_NAMESPACE):
+        self.scope = scope
         self.logger = genericUpdaterLogging.get_logger(title="Config Rollbacker", print_all_to_console=True)
+        self.util = Util(checkpoints_dir=checkpoints_dir)
         self.checkpoints_dir = checkpoints_dir
-        self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer(namespace=self.namespace)
-        self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace)
+        self.config_replacer = 
config_replacer if config_replacer is not None else ConfigReplacer(scope=self.scope) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) def rollback(self, checkpoint_name): self.logger.log_notice("Config rollbacking starting.") self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice(f"Verifying '{checkpoint_name}' exists.") - if not self._check_checkpoint_exists(checkpoint_name): + if not self.util.check_checkpoint_exists(checkpoint_name): raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") self.logger.log_notice(f"Loading checkpoint into memory.") - target_config = self._get_checkpoint_content(checkpoint_name) + target_config = self.util.get_checkpoint_content(checkpoint_name) self.logger.log_notice(f"Replacing config using 'Config Replacer'.") self.config_replacer.replace(target_config) @@ -189,16 +218,16 @@ def checkpoint(self, checkpoint_name): self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice("Getting current config db.") - json_content = self.config_wrapper.get_config_db_as_json() + json_content = get_config_json() self.logger.log_notice("Getting checkpoint full-path.") - path = self._get_checkpoint_full_path(checkpoint_name) + path = self.util.get_checkpoint_full_path(checkpoint_name) self.logger.log_notice("Ensuring checkpoint directory exist.") - self._ensure_checkpoints_dir_exists() + self.util.ensure_checkpoints_dir_exists() self.logger.log_notice(f"Saving config db content to {path}.") - self._save_json_file(path, json_content) + self.util.save_json_file(path, json_content) self.logger.log_notice("Config checkpoint completed.") @@ -206,12 +235,12 @@ def list_checkpoints(self): self.logger.log_info("Listing checkpoints starting.") self.logger.log_info(f"Verifying checkpoints directory '{self.checkpoints_dir}' exists.") - if not self._checkpoints_dir_exist(): + if not self.util.checkpoints_dir_exist(): self.logger.log_info("Checkpoints directory is empty, returning empty checkpoints list.") return [] self.logger.log_info("Getting checkpoints in checkpoints directory.") - checkpoint_names = self._get_checkpoint_names() + checkpoint_names = self.util.get_checkpoint_names() checkpoints_len = len(checkpoint_names) self.logger.log_info(f"Found {checkpoints_len} checkpoint{'s' if checkpoints_len != 1 else ''}{':' if checkpoints_len > 0 else '.'}") @@ -227,59 +256,139 @@ def delete_checkpoint(self, checkpoint_name): self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice(f"Checking checkpoint exists.") - if not self._check_checkpoint_exists(checkpoint_name): + if not self.util.check_checkpoint_exists(checkpoint_name): raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") self.logger.log_notice(f"Deleting checkpoint.") - self._delete_checkpoint(checkpoint_name) + self.util.delete_checkpoint(checkpoint_name) self.logger.log_notice("Deleting checkpoint completed.") - def _ensure_checkpoints_dir_exists(self): + +class MultiASICConfigReplacer(ConfigReplacer): + def __init__(self, + patch_applier=None, + config_wrapper=None, + patch_wrapper=None, + scope=multi_asic.DEFAULT_NAMESPACE): + self.logger = genericUpdaterLogging.get_logger(title="MultiASICConfigReplacer", + print_all_to_console=True) + self.scopelist = [HOST_NAMESPACE, *multi_asic.get_namespace_list()] + super().__init__(patch_applier, config_wrapper, patch_wrapper, scope) + + def replace(self, target_config): + config_keys = set(target_config.keys()) + 
missing_scopes = set(self.scopelist) - config_keys
+        if missing_scopes:
+            raise GenericConfigUpdaterError(f"To-be-replaced config is missing scopes: {missing_scopes}")
+
+        for scope in self.scopelist:
+            scope_config = target_config.pop(scope)
+            if scope.lower() == HOST_NAMESPACE:
+                scope = multi_asic.DEFAULT_NAMESPACE
+            ConfigReplacer(scope=scope).replace(scope_config)
+
+
+class MultiASICConfigRollbacker(FileSystemConfigRollbacker):
+    def __init__(self,
+                 checkpoints_dir=CHECKPOINTS_DIR,
+                 config_replacer=None,
+                 config_wrapper=None):
+        self.logger = genericUpdaterLogging.get_logger(title="MultiASICConfigRollbacker",
+                                                       print_all_to_console=True)
+        self.scopelist = [HOST_NAMESPACE, *multi_asic.get_namespace_list()]
+        self.checkpoints_dir = checkpoints_dir
+        self.util = Util(checkpoints_dir=checkpoints_dir)
+        super().__init__(config_wrapper=config_wrapper, config_replacer=config_replacer)
+
+    def rollback(self, checkpoint_name):
+        self.logger.log_notice("Config rollbacking starting.")
+        self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.")
+        self.logger.log_notice(f"Verifying '{checkpoint_name}' exists.")
+
+        if not self.util.check_checkpoint_exists(checkpoint_name):
+            raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist")
+
+        self.logger.log_notice(f"Loading checkpoint '{checkpoint_name}' into memory.")
+        target_config = self.util.get_checkpoint_content(checkpoint_name)
+        self.logger.log_notice(f"Replacing config '{checkpoint_name}' using 'Config Replacer'.")
+
+        for scope in self.scopelist:
+            config = target_config.pop(scope)
+            if scope.lower() == HOST_NAMESPACE:
+                scope = multi_asic.DEFAULT_NAMESPACE
+            ConfigReplacer(scope=scope).replace(config)
+
+        self.logger.log_notice("Config rollbacking completed.")
+
+    def checkpoint(self, checkpoint_name):
+        all_configs = get_config_json()
+        self.logger.log_notice("Config checkpoint starting.")
+        self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.")
+
+        self.logger.log_notice("Getting checkpoint full-path.")
+        path = self.util.get_checkpoint_full_path(checkpoint_name)
+
+        self.logger.log_notice("Ensuring checkpoint directory exist.")
+        self.util.ensure_checkpoints_dir_exists()
+
+        self.logger.log_notice(f"Saving config db content to {path}.")
+        self.util.save_json_file(path, all_configs)
+
+        self.logger.log_notice("Config checkpoint completed.")
+
+
+class Util:
+    def __init__(self, checkpoints_dir=CHECKPOINTS_DIR):
+        self.checkpoints_dir = checkpoints_dir
+
+    def ensure_checkpoints_dir_exists(self):
         os.makedirs(self.checkpoints_dir, exist_ok=True)
 
-    def _save_json_file(self, path, json_content):
+    def save_json_file(self, path, json_content):
         with open(path, "w") as fh:
             fh.write(json.dumps(json_content))
 
-    def _get_checkpoint_content(self, checkpoint_name):
-        path = self._get_checkpoint_full_path(checkpoint_name)
+    def get_checkpoint_content(self, checkpoint_name):
+        path = self.get_checkpoint_full_path(checkpoint_name)
         with open(path) as fh:
             text = fh.read()
             return json.loads(text)
 
-    def _get_checkpoint_full_path(self, name):
+    def get_checkpoint_full_path(self, name):
         return os.path.join(self.checkpoints_dir, f"{name}{CHECKPOINT_EXT}")
 
-    def _get_checkpoint_names(self):
+    def get_checkpoint_names(self):
         file_names = []
         for file_name in os.listdir(self.checkpoints_dir):
             if file_name.endswith(CHECKPOINT_EXT):
                 # Remove extension from file name.
# Example assuming ext is '.cp.json', then 'checkpoint1.cp.json' becomes 'checkpoint1' file_names.append(file_name[:-len(CHECKPOINT_EXT)]) - return file_names - def _checkpoints_dir_exist(self): + def checkpoints_dir_exist(self): return os.path.isdir(self.checkpoints_dir) - def _check_checkpoint_exists(self, name): - path = self._get_checkpoint_full_path(name) + def check_checkpoint_exists(self, name): + path = self.get_checkpoint_full_path(name) return os.path.isfile(path) - def _delete_checkpoint(self, name): - path = self._get_checkpoint_full_path(name) + def delete_checkpoint(self, name): + path = self.get_checkpoint_full_path(name) return os.remove(path) class Decorator(PatchApplier, ConfigReplacer, FileSystemConfigRollbacker): - def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None, namespace=multi_asic.DEFAULT_NAMESPACE): + def __init__(self, + decorated_patch_applier=None, + decorated_config_replacer=None, + decorated_config_rollbacker=None, + scope=multi_asic.DEFAULT_NAMESPACE): # initing base classes to make LGTM happy - PatchApplier.__init__(self, namespace=namespace) - ConfigReplacer.__init__(self, namespace=namespace) - FileSystemConfigRollbacker.__init__(self, namespace=namespace) - + PatchApplier.__init__(self, scope=scope) + ConfigReplacer.__init__(self, scope=scope) + FileSystemConfigRollbacker.__init__(self, scope=scope) self.decorated_patch_applier = decorated_patch_applier self.decorated_config_replacer = decorated_config_replacer self.decorated_config_rollbacker = decorated_config_rollbacker @@ -304,10 +413,14 @@ def delete_checkpoint(self, checkpoint_name): class SonicYangDecorator(Decorator): - def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None, namespace=multi_asic.DEFAULT_NAMESPACE): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, namespace=namespace) - - self.namespace = namespace + def __init__(self, + patch_wrapper, + config_wrapper, + decorated_patch_applier=None, + decorated_config_replacer=None, + scope=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, scope=scope) + self.scope = scope self.patch_wrapper = patch_wrapper self.config_wrapper = config_wrapper @@ -326,9 +439,12 @@ def __init__(self, decorated_config_replacer=None, decorated_config_rollbacker=None, config_lock=ConfigLock(), - namespace=multi_asic.DEFAULT_NAMESPACE): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker, namespace=namespace) - + scope=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, + decorated_patch_applier, + decorated_config_replacer, + decorated_config_rollbacker, + scope=scope) self.config_lock = config_lock def apply(self, patch, sort=True): @@ -350,20 +466,20 @@ def execute_write_action(self, action, *args): class GenericUpdateFactory: - def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = 
self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) + scope=self.scope) if config_format == ConfigFormat.CONFIGDB: pass @@ -371,62 +487,75 @@ def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_ patch_applier = SonicYangDecorator(decorated_patch_applier=patch_applier, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper, - namespace=self.namespace) + scope=self.scope) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, namespace=self.namespace) + patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, scope=self.scope) return patch_applier def create_config_replacer(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) - config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) + scope=self.scope) + if multi_asic.is_multi_asic(): + config_replacer = MultiASICConfigReplacer(patch_applier=patch_applier, + config_wrapper=config_wrapper) + else: + config_replacer = ConfigReplacer(patch_applier=patch_applier, + config_wrapper=config_wrapper, + scope=self.scope) - config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper, namespace=self.namespace) if config_format == ConfigFormat.CONFIGDB: pass elif config_format == ConfigFormat.SONICYANG: config_replacer = SonicYangDecorator(decorated_config_replacer=config_replacer, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper, - namespace=self.namespace) + scope=self.scope) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, namespace=self.namespace) + config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, scope=self.scope) return config_replacer def create_config_rollbacker(self, verbose, dry_run=False, ignore_non_yang_tables=False, ignore_paths=[]): self.init_verbose_logging(verbose) - config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) - - config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier, namespace=self.namespace) - config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, config_replacer=config_replacer, namespace=self.namespace) 
+ scope=self.scope) + if multi_asic.is_multi_asic(): + config_replacer = MultiASICConfigReplacer(config_wrapper=config_wrapper, + patch_applier=patch_applier) + config_rollbacker = MultiASICConfigRollbacker(config_wrapper=config_wrapper, + config_replacer=config_replacer) + else: + config_replacer = ConfigReplacer(config_wrapper=config_wrapper, + patch_applier=patch_applier, + scope=self.scope) + config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, + config_replacer=config_replacer, + scope=self.scope) if not dry_run: - config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, namespace=self.namespace) + config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, scope=self.scope) return config_rollbacker @@ -435,15 +564,15 @@ def init_verbose_logging(self, verbose): def get_config_wrapper(self, dry_run): if dry_run: - return DryRunConfigWrapper(namespace=self.namespace) + return DryRunConfigWrapper(scope=self.scope) else: - return ConfigWrapper(namespace=self.namespace) + return ConfigWrapper(scope=self.scope) def get_change_applier(self, dry_run, config_wrapper): if dry_run: return DryRunChangeApplier(config_wrapper) else: - return ChangeApplier(namespace=self.namespace) + return ChangeApplier(scope=self.scope) def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper): if not ignore_non_yang_tables and not ignore_paths: @@ -462,9 +591,9 @@ def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, class GenericUpdater: - def __init__(self, generic_update_factory=None, namespace=multi_asic.DEFAULT_NAMESPACE): + def __init__(self, generic_update_factory=None, scope=multi_asic.DEFAULT_NAMESPACE): self.generic_update_factory = \ - generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(namespace=namespace) + generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(scope=scope) def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths, sort=True): patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 974c540c07a..938aa1d034a 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ -16,6 +16,8 @@ SYSLOG_IDENTIFIER = "GenericConfigUpdater" SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) GCU_FIELD_OP_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" +HOST_NAMESPACE = "localhost" + class GenericConfigUpdaterError(Exception): pass @@ -52,8 +54,8 @@ def __eq__(self, other): return False class ConfigWrapper: - def __init__(self, yang_dir=YANG_DIR, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, yang_dir=YANG_DIR, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.yang_dir = YANG_DIR self.sonic_yang_with_loaded_models = None @@ -64,8 +66,8 @@ def get_config_db_as_json(self): return config_db_json def _get_config_db_as_text(self): - if self.namespace is not None and self.namespace != multi_asic.DEFAULT_NAMESPACE: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] + if self.scope is not None and self.scope != multi_asic.DEFAULT_NAMESPACE: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.scope] else: cmd = ['sonic-cfggen', '-d', '--print-data'] @@ 
-73,7 +75,8 @@ def _get_config_db_as_text(self): text, err = result.communicate() return_code = result.returncode if return_code: # non-zero means failure - raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") + raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.scope}," + f" Return code: {return_code}, Error: {err}") return text def get_sonic_yang_as_json(self): @@ -300,8 +303,8 @@ def create_sonic_yang_with_loaded_models(self): class DryRunConfigWrapper(ConfigWrapper): # This class will simulate all read/write operations to ConfigDB on a virtual storage unit. - def __init__(self, initial_imitated_config_db = None, namespace=multi_asic.DEFAULT_NAMESPACE): - super().__init__(namespace=namespace) + def __init__(self, initial_imitated_config_db=None, scope=multi_asic.DEFAULT_NAMESPACE): + super().__init__(scope=scope) self.logger = genericUpdaterLogging.get_logger(title="** DryRun", print_all_to_console=True) self.imitated_config_db = copy.deepcopy(initial_imitated_config_db) @@ -321,9 +324,9 @@ def _init_imitated_config_db_if_none(self): class PatchWrapper: - def __init__(self, config_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.namespace) + def __init__(self, config_wrapper=None, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.scope) self.path_addressing = PathAddressing(self.config_wrapper) def validate_config_db_patch_has_yang_models(self, patch): diff --git a/pfc/main.py b/pfc/main.py index b31d3c755ed..f0b376e2426 100644 --- a/pfc/main.py +++ b/pfc/main.py @@ -1,5 +1,4 @@ #!/usr/bin/env python3 - import click from swsscommon.swsscommon import ConfigDBConnector from tabulate import tabulate @@ -8,153 +7,167 @@ ALL_PRIORITIES = [str(x) for x in range(8)] PRIORITY_STATUS = ['on', 'off'] -def configPfcAsym(interface, pfc_asym): - """ - PFC handler to configure asymmentric PFC. - """ - configdb = ConfigDBConnector() - configdb.connect() - configdb.mod_entry("PORT", interface, {'pfc_asym': pfc_asym}) +class Pfc(object): + def __init__(self, cfgdb=None): + self.cfgdb = cfgdb + def configPfcAsym(self, interface, pfc_asym): + """ + PFC handler to configure asymmetric PFC. + """ + configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb + configdb.connect() -def showPfcAsym(interface): - """ - PFC handler to display asymmetric PFC information. - """ - header = ('Interface', 'Asymmetric') + configdb.mod_entry("PORT", interface, {'pfc_asym': pfc_asym}) - configdb = ConfigDBConnector() - configdb.connect() + def showPfcAsym(self, interface): + """ + PFC handler to display asymmetric PFC information. 
+ """ + header = ('Interface', 'Asymmetric') - if interface: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface)) - else: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*') + configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb + configdb.connect() - table = [] - - for i in db_keys or [None]: - key = None - if i: - key = i.split('|')[-1] + if interface: + db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface)) + else: + db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*') - if key and key.startswith('Ethernet'): - entry = configdb.get_entry('PORT', key) - table.append([key, entry.get('pfc_asym', 'N/A')]) + table = [] - sorted_table = natsorted(table) + for i in db_keys or [None]: + key = None + if i: + key = i.split('|')[-1] - click.echo() - click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) - click.echo() + if key and key.startswith('Ethernet'): + entry = configdb.get_entry('PORT', key) + table.append([key, entry.get('pfc_asym', 'N/A')]) -def configPfcPrio(status, interface, priority): - configdb = ConfigDBConnector() - configdb.connect() + sorted_table = natsorted(table) - if interface not in configdb.get_keys('PORT_QOS_MAP'): - click.echo('Cannot find interface {0}'.format(interface)) - return + click.echo() + click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) + click.echo() - """Current lossless priorities on the interface""" - entry = configdb.get_entry('PORT_QOS_MAP', interface) - enable_prio = entry.get('pfc_enable').split(',') - - """Avoid '' in enable_prio""" - enable_prio = [x.strip() for x in enable_prio if x.strip()] - - if status == 'on' and priority in enable_prio: - click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface)) - return - - if status == 'off' and priority not in enable_prio: - click.echo('Priority {0} is not enabled on {1}'.format(priority, interface)) - return - - if status == 'on': - enable_prio.append(priority) - - else: - enable_prio.remove(priority) - - enable_prio.sort() - configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)}) - - """Show the latest PFC configuration""" - showPfcPrio(interface) - -def showPfcPrio(interface): - """ - PFC handler to display PFC enabled priority information. 
- """ - header = ('Interface', 'Lossless priorities') - table = [] + def configPfcPrio(self, status, interface, priority): + configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb + configdb.connect() + + if interface not in configdb.get_keys('PORT_QOS_MAP'): + click.echo('Cannot find interface {0}'.format(interface)) + return + + """Current lossless priorities on the interface""" + entry = configdb.get_entry('PORT_QOS_MAP', interface) + enable_prio = entry.get('pfc_enable').split(',') + + """Avoid '' in enable_prio""" + enable_prio = [x.strip() for x in enable_prio if x.strip()] + + if status == 'on' and priority in enable_prio: + click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface)) + return + + if status == 'off' and priority not in enable_prio: + click.echo('Priority {0} is not enabled on {1}'.format(priority, interface)) + return + + if status == 'on': + enable_prio.append(priority) + + else: + enable_prio.remove(priority) + + enable_prio.sort() + configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)}) + + """Show the latest PFC configuration""" + self.showPfcPrio(interface) - configdb = ConfigDBConnector() - configdb.connect() - - """Get all the interfaces with QoS map information""" - intfs = configdb.get_keys('PORT_QOS_MAP') - - """The user specifies an interface but we cannot find it""" - if interface and interface not in intfs: - click.echo('Cannot find interface {0}'.format(interface)) - return - - if interface: - intfs = [interface] - - for intf in intfs: - entry = configdb.get_entry('PORT_QOS_MAP', intf) - table.append([intf, entry.get('pfc_enable', 'N/A')]) - - sorted_table = natsorted(table) - click.echo() - click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) - click.echo() + def showPfcPrio(self, interface): + """ + PFC handler to display PFC enabled priority information. + """ + header = ('Interface', 'Lossless priorities') + table = [] + + configdb = ConfigDBConnector() if self.cfgdb is None else self.cfgdb + configdb.connect() + + """Get all the interfaces with QoS map information""" + intfs = configdb.get_keys('PORT_QOS_MAP') + + """The user specifies an interface but we cannot find it""" + if interface and interface not in intfs: + click.echo('Cannot find interface {0}'.format(interface)) + return + + if interface: + intfs = [interface] + + for intf in intfs: + entry = configdb.get_entry('PORT_QOS_MAP', intf) + table.append([intf, entry.get('pfc_enable', 'N/A')]) + + sorted_table = natsorted(table) + click.echo() + click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) + click.echo() @click.group() -def cli(): +@click.pass_context +def cli(ctx): """PFC Command Line""" - pass + # Use the cfgdb object if given as input. 
+    cfgdb = None if ctx.obj is None else ctx.obj.cfgdb
+
+    ctx.obj = {'pfc': Pfc(cfgdb)}
 
 
 @cli.group()
-def config():
+@click.pass_context
+def config(ctx):
     """Config PFC"""
     pass
 
 
 @cli.group()
-def show():
+@click.pass_context
+def show(ctx):
     """Show PFC information"""
     pass
 
 
 @click.command()
 @click.argument('status', type=click.Choice(PRIORITY_STATUS))
 @click.argument('interface', type=click.STRING)
-def configAsym(status, interface):
+@click.pass_context
+def configAsym(ctx, status, interface):
     """Configure asymmetric PFC on a given port."""
-    configPfcAsym(interface, status)
+    ctx.obj['pfc'].configPfcAsym(interface, status)
 
 
 @click.command()
 @click.argument('status', type=click.Choice(PRIORITY_STATUS))
 @click.argument('interface', type=click.STRING)
 @click.argument('priority', type=click.Choice(ALL_PRIORITIES))
-def configPrio(status, interface, priority):
+@click.pass_context
+def configPrio(ctx, status, interface, priority):
     """Configure PFC on a given priority."""
-    configPfcPrio(status, interface, priority)
-
+    ctx.obj['pfc'].configPfcPrio(status, interface, priority)
+
 
 @click.command()
 @click.argument('interface', type=click.STRING, required=False)
-def showAsym(interface):
+@click.pass_context
+def showAsym(ctx, interface):
     """Show asymmetric PFC information"""
-    showPfcAsym(interface)
+    ctx.obj['pfc'].showPfcAsym(interface)
 
 
 @click.command()
 @click.argument('interface', type=click.STRING, required=False)
-def showPrio(interface):
+@click.pass_context
+def showPrio(ctx, interface):
     """Show PFC priority information"""
-    showPfcPrio(interface)
+    ctx.obj['pfc'].showPfcPrio(interface)
 
 
 config.add_command(configAsym, "asymmetric")
 config.add_command(configPrio, "priority")
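The point of threading `cfgdb` through the new `Pfc` class is testability: a caller can inject a fake ConfigDB instead of monkey-patching module functions. A minimal sketch of such a test, assuming `pfc.main` is importable; sonic-utilities' real tests use richer mock-ConfigDB helpers:

```python
from click.testing import CliRunner
import pfc.main as pfc

class FakeConfigDb:
    def connect(self):
        pass
    def mod_entry(self, table, key, data):
        self.last_write = (table, key, data)

class CtxObj:  # mimics the object carrying a .cfgdb attribute into cli()
    cfgdb = FakeConfigDb()

runner = CliRunner()
result = runner.invoke(pfc.cli, ['config', 'asymmetric', 'on', 'Ethernet0'], obj=CtxObj())
assert result.exit_code == 0
assert CtxObj.cfgdb.last_write == ('PORT', 'Ethernet0', {'pfc_asym': 'on'})
```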
diff --git a/rcli/linecard.py b/rcli/linecard.py
index 73c13a73efb..f893428a42f 100644
--- a/rcli/linecard.py
+++ b/rcli/linecard.py
@@ -8,7 +8,7 @@ import termios
 import tty
 
-from .utils import get_linecard_ip
+from .utils import get_linecard_ip, get_linecard_hostname_from_module_name, get_linecard_module_name_from_hostname
 from paramiko.py3compat import u
 from paramiko import Channel
 
@@ -31,7 +31,17 @@ def __init__(self, linecard_name, username, password):
         if not self.ip:
             sys.exit(1)
 
-        self.linecard_name = linecard_name
+        # if the user passes a linecard hostname, try to get the module name for that linecard
+        module_name = get_linecard_module_name_from_hostname(linecard_name)
+        if module_name is None:
+            # if the module name cannot be found from the hostname, assume the user has passed a module name
+            self.module_name = linecard_name
+            self.hostname = get_linecard_hostname_from_module_name(linecard_name)
+        else:
+            # the user has passed a linecard hostname
+            self.hostname = linecard_name
+            self.module_name = module_name
+
         self.username = username
         self.password = password
 
diff --git a/rcli/rexec.py b/rcli/rexec.py
index 8831d5585f4..21929c8012b 100644
--- a/rcli/rexec.py
+++ b/rcli/rexec.py
@@ -30,20 +30,22 @@ def cli(linecard_names, command, username):
 
     if list(linecard_names) == ["all"]:
         # Get all linecard names using autocompletion helper
-        linecard_names = rcli_utils.get_all_linecards(None, None, "")
+        module_names = sorted(rcli_utils.get_all_linecards(None, None, ""))
+    else:
+        module_names = linecard_names
 
     linecards = []
     # Iterate through each linecard, check if the login was successful
-    for linecard_name in linecard_names:
-        linecard = Linecard(linecard_name, username, password)
+    for module_name in module_names:
+        linecard = Linecard(module_name, username, password)
         if not linecard.connection:
-            click.echo(f"Failed to connect to {linecard_name} with username {username}")
+            click.echo(f"Failed to connect to {module_name} with username {username}")
             sys.exit(1)
 
         linecards.append(linecard)
 
     for linecard in linecards:
         if linecard.connection:
-            click.echo(f"======== {linecard.linecard_name} output: ========")
+            click.echo(f"======== {linecard.module_name}|{linecard.hostname} output: ========")
             click.echo(linecard.execute_cmd(command))
 
diff --git a/rcli/rshell.py b/rcli/rshell.py
index bac02d42d81..b22187a0f34 100644
--- a/rcli/rshell.py
+++ b/rcli/rshell.py
@@ -28,14 +28,14 @@ def cli(linecard_name, username):
     try:
         linecard = Linecard(linecard_name, username, password)
         if linecard.connection:
-            click.echo(f"Connecting to {linecard.linecard_name}")
+            click.echo(f"Connecting to {linecard.module_name}")
             # If connection was created, connection exists.
             # Otherwise, user will see an error message.
             linecard.start_shell()
             click.echo("Connection Closed")
     except paramiko.ssh_exception.AuthenticationException:
         click.echo(
-            f"Login failed on '{linecard.linecard_name}' with username '{linecard.username}'")
+            f"Login failed on '{linecard.module_name}' with username '{linecard.username}'")
 
 
 if __name__=="__main__":
diff --git a/rcli/utils.py b/rcli/utils.py
index 510e360581a..e2f48788bab 100644
--- a/rcli/utils.py
+++ b/rcli/utils.py
@@ -43,6 +43,20 @@ def get_linecard_module_name_from_hostname(linecard_name: str):
 
     return None
 
+
+def get_linecard_hostname_from_module_name(linecard_name: str):
+
+    chassis_state_db = connect_to_chassis_state_db()
+    keys = chassis_state_db.keys(chassis_state_db.CHASSIS_STATE_DB, '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, '*'))
+    for key in keys:
+        module_name = key.split('|')[1]
+        if module_name.replace('-', '').lower() == linecard_name.replace('-', '').lower():
+            hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, key, CHASSIS_MODULE_HOSTNAME)
+            return hostname
+
+    return None
+
+
 def get_linecard_ip(linecard_name: str):
     """
     Given a linecard name, lookup its IP address in the midplane table
@@ -69,6 +83,7 @@ def get_linecard_ip(linecard_name: str):
         return None
     return module_ip
 
+
 def get_module_ip_and_access_from_state_db(module_name):
     state_db = connect_state_db()
     data_dict = state_db.get_all(
diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py
index afd5e638de4..9be3ce325b1 100755
--- a/scripts/db_migrator.py
+++ b/scripts/db_migrator.py
@@ -58,7 +58,7 @@ def __init__(self, namespace, socket=None):
                           none-zero values.
               build: sequentially increase within a minor version domain.
         """
-        self.CURRENT_VERSION = 'version_202405_01'
+        self.CURRENT_VERSION = 'version_202411_01'
 
         self.TABLE_NAME = 'VERSIONS'
         self.TABLE_KEY = 'DATABASE'
@@ -1228,10 +1228,18 @@ def version_202311_03(self):
 
     def version_202405_01(self):
         """
-        Version 202405_01, this version should be the final version for
-        master branch until 202405 branch is created.
+        Version 202405_01.
         """
         log.log_info('Handling version_202405_01')
+        self.set_version('version_202411_01')
+        return 'version_202411_01'
+
+    def version_202411_01(self):
+        """
+        Version 202411_01, this version should be the final version for
+        master branch until 202411 branch is created.
+ """ + log.log_info('Handling version_202411_01') return None def get_version(self): diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index 3d0b8d1db91..5812f38190b 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -17,13 +17,15 @@ import sys import sonic_platform from sonic_platform_base.sonic_eeprom.eeprom_tlvinfo import TlvInfoDecoder -from sonic_py_common import device_info +from sonic_py_common import device_info, logger from swsscommon.swsscommon import SonicV2Connector from tabulate import tabulate EEPROM_INFO_TABLE = 'EEPROM_INFO' +SYSLOG_IDENTIFIER = 'decode-syseeprom' +log = logger.Logger(SYSLOG_IDENTIFIER) def instantiate_eeprom_object(): eeprom = None diff --git a/scripts/dropconfig b/scripts/dropconfig index 180c6166c64..1fc812a4748 100755 --- a/scripts/dropconfig +++ b/scripts/dropconfig @@ -105,7 +105,7 @@ class DropConfig(object): if supported_reasons and int(capabilities.get('count', 0)) > 0: print('\n{}'.format(counter)) for reason in supported_reasons: - print('\t{}'.format(reason)) + print(' {}'.format(reason)) def create_counter(self, counter_name, alias, group, counter_type, description, reasons): diff --git a/scripts/dropstat b/scripts/dropstat index 4e9f5bb4d03..219ad2b4947 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,8 +11,8 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries +import click import json -import argparse import os import socket import sys @@ -20,6 +20,9 @@ import sys from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +from utilities_common.general import load_db_config +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -28,9 +31,14 @@ try: test_path = os.path.join(modules_path, "tests") sys.path.insert(0, modules_path) sys.path.insert(0, test_path) - import mock_tables.dbconnector + from tests.mock_tables import dbconnector socket.gethostname = lambda: 'sonic_drops_test' os.getuid = lambda: 27 + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + else: + dbconnector.load_database_config() except KeyError: pass @@ -43,6 +51,7 @@ DEBUG_COUNTER_PORT_STAT_MAP = 'COUNTERS_DEBUG_NAME_PORT_STAT_MAP' DEBUG_COUNTER_SWITCH_STAT_MAP = 'COUNTERS_DEBUG_NAME_SWITCH_STAT_MAP' COUNTERS_PORT_NAME_MAP = 'COUNTERS_PORT_NAME_MAP' COUNTER_TABLE_PREFIX = 'COUNTERS:' +SWITCH_LEVEL_COUNTER_PREFIX = 'SWITCH_STD_DROP_COUNTER-' # ASIC_DB Tables ASIC_SWITCH_INFO_PREFIX = 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:' @@ -79,34 +88,43 @@ std_port_headers_map = { # Standard Switch-Level Headers std_switch_description_header = ['DEVICE'] +std_switch_dflt_drop_headers= [ 'SWITCH-ID'] +std_switch_drop_headers_map = { + 'SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP': 'PKT_INTEGRITY_ERR' +} def get_dropstat_dir(): return UserCache().get_directory() class DropStat(object): - def __init__(self): - self.config_db = ConfigDBConnector() - self.config_db.connect() - - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.COUNTERS_DB) - self.db.connect(self.db.ASIC_DB) - self.db.connect(self.db.APPL_DB) + def __init__(self, namespace): + self.namespaces = multi_asic.get_namespace_list(namespace) + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.db = None + self.config_db = None + self.cached_namespace = None 
         dropstat_dir = get_dropstat_dir()
         self.port_drop_stats_file = os.path.join(dropstat_dir, 'port-stats')
-        self.switch_drop_stats_file = os.path.join(dropstat_dir + 'switch-stats')
+        self.switch_drop_stats_file = os.path.join(dropstat_dir, 'switch-stats')
+        self.switch_std_drop_stats_file = os.path.join(dropstat_dir, 'switch-std-drop-stats')
 
         self.stat_lookup = {}
         self.reverse_stat_lookup = {}
 
+    @multi_asic_util.run_on_multi_asic
     def show_drop_counts(self, group, counter_type):
         """
         Prints out the current drop counts at the port-level and switch-level.
         """
+        if os.environ.get("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE", "0") == "1":
+            # Temp cache needs to be cleared to avoid interference from previous test cases
+            UserCache().remove()
+
+        self.show_switch_std_drop_counts(group, counter_type)
         self.show_port_drop_counts(group, counter_type)
         print('')
         self.show_switch_drop_counts(group, counter_type)
@@ -116,16 +134,91 @@ class DropStat(object):
         Clears the current drop counts.
         """
 
+        counters_port_drop = {}
+        counters_switch_drop = {}
+        counters_switch_std_drop = {}
+        for ns in self.namespaces:
+            self.config_db = multi_asic.connect_config_db_for_ns(ns)
+            self.db = multi_asic.connect_to_all_dbs_for_ns(ns)
+
+            counts = self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP)
+            if counts:
+                counters_port_drop.update(counts)
+
+            counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP)
+            if counters:
+                counts = self.get_counts(counters, self.get_switch_id())
+                counters_switch_drop.update(counts)
+
+            counters = self.get_configured_counters(DEBUG_COUNTER_SWITCH_STAT_MAP, True)
+            if counters:
+                counts = self.get_counts(counters, self.get_switch_id())
+                counters_switch_std_drop.update(counts)
+
         try:
-            json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP),
-                      open(self.port_drop_stats_file, 'w+'))
-            json.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()),
-                      open(self.switch_drop_stats_file, 'w+'))
+            if counters_port_drop:
+                json.dump(counters_port_drop, open(self.port_drop_stats_file, 'w+'))
+
+            if counters_switch_drop:
+                json.dump(counters_switch_drop, open(self.switch_drop_stats_file, 'w+'))
+
+            if counters_switch_std_drop:
+                json.dump(counters_switch_std_drop, open(self.switch_std_drop_stats_file, 'w+'))
         except IOError as e:
             print(e)
             sys.exit(e.errno)
         print("Cleared drop counters")
 
+    def show_switch_std_drop_counts(self, group, counter_type):
+        """
+        Prints out the standard drop counts (packet integrity drop, etc.) at the switch level, if such counts exist.
+        """
+
+        if group is not None or counter_type is not None:
+            return
+
+        # Currently the switch drop counter (packet integrity) is supported only for chassis.
+ if os.environ.get("VOQ_DROP_COUNTER_TESTING", "0") == "1": + #fake the switch_type for mock-test code coverage + switch_type = "voq" + else: + switch_type = self.db.get(self.db.CONFIG_DB, "DEVICE_METADATA|localhost", "switch_type") + + if switch_type is None: + return + if switch_type != "fabric" and switch_type != "voq": + return + + switch_std_drop_ckpt = {} + + # Grab the latest clear checkpoint, if it exists + if os.path.isfile(self.switch_std_drop_stats_file): + switch_std_drop_ckpt = json.load(open(self.switch_std_drop_stats_file, 'r')) + + counters = self.get_configured_counters(DEBUG_COUNTER_SWITCH_STAT_MAP, True) + if not counters: + return + switch_id = self.get_switch_id() + switch_std_stats = self.get_counts(counters, switch_id) + + if not switch_std_stats: + return + + if os.environ.get("VOQ_DROP_COUNTER_TESTING", "0") == "1": + row = [socket.gethostname()] + else: + cfg_switch_id = self.db.get(self.db.CONFIG_DB, "DEVICE_METADATA|localhost", "switch_id") + row = [cfg_switch_id] + + headers = std_switch_dflt_drop_headers + for cntr in counters: + if cntr in std_switch_drop_headers_map: + row.append(switch_std_stats.get(cntr, 0) - switch_std_drop_ckpt.get(cntr, 0)) + headers.append(std_switch_drop_headers_map[cntr]) + if row: + print(tabulate([row], headers, tablefmt='simple', stralign='right')) + print('') + def show_port_drop_counts(self, group, counter_type): """ Prints out the drop counts at the port level, if such counts exist. @@ -189,7 +282,7 @@ class DropStat(object): the group or not the right counter type. """ - configured_counters = self.get_configured_counters(object_stat_map) + configured_counters = self.get_configured_counters(object_stat_map, False) counters = std_counters + configured_counters return [ctr for ctr in counters if self.in_group(ctr, object_stat_map, group) and @@ -258,12 +351,13 @@ class DropStat(object): the given object type. """ + if self.cached_namespace != self.multi_asic.current_namespace: + self.stat_lookup = {} + self.cached_namespace = self.multi_asic.current_namespace + if not self.stat_lookup.get(object_stat_map, None): stats_map = self.db.get_all(self.db.COUNTERS_DB, object_stat_map) - if stats_map: - self.stat_lookup[object_stat_map] = stats_map - else: - self.stat_lookup[object_stat_map] = None + self.stat_lookup[object_stat_map] = stats_map if stats_map else None return self.stat_lookup[object_stat_map] @@ -282,7 +376,7 @@ class DropStat(object): return self.reverse_stat_lookup[object_stat_map] - def get_configured_counters(self, object_stat_map): + def get_configured_counters(self, object_stat_map, std_switch_cntr=False): """ Returns the list of counters that have been configured to track packet drops. 
@@ -294,6 +388,15 @@ class DropStat(object):
         if not counters:
             return configured_counters
 
+        # Switch-level standard drop counters are added by default to the DEBUG_COUNTER_SWITCH_STAT_MAP table,
+        # so remove them from the configured counters
+        if object_stat_map == DEBUG_COUNTER_SWITCH_STAT_MAP:
+            if std_switch_cntr:
+                new_cntrs = {k: counters[k] for k in counters if SWITCH_LEVEL_COUNTER_PREFIX in k}
+            else:
+                new_cntrs = {k: counters[k] for k in counters if SWITCH_LEVEL_COUNTER_PREFIX not in k}
+            return list(new_cntrs.values())
+
         return list(counters.values())
 
     def get_counter_name(self, object_stat_map, counter_stat):
@@ -385,39 +488,22 @@ class DropStat(object):
         else:
             return PORT_STATE_NA
 
-
-def main():
-    parser = argparse.ArgumentParser(description='Display drop counters',
-                                     formatter_class=argparse.RawTextHelpFormatter,
-                                     epilog="""
-Examples:
-    dropstat
-""")
-
-    # Version
-    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
-
-    # Actions
-    parser.add_argument('-c', '--command', type=str, help='Desired action to perform')
-
-    # Variables
-    parser.add_argument('-g', '--group', type=str, help='The group of the target drop counter', default=None)
-    parser.add_argument('-t', '--type', type=str, help='The type of the target drop counter', default=None)
-
-    args = parser.parse_args()
-
-    command = args.command
-
-    group = args.group
-    counter_type = args.type
-
-    dcstat = DropStat()
+@click.command(help='Display drop counters')
+@click.option('-c', '--command', required=True, help='Desired action to perform',
+              type=click.Choice(['clear', 'show'], case_sensitive=False))
+@click.option('-g', '--group', default=None, help='The group of the target drop counter')
+@click.option('-t', '--type', 'counter_type', default=None, help='The type of the target drop counter')
+@click.option('-n', '--namespace', help='Namespace name', default=None,
+              type=click.Choice(multi_asic.get_namespace_list()))
+@click.version_option(version='1.0')
+def main(command, group, counter_type, namespace):
+    load_db_config()
+
+    dcstat = DropStat(namespace)
     if command == 'clear':
         dcstat.clear_drop_counts()
-    elif command == 'show':
-        dcstat.show_drop_counts(group, counter_type)
     else:
-        print("Command not recognized")
+        dcstat.show_drop_counts(group, counter_type)
 
 
 if __name__ == '__main__':
diff --git a/scripts/fabricstat b/scripts/fabricstat
index cf3d14bf5e9..6f1893c9dbc 100755
--- a/scripts/fabricstat
+++ b/scripts/fabricstat
@@ -399,6 +399,49 @@ class FabricIsolation(FabricStat):
         print(tabulate(body, header, tablefmt='simple', stralign='right'))
         return
 
+class FabricRate(FabricStat):
+    def rate_print(self):
+        # Connect to database
+        self.db = multi_asic.connect_to_all_dbs_for_ns(self.namespace)
+        # Get the set of all fabric ports
+        port_keys = self.db.keys(self.db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*')
+        # Create a new dictionary. The keys are the local port values in integer format.
+        # Only fabric ports that have remote port data are added.
+        port_dict = {}
+        for port_key in port_keys:
+            port_data = self.db.get_all(self.db.STATE_DB, port_key)
+            port_number = int(port_key.replace("FABRIC_PORT_TABLE|PORT", ""))
+            port_dict.update({port_number: port_data})
+        # Create ordered table of fabric ports.
+        rxRate = 0
+        rxData = 0
+        txRate = 0
+        txData = 0
+        time = 0
+        local_time = ""
+        # Rx data, Tx data, and Time are kept for testing
+        asic = "asic0"
+        if self.namespace:
+            asic = self.namespace
+        header = ["ASIC", "Link ID", "Rx Data Mbps", "Tx Data Mbps"]
+        body = []
+        for port_number in sorted(port_dict.keys()):
+            port_data = port_dict[port_number]
+            if "OLD_RX_RATE_AVG" in port_data:
+                rxRate = port_data["OLD_RX_RATE_AVG"]
+            if "OLD_RX_DATA" in port_data:
+                rxData = port_data["OLD_RX_DATA"]
+            if "OLD_TX_RATE_AVG" in port_data:
+                txRate = port_data["OLD_TX_RATE_AVG"]
+            if "OLD_TX_DATA" in port_data:
+                txData = port_data["OLD_TX_DATA"]
+            if "LAST_TIME" in port_data:
+                time = int(port_data["LAST_TIME"])
+                local_time = datetime.fromtimestamp(time)
+            body.append((asic, port_number, rxRate, txRate))
+        click.echo()
+        click.echo(tabulate(body, header, tablefmt='simple', stralign='right'))
+
 def main():
     global cnstat_dir
     global cnstat_fqn_file_port
@@ -415,6 +458,8 @@ Examples:
     fabricstat -q -n asic0
     fabricstat -c
     fabricstat -c -n asic0
+    fabricstat -s
+    fabricstat -s -n asic0
     fabricstat -C
     fabricstat -D
 """)
@@ -425,6 +470,7 @@ Examples:
     parser.add_argument('-e', '--errors', action='store_true', help='Display errors')
     parser.add_argument('-c','--capacity',action='store_true', help='Display fabric capacity')
     parser.add_argument('-i','--isolation', action='store_true', help='Display fabric ports isolation status')
+    parser.add_argument('-s','--rate', action='store_true', help='Display fabric counters rate')
     parser.add_argument('-C','--clear', action='store_true', help='Copy & clear fabric counters')
     parser.add_argument('-D','--delete', action='store_true', help='Delete saved stats')

@@ -433,6 +479,7 @@ Examples:
     reachability = args.reachability
     capacity_status = args.capacity
     isolation_status = args.isolation
+    rate = args.rate
     namespace = args.namespace
     errors_only = args.errors

@@ -455,17 +502,21 @@ Examples:
     def nsStat(ns, errors_only):
         if queue:
-            stat = FabricQueueStat(ns)
+            stat = FabricQueueStat(ns)
         elif reachability:
-            stat = FabricReachability(ns)
-            stat.reachability_print()
-            return
+            stat = FabricReachability(ns)
+            stat.reachability_print()
+            return
         elif isolation_status:
-            stat = FabricIsolation(ns)
-            stat.isolation_print()
-            return
+            stat = FabricIsolation(ns)
+            stat.isolation_print()
+            return
+        elif rate:
+            stat = FabricRate(ns)
+            stat.rate_print()
+            return
         else:
-            stat = FabricPortStat(ns)
+            stat = FabricPortStat(ns)
         cnstat_dict = stat.get_cnstat_dict()
         if save_fresh_stats:
             stat.save_fresh_stats()
@@ -489,7 +540,10 @@ Examples:
             stat = FabricCapacity(namespace, table_cnt, threshold)
             stat.capacity_print()

-        click.echo("Monitored fabric capacity threshold: {}".format(threshold[0]))
+        print_th = ""
+        if threshold:
+            print_th = threshold[0]
+        click.echo("Monitored fabric capacity threshold: {}".format(print_th))
         click.echo()
         click.echo(tabulate(table_cnt, capacity_header, tablefmt='simple', stralign='right'))
     else:
diff --git a/scripts/fast-reboot b/scripts/fast-reboot
index 91791b37714..e183c34219d 100755
--- a/scripts/fast-reboot
+++ b/scripts/fast-reboot
@@ -50,6 +50,7 @@ EXIT_NO_CONTROL_PLANE_ASSISTANT=20
 EXIT_SONIC_INSTALLER_VERIFY_REBOOT=21
 EXIT_PLATFORM_FW_AU_FAILURE=22
 EXIT_TEAMD_RETRY_COUNT_FAILURE=23
+EXIT_NO_MIRROR_SESSION_ACLS=24

 function error()
 {
@@ -146,7 +147,7 @@ function clear_boot()
     # common_clear
     debug "${REBOOT_TYPE} failure ($?) cleanup ..."
-    /sbin/kexec -u || /bin/true
+    /sbin/kexec -u -a || /bin/true

     teardown_control_plane_assistant
@@ -243,18 +244,42 @@ function wait_for_pre_shutdown_complete_or_fail()
 function backup_database()
 {
     debug "Backing up database ..."
+
+    if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then
+        # Advanced reboot: dump state to host disk
+        sonic-db-cli ASIC_DB FLUSHDB > /dev/null
+        sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null
+        sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null
+    fi
+
+    if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then
+        # Flush RESTAPI_DB in fast-reboot to avoid stale status
+        sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null
+    fi
+
     # Dump redis content to a file 'dump.rdb' in warmboot directory
     mkdir -p $WARM_DIR
     # Delete keys in stateDB except FDB_TABLE|*, MIRROR_SESSION_TABLE|*, WARM_RESTART_ENABLE_TABLE|*, FG_ROUTE_TABLE|*
     sonic-db-cli STATE_DB eval "
     for _, k in ipairs(redis.call('keys', '*')) do
-        if not string.match(k, 'FDB_TABLE|') and not string.match(k, 'WARM_RESTART_TABLE|') \
+        if string.match(k, 'PORT_TABLE|Ethernet') then
+            for i, f in ipairs(redis.call('hgetall', k)) do
+                if i % 2 == 1 then
+                    if not string.match(f, 'host_tx_ready') \
+                    and not string.match(f, 'NPU_SI_SETTINGS_SYNC_STATUS') \
+                    and not string.match(f, 'CMIS_REINIT_REQUIRED') then
+                        redis.call('hdel', k, f)
+                    end
+                end
+            end
+        elseif not string.match(k, 'FDB_TABLE|') and not string.match(k, 'WARM_RESTART_TABLE|') \
         and not string.match(k, 'MIRROR_SESSION_TABLE|') \
         and not string.match(k, 'FG_ROUTE_TABLE|') \
         and not string.match(k, 'WARM_RESTART_ENABLE_TABLE|') \
         and not string.match(k, 'TRANSCEIVER_INFO|') \
         and not string.match(k, 'VXLAN_TUNNEL_TABLE|') \
         and not string.match(k, 'BUFFER_MAX_PARAM_TABLE|') \
+        and not string.match(k, 'STORAGE_INFO|') \
         and not string.match(k, 'FAST_RESTART_ENABLE_TABLE|') then
             redis.call('del', k)
         end
@@ -272,6 +297,47 @@ function backup_database()
     fi
 }

+function check_mirror_session_acls()
+{
+    debug "Checking if mirror session ACLs (arp, nd) are programmed to ASIC successfully"
+    ACL_ARP="missing"
+    ACL_ND="missing"
+    start_time=${SECONDS}
+    elapsed_time=$((${SECONDS} - ${start_time}))
+    while [[ ${elapsed_time} -lt 10 ]]; do
+        CHECK_ACL_ENTRIES=0
+        ACL_OUTPUT=$(sonic-db-cli ASIC_DB KEYS "*" | grep SAI_OBJECT_TYPE_ACL_ENTRY) || CHECK_ACL_ENTRIES=$?
+        if [[ ${CHECK_ACL_ENTRIES} -ne 0 ]]; then
+            error "Failed to retrieve SAI_OBJECT_TYPE_ACL_ENTRY from redis"
+            exit ${EXIT_NO_MIRROR_SESSION_ACLS}
+        fi
+        ACL_ENTRIES=( ${ACL_OUTPUT} )
+        if [[ ${#ACL_ENTRIES[@]} -eq 0 ]]; then
+            error "NO SAI_OBJECT_TYPE_ACL_ENTRY objects found"
+            exit ${EXIT_NO_MIRROR_SESSION_ACLS}
+        fi
+        for ACL_ENTRY in ${ACL_ENTRIES[@]}; do
+            ACL_PRIORITY=$(sonic-db-cli ASIC_DB HGET ${ACL_ENTRY} SAI_ACL_ENTRY_ATTR_PRIORITY)
+            if [[ ${ACL_PRIORITY} -eq 8888 ]]; then
+                ACL_ARP="found"
+            fi
+            if [[ ${ACL_PRIORITY} -eq 8887 ]]; then
+                ACL_ND="found"
+            fi
+        done
+        if [[ "${ACL_ARP}" = "found" && "${ACL_ND}" = "found" ]]; then
+            break
+        fi
+        sleep 0.1
+        elapsed_time=$((${SECONDS} - ${start_time}))
+    done
+    if [[ "${ACL_ARP}" != "found" || "${ACL_ND}" != "found" ]]; then
+        debug "Failed to program mirror session ACLs on ASIC. 
ACLs: ARP=${ACL_ARP} ND=${ACL_ND}" + exit ${EXIT_NO_MIRROR_SESSION_ACLS} + fi + debug "Mirror session ACLs (arp, nd) programmed to ASIC successfully" +} + function setup_control_plane_assistant() { if [[ -n "${ASSISTANT_IP_LIST}" && -x ${ASSISTANT_SCRIPT} ]]; then @@ -279,6 +345,7 @@ function setup_control_plane_assistant() if [[ "${HWSKU}" != "DellEMC-Z9332f-M-O16C64" && "${HWSKU}" != "DellEMC-Z9332f-M-O16C64-lab" ]]; then debug "Setting up control plane assistant: ${ASSISTANT_IP_LIST} ..." ${ASSISTANT_SCRIPT} -s ${ASSISTANT_IP_LIST} -m set + check_mirror_session_acls else debug "${HWSKU} Not capable to support CPA. Skipping gracefully ..." fi @@ -452,7 +519,7 @@ function unload_kernel() { # Unload the previously loaded kernel if any loaded if [[ "$(cat /sys/kernel/kexec_loaded)" -eq 1 ]]; then - /sbin/kexec -u + /sbin/kexec -u -a fi } @@ -752,23 +819,11 @@ for service in ${SERVICES_TO_STOP}; do wait_for_pre_shutdown_complete_or_fail fi - if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then - # Advanced reboot: dump state to host disk - sonic-db-cli ASIC_DB FLUSHDB > /dev/null - sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null - sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null - fi - - if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then - # Flush RESTAP_DB in fast-reboot to avoid stale status - sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null - fi - - backup_database - fi done +backup_database + # Stop the docker container engine. Otherwise we will have a broken docker storage systemctl stop docker.service || debug "Ignore stopping docker service error $?" @@ -810,7 +865,6 @@ if [[ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_FWUTIL_AU_REBOOT_HANDLE} ]]; then fi fi - # Enable Watchdog Timer if [ -x ${WATCHDOG_UTIL} ]; then debug "Enabling Watchdog before ${REBOOT_TYPE}" diff --git a/scripts/generate_dump b/scripts/generate_dump index 06d163a45e3..3d0ef3430d7 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1120,7 +1120,6 @@ save_file() { find_files() { trap 'handle_error $? $LINENO' ERR local -r directory=$1 - $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" local -r find_command="find -L $directory -type f -newer ${REFERENCE_FILE}" echo $($find_command) @@ -1914,6 +1913,8 @@ main() { ${CMD_PREFIX}renice +5 -p $$ >> /dev/null ${CMD_PREFIX}ionice -c 2 -n 5 -p $$ >> /dev/null + # Created file as a reference to compare modification time + $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" $MKDIR $V -p $TARDIR # Start with this script so its obvious what code is responsible @@ -2155,7 +2156,7 @@ finalize() { ############################################################################### -# Remove secret from pipeline inout and output result to pipeline. +# Remove secret from pipeline input and output result to pipeline. # Globals: # None # Arguments: @@ -2168,6 +2169,18 @@ remove_secret_from_config_db_dump() { sed -E 's/\"passkey\"\s*:\s*\"([^\"]*)\"/\"passkey\":\"****\"/g; /SNMP_COMMUNITY/,/\s{2,4}\},/d' } + +############################################################################### +# Remove secret from file. +############################################################################### +remove_secret_from_config_db_dump_file() { + local dumpfile=$1 + if [ -e ${dumpfile} ]; then + cat $dumpfile | remove_secret_from_config_db_dump > $dumpfile.temp + mv $dumpfile.temp $dumpfile + fi +} + ############################################################################### # Remove secret from dump files. 
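Since the sed pipeline in remove_secret_from_config_db_dump above is dense, here is a rough Python rendering of its passkey rule (the SNMP_COMMUNITY block deletion is omitted); this is illustrative only, the script itself keeps using sed.
```python
import re

def redact_passkey(text: str) -> str:
    # Mirrors sed 's/"passkey"\s*:\s*"([^"]*)"/"passkey":"****"/g'
    return re.sub(r'"passkey"\s*:\s*"[^"]*"', '"passkey":"****"', text)

sample = '{"TACPLUS": {"global": {"passkey": "secret123"}}}'  # made-up input
print(redact_passkey(sample))  # {"TACPLUS": {"global": {"passkey":"****"}}}
```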
# Globals:
@@ -2201,8 +2214,24 @@ remove_secret_from_etc_files() {
     sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $dumppath/etc/sonic/snmp.yml

     # Remove secret from /etc/sonic/config_db.json
-    cat $dumppath/etc/sonic/config_db.json | remove_secret_from_config_db_dump > $dumppath/etc/sonic/config_db.json.temp
-    mv $dumppath/etc/sonic/config_db.json.temp $dumppath/etc/sonic/config_db.json
+    remove_secret_from_config_db_dump_file $dumppath/etc/sonic/config_db.json
+
+    # Remove secret from /etc/sonic/golden_config_db.json
+    remove_secret_from_config_db_dump_file $dumppath/etc/sonic/golden_config_db.json
+
+    # Remove secret from /etc/sonic/old_config/
+
+    # Remove snmp community string from old_config/snmp.yml
+    local oldsnmp=${dumppath}/etc/sonic/old_config/snmp.yml
+    if [ -e ${oldsnmp} ]; then
+        sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $oldsnmp
+    fi
+
+    # Remove secret from /etc/sonic/old_config/config_db.json
+    remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/config_db.json
+
+    # Remove secret from /etc/sonic/old_config/golden_config_db.json
+    remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/golden_config_db.json
 }

 ###############################################################################
diff --git a/scripts/lldpshow b/scripts/lldpshow
index e09176cf3cd..fe40296f910 100755
--- a/scripts/lldpshow
+++ b/scripts/lldpshow
@@ -26,8 +26,9 @@ import sys

 from lxml import etree as ET
 from sonic_py_common import device_info
+from utilities_common import constants
 from swsscommon.swsscommon import ConfigDBConnector
-from utilities_common.general import load_db_config
+from utilities_common.general import load_db_config, get_feature_state_data
 from tabulate import tabulate

 BACKEND_ASIC_INTERFACE_NAME_PREFIX = 'Ethernet-BP'
@@ -69,8 +70,12 @@ class Lldpshow(object):
                 self.lldp_interface[instance_num] += key + SPACE_TOKEN

         # LLDP running in host namespace
-        self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE)
-        self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE)
+        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=constants.DEFAULT_NAMESPACE)
+        config_db.connect()
+        global_scope, asic_scope = get_feature_state_data(config_db, "lldp")
+        if global_scope == "True":
+            self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE)
+            self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE)

     def get_info(self, lldp_detail_info, lldp_port):
         """
@@ -85,7 +90,7 @@ class Lldpshow(object):
         elif lldp_interface_list == '':
             lldp_args = []
         else:
-            lldp_args = [lldp_interface_list]
+            lldp_args = lldp_interface_list.split(' ')
         lldp_cmd = ['sudo', 'docker', 'exec', '-i', 'lldp{}'.format(self.lldp_instance[lldp_instace_num]), 'lldpctl'] + lldp_args
         p = subprocess.Popen(lldp_cmd, stdout=subprocess.PIPE, text=True)
         (output, err) = p.communicate()
diff --git a/scripts/pg-drop b/scripts/pg-drop
index 77415930811..9078d28ad69 100755
--- a/scripts/pg-drop
+++ b/scripts/pg-drop
@@ -5,6 +5,7 @@
 # pg-drop is a tool for show/clear ingress pg dropped packet stats.
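Why the lldpshow change above replaces `[lldp_interface_list]` with a `split(' ')`: each interface must be a separate argv token for lldpctl, otherwise the whole space-joined string is passed as a single bogus port name. A minimal illustration:
```python
lldp_interface_list = "Ethernet0 Ethernet4"

broken = [lldp_interface_list]           # one argv token: "Ethernet0 Ethernet4"
fixed = lldp_interface_list.split(' ')   # two argv tokens: "Ethernet0", "Ethernet4"

print(broken)  # ['Ethernet0 Ethernet4']
print(fixed)   # ['Ethernet0', 'Ethernet4']
```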
# ##################################################################### +from importlib import reload import json import argparse import os @@ -13,6 +14,8 @@ from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from utilities_common.general import load_db_config +from sonic_py_common import multi_asic # mock the redis for unit test purposes # try: @@ -22,7 +25,9 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector - + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass @@ -43,13 +48,11 @@ def get_dropstat_dir(): class PgDropStat(object): - def __init__(self): - self.counters_db = SonicV2Connector(host='127.0.0.1') - self.counters_db.connect(self.counters_db.COUNTERS_DB) - - self.configdb = ConfigDBConnector() + def __init__(self, namespace): + self.namespace = namespace + self.ns_list = multi_asic.get_namespace_list(namespace) + self.configdb = ConfigDBConnector(namespace=namespace) self.configdb.connect() - dropstat_dir = get_dropstat_dir() self.port_drop_stats_file = os.path.join(dropstat_dir, 'pg_drop_stats') @@ -57,14 +60,14 @@ class PgDropStat(object): """ Get port ID using object ID """ - port_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, oid) + port_id = self.get_counters_mapdata(COUNTERS_PG_PORT_MAP, oid) if not port_id: print("Port is not available for oid '{}'".format(oid)) sys.exit(1) return port_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.get_counters_mapall(COUNTERS_PORT_NAME_MAP) if not self.counter_port_name_map: print("COUNTERS_PORT_NAME_MAP is empty!") sys.exit(1) @@ -77,7 +80,7 @@ class PgDropStat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.get_counters_mapall(COUNTERS_PG_NAME_MAP) if not counter_pg_name_map: print("COUNTERS_PG_NAME_MAP is empty!") sys.exit(1) @@ -94,13 +97,32 @@ class PgDropStat(object): "header_prefix": "PG"}, } + def get_counters_mapdata(self, tablemap, index): + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + data = counters_db.get(counters_db.COUNTERS_DB, tablemap, index) + if data: + return data + return None + + def get_counters_mapall(self, tablemap): + mapdata = {} + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + map_result = counters_db.get_all(counters_db.COUNTERS_DB, tablemap) + if map_result: + mapdata.update(map_result) + return mapdata + def get_pg_index(self, oid): """ return PG index (0-7) oid - object ID for entry in redis """ - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, oid) + pg_index = self.get_counters_mapdata(COUNTERS_PG_INDEX_MAP, oid) if not pg_index: print("Priority group index is not available for oid '{}'".format(oid)) sys.exit(1) @@ -154,7 +176,7 @@ class PgDropStat(object): old_collected_data = port_drop_ckpt.get(name,{})[full_table_id] if len(port_drop_ckpt) > 0 else 0 idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, 
full_table_id, counter_name)
+            counter_data = self.get_counters_mapdata(full_table_id, counter_name)
             if counter_data is None:
                 fields[pos] = STATUS_NA
             elif fields[pos] != STATUS_NA:
@@ -180,18 +202,18 @@ class PgDropStat(object):
         print(tabulate(table, self.header_list, tablefmt='simple', stralign='right'))

     def get_counts(self, counters, oid):
-        """
-        Get the PG drop counts for an individual counter.
-        """
-        counts = {}
-        table_id = COUNTER_TABLE_PREFIX + oid
-        for counter in counters:
-            counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, table_id, counter)
-            if counter_data is None:
-                counts[table_id] = 0
-            else:
-                counts[table_id] = int(counter_data)
-        return counts
+        """
+        Get the PG drop counts for an individual counter.
+        """
+        counts = {}
+        table_id = COUNTER_TABLE_PREFIX + oid
+        for counter in counters:
+            counter_data = self.get_counters_mapdata(table_id, counter)
+            if counter_data is None:
+                counts[table_id] = 0
+            else:
+                counts[table_id] = int(counter_data)
+        return counts

     def get_counts_table(self, counters, object_table):
         """
@@ -199,10 +221,10 @@ class PgDropStat(object):
         to its PG drop counts. Counts are contained in a dictionary that
         maps counter oid to its counts.
         """
-        counter_object_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, object_table)
+        counter_object_name_map = self.get_counters_mapall(object_table)
         current_stat_dict = OrderedDict()

-        if counter_object_name_map is None:
+        if not counter_object_name_map:
             return current_stat_dict

         for obj in natsorted(counter_object_name_map):
@@ -239,10 +261,12 @@ def main():
                                     epilog="""
Examples:
pg-drop -c show
+pg-drop -c show --namespace asic0
pg-drop -c clear
""")

     parser.add_argument('-c', '--command', type=str, help='Desired action to perform')
+    parser.add_argument('-n', '--namespace', type=str, help='Namespace name or skip for all', default=None)

     args = parser.parse_args()
     command = args.command
@@ -256,7 +280,16 @@ pg-drop -c clear
             print(e)
             sys.exit(e.errno)

-    pgdropstat = PgDropStat()
+    # Load database config files
+    load_db_config()
+    namespaces = multi_asic.get_namespace_list()
+    if args.namespace and args.namespace not in namespaces:
+        namespacelist = ', '.join(namespaces)
+        print(f"Invalid value for '--namespace' / '-n'. 
Choose from one of ({namespacelist})") + sys.exit(1) + + # For 'clear' command force applying to all namespaces + pgdropstat = PgDropStat(args.namespace if command != 'clear' else None) if command == 'clear': pgdropstat.clear_drop_counts() diff --git a/scripts/queuestat b/scripts/queuestat index 8f95554481d..dd8c9d7e0cb 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/python3 ##################################################################### # @@ -38,8 +38,9 @@ from utilities_common import constants import utilities_common.multi_asic as multi_asic_util QueueStats = namedtuple("QueueStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes") +VoqStats = namedtuple("VoqStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes, creditWDpkts") header = ['Port', 'TxQ', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] -voq_header = ['Port', 'Voq', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] +voq_header = ['Port', 'Voq', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes', 'Credit-WD-Del/pkts'] counter_bucket_dict = { 'SAI_QUEUE_STAT_PACKETS': 2, @@ -47,6 +48,9 @@ counter_bucket_dict = { 'SAI_QUEUE_STAT_DROPPED_PACKETS': 4, 'SAI_QUEUE_STAT_DROPPED_BYTES': 5, } +voq_counter_bucket_dict = { + 'SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS': 6 +} from utilities_common.cli import json_dump from utilities_common.netstat import ns_diff, STATUS_NA @@ -73,15 +77,24 @@ cnstat_dir = 'N/A' cnstat_fqn_file = 'N/A' -def build_json(port, cnstat): +def build_json(port, cnstat, voq=False): def ports_stats(k): p = {} - p[k[1]] = { - "totalpacket": k[2], - "totalbytes": k[3], - "droppacket": k[4], - "dropbytes": k[5] - } + if voq: + p[k[1]] = { + "totalpacket": k[2], + "totalbytes": k[3], + "droppacket": k[4], + "dropbytes": k[5], + "creditWDPkts": k[6] + } + else: + p[k[1]] = { + "totalpacket": k[2], + "totalbytes": k[3], + "droppacket": k[4], + "dropbytes": k[5] + } return p out = {} @@ -175,18 +188,30 @@ class Queuestat(object): print("Queue Type is invalid:", table_id, queue_type) sys.exit(1) - fields = ["0","0","0","0","0","0"] + if self.voq: + fields = ["0","0","0","0","0","0","0"] + else: + fields = ["0","0","0","0","0","0"] fields[0] = get_queue_index(table_id) fields[1] = get_queue_type(table_id) - for counter_name, pos in counter_bucket_dict.items(): + counter_dict = {} + counter_dict.update(counter_bucket_dict) + if self.voq: + counter_dict.update(voq_counter_bucket_dict) + + for counter_name, pos in counter_dict.items(): full_table_id = COUNTER_TABLE_PREFIX + table_id counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data is None: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: fields[pos] = str(int(counter_data)) - cntr = QueueStats._make(fields)._asdict() + + if self.voq: + cntr = VoqStats._make(fields)._asdict() + else: + cntr = QueueStats._make(fields)._asdict() return cntr # Build a dictionary of the stats @@ -211,14 +236,21 @@ class Queuestat(object): if json_opt: json_output[port][key] = data continue - if not non_zero or data['totalpacket'] != '0' or data['totalbytes'] != '0' or \ - data['droppacket'] != '0' or data['dropbytes'] != '0': - table.append((port, data['queuetype'] + str(data['queueindex']), - data['totalpacket'], data['totalbytes'], - data['droppacket'], data['dropbytes'])) + if self.voq: + if not non_zero or data['totalpacket'] != '0' or data['totalbytes'] != '0' or \ + data['droppacket'] != '0' or 
data['dropbytes'] != '0' or data['creditWDpkts'] != '0': + table.append((port, data['queuetype'] + str(data['queueindex']), + data['totalpacket'], data['totalbytes'], + data['droppacket'], data['dropbytes'], data['creditWDpkts'])) + else: + if not non_zero or data['totalpacket'] != '0' or data['totalbytes'] != '0' or \ + data['droppacket'] != '0' or data['dropbytes'] != '0': + table.append((port, data['queuetype'] + str(data['queueindex']), + data['totalpacket'], data['totalbytes'], + data['droppacket'], data['dropbytes'])) if json_opt: - json_output[port].update(build_json(port, table)) + json_output[port].update(build_json(port, table, self.voq)) return json_output else: hdr = voq_header if self.voq else header @@ -242,25 +274,42 @@ class Queuestat(object): old_cntr = None if key in cnstat_old_dict: old_cntr = cnstat_old_dict.get(key) - if old_cntr is not None: - if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ + if self.voq: + if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']) != '0' or \ + ns_diff(cntr['droppacket'], old_cntr['droppacket']) != '0' or \ + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']) != '0' or \ + ns_diff(cntr['creditWDpkts'], old_cntr['creditWDpkts']) != '0': + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), + ns_diff(cntr['droppacket'], old_cntr['droppacket']), + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']), + ns_diff(cntr['creditWDpkts'], old_cntr['creditWDpkts']))) + elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ + cntr['droppacket'] != '0' or cntr['dropbytes'] != '0' or cntr['creditWDpkts'] != '0': + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + cntr['totalpacket'], cntr['totalbytes'], + cntr['droppacket'], cntr['dropbytes'], cntr['creditWDpkts'])) + else: + if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ ns_diff(cntr['totalbytes'], old_cntr['totalbytes']) != '0' or \ ns_diff(cntr['droppacket'], old_cntr['droppacket']) != '0' or \ ns_diff(cntr['dropbytes'], old_cntr['dropbytes']) != '0': - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), - ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), - ns_diff(cntr['droppacket'], old_cntr['droppacket']), - ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) - elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), + ns_diff(cntr['droppacket'], old_cntr['droppacket']), + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) + elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ cntr['droppacket'] != '0' or cntr['dropbytes'] != '0': - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - cntr['totalpacket'], cntr['totalbytes'], - cntr['droppacket'], cntr['dropbytes'])) + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + cntr['totalpacket'], cntr['totalbytes'], + cntr['droppacket'], cntr['dropbytes'])) if json_opt: - json_output[port].update(build_json(port, table)) + json_output[port].update(build_json(port, table, self.voq)) return json_output 
        else:
            hdr = voq_header if self.voq else header
diff --git a/scripts/reboot b/scripts/reboot
index b5b6a7a585a..b6f8ff96fb9 100755
--- a/scripts/reboot
+++ b/scripts/reboot
@@ -65,8 +65,8 @@ function stop_pmon_service() {
     CONTAINER_STOP_RC=0
     debug "Stopping pmon docker"
-    docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$?
     systemctl stop pmon || debug "Ignore stopping pmon error $?"
+    docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$?
     if [[ CONTAINER_STOP_RC -ne 0 ]]; then
         debug "Failed killing container pmon RC $CONTAINER_STOP_RC ."
     fi
@@ -198,6 +198,25 @@ function parse_options()
     done
 }

+function linecard_reboot_notify_supervisor()
+{
+    is_linecard=$(python3 -c 'from sonic_py_common import device_info; print("True") if device_info.is_chassis() == True and device_info.is_supervisor() == False else print("False")')
+    if [ "$is_linecard" == "True" ]; then
+        key=$(sonic-db-cli STATE_DB keys "CHASSIS_MODULE_TABLE|LINE-CARD*")
+        status=$?
+        if [ $status -eq 0 ]; then
+            module="${key#CHASSIS_MODULE_TABLE}"
+            if [ ! -z "$module" ]; then
+                sonic-db-cli CHASSIS_STATE_DB hset "CHASSIS_MODULE_REBOOT_INFO_TABLE${module}" "reboot" "expected"
+                status=$?
+                if [ $status -ne 0 ]; then
+                    debug "Failed to notify Supervisor: Linecard reboot is expected"
+                fi
+            fi
+        fi
+    fi
+}
+
 parse_options $@

 # Exit if not superuser
@@ -216,6 +235,9 @@ reboot_pre_check
 # Tag remotely deployed images as local
 tag_images

+# Linecard reboot notify supervisor
+linecard_reboot_notify_supervisor
+
 # Stop SONiC services gracefully.
 stop_sonic_services
diff --git a/scripts/route_check.py b/scripts/route_check.py
index ee417dc49cc..2fbe0415471 100755
--- a/scripts/route_check.py
+++ b/scripts/route_check.py
@@ -328,6 +328,16 @@ def get_asicdb_routes(namespace):
     return (selector, subs, sorted(rt))


+def is_bgp_suppress_fib_pending_enabled(namespace):
+    """
+    Returns True if FIB suppression is enabled in BGP config, False otherwise
+    """
+    show_run_cmd = ['show', 'runningconfiguration', 'bgp', '-n', namespace]
+
+    output = subprocess.check_output(show_run_cmd, text=True)
+    return 'bgp suppress-fib-pending' in output
+
+
 def is_suppress_fib_pending_enabled(namespace):
     """
     Returns True if FIB suppression is enabled, False otherwise
@@ -781,18 +791,20 @@ def check_routes(namespace):
                 results[namespace] = {}
             results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss

-        rt_frr_miss = check_frr_pending_routes(namespace)
+        if is_bgp_suppress_fib_pending_enabled(namespace):
+            rt_frr_miss = check_frr_pending_routes(namespace)

-        if rt_frr_miss:
-            if namespace not in results:
-                results[namespace] = {}
-            results[namespace]["missed_FRR_routes"] = rt_frr_miss
+            if rt_frr_miss:
+                if namespace not in results:
+                    results[namespace] = {}
+                results[namespace]["missed_FRR_routes"] = rt_frr_miss

-        if results:
-            if rt_frr_miss and not rt_appl_miss and not rt_asic_miss:
-                print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all routes in APPL_DB and ASIC_DB are in sync".format(namespace))
-                if is_suppress_fib_pending_enabled(namespace):
-                    mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl)
+            if results:
+                if rt_frr_miss and not rt_appl_miss and not rt_asic_miss:
+                    print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all "
+                                  "routes in APPL_DB and ASIC_DB are in sync".format(namespace))
+                    if is_suppress_fib_pending_enabled(namespace):
+                        mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl)

     if results:
         print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}")
results: {", json.dumps(results, indent=4), "}") diff --git a/scripts/soft-reboot b/scripts/soft-reboot index 957c6009eb1..0b9030a6f72 100755 --- a/scripts/soft-reboot +++ b/scripts/soft-reboot @@ -64,8 +64,8 @@ function stop_pmon_service() { CONTAINER_STOP_RC=0 debug "Stopping pmon docker" - docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? systemctl stop pmon || debug "Ignore stopping pmon error $?" + docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? if [[ CONTAINER_STOP_RC -ne 0 ]]; then debug "Failed killing container pmon RC $CONTAINER_STOP_RC ." fi diff --git a/setup.py b/setup.py index 196777d0e34..6a66f012f9d 100644 --- a/setup.py +++ b/setup.py @@ -250,13 +250,14 @@ 'semantic-version>=2.8.5', 'prettyprinter>=0.18.0', 'pyroute2>=0.5.14, <0.6.1', - 'requests>=2.25.0', + 'requests>=2.25.0, <=2.31.0', 'tabulate==0.9.0', 'toposort==1.6', 'www-authenticate==0.9.2', 'xmltodict==0.12.0', 'lazy-object-proxy', 'six==1.16.0', + 'scp==0.14.5', ] + sonic_dependencies, setup_requires= [ 'pytest-runner', diff --git a/sfputil/main.py b/sfputil/main.py index ad0b1b3775e..2c8f85d0169 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -673,6 +673,20 @@ def eeprom(port, dump_dom, namespace): output += convert_sfp_info_to_output_string(xcvr_info) if dump_dom: + try: + api = platform_chassis.get_sfp(physical_port).get_xcvr_api() + except NotImplementedError: + output += "API is currently not implemented for this platform\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + if api is None: + output += "API is none while getting DOM info!\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + else: + if api.is_flat_memory(): + output += "DOM values not supported for flat memory module\n" + continue try: xcvr_dom_info = platform_chassis.get_sfp(physical_port).get_transceiver_bulk_status() except NotImplementedError: @@ -1306,6 +1320,62 @@ def reset(port_name): i += 1 + +# 'power' subgroup +@cli.group() +def power(): + """Enable or disable power of SFP transceiver""" + pass + + +# Helper method for setting low-power mode +def set_power(port_name, enable): + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("Power disable/enable is not available for RJ45 port {}.".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + presence = sfp.get_presence() + except NotImplementedError: + click.echo("sfp get_presence() NOT implemented!") + sys.exit(EXIT_FAIL) + + if not presence: + click.echo("{}: SFP EEPROM not detected\n".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + result = platform_chassis.get_sfp(physical_port).set_power(enable) + except (NotImplementedError, AttributeError): + click.echo("This functionality is currently not implemented for this platform") + sys.exit(ERROR_NOT_IMPLEMENTED) + + if result: + click.echo("OK") + else: + click.echo("Failed") + sys.exit(EXIT_FAIL) + + +# 'disable' subcommand +@power.command() +@click.argument('port_name', metavar='') +def disable(port_name): + """Disable power of SFP transceiver""" + set_power(port_name, False) + + +# 'enable' subcommand +@power.command() +@click.argument('port_name', metavar='') +def enable(port_name): + """Enable power of SFP transceiver""" + set_power(port_name, True) + + def update_firmware_info_to_state_db(port_name): physical_port = logical_port_to_physical_port_index(port_name) @@ -1316,10 +1386,8 @@ def update_firmware_info_to_state_db(port_name): state_db.connect(state_db.STATE_DB) 
transceiver_firmware_info_dict = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions() if transceiver_firmware_info_dict is not None: - active_firmware = transceiver_firmware_info_dict.get('active_firmware', 'N/A') - inactive_firmware = transceiver_firmware_info_dict.get('inactive_firmware', 'N/A') - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "active_firmware", active_firmware) - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware) + for key, value in transceiver_firmware_info_dict.items(): + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), key, value) # 'firmware' subgroup @cli.group() @@ -1890,5 +1958,50 @@ def get_overall_offset_sff8472(api, page, offset, size, wire_addr): return page * PAGE_SIZE + offset + PAGE_SIZE_FOR_A0H +# 'debug' subgroup +@cli.group() +def debug(): + """Module debug and diagnostic control""" + pass + + +# 'loopback' subcommand +@debug.command() +@click.argument('port_name', required=True, default=None) +@click.argument('loopback_mode', required=True, default="none", + type=click.Choice(["none", "host-side-input", "host-side-output", + "media-side-input", "media-side-output"])) +def loopback(port_name, loopback_mode): + """Set module diagnostic loopback mode + """ + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("{}: This functionality is not applicable for RJ45 port".format(port_name)) + sys.exit(EXIT_FAIL) + + if not is_sfp_present(port_name): + click.echo("{}: SFP EEPROM not detected".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + api = sfp.get_xcvr_api() + except NotImplementedError: + click.echo("{}: This functionality is not implemented".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + try: + status = api.set_loopback_mode(loopback_mode) + except AttributeError: + click.echo("{}: Set loopback mode is not applicable for this module".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + if status: + click.echo("{}: Set {} loopback".format(port_name, loopback_mode)) + else: + click.echo("{}: Set {} loopback failed".format(port_name, loopback_mode)) + sys.exit(EXIT_FAIL) + if __name__ == '__main__': cli() diff --git a/show/bgp_cli.py b/show/bgp_cli.py new file mode 100644 index 00000000000..d475638092e --- /dev/null +++ b/show/bgp_cli.py @@ -0,0 +1,128 @@ +import click +import tabulate +import json +import utilities_common.cli as clicommon + +from utilities_common.bgp import ( + CFG_BGP_DEVICE_GLOBAL, + BGP_DEVICE_GLOBAL_KEY, + to_str, +) + + +# +# BGP helpers --------------------------------------------------------------------------------------------------------- +# + + +def format_attr_value(entry, attr): + """ Helper that formats attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attr (Dict): Attribute metadata. + + Returns: + str: formatted attribute value. 
+ """ + + if attr["is-leaf-list"]: + value = entry.get(attr["name"], []) + return "\n".join(value) if value else "N/A" + return entry.get(attr["name"], "N/A") + + +# +# BGP CLI ------------------------------------------------------------------------------------------------------------- +# + + +@click.group( + name="bgp", + cls=clicommon.AliasedGroup +) +def BGP(): + """ Show BGP configuration """ + + pass + + +# +# BGP device-global --------------------------------------------------------------------------------------------------- +# + + +@BGP.command( + name="device-global" +) +@click.option( + "-j", "--json", "json_format", + help="Display in JSON format", + is_flag=True, + default=False +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL(ctx, db, json_format): + """ Show BGP device global state """ + + header = [ + "TSA", + "W-ECMP", + ] + body = [] + + table = db.cfgdb.get_table(CFG_BGP_DEVICE_GLOBAL) + entry = table.get(BGP_DEVICE_GLOBAL_KEY, {}) + + if not entry: + click.echo("No configuration is present in CONFIG DB") + ctx.exit(0) + + if json_format: + json_dict = { + "tsa": to_str( + format_attr_value( + entry, + { + 'name': 'tsa_enabled', + 'is-leaf-list': False + } + ) + ), + "w-ecmp": to_str( + format_attr_value( + entry, + { + 'name': 'wcmp_enabled', + 'is-leaf-list': False + } + ) + ) + } + click.echo(json.dumps(json_dict, indent=4)) + ctx.exit(0) + + row = [ + to_str( + format_attr_value( + entry, + { + 'name': 'tsa_enabled', + 'is-leaf-list': False + } + ) + ), + to_str( + format_attr_value( + entry, + { + 'name': 'wcmp_enabled', + 'is-leaf-list': False + } + ) + ) + ] + body.append(row) + + click.echo(tabulate.tabulate(body, header)) diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 6343e8b7b26..ddcd688581a 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -1,6 +1,8 @@ import click +import sys +import subprocess -from sonic_py_common import multi_asic +from sonic_py_common import multi_asic, device_info from show.main import ip import utilities_common.bgp_util as bgp_util import utilities_common.cli as clicommon @@ -17,7 +19,14 @@ @ip.group(cls=clicommon.AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" - pass + if device_info.is_supervisor(): + subcommand = sys.argv[3] + if subcommand not in "network": + # the command will be executed directly by rexec if it is not "show ip bgp network" + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) # 'summary' subcommand ("show ip bgp summary") @@ -84,7 +93,7 @@ def neighbors(ipaddress, info_type, namespace): @bgp.command() @click.argument('ipaddress', metavar='[|]', - required=False) + required=True if device_info.is_supervisor() else False) @click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]', type=click.Choice( @@ -95,17 +104,26 @@ def neighbors(ipaddress, info_type, namespace): 'namespace', type=str, show_default=True, - required=True if multi_asic.is_multi_asic is True else False, + required=False, help='Namespace name or all', - default=multi_asic.DEFAULT_NAMESPACE, + default="all", callback=multi_asic_util.multi_asic_namespace_validation_callback) def network(ipaddress, info_type, namespace): """Show IP (IPv4) BGP network""" - if multi_asic.is_multi_asic() and namespace not in multi_asic.get_namespace_list(): - ctx = click.get_current_context() - 
-                 .format(multi_asic.get_namespace_list()))
+    if device_info.is_supervisor():
+        # the command will be executed by rexec
+        click.echo("Since the current device is a chassis supervisor, " +
+                   "this command will be executed remotely on all linecards")
+        proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)])
+        sys.exit(proc.returncode)
+
+    namespace = namespace.strip()
+    if multi_asic.is_multi_asic():
+        if namespace != "all" and namespace not in multi_asic.get_namespace_list():
+            ctx = click.get_current_context()
+            ctx.fail('Invalid namespace {}. Provide a namespace from the list {}'
+                     .format(namespace, multi_asic.get_namespace_list()))

     command = 'show ip bgp'
     if ipaddress is not None:
@@ -125,5 +143,15 @@ def network(ipaddress, info_type, namespace):
     if info_type is not None:
         command += ' {}'.format(info_type)

-    output = bgp_util.run_bgp_show_command(command, namespace)
-    click.echo(output.rstrip('\n'))
+    if namespace == "all":
+        if multi_asic.is_multi_asic():
+            for ns in multi_asic.get_namespace_list():
+                click.echo("\n======== namespace {} ========".format(ns))
+                output = bgp_util.run_bgp_show_command(command, ns)
+                click.echo(output.rstrip('\n'))
+        else:
+            output = bgp_util.run_bgp_show_command(command, "")
+            click.echo(output.rstrip('\n'))
+    else:
+        output = bgp_util.run_bgp_show_command(command, namespace)
+        click.echo(output.rstrip('\n'))
diff --git a/show/dropcounters.py b/show/dropcounters.py
index 30779b9364c..9bb988fc5b8 100644
--- a/show/dropcounters.py
+++ b/show/dropcounters.py
@@ -1,5 +1,6 @@
 import click
 import utilities_common.cli as clicommon
+import utilities_common.multi_asic as multi_asic_util


 #
@@ -41,7 +42,8 @@ def capabilities(verbose):
 @click.option('-g', '--group', required=False)
 @click.option('-t', '--counter_type', required=False)
 @click.option('--verbose', is_flag=True, help="Enable verbose output")
-def counts(group, counter_type, verbose):
+@multi_asic_util.multi_asic_click_option_namespace
+def counts(group, counter_type, verbose, namespace):
     """Show drop counts"""
     cmd = ['dropstat', '-c', 'show']

@@ -51,4 +53,7 @@ def counts(group, counter_type, verbose):
     if counter_type:
         cmd += ['-t', str(counter_type)]

+    if namespace:
+        cmd += ['-n', str(namespace)]
+
     clicommon.run_command(cmd, display_cmd=verbose)
diff --git a/show/fabric.py b/show/fabric.py
index 785e1ab4779..898c76114cc 100644
--- a/show/fabric.py
+++ b/show/fabric.py
@@ -74,3 +74,13 @@ def queue(namespace):
     if namespace is not None:
         cmd += ['-n', str(namespace)]
     clicommon.run_command(cmd)
+
+
+@counters.command()
+@multi_asic_util.multi_asic_click_option_namespace
+def rate(namespace):
+    """Show fabric counters rate"""
+    cmd = ['fabricstat', '-s']
+    if namespace is not None:
+        cmd += ['-n', str(namespace)]
+    clicommon.run_command(cmd)
diff --git a/show/main.py b/show/main.py
index a4357f30f8c..740fcbb310b 100755
--- a/show/main.py
+++ b/show/main.py
@@ -66,6 +66,7 @@
 from . import plugins
 from . import syslog
 from . import dns
+from . 
import bgp_cli # Global Variables PLATFORM_JSON = 'platform.json' @@ -164,7 +165,7 @@ def get_config_json_by_namespace(namespace): iface_alias_converter = lazy_object_proxy.Proxy(lambda: clicommon.InterfaceAliasConverter()) # -# Display all storm-control data +# Display all storm-control data # def display_storm_all(): """ Show storm-control """ @@ -325,6 +326,8 @@ def cli(ctx): if is_gearbox_configured(): cli.add_command(gearbox.gearbox) +# bgp module +cli.add_command(bgp_cli.BGP) # # 'vrf' command ("show vrf") @@ -462,7 +465,7 @@ def is_mgmt_vrf_enabled(ctx): return False # -# 'storm-control' group +# 'storm-control' group # "show storm-control [interface ]" # @cli.group('storm-control', invoke_without_command=True) @@ -854,9 +857,12 @@ def drop(): pass @drop.command('counters') -def pg_drop_counters(): +@multi_asic_util.multi_asic_click_option_namespace +def pg_drop_counters(namespace): """Show dropped packets for priority-group""" command = ['pg-drop', '-c', 'show'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group(name='persistent-watermark') @@ -1184,7 +1190,11 @@ def protocol(verbose): ip.add_command(bgp) from .bgp_frr_v6 import bgp ipv6.add_command(bgp) - +elif device_info.is_supervisor(): + from .bgp_frr_v4 import bgp + ip.add_command(bgp) + from .bgp_frr_v6 import bgp + ipv6.add_command(bgp) # # 'link-local-mode' subcommand ("show ipv6 link-local-mode") # @@ -1438,11 +1448,11 @@ def all(verbose): for ns in ns_list: ns_config = get_config_json_by_namespace(ns) if bgp_util.is_bgp_feature_state_enabled(ns): - ns_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, ns) + ns_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, ns, exit_on_fail=False) output[ns] = ns_config click.echo(json.dumps(output, indent=4)) else: - host_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd) + host_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, exit_on_fail=False) click.echo(json.dumps(output['localhost'], indent=4)) @@ -2108,7 +2118,7 @@ def summary(db): key_values = key.split('|') values = db.db.get_all(db.db.STATE_DB, key) if "local_discriminator" not in values.keys(): - values["local_discriminator"] = "NA" + values["local_discriminator"] = "NA" bfd_body.append([key_values[3], key_values[2], key_values[1], values["state"], values["type"], values["local_addr"], values["tx_interval"], values["rx_interval"], values["multiplier"], values["multihop"], values["local_discriminator"]]) @@ -2139,24 +2149,13 @@ def peer(db, peer_ip): key_values = key.split(delimiter) values = db.db.get_all(db.db.STATE_DB, key) if "local_discriminator" not in values.keys(): - values["local_discriminator"] = "NA" + values["local_discriminator"] = "NA" bfd_body.append([key_values[3], key_values[2], key_values[1], values.get("state"), values.get("type"), values.get("local_addr"), values.get("tx_interval"), values.get("rx_interval"), values.get("multiplier"), values.get("multihop"), values.get("local_discriminator")]) click.echo(tabulate(bfd_body, bfd_headers)) -# 'suppress-fib-pending' subcommand ("show suppress-fib-pending") -@cli.command('suppress-fib-pending') -@clicommon.pass_db -def suppress_pending_fib(db): - """ Show the status of suppress pending FIB feature """ - - field_values = db.cfgdb.get_entry('DEVICE_METADATA', 'localhost') - state = field_values.get('suppress-fib-pending', 'disabled').title() - click.echo(state) - - # asic-sdk-health-event subcommand ("show asic-sdk-health-event") 
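The supervisor dispatch added to bgp_frr_v4.py above boils down to one pattern: rebuild the CLI line the user typed and hand it to rexec for all linecards. A standalone sketch (rexec usage copied from the diff; not meant to run off-device):
```python
import subprocess
import sys

def run_on_all_linecards() -> None:
    # Re-issue the exact command line on every linecard via rexec,
    # then propagate the remote exit status.
    proc = subprocess.run(["rexec", "all", "-c", " ".join(sys.argv)])
    sys.exit(proc.returncode)
```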
@cli.group(cls=clicommon.AliasedGroup)
def asic_sdk_health_event():
diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py
index 407c5961630..f47b43fbdc1 100644
--- a/show/plugins/pbh.py
+++ b/show/plugins/pbh.py
@@ -395,7 +395,7 @@ def get_counter_value(pbh_counters, saved_pbh_counters, key, type):
     if not pbh_counters[key]:
         return '0'

-    if key in saved_pbh_counters:
+    if key in saved_pbh_counters and saved_pbh_counters[key]:
         new_value = int(pbh_counters[key][type]) - int(saved_pbh_counters[key][type])
         if new_value >= 0:
             return str(new_value)
diff --git a/show/plugins/sonic-system-ldap_yang.py b/show/plugins/sonic-system-ldap_yang.py
new file mode 100644
index 00000000000..a91c8609db5
--- /dev/null
+++ b/show/plugins/sonic-system-ldap_yang.py
@@ -0,0 +1,145 @@
+"""
+Auto-generated show CLI plugin.
+
+
+"""
+
+import click
+import tabulate
+import natsort
+import utilities_common.cli as clicommon
+
+
+def format_attr_value(entry, attr):
+    """ Helper that formats attribute to be presented in the table output.
+
+    Args:
+        entry (Dict[str, str]): CONFIG DB entry configuration.
+        attr (Dict): Attribute metadata.
+
+    Returns:
+        str: formatted attribute value.
+    """
+
+    if attr["is-leaf-list"]:
+        return "\n".join(entry.get(attr["name"], []))
+    return entry.get(attr["name"], "N/A")
+
+
+@click.group(name="ldap-server",
+             cls=clicommon.AliasedGroup,
+             invoke_without_command=True)
+@clicommon.pass_db
+def LDAP_SERVER(db):
+    """ [Callable command group] """
+
+    header = ["HOSTNAME", "PRIORITY"]
+
+    body = []
+
+    table = db.cfgdb.get_table("LDAP_SERVER")
+    for key in natsort.natsorted(table):
+        entry = table[key]
+        if not isinstance(key, tuple):
+            key = (key,)
+
+        row = [*key] + [
+            format_attr_value(
+                entry,
+                {'name': 'priority', 'description': 'Server priority',
+                 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}),
+        ]
+
+        body.append(row)
+
+    click.echo(tabulate.tabulate(body, header))
+
+
+@click.group(name="ldap",
+             cls=clicommon.AliasedGroup)
+def LDAP():
+    """ """
+
+    pass
+
+
+@LDAP.command(name="global")
+@clicommon.pass_db
+def LDAP_global(db):
+    """ """
+
+    header = [
+        "BIND DN",
+        "BIND PASSWORD",
+        "BIND TIMEOUT",
+        "VERSION",
+        "BASE DN",
+        "PORT",
+        "TIMEOUT",
+    ]
+
+    body = []
+
+    table = db.cfgdb.get_table("LDAP")
+    entry = table.get("global", {})
+    row = [
+        format_attr_value(
+            entry,
+            {'name': 'bind_dn', 'description': 'LDAP global bind dn', 'is-leaf-list': False,
+             'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {
+                'name': 'bind_password', 'description': 'Shared secret used for encrypting the communication',
+                'is-leaf-list': False, 'is-mandatory': False, 'group': ''
+            }
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'bind_timeout', 'description': 'Ldap bind timeout', 'is-leaf-list': False,
+             'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'version', 'description': 'Ldap version', 'is-leaf-list': False,
+             'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'base_dn', 'description': 'Ldap user base dn', 'is-leaf-list': False,
+             'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'port', 'description': 'TCP port to communicate with LDAP server',
+             'is-leaf-list': False, 'is-mandatory': False, 'group': ''}
+        ),
+        format_attr_value(
+            entry,
+            {'name': 'timeout', 'description': 'Ldap timeout duration in sec', 'is-leaf-list': False,
+             'is-mandatory': False, 'group': ''}
+        ),
+    ]
+
+    body.append(row)
+    click.echo(tabulate.tabulate(body, header))
+
+
+def 
register(cli): + """ Register new CLI nodes in root CLI. + + Args: + cli (click.core.Command): Root CLI node. + Raises: + Exception: when root CLI already has a command + we are trying to register. + """ + cli_node = LDAP_SERVER + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(LDAP_SERVER) + cli_node = LDAP + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(LDAP) diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 341111f265e..c5d3a256f2c 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -337,6 +337,8 @@ def migrate_sonic_packages(bootloader, binary_image_version): new_image_docker_mount = os.path.join(new_image_mount, "var", "lib", "docker") docker_default_config = os.path.join(new_image_mount, "etc", "default", "docker") docker_default_config_backup = os.path.join(new_image_mount, TMP_DIR, "docker_config_backup") + custom_manifests_path = os.path.join(PACKAGE_MANAGER_DIR, "manifests") + new_image_package_directory_path = os.path.join(new_image_mount, "var", "lib", "sonic-package-manager") if not os.path.isdir(new_image_docker_dir): # NOTE: This codepath can be reached if the installation process did not @@ -372,6 +374,8 @@ def migrate_sonic_packages(bootloader, binary_image_version): run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "start"]) docker_started = True run_command_or_raise(["cp", packages_path, os.path.join(new_image_mount, TMP_DIR, packages_file)]) + run_command_or_raise(["mkdir", "-p", custom_manifests_path]) + run_command_or_raise(["cp", "-arf", custom_manifests_path, new_image_package_directory_path]) run_command_or_raise(["touch", os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)]) run_command_or_raise(["mount", "--bind", os.path.join(VAR_RUN_PATH, DOCKERD_SOCK), diff --git a/sonic_package_manager/main.py b/sonic_package_manager/main.py index 8a0aabb9016..d305e3c9115 100644 --- a/sonic_package_manager/main.py +++ b/sonic_package_manager/main.py @@ -15,6 +15,7 @@ from sonic_package_manager.errors import PackageManagerError from sonic_package_manager.logger import log from sonic_package_manager.manager import PackageManager +from sonic_package_manager.manifest import MANIFESTS_LOCATION BULLET_UC = '\u2022' @@ -157,6 +158,13 @@ def repository(ctx): pass +@cli.group() +@click.pass_context +def manifests(ctx): + """ Custom local Manifest management commands. 
""" + + pass + @cli.group() @click.pass_context def show(ctx): @@ -280,6 +288,73 @@ def changelog(ctx, exit_cli(f'Failed to print package changelog: {err}', fg='red') +@manifests.command('create') +@click.pass_context +@click.argument('name', type=click.Path()) +@click.option('--from-json', type=str, help='specify manifest json file') +@root_privileges_required +def create_manifest(ctx, name, from_json): + """Create a new custom local manifest file.""" + + manager: PackageManager = ctx.obj + try: + manager.create_package_manifest(name, from_json) + except Exception as e: + click.echo("Error: Manifest {} creation failed - {}".format(name, str(e))) + return + + +@manifests.command('update') +@click.pass_context +@click.argument('name', type=click.Path()) +@click.option('--from-json', type=str, required=True) +@root_privileges_required +def update_manifest(ctx, name, from_json): + """Update an existing custom local manifest file with new one.""" + + manager: PackageManager = ctx.obj + try: + manager.update_package_manifest(name, from_json) + except Exception as e: + click.echo(f"Error occurred while updating manifest '{name}': {e}") + return + + +@manifests.command('delete') +@click.pass_context +@click.argument('name', type=click.Path()) +@root_privileges_required +def delete_manifest(ctx, name): + """Delete a custom local manifest file.""" + manager: PackageManager = ctx.obj + try: + manager.delete_package_manifest(name) + except Exception as e: + click.echo("Error: Failed to delete manifest file '{}'. {}".format(name, e)) + + +@manifests.command('show') +@click.pass_context +@click.argument('name', type=click.Path()) +@root_privileges_required +def show_manifest(ctx, name): + """Show the contents of custom local manifest file.""" + manager: PackageManager = ctx.obj + try: + manager.show_package_manifest(name) + except FileNotFoundError: + click.echo("Manifest file '{}' not found.".format(name)) + + +@manifests.command('list') +@click.pass_context +@root_privileges_required +def list_manifests(ctx): + """List all custom local manifest files.""" + manager: PackageManager = ctx.obj + manager.list_package_manifest() + + @repository.command() @click.argument('name', type=str) @click.argument('repository', type=str) @@ -334,6 +409,14 @@ def remove(ctx, name): help='Allow package downgrade. By default an attempt to downgrade the package ' 'will result in a failure since downgrade might not be supported by the package, ' 'thus requires explicit request from the user.') +@click.option('--use-local-manifest', + is_flag=True, + default=None, + help='Use locally created custom manifest file. ', + hidden=True) +@click.option('--name', + type=str, + help='custom name for the package') @add_options(PACKAGE_SOURCE_OPTIONS) @add_options(PACKAGE_COMMON_OPERATION_OPTIONS) @add_options(PACKAGE_COMMON_INSTALL_OPTIONS) @@ -348,7 +431,9 @@ def install(ctx, enable, set_owner, skip_host_plugins, - allow_downgrade): + allow_downgrade, + use_local_manifest, + name): """ Install/Upgrade package using [PACKAGE_EXPR] in format "[=|@]". 
    The repository to pull the package from is resolved by lookup in package database,
@@ -378,16 +463,58 @@ def install(ctx,
     if allow_downgrade is not None:
         install_opts['allow_downgrade'] = allow_downgrade

+    if use_local_manifest:
+        if not name:
+            click.echo('--name must be provided when using --use-local-manifest')
+            return
+        original_file = os.path.join(MANIFESTS_LOCATION, name)
+        if not os.path.exists(original_file):
+            click.echo(f'Local manifest file for {name} does not exist to install')
+            return
+
     try:
         manager.install(package_expr,
                         from_repository,
                         from_tarball,
+                        use_local_manifest,
+                        name,
                         **install_opts)
     except Exception as err:
         exit_cli(f'Failed to install {package_source}: {err}', fg='red')
     except KeyboardInterrupt:
         exit_cli('Operation canceled by user', fg='red')

+# At the end of sonic-package-manager install, a new manifest file is created with the package name.
+# At the end of sonic-package-manager uninstall <name>,
+# both the manifest file <name> and <name>.edit are deleted.
+# At the end of sonic-package-manager update,
+# we mv the manifest <name>.edit to <name> on success, else keep it as is.
+# So during sonic-package-manager update,
+# we can take the old package from <name> and the new package from <name>.edit, then follow the third point.
+
+
+@cli.command()
+@add_options(PACKAGE_COMMON_OPERATION_OPTIONS)
+@add_options(PACKAGE_COMMON_INSTALL_OPTIONS)
+@click.argument('name')
+@click.pass_context
+@root_privileges_required
+def update(ctx, name, force, yes, skip_host_plugins):
+    """ Update package to the updated manifest file. """
+
+    manager: PackageManager = ctx.obj
+
+    update_opts = {
+        'force': force,
+        'skip_host_plugins': skip_host_plugins,
+        'update_only': True,
+    }
+    try:
+        manager.update(name, **update_opts)
+    except Exception as err:
+        exit_cli(f'Failed to update package {name}: {err}', fg='red')
+    except KeyboardInterrupt:
+        exit_cli('Operation canceled by user', fg='red')

 @cli.command()
 @add_options(PACKAGE_COMMON_OPERATION_OPTIONS)
diff --git a/sonic_package_manager/manager.py b/sonic_package_manager/manager.py
index e41bb00e8f6..a052479607e 100644
--- a/sonic_package_manager/manager.py
+++ b/sonic_package_manager/manager.py
@@ -65,7 +65,15 @@
     version_to_tag,
     tag_to_version
 )
-
+import click
+import json
+import requests
+import getpass
+import paramiko
+import urllib.parse
+from scp import SCPClient
+from sonic_package_manager.manifest import Manifest, MANIFESTS_LOCATION, DEFAULT_MANIFEST_FILE
+LOCAL_JSON = "/tmp/local_json"

 @contextlib.contextmanager
 def failure_ignore(ignore: bool):
@@ -344,6 +352,8 @@ def install(self,
                 expression: Optional[str] = None,
                 repotag: Optional[str] = None,
                 tarball: Optional[str] = None,
+                use_local_manifest: bool = False,
+                name: Optional[str] = None,
                 **kwargs):
         """ Install/Upgrade SONiC Package from either an expression
         representing the package and its version, repository and tag or
@@ -358,7 +368,7 @@ def install(self,
             PackageManagerError
         """

-        source = self.get_package_source(expression, repotag, tarball)
+        source = self.get_package_source(expression, repotag, tarball, use_local_manifest=use_local_manifest, name=name)
         package = source.get_package()

         if self.is_installed(package.name):
@@ -446,6 +456,37 @@ def install_from_source(self,
             self.database.update_package(package.entry)
             self.database.commit()

+    @under_lock
+    def update(self,
+               name: str,
+               **kwargs):
+        """ Update SONiC Package referenced by name. The update
+        can be forced if force argument is True.
+
+        Args:
+            name: SONiC Package name.
+
+        Raises:
+            PackageManagerError
+        """
+        if self.is_installed(name):
+            edit_name = name + '.edit'
+            edit_file = os.path.join(MANIFESTS_LOCATION, edit_name)
+            if os.path.exists(edit_file):
+                self.upgrade_from_source(None, name=name, **kwargs)
+            else:
+                click.echo("Package manifest file {}.edit does not exist, nothing to update".format(name))
+                return
+        else:
+            click.echo("Package {} is not installed".format(name))
+            return
+
+    def remove_unused_docker_image(self, package):
+        image_id_used = any(entry.image_id == package.image_id for entry in self.database if entry.name != package.name)
+        if not image_id_used:
+            self.docker.rmi(package.image_id, force=True)
+        else:
+            log.info(f'Image with ID {package.image_id} is in use by other package(s). Skipping deletion')
+
     @under_lock
     @opt_check
     def uninstall(self, name: str,
@@ -493,7 +534,8 @@ def uninstall(self, name: str,
                     self._get_installed_packages_except(package)
                 )
                 self.docker.rm_by_ancestor(package.image_id, force=True)
-                self.docker.rmi(package.image_id, force=True)
+                # Delete the image only if it is not in use, otherwise skip deletion
+                self.remove_unused_docker_image(package)
                 package.entry.image_id = None
         except Exception as err:
             raise PackageUninstallationError(
@@ -504,6 +546,13 @@ def uninstall(self, name: str,
         package.entry.version = None
         self.database.update_package(package.entry)
         self.database.commit()
+        manifest_path = os.path.join(MANIFESTS_LOCATION, name)
+        edit_path = os.path.join(MANIFESTS_LOCATION, name + ".edit")
+        if os.path.exists(manifest_path):
+            os.remove(manifest_path)
+        if os.path.exists(edit_path):
+            os.remove(edit_path)
+

     @under_lock
     @opt_check
@@ -511,7 +560,9 @@ def upgrade_from_source(self,
                             source: PackageSource,
                             force=False,
                             skip_host_plugins=False,
-                            allow_downgrade=False):
+                            allow_downgrade=False,
+                            update_only: Optional[bool] = False,
+                            name: Optional[str] = None):
         """ Upgrade SONiC Package to a version the package reference
         expression specifies. Can force the upgrade if force parameter
         is True. Force can allow a package downgrade.
@@ -521,12 +572,17 @@ def upgrade_from_source(self,
             force: Force the upgrade.
             skip_host_plugins: Skip host OS plugins installation.
             allow_downgrade: Flag to allow package downgrade.
+            update_only: Perform package update with the new manifest only.
+            name: Name of the package.

         Raises:
             PackageManagerError
         """

-        new_package = source.get_package()
-        name = new_package.name
+        if update_only:
+            new_package = self.get_installed_package(name, use_edit=True)
+        else:
+            new_package = source.get_package()
+            name = new_package.name

         with failure_ignore(force):
             if not self.is_installed(name):
@@ -543,19 +599,20 @@ def upgrade_from_source(self,
         old_version = old_package.manifest['package']['version']
         new_version = new_package.manifest['package']['version']

-        with failure_ignore(force):
-            if old_version == new_version:
-                raise PackageUpgradeError(f'{new_version} is already installed')
-
-            # TODO: Not all packages might support downgrade.
-            # We put a check here but we understand that for some packages
-            # the downgrade might be safe to do. There can be a variable in manifest
-            # describing package downgrade ability or downgrade-able versions.
-            if new_version < old_version and not allow_downgrade:
-                raise PackageUpgradeError(
-                    f'Request to downgrade from {old_version} to {new_version}. '
-                    f'Downgrade might be not supported by the package'
-                )
+        if not update_only:
+            with failure_ignore(force):
+                if old_version == new_version:
+                    raise PackageUpgradeError(f'{new_version} is already installed')
+
+                # TODO: Not all packages might support downgrade.
+                # We put a check here but we understand that for some packages
+                # the downgrade might be safe to do. There can be a variable in manifest
+                # describing package downgrade ability or downgrade-able versions.
+                if new_version < old_version and not allow_downgrade:
+                    raise PackageUpgradeError(
+                        f'Request to downgrade from {old_version} to {new_version}. '
+                        f'Downgrade might not be supported by the package'
+                    )

         # remove currently installed package from the list
         installed_packages = self._get_installed_packages_and(new_package)
@@ -579,8 +636,9 @@ def upgrade_from_source(self,
                 self._uninstall_cli_plugins(old_package)
                 exits.callback(rollback(self._install_cli_plugins, old_package))

-                source.install(new_package)
-                exits.callback(rollback(source.uninstall, new_package))
+                if not update_only:
+                    source.install(new_package)
+                    exits.callback(rollback(source.uninstall, new_package))

                 feature_enabled = self.feature_registry.is_feature_enabled(old_feature)

@@ -620,7 +678,8 @@ def upgrade_from_source(self,
                 self._install_cli_plugins(new_package)
                 exits.callback(rollback(self._uninstall_cli_plugin, new_package))

-                self.docker.rmi(old_package.image_id, force=True)
+                if old_package.image_id != new_package.image_id:
+                    self.remove_unused_docker_image(old_package)

                 exits.pop_all()
         except Exception as err:
@@ -633,6 +692,10 @@ def upgrade_from_source(self,
         new_package_entry.version = new_version
         self.database.update_package(new_package_entry)
         self.database.commit()
+        if update_only:
+            manifest_path = os.path.join(MANIFESTS_LOCATION, name)
+            edit_path = os.path.join(MANIFESTS_LOCATION, name + ".edit")
+            os.rename(edit_path, manifest_path)

     @under_lock
     @opt_check
@@ -718,7 +781,7 @@ def migrate_package(old_package_entry,
                         file.write(chunk)
                         file.flush()

-                    self.install(tarball=file.name)
+                    self.install(tarball=file.name, name=name)
                 else:
                     log.info(f'installing {name} version {version}')

@@ -755,7 +818,9 @@ def migrate_package(old_package_entry,
                     new_package.version = old_package.version
                     migrate_package(old_package, new_package)
                 else:
-                    self.install(f'{new_package.name}={new_package_default_version}')
+                    # self.install(f'{new_package.name}={new_package_default_version}')
+                    repo_tag_formed = "{}:{}".format(new_package.repository, new_package.default_reference)
+                    self.install(None, repo_tag_formed, name=new_package.name)
             else:
                 # No default version and package is not installed.
                 # Migrate old package same version.
@@ -764,7 +829,7 @@ def migrate_package(old_package_entry,

         self.database.commit()

-    def get_installed_package(self, name: str) -> Package:
+    def get_installed_package(self, name: str, use_local_manifest: bool = False, use_edit: bool = False) -> Package:
         """ Get installed package by name.

         Args:
@@ -777,14 +842,19 @@ def get_installed_package(self, name: str) -> Package:
         source = LocalSource(package_entry,
                              self.database,
                              self.docker,
-                             self.metadata_resolver)
+                             self.metadata_resolver,
+                             use_local_manifest=use_local_manifest,
+                             name=name,
+                             use_edit=use_edit)
         return source.get_package()

     def get_package_source(self,
                            package_expression: Optional[str] = None,
                            repository_reference: Optional[str] = None,
                            tarboll_path: Optional[str] = None,
-                           package_ref: Optional[PackageReference] = None):
+                           package_ref: Optional[PackageReference] = None,
+                           use_local_manifest: bool = False,
+                           name: Optional[str] = None):
         """ Returns PackageSource object based on input source.

         Args:
@@ -800,7 +870,7 @@ def get_package_source(self,

         if package_expression:
             ref = parse_reference_expression(package_expression)
-            return self.get_package_source(package_ref=ref)
+            return self.get_package_source(package_ref=ref, name=name)
         elif repository_reference:
             repo_ref = utils.DockerReference.parse(repository_reference)
             repository = repo_ref['name']
@@ -810,15 +880,19 @@ def get_package_source(self,
                                   reference,
                                   self.database,
                                   self.docker,
-                                  self.metadata_resolver)
+                                  self.metadata_resolver,
+                                  use_local_manifest,
+                                  name)
         elif tarboll_path:
             return TarballSource(tarboll_path,
                                  self.database,
                                  self.docker,
-                                 self.metadata_resolver)
+                                 self.metadata_resolver,
+                                 use_local_manifest,
+                                 name)
         elif package_ref:
             package_entry = self.database.get_package(package_ref.name)
-
+            name = package_ref.name
             # Determine the reference if not specified.
             # If package is installed assume the installed
             # one is requested, otherwise look for default
@@ -829,7 +903,9 @@ def get_package_source(self,
                 return LocalSource(package_entry,
                                    self.database,
                                    self.docker,
-                                   self.metadata_resolver)
+                                   self.metadata_resolver,
+                                   use_local_manifest,
+                                   name)
             if package_entry.default_reference is not None:
                 package_ref.reference = package_entry.default_reference
             else:
@@ -840,7 +916,9 @@ def get_package_source(self,
                                   package_ref.reference,
                                   self.database,
                                   self.docker,
-                                  self.metadata_resolver)
+                                  self.metadata_resolver,
+                                  use_local_manifest,
+                                  name)
         else:
             raise ValueError('No package source provided')

@@ -1018,6 +1096,196 @@ def _uninstall_cli_plugin(self, package: Package, command: str):
         if os.path.exists(host_plugin_path):
             os.remove(host_plugin_path)

+    def download_file(self, url, local_path):
+        # Parse information from the URL
+        parsed_url = urllib.parse.urlparse(url)
+        protocol = parsed_url.scheme
+        username = parsed_url.username
+        password = parsed_url.password
+        hostname = parsed_url.hostname
+        remote_path = parsed_url.path
+        supported_protocols = ['http', 'https', 'scp', 'sftp']
+
+        # clear the temporary local file
+        if os.path.exists(local_path):
+            os.remove(local_path)
+
+        if not protocol:
+            # No scheme given: treat the URL as a local path and move the file into place
+            if os.path.exists(url):
+                os.rename(url, local_path)
+                return True
+            else:
+                click.echo("Local file not present")
+                return False
+        if protocol not in supported_protocols:
+            click.echo("Protocol not supported")
+            return False
+
+        # If the protocol is HTTP(S) and no username or password is provided, proceed with the download using requests
+        if (protocol == 'http' or protocol == 'https') and not username and not password:
+            try:
+                with requests.get(url, stream=True) as response:
+                    response.raise_for_status()
+                    with open(local_path, 'wb') as f:
+                        for chunk in response.iter_content(chunk_size=8192):
+                            if chunk:
+                                f.write(chunk)
+            except requests.exceptions.RequestException as e:
+                click.echo(f"Download error: {e}")
+                return False
+        else:
+            # If the password is not provided, prompt the user for it securely
+            if password is None:
+                password = getpass.getpass(prompt=f"Enter password for {username}@{hostname}: ")

+            # Create an SSH client
+            client = paramiko.SSHClient()
+            # Automatically add the server's host key (this is insecure and should be handled differently in production)
+            client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+            try:
+                # Connect to the SSH server
+                client.connect(hostname, username=username, password=password)
+
+                if protocol == 'scp':
+                    # Create an SCP client
+                    scp = SCPClient(client.get_transport())
+                    # Download the file
+                    scp.get(remote_path, local_path)
+                elif protocol == 'sftp':
+                    # Open an SFTP channel
+                    with client.open_sftp() as sftp:
+                        # Download the file
+                        sftp.get(remote_path, local_path)
+                elif protocol == 'http' or protocol == 'https':
+                    # Download over HTTP(S) for URLs that carry credentials
+                    try:
+                        with requests.get(url, auth=(username, password), stream=True) as response:
+                            response.raise_for_status()  # Raise an exception if the request was not successful
+                            with open(local_path, 'wb') as f:
+                                for chunk in response.iter_content(chunk_size=8192):
+                                    if chunk:
+                                        f.write(chunk)
+                    except requests.exceptions.RequestException as e:
+                        click.echo(f"Download error: {e}")
+                        return False
+                else:
+                    click.echo(f"Error: Source file '{remote_path}' does not exist.")
+
+            finally:
+                # Close the SSH connection
+                client.close()
+
+    def create_package_manifest(self, name, from_json):
+        if name == "default_manifest":
+            click.echo("Creating a manifest named 'default_manifest' is not allowed")
+            return
+        if self.is_installed(name):
+            click.echo("Error: A package with the same name {} is already installed".format(name))
+            return
+        mfile_name = os.path.join(MANIFESTS_LOCATION, name)
+        if os.path.exists(mfile_name):
+            click.echo("Error: Manifest file '{}' already exists.".format(name))
+            return
+
+        if from_json:
+            ret = self.download_file(from_json, LOCAL_JSON)
+            if ret is False:
+                return
+            from_json = LOCAL_JSON
+        else:
+            from_json = DEFAULT_MANIFEST_FILE
+        data = {}
+        with open(from_json, 'r') as file:
+            data = json.load(file)
+        # Validate against the manifest schema
+        Manifest.marshal(data)
+
+        # Make sure the 'name' is overwritten into the dict
+        data['package']['name'] = name
+        data['service']['name'] = name
+
+        with open(mfile_name, 'w') as file:
+            json.dump(data, file, indent=4)
+        click.echo(f"Manifest '{name}' created successfully.")
+
+    def update_package_manifest(self, name, from_json):
+        if name == "default_manifest":
+            click.echo("Updating the default manifest is not allowed")
+            return
+
+        original_file = os.path.join(MANIFESTS_LOCATION, name)
+        if not os.path.exists(original_file):
+            click.echo(f'Local manifest file for {name} does not exist, cannot update')
+            return
+        # download the json file from the remote/local path
+        ret = self.download_file(from_json, LOCAL_JSON)
+        if ret is False:
+            return
+        from_json = LOCAL_JSON
+
+        with open(from_json, 'r') as file:
+            data = json.load(file)
+
+        # Validate against the manifest schema
+        Manifest.marshal(data)
+
+        # Make sure the 'name' is overwritten into the dict
+        data['package']['name'] = name
+        data['service']['name'] = name
+
+        if self.is_installed(name):
+            edit_name = name + '.edit'
+            edit_file = os.path.join(MANIFESTS_LOCATION, edit_name)
+            with open(edit_file, 'w') as f:
+                json.dump(data, f, indent=4)
+            click.echo(f"Manifest '{name}' updated successfully.")
+        else:
+            # If the package is not installed,
+            # update the manifest file directly
+            with open(original_file, 'w') as orig_file:
+                json.dump(data,
orig_file, indent=4) + click.echo(f"Manifest '{name}' updated successfully.") + + def delete_package_manifest(self, name): + if name == "default_manifest": + click.echo("Default Manifest deletion is not allowed") + return + # Check if the manifest file exists + mfile_name = "{}/{}".format(MANIFESTS_LOCATION, name) + if not os.path.exists(mfile_name): + click.echo("Error: Manifest file '{}' not found.".format(name)) + return + # Confirm deletion with user input + confirm = click.prompt("Are you sure you want to delete the manifest file '{}'? (y/n)".format(name), type=str) + if confirm.lower() == 'y': + os.remove(mfile_name) + click.echo("Manifest '{}' deleted successfully.".format(name)) + else: + click.echo("Deletion cancelled.") + return + + def show_package_manifest(self, name): + mfile_name = "{}/{}".format(MANIFESTS_LOCATION, name) + edit_file_name = "{}.edit".format(mfile_name) + if os.path.exists(edit_file_name): + mfile_name = edit_file_name + with open(mfile_name, 'r') as file: + data = json.load(file) + click.echo("Manifest file: {}".format(name)) + click.echo(json.dumps(data, indent=4)) + + def list_package_manifest(self): + # Get all files in the manifest location + manifest_files = os.listdir(MANIFESTS_LOCATION) + if not manifest_files: + click.echo("No custom local manifest files found.") + else: + click.echo("Custom Local Manifest files:") + for file in manifest_files: + click.echo("- {}".format(file)) + @staticmethod def get_manager() -> 'PackageManager': """ Creates and returns PackageManager instance. diff --git a/sonic_package_manager/manifest.py b/sonic_package_manager/manifest.py index 865db7ef5c3..bc156f102cc 100644 --- a/sonic_package_manager/manifest.py +++ b/sonic_package_manager/manifest.py @@ -10,7 +10,12 @@ ) from sonic_package_manager.errors import ManifestError from sonic_package_manager.version import Version +from sonic_package_manager.database import BASE_LIBRARY_PATH +import os +import json +MANIFESTS_LOCATION = os.path.join(BASE_LIBRARY_PATH, "manifests") +DEFAULT_MANIFEST_FILE = os.path.join(BASE_LIBRARY_PATH, "default_manifest") class ManifestSchema: """ ManifestSchema class describes and provides marshalling @@ -249,3 +254,38 @@ def marshal(cls, input_dict: dict): def unmarshal(self) -> Dict: return self.SCHEMA.unmarshal(self) + + def get_manifest_from_local_file(name): + + if '.edit' in name: + actual_name = name.split('.edit')[0] + else: + actual_name = name + + manifest_path = os.path.join(MANIFESTS_LOCATION, name) + if os.path.exists(manifest_path): + with open(manifest_path, 'r') as file: + manifest_dict = json.load(file) + manifest_dict["package"]["name"] = actual_name + manifest_dict["service"]["name"] = actual_name + else: + with open(DEFAULT_MANIFEST_FILE, 'r') as file: + manifest_dict = json.load(file) + manifest_dict["package"]["name"] = actual_name + manifest_dict["service"]["name"] = actual_name + new_manifest_path = os.path.join(MANIFESTS_LOCATION, name) + with open(new_manifest_path, 'w') as file: + json.dump(manifest_dict, file, indent=4) + + json_str = json.dumps(manifest_dict, indent=4) + desired_dict = { + 'Tag': 'master', + 'com': { + 'azure': { + 'sonic': { + 'manifest': json_str + } + } + } + } + return desired_dict diff --git a/sonic_package_manager/metadata.py b/sonic_package_manager/metadata.py index b44b658a748..6485a107828 100644 --- a/sonic_package_manager/metadata.py +++ b/sonic_package_manager/metadata.py @@ -4,15 +4,13 @@ import json import tarfile -from typing import Dict, List - +from typing import Dict, List, Optional 
from sonic_package_manager import utils from sonic_package_manager.errors import MetadataError from sonic_package_manager.logger import log from sonic_package_manager.manifest import Manifest from sonic_package_manager.version import Version - def translate_plain_to_tree(plain: Dict[str, str], sep='.') -> Dict: """ Convert plain key/value dictionary into a tree by spliting the key with '.' @@ -65,7 +63,8 @@ def __init__(self, docker, registry_resolver): self.docker = docker self.registry_resolver = registry_resolver - def from_local(self, image: str) -> Metadata: + def from_local(self, image: str, use_local_manifest: bool = False, + name: Optional[str] = None, use_edit: bool = False) -> Metadata: """ Reads manifest from locally installed docker image. Args: @@ -75,16 +74,31 @@ def from_local(self, image: str) -> Metadata: Raises: MetadataError """ + if name and (use_local_manifest or use_edit): + edit_file_name = name + '.edit' + if use_edit: + labels = Manifest.get_manifest_from_local_file(edit_file_name) + return self.from_labels(labels) + elif use_local_manifest: + labels = Manifest.get_manifest_from_local_file(name) + return self.from_labels(labels) labels = self.docker.labels(image) - if labels is None: - raise MetadataError('No manifest found in image labels') + if labels is None or len(labels) == 0 or 'com.azure.sonic.manifest' not in labels: + if name: + labels = Manifest.get_manifest_from_local_file(name) + if labels is None: + raise MetadataError('No manifest found in image labels') + else: + raise MetadataError('No manifest found in image labels') return self.from_labels(labels) def from_registry(self, repository: str, - reference: str) -> Metadata: + reference: str, + use_local_manifest: bool = False, + name: Optional[str] = None) -> Metadata: """ Reads manifest from remote registry. Args: @@ -96,19 +110,25 @@ def from_registry(self, MetadataError """ - registry = self.registry_resolver.get_registry_for(repository) + if use_local_manifest: + labels = Manifest.get_manifest_from_local_file(name) + return self.from_labels(labels) + registry = self.registry_resolver.get_registry_for(repository) manifest = registry.manifest(repository, reference) digest = manifest['config']['digest'] blob = registry.blobs(repository, digest) - labels = blob['config']['Labels'] + labels = blob['config'].get('Labels') + if labels is None or len(labels) == 0 or 'com.azure.sonic.manifest' not in labels: + if name is None: + raise MetadataError('The name(custom) option is required as there is no metadata found in image labels') + labels = Manifest.get_manifest_from_local_file(name) if labels is None: raise MetadataError('No manifest found in image labels') - return self.from_labels(labels) - def from_tarball(self, image_path: str) -> Metadata: + def from_tarball(self, image_path: str, use_local_manifest: bool = False, name: Optional[str] = None) -> Metadata: """ Reads manifest image tarball. Args: image_path: Path to image tarball. 
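Note: the same label-fallback pattern now appears in `from_local`, `from_registry`, and `from_tarball`: prefer the `com.azure.sonic.manifest` image label, otherwise fall back to the custom local manifest identified by `name`. A minimal sketch of that shared resolution order follows; the `resolve_labels` helper is illustrative only and not part of this diff.

```python
# Illustrative only: a helper capturing the fallback order used by the
# MetadataResolver changes in this PR. Not part of the diff itself.
from typing import Optional

from sonic_package_manager.errors import MetadataError
from sonic_package_manager.manifest import Manifest

MANIFEST_LABEL = 'com.azure.sonic.manifest'


def resolve_labels(labels: Optional[dict], name: Optional[str]) -> dict:
    """ Prefer image labels; fall back to the local manifest file 'name'. """
    if labels and MANIFEST_LABEL in labels:
        return labels
    if name is None:
        raise MetadataError('The name(custom) option is required '
                            'as there is no metadata found in image labels')
    # get_manifest_from_local_file() returns a labels-shaped dict that
    # from_labels() can consume.
    return Manifest.get_manifest_from_local_file(name)
```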
@@ -117,16 +137,23 @@ def from_tarball(self, image_path: str) -> Metadata:
         Raises:
             MetadataError
         """
+        if use_local_manifest:
+            labels = Manifest.get_manifest_from_local_file(name)
+            return self.from_labels(labels)

         with tarfile.open(image_path) as image:
             manifest = json.loads(image.extractfile('manifest.json').read())

             blob = manifest[0]['Config']
             image_config = json.loads(image.extractfile(blob).read())
-            labels = image_config['config']['Labels']
-            if labels is None:
-                raise MetadataError('No manifest found in image labels')
-
+            labels = image_config['config'].get('Labels')
+            if labels is None or len(labels) == 0 or 'com.azure.sonic.manifest' not in labels:
+                if name is None:
+                    raise MetadataError('The name(custom) option is required '
+                                        'as there is no metadata found in image labels')
+                labels = Manifest.get_manifest_from_local_file(name)
+                if labels is None:
+                    raise MetadataError('No manifest found in image labels')
             return self.from_labels(labels)

     @classmethod
diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py
index 15d3aedd76d..57f8ac46240 100644
--- a/sonic_package_manager/service_creator/creator.py
+++ b/sonic_package_manager/service_creator/creator.py
@@ -2,6 +2,7 @@
 import contextlib
 import os
+import glob
 import sys
 import shutil
 import stat
@@ -33,6 +34,7 @@
 TIMER_UNIT_TEMPLATE = 'timer.unit.j2'

 SYSTEMD_LOCATION = '/usr/lib/systemd/system'
+ETC_SYSTEMD_LOCATION = '/etc/systemd/system'

 GENERATED_SERVICES_CONF_FILE = '/etc/sonic/generated_services.conf'

@@ -92,18 +94,30 @@ def set_executable_bit(filepath):
     os.chmod(filepath, st.st_mode | stat.S_IEXEC)


-def remove_if_exists(path):
+def remove_file(path):
     """ Remove filepath if it exists """

-    if not os.path.exists(path):
-        return
+    try:
+        os.remove(path)
+        log.info(f'removed {path}')
+    except FileNotFoundError:
+        pass
+
+
+def remove_dir(path):
+    """ Remove directory if it exists """
+
+    try:
+        shutil.rmtree(path)
+        log.info(f'removed {path}')
+    except FileNotFoundError:
+        pass

-    os.remove(path)
-    log.info(f'removed {path}')

 def is_list_of_strings(command):
     return isinstance(command, list) and all(isinstance(item, str) for item in command)

+
 def run_command(command: List[str]):
     """ Run arbitrary bash command.
Args: @@ -197,12 +211,22 @@ def remove(self, """ name = package.manifest['service']['name'] - remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) - remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) - remove_if_exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) - remove_if_exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) - remove_if_exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) - remove_if_exists(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) + remove_file(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) + remove_file(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) + + # remove symlinks and configuration directories created by featured + remove_file(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}.service')) + for unit_file in glob.glob(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}@*.service')): + remove_file(unit_file) + + remove_dir(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}.service.d')) + for unit_dir in glob.glob(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}@*.service.d')): + remove_dir(unit_dir) + self.update_dependent_list_file(package, remove=True) self.update_generated_services_conf_file(package, remove=True) diff --git a/sonic_package_manager/source.py b/sonic_package_manager/source.py index 7a13dccbac5..2a0f07b0f1b 100644 --- a/sonic_package_manager/source.py +++ b/sonic_package_manager/source.py @@ -4,7 +4,7 @@ from sonic_package_manager.dockerapi import DockerApi, get_repository_from_image from sonic_package_manager.metadata import Metadata, MetadataResolver from sonic_package_manager.package import Package - +from typing import Optional class PackageSource(object): """ PackageSource abstracts the way manifest is read @@ -105,20 +105,24 @@ def __init__(self, tarball_path: str, database: PackageDatabase, docker: DockerApi, - metadata_resolver: MetadataResolver): + metadata_resolver: MetadataResolver, + use_local_manifest: bool = False, + name: Optional[str] = None): super().__init__(database, docker, metadata_resolver) self.tarball_path = tarball_path + self.use_local_manifest = use_local_manifest + self.name = name def get_metadata(self) -> Metadata: """ Returns manifest read from tarball. """ - - return self.metadata_resolver.from_tarball(self.tarball_path) + return self.metadata_resolver.from_tarball(self.tarball_path, + use_local_manifest=self.use_local_manifest, + name=self.name) def install_image(self, package: Package): """ Installs image from local tarball source. """ - return self.docker.load(self.tarball_path) @@ -131,18 +135,24 @@ def __init__(self, reference: str, database: PackageDatabase, docker: DockerApi, - metadata_resolver: MetadataResolver): + metadata_resolver: MetadataResolver, + use_local_manifest: bool = False, + name: Optional[str] = None): super().__init__(database, docker, metadata_resolver) self.repository = repository self.reference = reference + self.use_local_manifest = use_local_manifest + self.name = name def get_metadata(self) -> Metadata: """ Returns manifest read from registry. """ return self.metadata_resolver.from_registry(self.repository, - self.reference) + self.reference, + self.use_local_manifest, + self.name) def install_image(self, package: Package): """ Installs image from registry. 
""" @@ -161,11 +171,17 @@ def __init__(self, entry: PackageEntry, database: PackageDatabase, docker: DockerApi, - metadata_resolver: MetadataResolver): + metadata_resolver: MetadataResolver, + use_local_manifest: bool = False, + name: Optional[str] = None, + use_edit: bool = False): super().__init__(database, docker, metadata_resolver) self.entry = entry + self.use_local_manifest = use_local_manifest + self.name = name + self.use_edit = use_edit def get_metadata(self) -> Metadata: """ Returns manifest read from locally installed Docker. """ @@ -177,8 +193,7 @@ def get_metadata(self) -> Metadata: # won't have image_id in database. Using their # repository name as image. image = f'{self.entry.repository}:latest' - - return self.metadata_resolver.from_local(image) + return self.metadata_resolver.from_local(image, self.use_local_manifest, self.name, self.use_edit) def get_package(self) -> Package: return Package(self.entry, self.get_metadata()) diff --git a/ssdutil/main.py b/ssdutil/main.py index 62f43037e7b..7b6f2c1ca12 100755 --- a/ssdutil/main.py +++ b/ssdutil/main.py @@ -39,7 +39,7 @@ def import_ssd_api(diskdev): except ImportError as e: log.log_warning("Platform specific SsdUtil module not found. Falling down to the generic implementation") try: - from sonic_platform_base.sonic_ssd.ssd_generic import SsdUtil + from sonic_platform_base.sonic_storage.ssd import SsdUtil except ImportError as e: log.log_error("Failed to import default SsdUtil. Error: {}".format(str(e)), True) raise e diff --git a/tests/bgp_commands_input/bgp_network_test_vector.py b/tests/bgp_commands_input/bgp_network_test_vector.py index da93e8e8e85..f9edd66fa2e 100644 --- a/tests/bgp_commands_input/bgp_network_test_vector.py +++ b/tests/bgp_commands_input/bgp_network_test_vector.py @@ -227,6 +227,9 @@ multi_asic_bgp_network_err = \ """Error: -n/--namespace option required. provide namespace from list ['asic0', 'asic1']""" +multi_asic_bgp_network_asic_unknown_err = \ + """Error: invalid namespace asic_unknown. provide namespace from list ['asic0', 'asic1']""" + bgp_v4_network_asic0 = \ """ BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 @@ -276,7 +279,7 @@ *=i10.0.0.42/31 10.1.0.2 0 100 0 ? *>i 10.1.0.0 0 100 0 ? *=i10.0.0.44/31 10.1.0.2 0 100 0 ? -*>i 10.1.0.0 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? """ bgp_v4_network_ip_address_asic0 = \ @@ -311,6 +314,111 @@ Last update: Thu Apr 22 02:13:30 2021 """ +bgp_v4_network_all_asic = \ + """ +======== namespace asic0 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? 
+*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? + +======== namespace asic1 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? 
+""" + bgp_v6_network_asic0 = \ """ BGP table version is 12849, local router ID is 10.1.0.32, vrf id 0 @@ -429,6 +537,9 @@ def mock_show_bgp_network_multi_asic(param): return bgp_v6_network_ip_address_asic0 elif param == 'bgp_v6_network_bestpath_asic0': return bgp_v6_network_ip_address_asic0_bestpath + elif param == "bgp_v4_network_all_asic": + # this is mocking the output of a single LC + return bgp_v4_network_asic0 else: return '' @@ -454,6 +565,11 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 1, 'rc_output': bgp_v4_network_longer_prefixes_error }, + 'bgp_v4_network_all_asic_on_single_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network + }, 'bgp_v6_network': { 'args': [], 'rc': 0, @@ -479,10 +595,10 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v6_network_longer_prefixes }, - 'bgp_v4_network_multi_asic': { + 'bgp_v4_network_default_multi_asic': { 'args': [], - 'rc': 2, - 'rc_err_msg': multi_asic_bgp_network_err + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic }, 'bgp_v4_network_asic0': { 'args': ['-nasic0'], @@ -499,6 +615,16 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v4_network_bestpath_asic0 }, + 'bgp_v4_network_all_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic + }, + 'bgp_v4_network_asic_unknown': { + 'args': ['-nasic_unknown'], + 'rc': 2, + 'rc_err_msg': multi_asic_bgp_network_asic_unknown_err + }, 'bgp_v6_network_multi_asic': { 'args': [], 'rc': 2, diff --git a/tests/bgp_commands_test.py b/tests/bgp_commands_test.py index a60ba8c81f5..11415e8727e 100644 --- a/tests/bgp_commands_test.py +++ b/tests/bgp_commands_test.py @@ -336,7 +336,7 @@ 3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 Total number of neighbors 6 -""" +""" # noqa: E501 class TestBgpCommandsSingleAsic(object): diff --git a/tests/bgp_input/assert_show_output.py b/tests/bgp_input/assert_show_output.py new file mode 100644 index 00000000000..3671c3ce5f8 --- /dev/null +++ b/tests/bgp_input/assert_show_output.py @@ -0,0 +1,55 @@ +""" +Module holding the correct values for show CLI command outputs for the bgp_test.py +""" + +show_device_global_empty = """\ +No configuration is present in CONFIG DB +""" + +show_device_global_all_disabled = """\ +TSA W-ECMP +-------- -------- +disabled disabled +""" +show_device_global_all_disabled_json = """\ +{ + "tsa": "disabled", + "w-ecmp": "disabled" +} +""" + +show_device_global_all_enabled = """\ +TSA W-ECMP +------- -------- +enabled enabled +""" +show_device_global_all_enabled_json = """\ +{ + "tsa": "enabled", + "w-ecmp": "enabled" +} +""" + +show_device_global_tsa_enabled = """\ +TSA W-ECMP +------- -------- +enabled disabled +""" +show_device_global_tsa_enabled_json = """\ +{ + "tsa": "enabled", + "w-ecmp": "disabled" +} +""" + +show_device_global_wcmp_enabled = """\ +TSA W-ECMP +-------- -------- +disabled enabled +""" +show_device_global_wcmp_enabled_json = """\ +{ + "tsa": "disabled", + "w-ecmp": "enabled" +} +""" diff --git a/tests/bgp_input/mock_config/all_disabled.json b/tests/bgp_input/mock_config/all_disabled.json new file mode 100644 index 00000000000..30a929c7b79 --- /dev/null +++ b/tests/bgp_input/mock_config/all_disabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "false", + "wcmp_enabled": "false" + } +} diff --git a/tests/bgp_input/mock_config/all_enabled.json b/tests/bgp_input/mock_config/all_enabled.json new file mode 100644 index 00000000000..eab39897bb6 --- /dev/null +++ 
b/tests/bgp_input/mock_config/all_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "true", + "wcmp_enabled": "true" + } +} diff --git a/tests/bgp_input/mock_config/empty.json b/tests/bgp_input/mock_config/empty.json new file mode 100644 index 00000000000..e77dd4d79e5 --- /dev/null +++ b/tests/bgp_input/mock_config/empty.json @@ -0,0 +1,5 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "NULL": "NULL" + } +} diff --git a/tests/bgp_input/mock_config/tsa_enabled.json b/tests/bgp_input/mock_config/tsa_enabled.json new file mode 100644 index 00000000000..9c72a5f79d8 --- /dev/null +++ b/tests/bgp_input/mock_config/tsa_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "true", + "wcmp_enabled": "false" + } +} diff --git a/tests/bgp_input/mock_config/wcmp_enabled.json b/tests/bgp_input/mock_config/wcmp_enabled.json new file mode 100644 index 00000000000..fddc76b618e --- /dev/null +++ b/tests/bgp_input/mock_config/wcmp_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "false", + "wcmp_enabled": "true" + } +} diff --git a/tests/bgp_test.py b/tests/bgp_test.py new file mode 100644 index 00000000000..d64d0b9eeac --- /dev/null +++ b/tests/bgp_test.py @@ -0,0 +1,130 @@ +import pytest +import os +import logging +import show.main as show +import config.main as config + +from click.testing import CliRunner +from utilities_common.db import Db +from .mock_tables import dbconnector +from .bgp_input import assert_show_output + + +test_path = os.path.dirname(os.path.abspath(__file__)) +input_path = os.path.join(test_path, "bgp_input") +mock_config_path = os.path.join(input_path, "mock_config") + +logger = logging.getLogger(__name__) + + +SUCCESS = 0 + + +class TestBgp: + @classmethod + def setup_class(cls): + logger.info("Setup class: {}".format(cls.__name__)) + os.environ['UTILITIES_UNIT_TESTING'] = "1" + + @classmethod + def teardown_class(cls): + logger.info("Teardown class: {}".format(cls.__name__)) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs.clear() + + # ---------- CONFIG BGP ---------- # + + @pytest.mark.parametrize( + "feature", [ + "tsa", + "w-ecmp" + ] + ) + @pytest.mark.parametrize( + "state", [ + "enabled", + "disabled" + ] + ) + def test_config_device_global(self, feature, state): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["bgp"].commands["device-global"]. 
+ commands[feature].commands[state], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.exit_code == SUCCESS + + # ---------- SHOW BGP ---------- # + + @pytest.mark.parametrize( + "cfgdb,output", [ + pytest.param( + os.path.join(mock_config_path, "empty"), + { + "plain": assert_show_output.show_device_global_empty, + "json": assert_show_output.show_device_global_empty + }, + id="empty" + ), + pytest.param( + os.path.join(mock_config_path, "all_disabled"), + { + "plain": assert_show_output.show_device_global_all_disabled, + "json": assert_show_output.show_device_global_all_disabled_json + }, + id="all-disabled" + ), + pytest.param( + os.path.join(mock_config_path, "all_enabled"), + { + "plain": assert_show_output.show_device_global_all_enabled, + "json": assert_show_output.show_device_global_all_enabled_json + }, + id="all-enabled" + ), + pytest.param( + os.path.join(mock_config_path, "tsa_enabled"), + { + "plain": assert_show_output.show_device_global_tsa_enabled, + "json": assert_show_output.show_device_global_tsa_enabled_json + }, + id="tsa-enabled" + ), + pytest.param( + os.path.join(mock_config_path, "wcmp_enabled"), + { + "plain": assert_show_output.show_device_global_wcmp_enabled, + "json": assert_show_output.show_device_global_wcmp_enabled_json + }, + id="w-ecmp-enabled" + ) + ] + ) + @pytest.mark.parametrize( + "format", [ + "plain", + "json", + ] + ) + def test_show_device_global(self, cfgdb, output, format): + dbconnector.dedicated_dbs["CONFIG_DB"] = cfgdb + + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["bgp"].commands["device-global"], + [] if format == "plain" else ["--json"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.output == output[format] + assert result.exit_code == SUCCESS diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py old mode 100644 new mode 100755 index 940e30c04bc..681e3d2c139 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -7,6 +7,8 @@ import tests.mock_tables.dbconnector from utilities_common.db import Db from .utils import get_result_and_return_code +from unittest import mock +sys.modules['clicommon'] = mock.Mock() show_linecard0_shutdown_output="""\ LINE-CARD0 line-card 1 Empty down LC1000101 @@ -15,6 +17,15 @@ show_linecard0_startup_output="""\ LINE-CARD0 line-card 1 Empty up LC1000101 """ + +show_fabriccard0_shutdown_output = """\ +FABRIC-CARD0 fabric-card 17 Online down FC1000101 +""" + +show_fabriccard0_startup_output = """\ +FABRIC-CARD0 fabric-card 17 Online up FC1000101 +""" + header_lines = 2 warning_lines = 0 @@ -113,6 +124,11 @@ Linecard4|Asic2|PortChannel0001 2 22 Linecard4|Asic2|Ethernet29, Linecard4|Asic2|Ethernet30 """ + +def mock_run_command_side_effect(*args, **kwargs): + return '', 0 + + class TestChassisModules(object): @classmethod def setup_class(cls): @@ -186,6 +202,47 @@ def test_config_shutdown_module(self): #db.cfgdb.set_entry("CHASSIS_MODULE", "LINE-CARD0", { "admin_status" : "down" }) #db.get_data("CHASSIS_MODULE", "LINE-CARD0") + def test_config_shutdown_module_fabric(self): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + runner = CliRunner() + db = Db() + + chassisdb = db.db + chassisdb.connect("CHASSIS_STATE_DB") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_id_in_module", "0") + chassisdb.set("CHASSIS_STATE_DB", 
"CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_pci_address", "nokia-bdb:4:0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "name", "FABRIC-CARD0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_id_in_module", "1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_pci_address", "nokia-bdb:4:1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "name", "FABRIC-CARD0") + chassisdb.close("CHASSIS_STATE_DB") + + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["shutdown"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + header_lines = 2 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_shutdown_output.strip('\n') + + fvs = {'admin_status': 'down'} + db.cfgdb.set_entry('CHASSIS_MODULE', "FABRIC-CARD0", fvs) + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["shutdown"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 6 + def test_config_startup_module(self): runner = CliRunner() db = Db() @@ -202,6 +259,62 @@ def test_config_startup_module(self): result_out = " ".join((result_lines[header_lines]).split()) assert result_out.strip('\n') == show_linecard0_startup_output.strip('\n') + def test_config_startup_module_fabric(self): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + runner = CliRunner() + db = Db() + + chassisdb = db.db + chassisdb.connect("CHASSIS_STATE_DB") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_id_in_module", "0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_pci_address", "nokia-bdb:4:0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "name", "FABRIC-CARD0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_id_in_module", "1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_pci_address", "nokia-bdb:4:1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "name", "FABRIC-CARD0") + chassisdb.close("CHASSIS_STATE_DB") + + # FC is down and doing startup + fvs = {'admin_status': 'down'} + db.cfgdb.set_entry('CHASSIS_MODULE', "FABRIC-CARD0", fvs) + + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["startup"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_startup_output.strip('\n') + assert mock_run_command.call_count == 2 + + # FC is up and doing startup + fvs = {'admin_status': 'up'} + db.cfgdb.set_entry('CHASSIS_MODULE', 
"FABRIC-CARD0", fvs) + + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["startup"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_startup_output.strip('\n') + assert mock_run_command.call_count == 2 + def test_config_incorrect_module(self): runner = CliRunner() db = Db() diff --git a/tests/config_dpb_test.py b/tests/config_dpb_test.py index 58a24dc9585..0a3d99cbcd7 100644 --- a/tests/config_dpb_test.py +++ b/tests/config_dpb_test.py @@ -350,7 +350,7 @@ def test_config_breakout_extra_table_warning(self, breakout_cfg_file, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Below Config can not be verified' in result.output assert 'UNKNOWN_TABLE' in result.output assert 'Do you wish to Continue?' in result.output @@ -396,7 +396,7 @@ def test_config_breakout_verbose(self, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Dependencies Exist.' in result.output # verbose must be set while creating instance of ConfigMgmt class @@ -538,7 +538,7 @@ def config_dpb_port8_2x50G_1x100G(): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v','-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Dependencies Exist.' 
in result.output assert 'Printing dependencies' in result.output assert 'NO-NSW-PACL-V4' in result.output diff --git a/tests/config_save_output/all_config_db.json b/tests/config_save_output/all_config_db.json new file mode 100644 index 00000000000..17c3e7fc6ce --- /dev/null +++ b/tests/config_save_output/all_config_db.json @@ -0,0 +1,5 @@ +{ + "localhost": {}, + "asic0": {}, + "asic1": {} +} \ No newline at end of file diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py index 76f56756902..25c54d36ec1 100644 --- a/tests/config_snmp_test.py +++ b/tests/config_snmp_test.py @@ -877,6 +877,34 @@ def test_config_snmp_community_add_new_community_with_invalid_type_yang_validati assert result.exit_code != 0 assert 'SNMP community configuration failed' in result.output + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1%eth0', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_linklocal(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["fe80::1%eth0"], obj=obj) + assert ('fe80::1%eth0', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "fe80::1%eth0||") == {} + + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_ipv4(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["10.1.0.32"], obj=obj) + assert ('10.1.0.32', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "10.1.0.32||") == {} + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/config_test.py b/tests/config_test.py index 1054a52a33f..748d434fc2a 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1,3 +1,5 @@ +import copy +import datetime import pytest import filecmp import importlib @@ -167,6 +169,78 @@ Reloading Monit configuration ... """ +reload_config_masic_onefile_output = """\ +Stopping SONiC target ... +Restarting SONiC target ... +Reloading Monit configuration ... +""" + +reload_config_masic_onefile_gen_sysinfo_output = """\ +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -H -k Mellanox-SN3800-D112C8 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic0 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic1 --write-to-db +Restarting SONiC target ... +Reloading Monit configuration ... 
+""" + +save_config_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /etc/sonic/config_db.json +""" + +save_config_filename_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /tmp/config_db.json +""" + +save_config_masic_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /etc/sonic/config_db.json +Running command: /usr/local/bin/sonic-cfggen -n asic0 -d --print-data > /etc/sonic/config_db0.json +Running command: /usr/local/bin/sonic-cfggen -n asic1 -d --print-data > /etc/sonic/config_db1.json +""" + +save_config_filename_masic_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > config_db.json +Running command: /usr/local/bin/sonic-cfggen -n asic0 -d --print-data > config_db0.json +Running command: /usr/local/bin/sonic-cfggen -n asic1 -d --print-data > config_db1.json +""" + +save_config_onefile_masic_output = """\ +Integrate each ASIC's config into a single JSON file /tmp/all_config_db.json. +""" + +config_temp = { + "scope": { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + } + def mock_run_command_side_effect(*args, **kwargs): command = args[0] if isinstance(command, str): @@ -186,6 +260,10 @@ def mock_run_command_side_effect(*args, **kwargs): return 'enabled', 0 elif command == 'cat /var/run/dhclient.eth0.pid': return '101', 0 + elif command == 'sudo systemctl show --no-pager interfaces-config -p ExecMainExitTimestamp --value': + return f'{datetime.datetime.now()}', 0 + elif command == 'sudo systemctl show --no-pager networking -p ExecMainExitTimestamp --value': + return f'{datetime.datetime.now()}', 0 else: return '', 0 @@ -299,6 +377,191 @@ def test_plattform_fw_update(self, mock_check_call): assert result.exit_code == 0 mock_check_call.assert_called_with(["fwutil", "update", 'update', 'module', 'Module1', 'component', 'BIOS', 'fw']) + +class TestConfigSave(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + import config.main + importlib.reload(config.main) + + def test_config_save(self, get_cmd_module, setup_single_broadcom_asic): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + (config, show) = get_cmd_module + + runner = CliRunner() + + result = runner.invoke(config.config.commands["save"], ["-y"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_output + + def test_config_save_filename(self, get_cmd_module, setup_single_broadcom_asic): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + 
mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + (config, show) = get_cmd_module + + runner = CliRunner() + + output_file = os.path.join(os.sep, "tmp", "config_db.json") + result = runner.invoke(config.config.commands["save"], ["-y", output_file]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_filename_output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + + +class TestConfigSaveMasic(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + import config.main + importlib.reload(config.main) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def test_config_save_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["save"], ["-y"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_masic_output + + def test_config_save_filename_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + runner = CliRunner() + + result = runner.invoke( + config.config.commands["save"], + ["-y", "config_db.json,config_db0.json,config_db1.json"] + ) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_filename_masic_output + + def test_config_save_filename_wrong_cnt_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke( + config.config.commands["save"], + ["-y", "config_db.json,config_db0.json"] + ) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert "Input 3 config file(s) separated by comma for multiple files" in result.output + + def test_config_save_onefile_masic(self): + def get_config_side_effect(): + return {} + + with mock.patch('swsscommon.swsscommon.ConfigDBConnector.get_config', + mock.MagicMock(side_effect=get_config_side_effect)): + runner = CliRunner() + + output_file = os.path.join(os.sep, "tmp", "all_config_db.json") + print("Saving output in {}".format(output_file)) + try: + 
os.remove(output_file) + except OSError: + pass + result = runner.invoke( + config.config.commands["save"], + ["-y", output_file] + ) + + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_onefile_masic_output + + cwd = os.path.dirname(os.path.realpath(__file__)) + expected_result = os.path.join( + cwd, "config_save_output", "all_config_db.json" + ) + assert filecmp.cmp(output_file, expected_result, shallow=False) + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + + class TestConfigReload(object): dummy_cfg_file = os.path.join(os.sep, "tmp", "config.json") @@ -392,6 +655,211 @@ def teardown_class(cls): dbconnector.load_namespace_config() +class TestConfigReloadMasic(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + import config.main + importlib.reload(config.main) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def test_config_reload_onefile_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "mac": "1d:34:db:16:a6:00", + "platform": "x86_64-mlnx_msn3800-r0", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": "asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "mac": "02:42:f0:7f:01:05", + "platform": "multi_asic", + "region": "None", + "sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "mac": "02:42:f0:7f:01:06", + "platform": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == 
reload_config_masic_onefile_output + + def test_config_reload_onefile_gen_sysinfo_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": "asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join( + [li.rstrip() for li in result.output.split('\n')] + ) == reload_config_masic_onefile_gen_sysinfo_output + + def test_config_reload_onefile_bad_format_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": {}, + "asic0": {} + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code != 0 + assert "Input file all_config_db.json must contain all asics config" in result.output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + + class TestLoadMinigraph(object): @classmethod def setup_class(cls): @@ -413,7 +881,7 @@ def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) - assert mock_run_command.call_count == 8 + assert mock_run_command.call_count == 12 
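+ # Note: this count covers every command load_minigraph shells out under the
+ # mocked run_command; the expectation moves from 8 to 12 because the updated
+ # flow issues four additional commands (asserted here only by total count).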
@mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_path, None))) def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broadcom_asic): @@ -428,7 +896,7 @@ def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broad assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_platform_plugin_command_output # Verify "systemctl reset-failed" is called for services under sonic.target mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) - assert mock_run_command.call_count == 8 + assert mock_run_command.call_count == 12 @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_false_path, None))) def test_load_minigraph_platform_plugin_fail(self, get_cmd_module, setup_single_broadcom_asic): @@ -519,8 +987,13 @@ def is_file_side_effect(filename): def test_load_minigraph_with_specified_golden_config_path(self, get_cmd_module): def is_file_side_effect(filename): return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return {} + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command, \ - mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "--golden_config_path", "golden_config.json", "-y"]) @@ -531,14 +1004,48 @@ def is_file_side_effect(filename): def test_load_minigraph_with_default_golden_config_path(self, get_cmd_module): def is_file_side_effect(filename): return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return {} + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command, \ - mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) assert result.exit_code == 0 assert "config override-config-table /etc/sonic/golden_config_db.json" in result.output + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_hard_dependency_check(self, get_cmd_module): + def is_file_side_effect(filename): + return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return { + "AAA": { + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + "passkey": "" + } + } + } + + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)), \ + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): + (config, _) = 
get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) + assert result.exit_code != 0 + assert "Authentication with 'tacacs+' is not allowed when passkey not exits." in result.output + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) def test_load_minigraph_with_traffic_shift_away(self, get_cmd_module): with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: @@ -556,7 +1063,12 @@ def test_load_minigraph_with_traffic_shift_away_with_golden_config(self, get_cmd with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: def is_file_side_effect(filename): return True if 'golden_config' in filename else False - with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + + def read_json_file_side_effect(filename): + return {} + + with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module db = Db() golden_config = {} @@ -1023,6 +1535,7 @@ def setUp(self): self.any_checkpoints_list = ["checkpoint1", "checkpoint2", "checkpoint3"] self.any_checkpoints_list_as_text = json.dumps(self.any_checkpoints_list, indent=4) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__no_params__get_required_params_error_msg(self): # Arrange unexpected_exit_code = 0 @@ -1035,6 +1548,7 @@ def test_apply_patch__no_params__get_required_params_error_msg(self): self.assertNotEqual(unexpected_exit_code, result.exit_code) self.assertTrue(expected_output in result.output) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__help__gets_help_msg(self): # Arrange expected_exit_code = 0 @@ -1047,6 +1561,7 @@ def test_apply_patch__help__gets_help_msg(self): self.assertEqual(expected_exit_code, result.exit_code) self.assertTrue(expected_output in result.output) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__only_required_params__default_values_used_for_optional_params(self): # Arrange expected_exit_code = 0 @@ -1065,6 +1580,7 @@ def test_apply_patch__only_required_params__default_values_used_for_optional_par mock_generic_updater.apply_patch.assert_called_once() mock_generic_updater.apply_patch.assert_has_calls([expected_call_with_default_values]) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__all_optional_params_non_default__non_default_values_used(self): # Arrange expected_exit_code = 0 @@ -1094,6 +1610,7 @@ def test_apply_patch__all_optional_params_non_default__non_default_values_used(s mock_generic_updater.apply_patch.assert_called_once() mock_generic_updater.apply_patch.assert_has_calls([expected_call_with_non_default_values]) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__exception_thrown__error_displayed_error_code_returned(self): # Arrange unexpected_exit_code = 0 @@ -1129,6 +1646,7 @@ def test_apply_patch__optional_parameters_passed_correctly(self): ["--ignore-path", "/ANY_TABLE"], mock.call(self.any_patch, ConfigFormat.CONFIGDB, False, False, False, ("/ANY_TABLE",))) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) 
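+ # (validate_patch is stubbed to return True throughout these apply-patch
+ # tests so they exercise only the CLI argument plumbing; patch validation
+ # itself is covered by the multi-ASIC validate_patch tests further below.)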
def validate_apply_patch_optional_parameter(self, param_args, expected_call): # Arrange expected_exit_code = 0 @@ -2651,6 +3169,7 @@ def setUp(self): self.runner = CliRunner() self.patch_file_path = 'path/to/patch.json' + self.replace_file_path = 'path/to/replace.json' self.patch_content = [ { "op": "add", @@ -2679,6 +3198,16 @@ def setUp(self): } ] + test_config = copy.deepcopy(config_temp) + data = test_config.pop("scope") + self.all_config = {} + self.all_config["localhost"] = data + self.all_config["asic0"] = data + self.all_config["asic0"]["bgpraw"] = "" + self.all_config["asic1"] = data + self.all_config["asic1"]["bgpraw"] = "" + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch_multiasic(self): # Mock open to simulate file reading with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: @@ -2698,7 +3227,201 @@ def test_apply_patch_multiasic(self): # Verify mocked_open was called as expected mocked_open.assert_called_with(self.patch_file_path, 'r') + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch_dryrun_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_dryrun_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--parallel", + 
"--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.apply_patch_wrapper') + def test_apply_patch_check_apply_call_parallel_multiasic(self, mock_apply_patch): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + 
"--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + self.assertEqual(mock_apply_patch.call_count, + multi_asic.get_num_asics() + 1, + "apply_patch_wrapper function should be called number of ASICs plus host times") + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_not_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_not_called() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + def test_apply_patch_parallel_with_error_multiasic(self): # Mock open to simulate file reading with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: # Mock GenericUpdater to avoid actual patch application @@ -2713,12 +3436,13 @@ def test_apply_patch_dryrun_multiasic(self): result = self.runner.invoke(config.config.commands["apply-patch"], [self.patch_file_path, "--format", ConfigFormat.SONICYANG.name, - "--dry-run", - "--ignore-non-yang-tables", - "--ignore-path", "/ANY_TABLE", - "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", - "--ignore-path", "", - "--verbose"], + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], catch_exceptions=False) print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) @@ -2732,6 +3456,269 @@ def test_apply_patch_dryrun_multiasic(self): # Ensure ConfigDBConnector was never instantiated or called 
mock_config_db_connector.assert_not_called()
+ @patch('config.main.subprocess.Popen')
+ @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+ def test_apply_patch_validate_patch_multiasic(self, mock_subprocess_popen):
+ mock_instance = MagicMock()
+ mock_instance.communicate.return_value = (json.dumps(self.all_config), 0)
+ mock_subprocess_popen.return_value = mock_instance
+
+ # Mock open to simulate file reading
+ with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open:
+ # Mock GenericUpdater to avoid actual patch application
+ with patch('config.main.GenericUpdater') as mock_generic_updater:
+ mock_generic_updater.return_value.apply_patch = MagicMock()
+
+ print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+ # Invocation of the command with the CliRunner
+ result = self.runner.invoke(config.config.commands["apply-patch"],
+ [self.patch_file_path],
+ catch_exceptions=True)
+
+ print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+ # Assertions and verifications
+ self.assertEqual(result.exit_code, 0, "Command should succeed.")
+ self.assertIn("Patch applied successfully.", result.output)
+
+ # Verify mocked_open was called as expected
+ mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+ @patch('config.main.subprocess.Popen')
+ @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+ def test_apply_patch_validate_patch_with_badpath_multiasic(self, mock_subprocess_popen):
+ mock_instance = MagicMock()
+ mock_instance.communicate.return_value = (json.dumps(self.all_config), 0)
+ mock_subprocess_popen.return_value = mock_instance
+
+ bad_patch = copy.deepcopy(self.patch_content)
+ bad_patch.append({
+ "value": {
+ "policy_desc": "New ACL Table",
+ "ports": ["Ethernet3", "Ethernet4"],
+ "stage": "ingress",
+ "type": "L3"
+ }
+ })
+
+ # Mock open to simulate file reading
+ with patch('builtins.open', mock_open(read_data=json.dumps(bad_patch)), create=True) as mocked_open:
+ # Mock GenericUpdater to avoid actual patch application
+ with patch('config.main.GenericUpdater') as mock_generic_updater:
+ mock_generic_updater.return_value.apply_patch = MagicMock()
+
+ print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+ # Invocation of the command with the CliRunner
+ result = self.runner.invoke(config.config.commands["apply-patch"],
+ [self.patch_file_path],
+ catch_exceptions=True)
+
+ print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+ # Assertions and verifications
+ self.assertNotEqual(result.exit_code, 0, "Command should fail.")
+ self.assertIn("Failed to apply patch", result.output)
+
+ # Verify mocked_open was called as expected
+ mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+ @patch('config.main.subprocess.Popen')
+ @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+ def test_apply_patch_parallel_badpath_multiasic(self, mock_subprocess_popen):
+ mock_instance = MagicMock()
+ mock_instance.communicate.return_value = (json.dumps(self.all_config), 0)
+ mock_subprocess_popen.return_value = mock_instance
+
+ bad_patch = copy.deepcopy(self.patch_content)
+ bad_patch.append({
+ "value": {
+ "policy_desc": "New ACL Table",
+ "ports": ["Ethernet3", "Ethernet4"],
+ "stage": "ingress",
+ "type": "L3"
+ }
+ })
+
+ # Mock open to simulate file reading
+ with patch('builtins.open', mock_open(read_data=json.dumps(bad_patch)), create=True) as mocked_open:
+ # Mock GenericUpdater to avoid actual patch application
+ with patch('config.main.GenericUpdater') as mock_generic_updater:
+ mock_generic_updater.return_value.apply_patch = MagicMock()
+
+ print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+ # Invocation of the command with the CliRunner
+ result = self.runner.invoke(config.config.commands["apply-patch"],
+ [self.patch_file_path,
+ "--parallel"],
+ catch_exceptions=True)
+
+ print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+ # Assertions and verifications
+ self.assertNotEqual(result.exit_code, 0, "Command should fail.")
+ self.assertIn("Failed to apply patch", result.output)
+
+ # Verify mocked_open was called as expected
+ mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+ @patch('config.main.subprocess.Popen')
+ @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+ def test_apply_patch_validate_patch_with_wrong_fetch_config(self, mock_subprocess_popen):
+ mock_instance = MagicMock()
+ mock_instance.communicate.return_value = (json.dumps(self.all_config), 2)
+ mock_subprocess_popen.return_value = mock_instance
+
+ # Mock open to simulate file reading
+ with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open:
+ # Mock GenericUpdater to avoid actual patch application
+ with patch('config.main.GenericUpdater') as mock_generic_updater:
+ mock_generic_updater.return_value.apply_patch = MagicMock()
+
+ print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+ # Invocation of the command with the CliRunner
+ result = self.runner.invoke(config.config.commands["apply-patch"],
+ [self.patch_file_path],
+ catch_exceptions=True)
+
+ print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+ # Assertions and verifications
+ self.assertNotEqual(result.exit_code, 0, "Command should fail.")
+ self.assertIn("Failed to apply patch", result.output)
+
+ # Verify mocked_open was called as expected
+ mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+ @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock())
+ def test_replace_multiasic(self):
+ # Mock open to simulate file reading
+ mock_replace_content = copy.deepcopy(self.all_config)
+ with patch('builtins.open', mock_open(read_data=json.dumps(mock_replace_content)), create=True) as mocked_open:
+ # Mock GenericUpdater to avoid actual patch application
+ with patch('config.main.GenericUpdater') as mock_generic_updater:
+ mock_generic_updater.return_value.replace_all = MagicMock()
+
+ print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+ # Invocation of the command with the CliRunner
+ result = self.runner.invoke(config.config.commands["replace"],
+ [self.replace_file_path],
+ catch_exceptions=True)
+
+ print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+ # Assertions and verifications
+ self.assertEqual(result.exit_code, 0, "Command should succeed")
+ self.assertIn("Config replaced successfully.", result.output)
+
+ # Verify mocked_open was called as expected
+ mocked_open.assert_called_with(self.replace_file_path, 'r')
+
+ @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock())
+ def test_replace_multiasic_missing_scope(self):
+ # Mock open to simulate file reading
+ mock_replace_content = copy.deepcopy(self.all_config)
+ mock_replace_content.pop("asic0")
+ with patch('builtins.open', mock_open(read_data=json.dumps(mock_replace_content)), create=True):
+ print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+ # Invocation of the command with the CliRunner
+ result = self.runner.invoke(config.config.commands["replace"],
+ [self.replace_file_path],
+ catch_exceptions=True)
+
+ print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+ # Assertions and verifications
+ self.assertNotEqual(result.exit_code, 0, "Command should fail")
+ self.assertIn("Failed to replace config", result.output)
+
+ @patch('generic_config_updater.generic_updater.subprocess.Popen')
+ @patch('generic_config_updater.generic_updater.Util.ensure_checkpoints_dir_exists', mock.Mock(return_value=True))
+ @patch('generic_config_updater.generic_updater.Util.save_json_file', MagicMock())
+ def test_checkpoint_multiasic(self, mock_subprocess_popen):
+ allconfigs = copy.deepcopy(self.all_config)
+
+ # Create mock instances for each subprocess call
+ mock_instance_localhost = MagicMock()
+ mock_instance_localhost.communicate.return_value = (json.dumps(allconfigs["localhost"]), 0)
+ mock_instance_localhost.returncode = 0
+
+ mock_instance_asic0 = MagicMock()
+ mock_instance_asic0.communicate.return_value = (json.dumps(allconfigs["asic0"]), 0)
+ mock_instance_asic0.returncode = 0
+
+ mock_instance_asic1 = MagicMock()
+ mock_instance_asic1.communicate.return_value = (json.dumps(allconfigs["asic1"]), 0)
+ mock_instance_asic1.returncode = 0
+
+ # Setup side effect to return different mock instances based on input arguments
+ def side_effect(*args, **kwargs):
+ if "asic" not in args[0]:
+ return mock_instance_localhost
+ elif "asic0" in args[0]:
+ return mock_instance_asic0
+ elif "asic1" in args[0]:
+ return mock_instance_asic1
+ else:
+ return MagicMock() # Default case
+
+ mock_subprocess_popen.side_effect = side_effect
+
+ checkpointname = "checkpointname"
+ print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+ # Invocation of the command with the CliRunner
+ result = self.runner.invoke(config.config.commands["checkpoint"],
+ [checkpointname],
+ catch_exceptions=True)
+
+ print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+ # Assertions and verifications
+ self.assertEqual(result.exit_code, 0, "Command should succeed")
+ self.assertIn("Checkpoint created successfully.", result.output)
+
+ @patch('generic_config_updater.generic_updater.Util.check_checkpoint_exists', mock.Mock(return_value=True))
+ @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock())
+ @patch('generic_config_updater.generic_updater.Util.get_checkpoint_content')
+ def test_rollback_multiasic(self, mock_get_checkpoint_content):
+ mock_get_checkpoint_content.return_value = copy.deepcopy(self.all_config)
+ checkpointname = "checkpointname"
+ print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+ # Invocation of the command with the CliRunner
+ result = self.runner.invoke(config.config.commands["rollback"],
+ [checkpointname],
+ catch_exceptions=True)
+
+ print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+ # Assertions and verifications
+ self.assertEqual(result.exit_code, 0, "Command should succeed")
+ self.assertIn("Config rolled back successfully.", result.output)
+
+ @patch('generic_config_updater.generic_updater.Util.checkpoints_dir_exist', mock.Mock(return_value=True))
+ @patch('generic_config_updater.generic_updater.Util.get_checkpoint_names',
+ mock.Mock(return_value=["checkpointname"]))
+ def 
test_list_checkpoint_multiasic(self): + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["list-checkpoints"], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("checkpointname", result.output) + + @patch('generic_config_updater.generic_updater.Util.delete_checkpoint', MagicMock()) + @patch('generic_config_updater.generic_updater.Util.check_checkpoint_exists', mock.Mock(return_value=True)) + def test_delete_checkpoint_multiasic(self): + checkpointname = "checkpointname" + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.delete_checkpoint = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["delete-checkpoint"], + [checkpointname], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Checkpoint deleted successfully.", result.output) + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -2741,4 +3728,4 @@ def teardown_class(cls): from .mock_tables import dbconnector from .mock_tables import mock_single_asic importlib.reload(mock_single_asic) - dbconnector.load_database_config() \ No newline at end of file + dbconnector.load_database_config() diff --git a/tests/conftest.py b/tests/conftest.py index c3bb69af71c..5dd31d523a2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -317,7 +317,7 @@ def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace="", vtysh_shell_cmd else: return "" - def mock_run_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RVTYSH_COMMAND): + def mock_run_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RVTYSH_COMMAND, exit_on_fail=True): if m_asic_json_file.startswith('bgp_v4_network') or \ m_asic_json_file.startswith('bgp_v6_network'): return mock_show_bgp_network_multi_asic(m_asic_json_file) @@ -335,7 +335,8 @@ def mock_run_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RVT else: return "" - def mock_run_show_sum_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND): + def mock_run_show_sum_bgp_command( + vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND, exit_on_fail=True): if vtysh_cmd == "show ip bgp summary json": m_asic_json_file = 'no_bgp_neigh.json' else: @@ -350,7 +351,8 @@ def mock_run_show_sum_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=cons else: return "" - def mock_run_show_summ_bgp_command_no_ext_neigh_on_all_asic(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND): + def mock_run_show_summ_bgp_command_no_ext_neigh_on_all_asic( + vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND, exit_on_fail=True): if vtysh_cmd == "show ip bgp summary json": m_asic_json_file = 'no_ext_bgp_neigh.json' else: @@ -365,7 +367,8 @@ def mock_run_show_summ_bgp_command_no_ext_neigh_on_all_asic(vtysh_cmd, bgp_names else: return "" - def mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND): + def 
mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1( + vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND, exit_on_fail=True): if vtysh_cmd == "show ip bgp summary json": if bgp_namespace == "asic1": m_asic_json_file = 'no_ext_bgp_neigh.json' @@ -383,6 +386,13 @@ def mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1(vtysh_cmd, bgp_namespac else: return "" + def mock_multi_asic_list(): + return ["asic0", "asic1"] + + # mock multi-asic list + if request.param == "bgp_v4_network_all_asic": + multi_asic.get_namespace_list = mock_multi_asic_list + _old_run_bgp_command = bgp_util.run_bgp_command if request.param == 'ip_route_for_int_ip': bgp_util.run_bgp_command = mock_run_bgp_command_for_static diff --git a/tests/console_mock/dev/ttyACM1 b/tests/console_mock/dev/ttyACM1 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/console_mock/dev/ttyUSB0 b/tests/console_mock/dev/ttyUSB0 new file mode 100644 index 00000000000..e69de29bb2d diff --git a/tests/console_test.py b/tests/console_test.py index 528f5f4ba8a..4a52a3c52e6 100644 --- a/tests/console_test.py +++ b/tests/console_test.py @@ -14,10 +14,15 @@ from click.testing import CliRunner from utilities_common.db import Db -from consutil.lib import * +from consutil.lib import ConsolePortProvider, ConsolePortInfo, ConsoleSession, SysInfoProvider, DbUtils, \ + InvalidConfigurationError, LineBusyError, LineNotFoundError, ConnectionFailedError from sonic_py_common import device_info from jsonpatch import JsonPatchConflict +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +CONSOLE_MOCK_DIR = SCRIPT_DIR + "/console_mock" + + class TestConfigConsoleCommands(object): @classmethod def setup_class(cls): @@ -543,17 +548,15 @@ def test_sys_info_provider_init_device_prefix_plugin(self): with mock.patch("builtins.open", mock.mock_open(read_data="C0-")): SysInfoProvider.init_device_prefix() assert SysInfoProvider.DEVICE_PREFIX == "/dev/C0-" - SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" - @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=("/dev/ttyUSB0\n/dev/ttyACM1", ""))) def test_sys_info_provider_list_console_ttys(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = CONSOLE_MOCK_DIR + "/dev/ttyUSB" ttys = SysInfoProvider.list_console_ttys() print(SysInfoProvider.DEVICE_PREFIX) assert len(ttys) == 1 - @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=("", "ls: cannot access '/dev/ttyUSB*': No such file or directory"))) def test_sys_info_provider_list_console_ttys_device_not_exists(self): + SysInfoProvider.DEVICE_PREFIX = CONSOLE_MOCK_DIR + "/dev_not_exist/ttyUSB" ttys = SysInfoProvider.list_console_ttys() assert len(ttys) == 0 @@ -563,7 +566,7 @@ def test_sys_info_provider_list_console_ttys_device_not_exists(self): """ @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=all_active_processes_output)) def test_sys_info_provider_list_active_console_processes(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" procs = SysInfoProvider.list_active_console_processes() assert len(procs) == 1 assert "0" in procs @@ -572,7 +575,7 @@ def test_sys_info_provider_list_active_console_processes(self): active_process_output = "13751 Wed Mar 6 08:31:35 2019 /usr/bin/sudo picocom -b 9600 -f n /dev/ttyUSB1" @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=active_process_output)) def 
test_sys_info_provider_get_active_console_process_info_exists(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" proc = SysInfoProvider.get_active_console_process_info("13751") assert proc is not None assert proc == ("1", "13751", "Wed Mar 6 08:31:35 2019") @@ -580,7 +583,7 @@ def test_sys_info_provider_get_active_console_process_info_exists(self): active_process_empty_output = "" @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=active_process_empty_output)) def test_sys_info_provider_get_active_console_process_info_nonexists(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" proc = SysInfoProvider.get_active_console_process_info("2") assert proc is None diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index e21539766a1..cdf4251bd75 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -74,24 +74,27 @@ class TestVersionComparison(object): def setup_class(cls): cls.version_comp_list = [ # Old format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_1_0_2', 'result' : False }, - { 'v1' : 'version_1_0_2', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_2_0_1', 'result' : False }, - { 'v1' : 'version_2_0_1', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_1_0_2', 'result': False}, + {'v1': 'version_1_0_2', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_2_0_1', 'result': False}, + {'v1': 'version_2_0_1', 'v2': 'version_1_0_1', 'result': True}, # New format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 'version_master_01', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_1_0_1', 'result': True}, # New format v.s new format - { 'v1' : 'version_202311_01', 'v2' : 'version_202311_02', 'result' : False }, - { 'v1' : 'version_202311_02', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_202305_01', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_202305_01', 'result' : True }, - { 'v1' : 'version_202311_01', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 'version_master_01', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_master_01', 'v2' : 'version_master_02', 'result' : False }, - { 'v1' : 'version_master_02', 'v2' : 'version_master_01', 'result' : True }, + {'v1': 'version_202311_01', 'v2': 'version_202311_02', 'result': False}, + {'v1': 'version_202311_02', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_202305_01', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_202305_01', 'result': True}, + {'v1': 'version_202405_01', 'v2': 'version_202411_01', 'result': False}, + {'v1': 'version_202411_01', 'v2': 'version_202405_01', 'result': True}, + {'v1': 'version_202411_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 
'version_master_01', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_master_01', 'v2': 'version_master_02', 'result': False}, + {'v1': 'version_master_02', 'v2': 'version_master_01', 'result': True}, ] def test_version_comparison(self): @@ -383,7 +386,7 @@ def test_dns_nameserver_migrator(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'dns-nameserver-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_keys = dbmgtr.configDB.keys(dbmgtr.configDB.CONFIG_DB, 'DNS_NAMESERVER*') expected_keys = expected_db.cfgdb.keys(expected_db.cfgdb.CONFIG_DB, 'DNS_NAMESERVER*') @@ -895,7 +898,7 @@ def test_init(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initialize', mock.MagicMock()) def test_init_no_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace=None, operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace=None, operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -903,7 +906,7 @@ def test_init_no_namespace(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isGlobalInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initializeGlobalConfig', mock.MagicMock()) def test_init_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace="asic0", operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace="asic0", operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -940,7 +943,7 @@ def test_dns_nameserver_migrator_minigraph(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-minigraph-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") @@ -956,7 +959,7 @@ def test_dns_nameserver_migrator_configdb(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-configdb-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") diff --git a/tests/disk_check_test.py b/tests/disk_check_test.py index 82b8b16ff65..ac541b05b95 100644 --- a/tests/disk_check_test.py +++ b/tests/disk_check_test.py @@ -1,7 +1,6 @@ import sys import syslog from unittest.mock import patch -import pytest import subprocess sys.path.append("scripts") @@ -178,3 +177,7 @@ def test_readonly(self, mock_proc, mock_log): assert max_log_lvl == syslog.LOG_ERR + @classmethod + def teardown_class(cls): + subprocess.run("rm -rf /tmp/tmp*", shell=True) # cleanup the temporary dirs + print("TEARDOWN") diff --git a/tests/drops_group_test.py b/tests/drops_group_test.py index d374275a487..93f99e3f1b6 
100644
--- a/tests/drops_group_test.py
+++ b/tests/drops_group_test.py
@@ -3,6 +3,7 @@
import shutil
from click.testing import CliRunner
+from utilities_common.cli import UserCache
test_path = os.path.dirname(os.path.abspath(__file__))
modules_path = os.path.dirname(test_path)
@@ -20,13 +21,13 @@
SWITCH_EGRESS_DROPS 2
PORT_INGRESS_DROPS
- IP_HEADER_ERROR
- NO_L3_HEADER
+ IP_HEADER_ERROR
+ NO_L3_HEADER
SWITCH_EGRESS_DROPS
- ACL_ANY
- L2_ANY
- L3_ANY
+ ACL_ANY
+ L2_ANY
+ L3_ANY
"""
expected_counter_configuration = """\
@@ -56,6 +57,21 @@
sonic_drops_test 1000 0
"""
+expected_counts_voq = """\
+ SWITCH-ID PKT_INTEGRITY_ERR
+---------------- -------------------
+sonic_drops_test 500
+
+ IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2
+--------- ------- -------- ---------- -------- ---------- --------- ---------
+Ethernet0 D 10 100 0 0 80 20
+Ethernet4 N/A 0 1000 0 0 800 100
+Ethernet8 N/A 100 10 0 0 10 0
+
+ DEVICE SWITCH_DROPS lowercase_counter
+---------------- -------------- -------------------
+sonic_drops_test 1000 0
+"""
expected_counts_with_group = """
DEVICE SWITCH_DROPS
---------------- --------------
@@ -82,14 +98,17 @@
sonic_drops_test 0 0
"""
-dropstat_path = "/tmp/dropstat-27"
+
+def remove_tmp_dropstat_file():
+ # remove the tmp dropstat cache
+ cache = UserCache("dropstat")
+ cache.remove_all()
class TestDropCounters(object):
@classmethod
def setup_class(cls):
print("SETUP")
- if os.path.exists(dropstat_path):
- shutil.rmtree(dropstat_path)
+ remove_tmp_dropstat_file()
os.environ["PATH"] += os.pathsep + scripts_path
os.environ["UTILITIES_UNIT_TESTING"] = "1"
@@ -117,6 +136,14 @@ def test_show_counts(self):
print(result.output)
assert result.output == expected_counts
+ def test_show_counts_voq(self):
+ runner = CliRunner()
+ os.environ["VOQ_DROP_COUNTER_TESTING"] = "1"
+ result = runner.invoke(show.cli.commands["dropcounters"].commands["counts"], [])
+ os.environ["VOQ_DROP_COUNTER_TESTING"] = "0"
+ print(result.output)
+ assert result.output == expected_counts_voq
+
def test_show_counts_with_group(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["dropcounters"].commands["counts"], ["-g", "PACKET_DROPS"])
diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py
index cc4c049806e..a8a334cb92d 100644
--- a/tests/fabricstat_test.py
+++ b/tests/fabricstat_test.py
@@ -200,6 +200,45 @@
7 0 0 0
"""
+multi_asic_fabric_rate = """\
+
+ ASIC Link ID Rx Data Mbps Tx Data Mbps
+------ --------- -------------- --------------
+ asic0 0 0 19.8
+ asic0 1 0 19.8
+ asic0 2 0 39.8
+ asic0 3 0 39.8
+ asic0 4 0 39.8
+ asic0 5 0 39.8
+ asic0 6 0 39.3
+ asic0 7 0 39.3
+
+ ASIC Link ID Rx Data Mbps Tx Data Mbps
+------ --------- -------------- --------------
+ asic1 0 0 0
+ asic1 1 0 0
+ asic1 2 0 0
+ asic1 3 0 0
+ asic1 4 0 0
+ asic1 5 0 0
+ asic1 6 0 0
+ asic1 7 0 0
+"""
+
+multi_asic_fabric_rate_asic0 = """\
+
+ ASIC Link ID Rx Data Mbps Tx Data Mbps
+------ --------- -------------- --------------
+ asic0 0 0 19.8
+ asic0 1 0 19.8
+ asic0 2 0 39.8
+ asic0 3 0 39.8
+ asic0 4 0 39.8
+ asic0 5 0 39.8
+ asic0 6 0 39.3
+ asic0 7 0 39.3
+"""
+
class TestFabricStat(object):
@classmethod
def setup_class(cls):
@@ -348,6 +387,20 @@ def test_multi_show_fabric_isolation_asic(self):
assert return_code == 0
assert result == multi_asic_fabric_isolation_asic0
+ def test_multi_show_fabric_rate(self):
+ return_code, result = get_result_and_return_code(['fabricstat', '-s'])
+ print("return_code: {}".format(return_code))
+ print("result = {}".format(result))
+ assert return_code == 0
+ 
assert result == multi_asic_fabric_rate + + def test_multi_show_fabric_rate_asic(self): + return_code, result = get_result_and_return_code(['fabricstat', '-s', '-n', 'asic0']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == multi_asic_fabric_rate_asic0 + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py index 4c9b33c3a4d..7aad111f181 100644 --- a/tests/generic_config_updater/change_applier_test.py +++ b/tests/generic_config_updater/change_applier_test.py @@ -242,10 +242,11 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): running_config = copy.deepcopy(read_data["running_data"]) json_changes = copy.deepcopy(read_data["json_changes"]) + generic_config_updater.change_applier.ChangeApplier.updater_conf = None generic_config_updater.change_applier.UPDATER_CONF_FILE = CONF_FILE generic_config_updater.change_applier.set_verbose(True) generic_config_updater.services_validator.set_verbose(True) - + applier = generic_config_updater.change_applier.ChangeApplier() debug_print("invoked applier") @@ -254,7 +255,7 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): # Take copy for comparison start_running_config = copy.deepcopy(running_config) - + debug_print("main: json_change_index={}".format(json_change_index)) applier.apply(mock_obj()) @@ -297,4 +298,3 @@ def test_apply__calls_apply_change_to_config_db(self): # Assert applier.config_wrapper.apply_change_to_config_db.assert_has_calls([call(change)]) - diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py index 96c25e3552b..8480dc23b0b 100644 --- a/tests/generic_config_updater/generic_updater_test.py +++ b/tests/generic_config_updater/generic_updater_test.py @@ -2,7 +2,7 @@ import os import shutil import unittest -from unittest.mock import MagicMock, Mock, call +from unittest.mock import MagicMock, Mock, call, patch from .gutest_helpers import create_side_effect_dict, Files import generic_config_updater.generic_updater as gu @@ -124,6 +124,8 @@ def __create_config_replacer(self, changes=None, verified_same_config=True): return gu.ConfigReplacer(patch_applier, config_wrapper, patch_wrapper) + +@patch('generic_config_updater.generic_updater.get_config_json', MagicMock(return_value={})) class TestFileSystemConfigRollbacker(unittest.TestCase): def setUp(self): self.checkpoints_dir = os.path.join(os.getcwd(),"checkpoints") diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index a2a776c0bb2..4a16a5ca4f0 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -76,6 +76,28 @@ def test_ctor__default_values_set(self): self.assertEqual("/usr/local/yang-models", gu_common.YANG_DIR) + @patch('generic_config_updater.gu_common.subprocess.Popen') + def test_get_config_db_as_text(self, mock_popen): + config_wrapper = gu_common.ConfigWrapper() + mock_proc = MagicMock() + mock_proc.communicate = MagicMock( + return_value=("[]", None)) + mock_proc.returncode = 0 + mock_popen.return_value = mock_proc + actual = config_wrapper._get_config_db_as_text() + expected = "[]" + self.assertEqual(actual, expected) + + config_wrapper = gu_common.ConfigWrapper(scope="asic0") + mock_proc = MagicMock() + mock_proc.communicate = MagicMock( + 
return_value=("[]", None)) + mock_proc.returncode = 0 + mock_popen.return_value = mock_proc + actual = config_wrapper._get_config_db_as_text() + expected = "[]" + self.assertEqual(actual, expected) + def test_get_sonic_yang_as_json__returns_sonic_yang_as_json(self): # Arrange config_wrapper = self.config_wrapper_mock diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py index e8b277618f1..0102cfff000 100644 --- a/tests/generic_config_updater/multiasic_change_applier_test.py +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -9,25 +9,124 @@ class TestMultiAsicChangeApplier(unittest.TestCase): - def test_extract_scope(self): + @patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_multiasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = True test_paths_expectedresults = { - "/asic0/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic01/PORTCHANNEL/PortChannel102/admin_status": (True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/sometable/data": (True, "", "/sometable/data"), - "": (False, "", ""), - "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (False, "", ""), + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + False, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), + "/asic77": ( + False, "", "" + ), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + } + + for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): + try: + scope, remainder = extract_scope(test_path) + assert(scope == expectedscope) + assert(remainder == expectedremainder) + except Exception: + assert(not result) + + 
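The two expectation tables above effectively specify extract_scope: a leading "/asicN" or "/localhost" component is split off as the scope, mis-cased or malformed prefixes are rejected, and an unscoped path is only legal on a single-ASIC device. A minimal sketch consistent with those expectations (illustrative only; extract_scope_sketch and its error messages are hypothetical stand-ins, not the shipped generic_config_updater implementation):
```python
import re
from sonic_py_common import multi_asic

def extract_scope_sketch(path):
    """Split "/<scope>/<table>/..." into (scope, remainder)."""
    if not path.startswith("/"):
        raise ValueError("path must start with '/'")  # "", "localhostabc/..."
    head, _, rest = path[1:].partition("/")
    if re.fullmatch(r"asic\d+", head) or head == "localhost":
        if not rest:
            raise ValueError("missing remainder after scope")  # e.g. "/asic77"
        return head, "/" + rest
    if re.fullmatch(r"asic\d+|localhost", head, re.IGNORECASE):
        # Mis-cased prefixes such as "/Asic0" or "/LocalHost" are rejected
        # rather than silently treated as table names.
        raise ValueError("scope must be lowercase")
    if multi_asic.is_multi_asic():
        # On multi-ASIC platforms an explicit scope is mandatory, so
        # "/sometable/data" is an error here but valid on single-ASIC.
        raise ValueError("scope is required on multi-ASIC platforms")
    return "", path  # single-ASIC: the whole path applies to the host
```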
@patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_singleasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = False + test_paths_expectedresults = { + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + True, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), "/asic77": (False, "", ""), - "/Asic0/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/Localhost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asci1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asicx/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asic-12/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), } for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): @@ -35,12 +134,12 @@ def test_extract_scope(self): scope, remainder = extract_scope(test_path) assert(scope == expectedscope) assert(remainder == expectedremainder) - except Exception as e: - assert(result == False) + except Exception: + assert(not result) @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) - def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector mock_db = MagicMock() mock_ConfigDBConnector.return_value = mock_db @@ -67,7 +166,7 @@ def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_r } } - # Instantiate ChangeApplier with the default namespace + # Instantiate ChangeApplier with the default scope applier = generic_config_updater.change_applier.ChangeApplier() # Prepare a change object or data that applier.apply would use @@ -81,7 +180,7 @@ def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_r @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) 
     @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True)
-    def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_running_config):
+    def test_apply_change_given_scope(self, mock_ConfigDBConnector, mock_get_running_config):
         # Setup mock for ConfigDBConnector
         mock_db = MagicMock()
         mock_ConfigDBConnector.return_value = mock_db
@@ -108,8 +207,8 @@ def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_run
             }
         }
 
-        # Instantiate ChangeApplier with the default namespace
-        applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0")
+        # Instantiate ChangeApplier with the given scope
+        applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0")
 
         # Prepare a change object or data that applier.apply would use
         change = MagicMock()
@@ -117,7 +216,7 @@ def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_run
         # Call the apply method with the change object
         applier.apply(change)
 
-        # Assert ConfigDBConnector called with the correct namespace
+        # Assert ConfigDBConnector called with the correct scope
         mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0")
 
     @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True)
@@ -129,9 +228,9 @@ def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_con
         # Setup mock for json.load to return some running configuration
        mock_get_running_config.side_effect = Exception("Failed to get running config")
 
-        # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment
-        namespace = "asic0"
-        applier = generic_config_updater.change_applier.ChangeApplier(namespace=namespace)
+        # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment
+        scope = "asic0"
+        applier = generic_config_updater.change_applier.ChangeApplier(scope=scope)
 
         # Prepare a change object or data that applier.apply would use
         change = MagicMock()
@@ -159,8 +258,8 @@ def test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, moc
             }
         }
 
-        # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment
-        applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0")
+        # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment
+        applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0")
 
         # Prepare a change object or data that applier.apply would use, simulating a patch that requires non-empty tables
         change = MagicMock()
diff --git a/tests/generic_config_updater/multiasic_generic_updater_test.py b/tests/generic_config_updater/multiasic_generic_updater_test.py
index 4a55eb98be3..5acdd391f03 100644
--- a/tests/generic_config_updater/multiasic_generic_updater_test.py
+++ b/tests/generic_config_updater/multiasic_generic_updater_test.py
@@ -19,7 +19,7 @@ class TestMultiAsicPatchApplier(unittest.TestCase):
     @patch('generic_config_updater.gu_common.PatchWrapper.simulate_patch')
     @patch('generic_config_updater.generic_updater.ChangeApplier')
     def test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_patch, mock_get_config, mock_get_empty_tables):
-        namespace = "asic0"
+        scope = "asic0"
         patch_data = jsonpatch.JsonPatch([
             {
                 "op": "add",
@@ -158,10 +158,10 @@ def test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_
             }
         }
 
-        patch_applier = generic_config_updater.generic_updater.PatchApplier(namespace=namespace)
+        patch_applier = generic_config_updater.generic_updater.PatchApplier(scope=scope)
 
         # Apply the patch and verify
         patch_applier.apply(patch_data)
 
-        # Assertions to ensure the namespace is correctly used in underlying calls
-        mock_ChangeApplier.assert_called_once_with(namespace=namespace)
+        # Assertions to ensure the scope is correctly used in underlying calls
+        mock_ChangeApplier.assert_called_once_with(scope=scope)
diff --git a/tests/ip_config_test.py b/tests/ip_config_test.py
index ffa29310939..9338d341e44 100644
--- a/tests/ip_config_test.py
+++ b/tests/ip_config_test.py
@@ -130,6 +130,35 @@ def test_add_del_interface_valid_ipv4(self):
         assert mock_run_command.call_count == 1
         assert ('Eth36.10', '32.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE')
 
+        # config int ip add vlan1000 10.11.20.1/24 as secondary
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Vlan1000", "10.11.20.1/24", "--secondary"], obj=obj)
+        assert result.exit_code == 0
+        assert ('Vlan1000', '10.11.20.1/24') in db.cfgdb.get_table('VLAN_INTERFACE')
+        assert db.cfgdb.get_table('VLAN_INTERFACE')[('Vlan1000', '10.11.20.1/24')]['secondary'] == "true"
+
+        # config int ip add vlan2000 10.21.20.1/24 as secondary
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Vlan2000", "10.21.20.1/24", "-s"], obj=obj)
+        assert result.exit_code == 0
+        assert ('Vlan2000', '10.21.20.1/24') in db.cfgdb.get_table('VLAN_INTERFACE')
+        assert db.cfgdb.get_table('VLAN_INTERFACE')[('Vlan2000', '10.21.20.1/24')]['secondary'] == "true"
+
+        # config int ip add vlan4000 10.16.20.1/24 as primary and make sure secondary is not present in table
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Vlan4000", "10.16.20.1/24"], obj=obj)
+        assert result.exit_code == 0
+        assert ('Vlan4000', '10.16.20.1/24') in db.cfgdb.get_table('VLAN_INTERFACE')
+        assert 'secondary' not in db.cfgdb.get_table('VLAN_INTERFACE')[('Vlan4000', '10.16.20.1/24')]
+
+        # create vlan 500
+        result = runner.invoke(config.config.commands["vlan"].commands["add"], ["500"], obj=db)
+        # config int ip add vlan500 10.21.20.1/24 as secondary - should fail since no primary IP is configured on Vlan500
+        ERR_MSG = "Error: Primary for the interface Vlan500 is not set, so skipping adding the interface"
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Vlan500", "10.21.20.1/24", "--secondary"], obj=obj)
+        assert result.exit_code != 0
+        assert ERR_MSG in result.output
 
     def test_add_interface_invalid_ipv4(self):
         db = Db()
diff --git a/tests/ldap_input/assert_show_output.py b/tests/ldap_input/assert_show_output.py
new file mode 100644
index 00000000000..c3ecaf472f8
--- /dev/null
+++ b/tests/ldap_input/assert_show_output.py
@@ -0,0 +1,20 @@
+"""
+Module holding the expected values of show CLI command outputs for ldap_test.py
+"""
+
+show_ldap_global = """\
+BIND DN                       BIND PASSWORD      BIND TIMEOUT    VERSION  BASE DN              PORT    TIMEOUT
+----------------------------  ---------------  --------------  ---------  -----------------  ------  ---------
+cn=ldapadm,dc=test1,dc=test2  password                      3          3  dc=test1,dc=test2     389          2
+"""
+
+show_ldap_server = """\
+HOSTNAME      PRIORITY
+----------  ----------
+10.0.0.1             1
+"""
+
+show_ldap_server_deleted = """\
+HOSTNAME    PRIORITY
+----------  ----------
+"""
diff --git a/tests/ldap_input/default_config_db.json b/tests/ldap_input/default_config_db.json
new file mode 100644
index 00000000000..95aed20118a
--- /dev/null
+++ 
b/tests/ldap_input/default_config_db.json @@ -0,0 +1,11 @@ +{ + "LDAP|GLOBAL": { + "bind_dn": "cn=ldapadm,dc=test1,dc=test2", + "base_dn": "dc=test1,dc=test2", + "bind_password": "password", + "timeout": "2", + "bind_timeout": "3", + "version" : 3, + "port" : 389 + } +} diff --git a/tests/ldap_input/server_config_db.json b/tests/ldap_input/server_config_db.json new file mode 100644 index 00000000000..2fdea847485 --- /dev/null +++ b/tests/ldap_input/server_config_db.json @@ -0,0 +1,5 @@ +{ + "LDAP_SERVER|10.0.0.1": { + "priority": 1 + } +} diff --git a/tests/ldap_test.py b/tests/ldap_test.py new file mode 100644 index 00000000000..3ac824b4466 --- /dev/null +++ b/tests/ldap_test.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +import os +import logging +import show.main as show +import config.main as config + +from .ldap_input import assert_show_output +from utilities_common.db import Db +from click.testing import CliRunner +from .mock_tables import dbconnector + +logger = logging.getLogger(__name__) +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, "ldap_input") + +SUCCESS = 0 +ERROR = 1 +INVALID_VALUE = 'INVALID' +EXP_GOOD_FLOW = 1 +EXP_BAD_FLOW = 0 + + +class TestLdap: + @classmethod + def setup_class(cls): + logger.info("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + logger.info("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def verify_ldap_global_output(self, db, runner, output, expected=EXP_GOOD_FLOW): + result = runner.invoke(show.cli.commands["ldap"].commands["global"], [], obj=db) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + logger.info("\n" + result.output) + logger.info(result.exit_code) + + if expected: # good flow expected (default) + assert result.exit_code == SUCCESS + assert result.output == output + else: # bad flow expected + assert result.exit_code == ERROR + + def verify_ldap_server_output(self, db, runner, output, expected=EXP_GOOD_FLOW): + result = runner.invoke(show.cli.commands["ldap-server"], [], obj=db) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + logger.info("\n" + result.output) + logger.info(result.exit_code) + + if expected: # good flow expected (default) + assert result.exit_code == SUCCESS + assert result.output == output + else: # bad flow expected + assert result.exit_code == ERROR + + def ldap_global_set_policy(self, runner, db, attr, value, expected=EXP_GOOD_FLOW): + result = runner.invoke( + config.config.commands["ldap"].commands["global"].commands[attr], + [value], obj=db + ) + if expected: # good flow expected (default) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + else: # bad flow expected + assert result.exit_code == ERROR + + def ldap_server_set_policy(self, runner, db, value, expected=EXP_GOOD_FLOW): + result = runner.invoke( + config.config.commands["ldap-server"].commands["add"], + value, obj=db + ) + + if expected: # good flow expected (default) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + else: # bad flow expected + assert result.exit_code == ERROR + + def ldap_server_del_policy(self, runner, db, value, expected=EXP_GOOD_FLOW): + result = runner.invoke( + config.config.commands["ldap-server"].commands["delete"], + value, obj=db + ) + if expected: # good flow 
expected (default) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + else: # bad flow expected + assert result.exit_code == ERROR + + # LDAP + + def test_ldap_global_feature_enabled(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'default_config_db.json') + db = Db() + runner = CliRunner() + + self.ldap_global_set_policy(runner, db, "base-dn", "dc=test1,dc=test2") + self.ldap_global_set_policy(runner, db, "bind-dn", "cn=ldapadm,dc=test1,dc=test2") + self.ldap_global_set_policy(runner, db, "bind-password", "password") + self.ldap_global_set_policy(runner, db, "bind-timeout", "3") + self.ldap_global_set_policy(runner, db, "port", "389") + self.ldap_global_set_policy(runner, db, "timeout", "2") + self.ldap_global_set_policy(runner, db, "version", "3") + + self.verify_ldap_global_output(db, runner, assert_show_output.show_ldap_global) + + def test_ldap_server(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'server_config_db.json') + db = Db() + runner = CliRunner() + + self.ldap_server_set_policy(runner, db, ["10.0.0.1", "--priority", "1"]) + self.verify_ldap_server_output(db, runner, assert_show_output.show_ldap_server) + + self.ldap_server_del_policy(runner, db, ["10.0.0.1"]) + self.verify_ldap_server_output(db, runner, assert_show_output.show_ldap_server_deleted) diff --git a/tests/lldp_test.py b/tests/lldp_test.py index 89177338e0c..1d6e55152c3 100644 --- a/tests/lldp_test.py +++ b/tests/lldp_test.py @@ -2,6 +2,7 @@ from click.testing import CliRunner from utilities_common.general import load_module_from_source +from importlib import reload test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -83,6 +84,22 @@ def test_get_info(self): output = lldp.get_summary_output(lldp_detail_info=True) assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n') + def test_get_info_multi_asic(self): + from .mock_tables import mock_multi_asic + from .mock_tables import dbconnector + reload(mock_multi_asic) + dbconnector.load_namespace_config() + lldp = lldpshow.Lldpshow() + from .mock_tables import mock_single_asic + reload(mock_single_asic) + dbconnector.load_namespace_config() + lldp.lldp_instance = [''] + lldp.lldpraw = expected_lldpctl_xml_output + lldp.get_info(lldp_detail_info=True, lldp_port='Ethernet0') + lldp.parse_info(lldp_detail_info=True) + output = lldp.get_summary_output(lldp_detail_info=True) + assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n') + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 4f3f13c0ae0..5ae87ea9751 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -256,7 +256,12 @@ "FABRIC_PORT_TABLE|PORT0" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "79" + "REMOTE_PORT": "79", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "19.8", + "OLD_TX_DATA": "18490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT1" : { "STATUS": "down" @@ -264,7 +269,12 @@ "FABRIC_PORT_TABLE|PORT2" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "94" + "REMOTE_PORT": "94", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.8", + "OLD_TX_DATA": "24490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT3" : { "STATUS": "down" @@ -272,7 +282,12 @@ "FABRIC_PORT_TABLE|PORT4" : { "STATUS": 
"up", "REMOTE_MOD": "0", - "REMOTE_PORT": "85" + "REMOTE_PORT": "85", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.8", + "OLD_TX_DATA": "24490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT5" : { "STATUS": "down" @@ -280,12 +295,22 @@ "FABRIC_PORT_TABLE|PORT6" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "84" + "REMOTE_PORT": "84", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.3", + "OLD_TX_DATA": "24170000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT7" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "93" + "REMOTE_PORT": "93", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.3", + "OLD_TX_DATA": "24190000000", + "LAST_TIME": "1676672799" }, "CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { "ip_address": "127.0.0.1", diff --git a/tests/mock_tables/asic1/asic_db.json b/tests/mock_tables/asic1/asic_db.json new file mode 100644 index 00000000000..1a769b82b56 --- /dev/null +++ b/tests/mock_tables/asic1/asic_db.json @@ -0,0 +1,6 @@ +{ + "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:oid:0x21000000000000": { + "SAI_SWITCH_ATTR_INIT_SWITCH": "true", + "SAI_SWITCH_ATTR_SRC_MAC_ADDRESS": "DE:AD:BE:EF:CA:FE" + } +} diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index c364d8599ef..f9197421571 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -207,6 +207,108 @@ "Ethernet-BP256": "oid:0x1000000000b06", "Ethernet-BP260": "oid:0x1000000000b08" }, + "COUNTERS_PG_NAME_MAP": { + "Ethernet-BP256:0": "oid:100000000b0f0", + "Ethernet-BP256:1": "oid:100000000b0f1", + "Ethernet-BP256:2": "oid:100000000b0f2", + "Ethernet-BP256:3": "oid:100000000b0f3", + "Ethernet-BP256:4": "oid:100000000b0f4", + "Ethernet-BP256:5": "oid:100000000b0f5", + "Ethernet-BP256:6": "oid:100000000b0f6", + "Ethernet-BP256:7": "oid:100000000b0f7", + "Ethernet-BP256:8": "oid:100000000b0f8", + "Ethernet-BP256:9": "oid:100000000b0f9", + "Ethernet-BP256:10": "oid:100000000b0fa", + "Ethernet-BP256:11": "oid:100000000b0fb", + "Ethernet-BP256:12": "oid:100000000b0fc", + "Ethernet-BP256:13": "oid:100000000b0fd", + "Ethernet-BP256:14": "oid:100000000b0fe", + "Ethernet-BP256:15": "oid:100000000b0ff", + "Ethernet-BP260:0": "oid:0x100000000b1f0", + "Ethernet-BP260:1": "oid:0x100000000b1f1", + "Ethernet-BP260:2": "oid:0x100000000b1f2", + "Ethernet-BP260:3": "oid:0x100000000b1f3", + "Ethernet-BP260:4": "oid:0x100000000b1f4", + "Ethernet-BP260:5": "oid:0x100000000b1f5", + "Ethernet-BP260:6": "oid:0x100000000b1f6", + "Ethernet-BP260:7": "oid:0x100000000b1f7", + "Ethernet-BP260:8": "oid:0x100000000b1f8", + "Ethernet-BP260:9": "oid:0x100000000b1f9", + "Ethernet-BP260:10": "oid:0x100000000b1fa", + "Ethernet-BP260:11": "oid:0x100000000b1fb", + "Ethernet-BP260:12": "oid:0x100000000b1fc", + "Ethernet-BP260:13": "oid:0x100000000b1fd", + "Ethernet-BP260:14": "oid:0x100000000b1fe", + "Ethernet-BP260:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000b06", + "oid:100000000b0f1": "oid:0x1000000000b06", + "oid:100000000b0f2": "oid:0x1000000000b06", + "oid:100000000b0f3": "oid:0x1000000000b06", + "oid:100000000b0f4": "oid:0x1000000000b06", + "oid:100000000b0f5": "oid:0x1000000000b06", + "oid:100000000b0f6": "oid:0x1000000000b06", + "oid:100000000b0f7": "oid:0x1000000000b06", + "oid:100000000b0f8": "oid:0x1000000000b06", + "oid:100000000b0f9": "oid:0x1000000000b06", + "oid:100000000b0fa": "oid:0x1000000000b06", + 
"oid:100000000b0fb": "oid:0x1000000000b06", + "oid:100000000b0fc": "oid:0x1000000000b06", + "oid:100000000b0fd": "oid:0x1000000000b06", + "oid:100000000b0fe": "oid:0x1000000000b06", + "oid:100000000b0ff": "oid:0x1000000000b06", + "oid:0x100000000b1f0": "oid:0x1000000000b08", + "oid:0x100000000b1f1": "oid:0x1000000000b08", + "oid:0x100000000b1f2": "oid:0x1000000000b08", + "oid:0x100000000b1f3": "oid:0x1000000000b08", + "oid:0x100000000b1f4": "oid:0x1000000000b08", + "oid:0x100000000b1f5": "oid:0x1000000000b08", + "oid:0x100000000b1f6": "oid:0x1000000000b08", + "oid:0x100000000b1f7": "oid:0x1000000000b08", + "oid:0x100000000b1f8": "oid:0x1000000000b08", + "oid:0x100000000b1f9": "oid:0x1000000000b08", + "oid:0x100000000b1fa": "oid:0x1000000000b08", + "oid:0x100000000b1fb": "oid:0x1000000000b08", + "oid:0x100000000b1fc": "oid:0x1000000000b08", + "oid:0x100000000b1fd": "oid:0x1000000000b08", + "oid:0x100000000b1fe": "oid:0x1000000000b08", + "oid:0x100000000b1ff" : "oid:0x1000000000b08" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + "oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + "oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, "COUNTERS_LAG_NAME_MAP": { "PortChannel0001": "oid:0x60000000005a1", "PortChannel0002": "oid:0x60000000005a2", diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json index 5178c49ca06..6af9e19da44 100644 --- a/tests/mock_tables/chassis_state_db.json +++ b/tests/mock_tables/chassis_state_db.json @@ -4,6 +4,9 @@ }, "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" } } \ No newline at end of file diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index af37538447d..108fa7593df 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -848,6 +848,8 @@ "FEATURE|lldp": { "state": "enabled", "auto_restart": "enabled", + "has_global_scope": "False", + "has_per_asic_scope": "True", "high_mem_alert": "disabled", "set_owner": "kube" }, diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index d62c34cb3c4..2f16c7014d8 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -402,145 +402,169 @@ "SAI_QUEUE_STAT_BYTES": "0", "SAI_QUEUE_STAT_DROPPED_BYTES": "0", "SAI_QUEUE_STAT_DROPPED_PACKETS": "0", - "SAI_QUEUE_STAT_PACKETS": "0" + "SAI_QUEUE_STAT_PACKETS": "0", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "0" }, "COUNTERS:oid:0x15000000000658": { "SAI_QUEUE_STAT_BYTES": "43", "SAI_QUEUE_STAT_DROPPED_BYTES": "1", "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", - "SAI_QUEUE_STAT_PACKETS": "60" + 
"SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "1" }, "COUNTERS:oid:0x15000000000659": { "SAI_QUEUE_STAT_BYTES": "7", "SAI_QUEUE_STAT_DROPPED_BYTES": "21", "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", - "SAI_QUEUE_STAT_PACKETS": "82" + "SAI_QUEUE_STAT_PACKETS": "82", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000065a": { "SAI_QUEUE_STAT_BYTES": "59", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", - "SAI_QUEUE_STAT_PACKETS": "11" + "SAI_QUEUE_STAT_PACKETS": "11", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "12" }, "COUNTERS:oid:0x1500000000065b": { "SAI_QUEUE_STAT_BYTES": "62", "SAI_QUEUE_STAT_DROPPED_BYTES": "40", "SAI_QUEUE_STAT_DROPPED_PACKETS": "35", - "SAI_QUEUE_STAT_PACKETS": "36" + "SAI_QUEUE_STAT_PACKETS": "36", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "11" }, "COUNTERS:oid:0x1500000000065c": { "SAI_QUEUE_STAT_BYTES": "91", "SAI_QUEUE_STAT_DROPPED_BYTES": "88", "SAI_QUEUE_STAT_DROPPED_PACKETS": "2", - "SAI_QUEUE_STAT_PACKETS": "49" + "SAI_QUEUE_STAT_PACKETS": "49", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "15" }, "COUNTERS:oid:0x1500000000065d": { "SAI_QUEUE_STAT_BYTES": "17", "SAI_QUEUE_STAT_DROPPED_BYTES": "74", "SAI_QUEUE_STAT_DROPPED_PACKETS": "94", - "SAI_QUEUE_STAT_PACKETS": "33" + "SAI_QUEUE_STAT_PACKETS": "33", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "17" }, "COUNTERS:oid:0x1500000000065e": { "SAI_QUEUE_STAT_BYTES": "71", "SAI_QUEUE_STAT_DROPPED_BYTES": "33", "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", - "SAI_QUEUE_STAT_PACKETS": "40" + "SAI_QUEUE_STAT_PACKETS": "40", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "73" }, "COUNTERS:oid:0x15000000000667": { "SAI_QUEUE_STAT_BYTES": "8", "SAI_QUEUE_STAT_DROPPED_BYTES": "78", "SAI_QUEUE_STAT_DROPPED_PACKETS": "93", - "SAI_QUEUE_STAT_PACKETS": "54" + "SAI_QUEUE_STAT_PACKETS": "54", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "29" }, "COUNTERS:oid:0x15000000000668": { "SAI_QUEUE_STAT_BYTES": "96", "SAI_QUEUE_STAT_DROPPED_BYTES": "9", "SAI_QUEUE_STAT_DROPPED_PACKETS": "74", - "SAI_QUEUE_STAT_PACKETS": "83" + "SAI_QUEUE_STAT_PACKETS": "83", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "42" }, "COUNTERS:oid:0x15000000000669": { "SAI_QUEUE_STAT_BYTES": "60", "SAI_QUEUE_STAT_DROPPED_BYTES": "31", "SAI_QUEUE_STAT_DROPPED_PACKETS": "61", - "SAI_QUEUE_STAT_PACKETS": "15" + "SAI_QUEUE_STAT_PACKETS": "15", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "54" }, "COUNTERS:oid:0x1500000000066a": { "SAI_QUEUE_STAT_BYTES": "52", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", - "SAI_QUEUE_STAT_PACKETS": "45" + "SAI_QUEUE_STAT_PACKETS": "45", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000066b": { "SAI_QUEUE_STAT_BYTES": "88", "SAI_QUEUE_STAT_DROPPED_BYTES": "52", "SAI_QUEUE_STAT_DROPPED_PACKETS": "89", - "SAI_QUEUE_STAT_PACKETS": "55" + "SAI_QUEUE_STAT_PACKETS": "55", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "28" }, "COUNTERS:oid:0x1500000000066c": { "SAI_QUEUE_STAT_BYTES": "70", "SAI_QUEUE_STAT_DROPPED_BYTES": "79", "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", - "SAI_QUEUE_STAT_PACKETS": "14" + "SAI_QUEUE_STAT_PACKETS": "14", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "13" }, "COUNTERS:oid:0x1500000000066d": { "SAI_QUEUE_STAT_BYTES": "60", "SAI_QUEUE_STAT_DROPPED_BYTES": "81", "SAI_QUEUE_STAT_DROPPED_PACKETS": "66", - "SAI_QUEUE_STAT_PACKETS": "68" + "SAI_QUEUE_STAT_PACKETS": "68", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "22" }, "COUNTERS:oid:0x1500000000066e": { 
"SAI_QUEUE_STAT_BYTES": "4", "SAI_QUEUE_STAT_DROPPED_BYTES": "76", "SAI_QUEUE_STAT_DROPPED_PACKETS": "48", - "SAI_QUEUE_STAT_PACKETS": "63" + "SAI_QUEUE_STAT_PACKETS": "63", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "53" }, "COUNTERS:oid:0x15000000000677": { "SAI_QUEUE_STAT_BYTES": "73", "SAI_QUEUE_STAT_DROPPED_BYTES": "74", "SAI_QUEUE_STAT_DROPPED_PACKETS": "77", - "SAI_QUEUE_STAT_PACKETS": "41" + "SAI_QUEUE_STAT_PACKETS": "41", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "67" }, "COUNTERS:oid:0x15000000000678": { "SAI_QUEUE_STAT_BYTES": "21", "SAI_QUEUE_STAT_DROPPED_BYTES": "54", "SAI_QUEUE_STAT_DROPPED_PACKETS": "56", - "SAI_QUEUE_STAT_PACKETS": "60" + "SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "79" }, "COUNTERS:oid:0x15000000000679": { "SAI_QUEUE_STAT_BYTES": "31", "SAI_QUEUE_STAT_DROPPED_BYTES": "39", "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", - "SAI_QUEUE_STAT_PACKETS": "57" + "SAI_QUEUE_STAT_PACKETS": "57", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "35" }, "COUNTERS:oid:0x1500000000067a": { "SAI_QUEUE_STAT_BYTES": "96", "SAI_QUEUE_STAT_DROPPED_BYTES": "98", "SAI_QUEUE_STAT_DROPPED_PACKETS": "70", - "SAI_QUEUE_STAT_PACKETS": "41" + "SAI_QUEUE_STAT_PACKETS": "41", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "47" }, "COUNTERS:oid:0x1500000000067b": { "SAI_QUEUE_STAT_BYTES": "49", "SAI_QUEUE_STAT_DROPPED_BYTES": "36", "SAI_QUEUE_STAT_DROPPED_PACKETS": "63", - "SAI_QUEUE_STAT_PACKETS": "18" + "SAI_QUEUE_STAT_PACKETS": "18", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "62" }, "COUNTERS:oid:0x1500000000067c": { "SAI_QUEUE_STAT_BYTES": "90", "SAI_QUEUE_STAT_DROPPED_BYTES": "15", "SAI_QUEUE_STAT_DROPPED_PACKETS": "3", - "SAI_QUEUE_STAT_PACKETS": "99" + "SAI_QUEUE_STAT_PACKETS": "99", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000067d": { "SAI_QUEUE_STAT_BYTES": "84", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", - "SAI_QUEUE_STAT_PACKETS": "8" + "SAI_QUEUE_STAT_PACKETS": "8", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "33" }, "COUNTERS:oid:0x1500000000067e": { "SAI_QUEUE_STAT_BYTES": "15", "SAI_QUEUE_STAT_DROPPED_BYTES": "92", "SAI_QUEUE_STAT_DROPPED_PACKETS": "75", - "SAI_QUEUE_STAT_PACKETS": "83" + "SAI_QUEUE_STAT_PACKETS": "83", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "3" }, "COUNTERS:oid:0x60000000005a3": { "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_OCTETS": "0", @@ -982,7 +1006,8 @@ }, "COUNTERS:oid:0x21000000000000": { "SAI_SWITCH_STAT_OUT_DROP_REASON_RANGE_BASE": "1000", - "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS": "0" + "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS": "0", + "SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP": "500" }, "COUNTERS:oid:0x1a00000000034f": { @@ -1772,7 +1797,8 @@ }, "COUNTERS_DEBUG_NAME_SWITCH_STAT_MAP": { "DEBUG_1": "SAI_SWITCH_STAT_OUT_DROP_REASON_RANGE_BASE", - "lowercase_counter": "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" + "lowercase_counter": "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS", + "SWITCH_STD_DROP_COUNTER-SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP": "SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP" }, "COUNTERS:oid:0x1500000000035a": { "PFC_WD_ACTION": "drop", diff --git a/tests/multi_asic_dropstat_test.py b/tests/multi_asic_dropstat_test.py new file mode 100644 index 00000000000..8b9dd72826b --- /dev/null +++ b/tests/multi_asic_dropstat_test.py @@ -0,0 +1,122 @@ +import os +import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) 
+modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +dropstat_masic_result_asic0 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 10 100 0 0 80 20 + Ethernet4 U 0 1000 0 0 800 100 +Ethernet-BP0 U 0 1000 0 0 800 100 +Ethernet-BP4 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_asic1 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 10 100 0 0 80 20 +Ethernet-BP260 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_clear_all = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 0 0 0 0 0 0 + Ethernet4 U 0 0 0 0 0 0 +Ethernet-BP0 U 0 0 0 0 0 0 +Ethernet-BP4 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 0 0 0 0 0 0 +Ethernet-BP260 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 +""" + + +class TestMultiAsicDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_dropcount_masic_asic0(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic0' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 and return_code == 0 + + def test_show_dropcount_masic_all_and_clear(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 + dropstat_masic_result_asic1 + assert return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 'Cleared drop counters\n' and return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_clear_all and return_code == 0 + + def test_show_dropcount_masic_invalid_ns(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 2 + assert "invalid choice: asic5" in result + + def test_show_dropcount_version(self): + return_code, result = get_result_and_return_code([ + 
'dropstat', '--version' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ.pop("UTILITIES_UNIT_TESTING") + os.environ.pop("UTILITIES_UNIT_TESTING_TOPOLOGY") + print("TEARDOWN") diff --git a/tests/multi_asic_pgdropstat_test.py b/tests/multi_asic_pgdropstat_test.py new file mode 100644 index 00000000000..94bb13011b4 --- /dev/null +++ b/tests/multi_asic_pgdropstat_test.py @@ -0,0 +1,95 @@ +import os +import sys +from utilities_common.cli import UserCache +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +pg_drop_masic_one_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + +pg_drop_masic_all_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ + Ethernet0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + + +class TestMultiAsicPgDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_pg_drop_masic_all(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_all_result + + def test_show_pg_drop_masic(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic1' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_one_result + + def test_show_pg_drop_masic_not_exist(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 1 + assert result == "Input value for '--namespace' / '-n'. 
Choose from one of (asic0, asic1)" + + def test_clear_pg_drop(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == "Cleared PG drop counter\n" + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + UserCache('pg-drop').remove_all() + print("TEARDOWN") diff --git a/tests/pbh_input/assert_show_output.py b/tests/pbh_input/assert_show_output.py index 7a701ba4bc8..5538f3aadad 100644 --- a/tests/pbh_input/assert_show_output.py +++ b/tests/pbh_input/assert_show_output.py @@ -78,6 +78,14 @@ """ +show_pbh_statistics_partial = """\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 100 200 +pbh_table2 vxlan 0 0 +""" + + show_pbh_statistics_updated="""\ TABLE RULE RX PACKETS COUNT RX BYTES COUNT ---------- ------ ------------------ ---------------- diff --git a/tests/pbh_input/counters_db_partial.json b/tests/pbh_input/counters_db_partial.json new file mode 100644 index 00000000000..aa140188c8f --- /dev/null +++ b/tests/pbh_input/counters_db_partial.json @@ -0,0 +1,11 @@ +{ + "COUNTERS:oid:0x9000000000000": { }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "300", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "ACL_COUNTER_RULE_MAP": { + "pbh_table1:nvgre": "oid:0x9000000000000", + "pbh_table2:vxlan": "oid:0x9000000000001" + } +} diff --git a/tests/pbh_test.py b/tests/pbh_test.py index 7dddfea9ca9..0d68f458ee2 100644 --- a/tests/pbh_test.py +++ b/tests/pbh_test.py @@ -946,6 +946,34 @@ def test_show_pbh_statistics_after_clear(self): assert result.exit_code == SUCCESS assert result.output == assert_show_output.show_pbh_statistics_zero + def test_show_pbh_statistics_after_clear_and_counters_partial(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db_partial') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + self.remove_pbh_counters_file() + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + + result = runner.invoke( + show.cli.commands["pbh"]. 
+ commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_partial def test_show_pbh_statistics_after_clear_and_counters_updated(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') diff --git a/tests/pfc_input/assert_show_output.py b/tests/pfc_input/assert_show_output.py new file mode 100644 index 00000000000..2406f8b49f8 --- /dev/null +++ b/tests/pfc_input/assert_show_output.py @@ -0,0 +1,82 @@ +pfc_asym_cannot_find_intf = """\ + +Interface Asymmetric +----------- ------------ + +""" + +pfc_cannot_find_intf = """\ +Cannot find interface Ethernet1234 +""" + +pfc_show_asymmetric_all = """\ + +Interface Asymmetric +----------- ------------ +Ethernet0 off +Ethernet4 off +Ethernet8 off +Ethernet12 off +Ethernet16 off +Ethernet20 off +Ethernet24 off +Ethernet28 off +Ethernet32 off +Ethernet36 off +Ethernet40 off +Ethernet44 off +Ethernet48 off +Ethernet52 off +Ethernet56 off +Ethernet60 off +Ethernet64 off +Ethernet68 off +Ethernet72 off +Ethernet76 off +Ethernet80 off +Ethernet84 off +Ethernet88 off +Ethernet92 off +Ethernet96 off +Ethernet100 off +Ethernet104 off +Ethernet108 off +Ethernet112 off +Ethernet116 off +Ethernet120 off +Ethernet124 off + +""" + +pfc_show_asymmetric_intf = """\ + +Interface Asymmetric +----------- ------------ +Ethernet0 off + +""" + +pfc_show_priority_all = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 + +""" + +pfc_show_priority_intf = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +pfc_config_priority_on = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4,5 + +""" diff --git a/tests/pfc_test.py b/tests/pfc_test.py new file mode 100644 index 00000000000..101aa476cc4 --- /dev/null +++ b/tests/pfc_test.py @@ -0,0 +1,81 @@ +import os +import sys +import pfc.main as pfc +from .pfc_input.assert_show_output import pfc_cannot_find_intf, pfc_show_asymmetric_all, \ + pfc_show_asymmetric_intf, pfc_show_priority_all, pfc_show_priority_intf, \ + pfc_config_priority_on, pfc_asym_cannot_find_intf +from utilities_common.db import Db + +from click.testing import CliRunner +from importlib import reload + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "pfc") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestPfcBase(object): + + def executor(self, cliobj, command, expected_rc=0, expected_output=None, expected_cfgdb_entry=None, + runner=CliRunner()): + db = Db() + result = runner.invoke(cliobj, command, obj=db) + print(result.exit_code) + print(result.output) + + if result.exit_code != expected_rc: + print(result.exception) + assert result.exit_code == expected_rc + + if expected_output: + assert result.output == expected_output + + if expected_cfgdb_entry: + (table, key, field, expected_val) = expected_cfgdb_entry + configdb = db.cfgdb + entry = configdb.get_entry(table, key) + assert entry.get(field) == expected_val + + +class TestPfc(TestPfcBase): + + @classmethod + def setup_class(cls): + from mock_tables import dbconnector + from mock_tables import mock_single_asic + reload(mock_single_asic) + dbconnector.load_namespace_config() + + def test_pfc_show_asymmetric_all(self): + self.executor(pfc.cli, ['show', 'asymmetric'], + 
expected_output=pfc_show_asymmetric_all) + + def test_pfc_show_asymmetric_intf(self): + self.executor(pfc.cli, ['show', 'asymmetric', 'Ethernet0'], + expected_output=pfc_show_asymmetric_intf) + + def test_pfc_show_asymmetric_intf_fake(self): + self.executor(pfc.cli, ['show', 'asymmetric', 'Ethernet1234'], + expected_output=pfc_asym_cannot_find_intf) + + def test_pfc_show_priority_all(self): + self.executor(pfc.cli, ['show', 'priority'], + expected_output=pfc_show_priority_all) + + def test_pfc_show_priority_intf(self): + self.executor(pfc.cli, ['show', 'priority', 'Ethernet0'], + expected_output=pfc_show_priority_intf) + + def test_pfc_show_priority_intf_fake(self): + self.executor(pfc.cli, ['show', 'priority', 'Ethernet1234'], + expected_output=pfc_cannot_find_intf) + + def test_pfc_config_asymmetric(self): + self.executor(pfc.cli, ['config', 'asymmetric', 'on', 'Ethernet0'], + expected_cfgdb_entry=('PORT', 'Ethernet0', 'pfc_asym', 'on')) + + def test_pfc_config_priority(self): + self.executor(pfc.cli, ['config', 'priority', 'on', 'Ethernet0', '5'], + expected_output=pfc_config_priority_on) diff --git a/tests/queue_counter_test.py b/tests/queue_counter_test.py index 20b9516fbc1..391d0048720 100644 --- a/tests/queue_counter_test.py +++ b/tests/queue_counter_test.py @@ -1851,136 +1851,136 @@ show_queue_voq_counters = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 54 8 93 78 -testsw|Ethernet4 VOQ1 83 96 74 9 -testsw|Ethernet4 VOQ2 15 60 61 31 -testsw|Ethernet4 VOQ3 45 52 82 94 -testsw|Ethernet4 VOQ4 55 88 89 52 -testsw|Ethernet4 VOQ5 14 70 95 79 -testsw|Ethernet4 VOQ6 68 60 66 81 -testsw|Ethernet4 VOQ7 63 4 48 76 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 54 8 93 78 29 +testsw|Ethernet4 VOQ1 83 96 74 9 42 +testsw|Ethernet4 VOQ2 15 60 61 31 54 +testsw|Ethernet4 VOQ3 45 52 82 94 19 +testsw|Ethernet4 VOQ4 55 88 89 52 28 +testsw|Ethernet4 VOQ5 14 70 95 79 13 +testsw|Ethernet4 VOQ6 68 60 66 81 22 +testsw|Ethernet4 VOQ7 63 4 48 76 53 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 41 73 77 74 -testsw|Ethernet8 VOQ1 60 21 56 54 -testsw|Ethernet8 VOQ2 57 31 12 39 -testsw|Ethernet8 VOQ3 41 96 70 98 -testsw|Ethernet8 VOQ4 18 49 63 36 -testsw|Ethernet8 VOQ5 99 90 3 15 -testsw|Ethernet8 VOQ6 8 84 82 94 -testsw|Ethernet8 VOQ7 83 15 75 92 + Port Voq 
Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 41 73 77 74 67 +testsw|Ethernet8 VOQ1 60 21 56 54 79 +testsw|Ethernet8 VOQ2 57 31 12 39 35 +testsw|Ethernet8 VOQ3 41 96 70 98 47 +testsw|Ethernet8 VOQ4 18 49 63 36 62 +testsw|Ethernet8 VOQ5 99 90 3 15 19 +testsw|Ethernet8 VOQ6 8 84 82 94 33 +testsw|Ethernet8 VOQ7 83 15 75 92 3 """ show_queue_voq_counters_nz = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 54 8 93 78 -testsw|Ethernet4 VOQ1 83 96 74 9 -testsw|Ethernet4 VOQ2 15 60 61 31 -testsw|Ethernet4 VOQ3 45 52 82 94 -testsw|Ethernet4 VOQ4 55 88 89 52 -testsw|Ethernet4 VOQ5 14 70 95 79 -testsw|Ethernet4 VOQ6 68 60 66 81 -testsw|Ethernet4 VOQ7 63 4 48 76 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 54 8 93 78 29 +testsw|Ethernet4 VOQ1 83 96 74 9 42 +testsw|Ethernet4 VOQ2 15 60 61 31 54 +testsw|Ethernet4 VOQ3 45 52 82 94 19 +testsw|Ethernet4 VOQ4 55 88 89 52 28 +testsw|Ethernet4 VOQ5 14 70 95 79 13 +testsw|Ethernet4 VOQ6 68 60 66 81 22 +testsw|Ethernet4 VOQ7 63 4 48 76 53 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 41 73 77 74 -testsw|Ethernet8 VOQ1 60 21 56 54 -testsw|Ethernet8 VOQ2 57 31 12 39 -testsw|Ethernet8 VOQ3 41 96 70 98 -testsw|Ethernet8 VOQ4 18 49 63 36 -testsw|Ethernet8 VOQ5 99 90 3 15 -testsw|Ethernet8 VOQ6 8 84 82 94 -testsw|Ethernet8 VOQ7 83 15 75 92 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 41 73 77 74 67 +testsw|Ethernet8 VOQ1 60 21 56 54 79 +testsw|Ethernet8 VOQ2 57 31 12 39 35 +testsw|Ethernet8 VOQ3 41 96 70 98 47 +testsw|Ethernet8 VOQ4 18 49 63 36 62 +testsw|Ethernet8 VOQ5 99 90 3 15 19 +testsw|Ethernet8 VOQ6 8 84 82 94 33 +testsw|Ethernet8 VOQ7 83 15 75 92 3 """ show_queue_voq_counters_with_clear = ["""\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 0 0 0 0 -testsw|Ethernet0 VOQ2 0 0 0 0 -testsw|Ethernet0 VOQ3 0 0 0 0 -testsw|Ethernet0 VOQ4 0 0 0 0 -testsw|Ethernet0 VOQ5 0 0 0 0 -testsw|Ethernet0 VOQ6 0 0 0 0 -testsw|Ethernet0 VOQ7 0 0 0 
0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 0 0 0 0 0 +testsw|Ethernet0 VOQ2 0 0 0 0 0 +testsw|Ethernet0 VOQ3 0 0 0 0 0 +testsw|Ethernet0 VOQ4 0 0 0 0 0 +testsw|Ethernet0 VOQ5 0 0 0 0 0 +testsw|Ethernet0 VOQ6 0 0 0 0 0 +testsw|Ethernet0 VOQ7 0 0 0 0 0 """, """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 0 0 0 0 -testsw|Ethernet4 VOQ1 0 0 0 0 -testsw|Ethernet4 VOQ2 0 0 0 0 -testsw|Ethernet4 VOQ3 0 0 0 0 -testsw|Ethernet4 VOQ4 0 0 0 0 -testsw|Ethernet4 VOQ5 0 0 0 0 -testsw|Ethernet4 VOQ6 0 0 0 0 -testsw|Ethernet4 VOQ7 0 0 0 0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 0 0 0 0 0 +testsw|Ethernet4 VOQ1 0 0 0 0 0 +testsw|Ethernet4 VOQ2 0 0 0 0 0 +testsw|Ethernet4 VOQ3 0 0 0 0 0 +testsw|Ethernet4 VOQ4 0 0 0 0 0 +testsw|Ethernet4 VOQ5 0 0 0 0 0 +testsw|Ethernet4 VOQ6 0 0 0 0 0 +testsw|Ethernet4 VOQ7 0 0 0 0 0 """, """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 0 0 0 0 -testsw|Ethernet8 VOQ1 0 0 0 0 -testsw|Ethernet8 VOQ2 0 0 0 0 -testsw|Ethernet8 VOQ3 0 0 0 0 -testsw|Ethernet8 VOQ4 0 0 0 0 -testsw|Ethernet8 VOQ5 0 0 0 0 -testsw|Ethernet8 VOQ6 0 0 0 0 -testsw|Ethernet8 VOQ7 0 0 0 0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 0 0 0 0 0 +testsw|Ethernet8 VOQ1 0 0 0 0 0 +testsw|Ethernet8 VOQ2 0 0 0 0 0 +testsw|Ethernet8 VOQ3 0 0 0 0 0 +testsw|Ethernet8 VOQ4 0 0 0 0 0 +testsw|Ethernet8 VOQ5 0 0 0 0 0 +testsw|Ethernet8 VOQ6 0 0 0 0 0 +testsw|Ethernet8 VOQ7 0 0 0 0 0 """ ] show_queue_port_voq_counters = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 """ show_queue_port_voq_counters_nz = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts 
+---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 """ @@ -1988,48 +1988,56 @@ { "testsw|Ethernet0": { "VOQ0": { + "creditWDPkts": "0", "dropbytes": "0", "droppacket": "0", "totalbytes": "0", "totalpacket": "0" }, "VOQ1": { + "creditWDPkts": "1", "dropbytes": "1", "droppacket": "39", "totalbytes": "43", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "19", "dropbytes": "21", "droppacket": "39", "totalbytes": "7", "totalpacket": "82" }, "VOQ3": { + "creditWDPkts": "12", "dropbytes": "94", "droppacket": "12", "totalbytes": "59", "totalpacket": "11" }, "VOQ4": { + "creditWDPkts": "11", "dropbytes": "40", "droppacket": "35", "totalbytes": "62", "totalpacket": "36" }, "VOQ5": { + "creditWDPkts": "15", "dropbytes": "88", "droppacket": "2", "totalbytes": "91", "totalpacket": "49" }, "VOQ6": { + "creditWDPkts": "17", "dropbytes": "74", "droppacket": "94", "totalbytes": "17", "totalpacket": "33" }, "VOQ7": { + "creditWDPkts": "73", "dropbytes": "33", "droppacket": "95", "totalbytes": "71", @@ -2038,48 +2046,56 @@ }, "testsw|Ethernet4": { "VOQ0": { + "creditWDPkts": "29", "dropbytes": "78", "droppacket": "93", "totalbytes": "8", "totalpacket": "54" }, "VOQ1": { + "creditWDPkts": "42", "dropbytes": "9", "droppacket": "74", "totalbytes": "96", "totalpacket": "83" }, "VOQ2": { + "creditWDPkts": "54", "dropbytes": "31", "droppacket": "61", "totalbytes": "60", "totalpacket": "15" }, "VOQ3": { + "creditWDPkts": "19", "dropbytes": "94", "droppacket": "82", "totalbytes": "52", "totalpacket": "45" }, "VOQ4": { + "creditWDPkts": "28", "dropbytes": "52", "droppacket": "89", "totalbytes": "88", "totalpacket": "55" }, "VOQ5": { + "creditWDPkts": "13", "dropbytes": "79", "droppacket": "95", "totalbytes": "70", "totalpacket": "14" }, "VOQ6": { + "creditWDPkts": "22", "dropbytes": "81", "droppacket": "66", "totalbytes": "60", "totalpacket": "68" }, "VOQ7": { + "creditWDPkts": "53", "dropbytes": "76", "droppacket": "48", "totalbytes": "4", @@ -2088,48 +2104,56 @@ }, "testsw|Ethernet8": { "VOQ0": { + "creditWDPkts": "67", "dropbytes": "74", "droppacket": "77", "totalbytes": "73", "totalpacket": "41" }, "VOQ1": { + "creditWDPkts": "79", "dropbytes": "54", "droppacket": "56", "totalbytes": "21", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "35", "dropbytes": "39", "droppacket": "12", "totalbytes": "31", "totalpacket": "57" }, "VOQ3": { + "creditWDPkts": "47", "dropbytes": "98", "droppacket": "70", "totalbytes": "96", "totalpacket": "41" }, "VOQ4": { + "creditWDPkts": "62", "dropbytes": "36", "droppacket": "63", "totalbytes": "49", "totalpacket": "18" }, "VOQ5": { + "creditWDPkts": "19", "dropbytes": "15", "droppacket": "3", "totalbytes": "90", "totalpacket": "99" }, "VOQ6": { + "creditWDPkts": "33", "dropbytes": "94", "droppacket": "82", "totalbytes": "84", "totalpacket": "8" }, "VOQ7": { + "creditWDPkts": "3", "dropbytes": "92", "droppacket": "75", "totalbytes": "15", @@ -2142,48 +2166,56 @@ { "testsw|Ethernet0": { "VOQ0": { + "creditWDPkts": "0", "dropbytes": "0", "droppacket": "0", "totalbytes": "0", "totalpacket": "0" }, "VOQ1": { + "creditWDPkts": "1", "dropbytes": "1", "droppacket": "39", "totalbytes": "43", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "19", "dropbytes": "21", "droppacket": 
"39", "totalbytes": "7", "totalpacket": "82" }, "VOQ3": { + "creditWDPkts": "12", "dropbytes": "94", "droppacket": "12", "totalbytes": "59", "totalpacket": "11" }, "VOQ4": { + "creditWDPkts": "11", "dropbytes": "40", "droppacket": "35", "totalbytes": "62", "totalpacket": "36" }, "VOQ5": { + "creditWDPkts": "15", "dropbytes": "88", "droppacket": "2", "totalbytes": "91", "totalpacket": "49" }, "VOQ6": { + "creditWDPkts": "17", "dropbytes": "74", "droppacket": "94", "totalbytes": "17", "totalpacket": "33" }, "VOQ7": { + "creditWDPkts": "73", "dropbytes": "33", "droppacket": "95", "totalbytes": "71", diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py index d9fd6721023..9883dfa16be 100644 --- a/tests/remote_cli_test.py +++ b/tests/remote_cli_test.py @@ -12,9 +12,9 @@ import socket import termios -MULTI_LC_REXEC_OUTPUT = '''======== sonic-lc1 output: ======== +MULTI_LC_REXEC_OUTPUT = '''======== LINE-CARD0|sonic-lc1 output: ======== hello world -======== LINE-CARD2 output: ======== +======== LINE-CARD2|sonic-lc3 output: ======== hello world ''' REXEC_HELP = '''Usage: cli [OPTIONS] LINECARD_NAMES... @@ -152,12 +152,12 @@ def test_rexec_all(self): @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_invalid_lc(self): runner = CliRunner() - LINECARD_NAME = "sonic-lc-3" + LINECARD_NAME = "sonic-lc-100" result = runner.invoke( rexec.cli, [LINECARD_NAME, "-c", "show version"]) print(result.output) assert result.exit_code == 1, result.output - assert "Linecard sonic-lc-3 not found\n" == result.output + assert "Linecard sonic-lc-100 not found\n" == result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) diff --git a/tests/remote_show_test.py b/tests/remote_show_test.py new file mode 100644 index 00000000000..e1be3d0302c --- /dev/null +++ b/tests/remote_show_test.py @@ -0,0 +1,73 @@ +import mock +import subprocess +from io import BytesIO +from click.testing import CliRunner + + +def mock_rexec_command(*args): + mock_stdout = BytesIO(b"""hello world""") + print(mock_stdout.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=0, stdout=mock_stdout, stderr=BytesIO()) + + +def mock_rexec_error_cmd(*args): + mock_stderr = BytesIO(b"""Error""") + print(mock_stderr.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=1, stdout=BytesIO(), stderr=mock_stderr) + + +MULTI_LC_REXEC_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +hello world +''' + +MULTI_LC_ERR_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +Error +''' + + +class TestRexecBgp(object): + @classmethod + def setup_class(cls): + pass + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) + def test_show_ip_bgp_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) 
+ @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) + def test_show_ip_bgp_error_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_error_cmd + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 1 + assert MULTI_LC_ERR_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "network", "10.0.0.0/24"]) + def test_show_ip_bgp_network_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["network", "10.0.0.0/24"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output diff --git a/tests/route_check_test.py b/tests/route_check_test.py index 1f92b3d19ae..26c632d7427 100644 --- a/tests/route_check_test.py +++ b/tests/route_check_test.py @@ -252,8 +252,11 @@ def run_test(self, ct_data): def mock_check_output(self, ct_data, *args, **kwargs): ns = self.extract_namespace_from_args(args[0]) - routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) - return json.dumps(routes) + if 'show runningconfiguration bgp' in ' '.join(args[0]): + return 'bgp suppress-fib-pending' + else: + routes = ct_data.get(FRR_ROUTES, {}).get(ns, {}) + return json.dumps(routes) def assert_results(self, ct_data, ret, res): expect_ret = ct_data.get(RET, 0) diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 523848ec453..0e58daa18e4 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -20,6 +20,46 @@ ERROR_NOT_IMPLEMENTED = 5 ERROR_INVALID_PORT = 6 +FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT = { + 'type': 'QSFP28 or later', + 'type_abbrv_name': 'QSFP28', + 'manufacturer': 'Mellanox', + 'model': 'MCP1600-C003', + 'vendor_rev': 'A2', + 'serial': 'MT1636VS10561', + 'vendor_oui': '00-02-c9', + 'vendor_date': '2016-07-18', + 'connector': 'No separable connector', + 'encoding': '64B66B', + 'ext_identifier': 'Power Class 1(1.5W max)', + 'ext_rateselect_compliance': 'QSFP+ Rate Select Version 1', + 'cable_type': 'Length Cable Assembly(m)', + 'cable_length': '3', + 'application_advertisement': 'N/A', + 'specification_compliance': "{'10/40G Ethernet Compliance Code': '40GBASE-CR4'}", + 'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no',\ + 'Voltage_support': 'no', 'Temp_support': 'no'}", + 'nominal_bit_rate': '255' +} +FLAT_MEMORY_MODULE_EEPROM = """Ethernet16: SFP EEPROM detected + Application Advertisement: N/A + Connector: No separable connector + Encoding: 64B66B + Extended Identifier: Power Class 1(1.5W max) + Extended RateSelect Compliance: QSFP+ Rate Select Version 1 + Identifier: QSFP28 or later + Length Cable Assembly(m): 3 + Nominal Bit Rate(100Mbs): 255 + Specification compliance: + 10/40G Ethernet Compliance Code: 40GBASE-CR4 + Vendor Date Code(YYYY-MM-DD Lot): 2016-07-18 + Vendor Name: Mellanox + Vendor OUI: 00-02-c9 + Vendor PN: MCP1600-C003 + Vendor Rev: A2 + Vendor SN: MT1636VS10561 +""" + class TestSfputil(object): def test_format_dict_value_to_string(self): sorted_key_table = [ @@ -570,6 +610,51 @@ def test_show_lpmode(self, mock_chassis): """ assert result.output == expected_output + 
@patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=True)) + def test_power_RJ45(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_sfp.get_presence.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Power disable/enable is not available for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + def test_power(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.exit_code == 0 + + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n\n' + + mock_sfp.get_presence.return_value = True + mock_sfp.set_power = MagicMock(side_effect=NotImplementedError) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'This functionality is currently not implemented for this platform\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.set_power = MagicMock(return_value=False) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Failed\n' + + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) @@ -585,6 +670,39 @@ def test_show_eeprom_RJ45(self, mock_chassis): expected_output = "Ethernet16: SFP EEPROM is not applicable for RJ45 port\n\n\n" assert result.output == expected_output + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @pytest.mark.parametrize("exception, xcvr_api_none, expected_output", [ + (None, False, '''DOM values not supported for flat memory module\n\n'''), + (NotImplementedError, False, '''API is currently not implemented for this platform\n\n'''), + (None, True, '''API is none while getting DOM info!\n\n''') + ]) + @patch('sfputil.main.platform_chassis') + def test_show_eeprom_dom_conditions(self, mock_chassis, exception, xcvr_api_none, expected_output): + mock_sfp = MagicMock() + mock_sfp.get_presence.return_value = True + mock_sfp.get_transceiver_info.return_value = FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT + 
mock_chassis.get_sfp.return_value = mock_sfp + + if exception: + mock_chassis.get_sfp().get_xcvr_api.side_effect = exception + elif xcvr_api_none: + mock_chassis.get_sfp().get_xcvr_api.return_value = None + else: + mock_api = MagicMock() + mock_chassis.get_sfp().get_xcvr_api.return_value = mock_api + + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom'], ["-p", "Ethernet16", "-d"]) + + if exception or xcvr_api_none: + assert result.exit_code == ERROR_NOT_IMPLEMENTED + else: + assert result.exit_code == 0 + assert result.output == FLAT_MEMORY_MODULE_EEPROM + expected_output + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=0))) def test_show_eeprom_hexdump_invalid_port(self, mock_chassis): @@ -1510,3 +1628,46 @@ def test_load_port_config(self, mock_is_multi_asic): mock_is_multi_asic.return_value = False assert sfputil.load_port_config() == True + + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + def test_debug_loopback(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + + runner = CliRunner() + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n' + mock_sfp.get_presence.return_value = True + + mock_sfp.get_xcvr_api = MagicMock(side_effect=NotImplementedError) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: This functionality is not implemented\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input"]) + assert result.output == 'Ethernet0: Set host-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "none"]) + assert result.output == 'Ethernet0: Set none loopback failed\n' + assert result.exit_code == EXIT_FAIL + + mock_api.set_loopback_mode.return_value = True + mock_api.set_loopback_mode.side_effect = AttributeError + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "none"]) + assert result.output == 'Ethernet0: Set loopback mode is not applicable for this module\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED diff --git a/tests/show_bgp_network_test.py b/tests/show_bgp_network_test.py index f610199538c..bfc23d89126 100644 --- a/tests/show_bgp_network_test.py +++ b/tests/show_bgp_network_test.py @@ -57,7 +57,8 @@ def setup_class(cls): ('bgp_v4_network_bestpath', 'bgp_v4_network_bestpath'), ('bgp_v6_network_longer_prefixes', 'bgp_v6_network_longer_prefixes'), ('bgp_v4_network', 'bgp_v4_network_longer_prefixes_error'), - ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error')], + ('bgp_v4_network', 
'bgp_v6_network_longer_prefixes_error')],
+         ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error'),
+         ('bgp_v4_network', 'bgp_v4_network_all_asic_on_single_asic')],
        indirect=['setup_single_bgp_instance'])
    def test_bgp_network(self, setup_bgp_commands, test_vector,
                         setup_single_bgp_instance):
@@ -77,14 +78,16 @@ def setup_class(cls):
 
     @pytest.mark.parametrize(
         'setup_multi_asic_bgp_instance, test_vector',
-        [('bgp_v4_network', 'bgp_v4_network_multi_asic'),
+        [('bgp_v4_network_all_asic', 'bgp_v4_network_default_multi_asic'),
          ('bgp_v6_network', 'bgp_v6_network_multi_asic'),
          ('bgp_v4_network_asic0', 'bgp_v4_network_asic0'),
          ('bgp_v4_network_ip_address_asic0', 'bgp_v4_network_ip_address_asic0'),
          ('bgp_v4_network_bestpath_asic0', 'bgp_v4_network_bestpath_asic0'),
          ('bgp_v6_network_asic0', 'bgp_v6_network_asic0'),
          ('bgp_v6_network_ip_address_asic0', 'bgp_v6_network_ip_address_asic0'),
-         ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0')],
+         ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0'),
+         ('bgp_v4_network_all_asic', 'bgp_v4_network_all_asic'),
+         ('bgp_v4_network', 'bgp_v4_network_asic_unknown')],
         indirect=['setup_multi_asic_bgp_instance'])
     def test_bgp_network(self, setup_bgp_commands, test_vector,
                          setup_multi_asic_bgp_instance):
diff --git a/tests/single_asic_dropstat_test.py b/tests/single_asic_dropstat_test.py
new file mode 100644
index 00000000000..c521bcfa601
--- /dev/null
+++ b/tests/single_asic_dropstat_test.py
@@ -0,0 +1,72 @@
+import os
+import sys
+from .utils import get_result_and_return_code
+
+test_path = os.path.dirname(os.path.abspath(__file__))
+modules_path = os.path.dirname(test_path)
+scripts_path = os.path.join(modules_path, "scripts")
+sys.path.insert(0, test_path)
+sys.path.insert(0, modules_path)
+
+dropstat_result = """\
+    IFACE    STATE    RX_ERR    RX_DROPS    TX_ERR    TX_DROPS    DEBUG_0    DEBUG_2
+---------  -------  --------  ----------  --------  ----------  ---------  ---------
+Ethernet0        D        10         100         0           0         80         20
+Ethernet4      N/A         0        1000         0           0        800        100
+Ethernet8      N/A       100          10         0           0         10          0
+
+          DEVICE    SWITCH_DROPS    lowercase_counter
+----------------  --------------  -------------------
+sonic_drops_test            1000                    0
+"""
+
+dropstat_result_clear_all = """\
+    IFACE    STATE    RX_ERR    RX_DROPS    TX_ERR    TX_DROPS    DEBUG_0    DEBUG_2
+---------  -------  --------  ----------  --------  ----------  ---------  ---------
+Ethernet0        D         0           0         0           0          0          0
+Ethernet4      N/A         0           0         0           0          0          0
+Ethernet8      N/A         0           0         0           0          0          0
+
+          DEVICE    SWITCH_DROPS    lowercase_counter
+----------------  --------------  -------------------
+sonic_drops_test               0                    0
+"""
+
+
+class TestSingleAsicDropstat(object):
+    @classmethod
+    def setup_class(cls):
+        os.environ["PATH"] += os.pathsep + scripts_path
+        os.environ["UTILITIES_UNIT_TESTING"] = "1"
+        print("SETUP")
+
+    def test_show_dropcount_and_clear(self):
+        os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1"
+        return_code, result = get_result_and_return_code([
+            'dropstat', '-c', 'show'
+        ])
+        os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE")
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert result == dropstat_result
+        assert return_code == 0
+
+        return_code, result = get_result_and_return_code([
+            'dropstat', '-c', 'clear'
+        ])
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert result == 'Cleared drop counters\n' and return_code == 0
+
+        return_code, result = get_result_and_return_code([
+            'dropstat', '-c', 'show'
+        ])
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert result == dropstat_result_clear_all and return_code == 0
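+
+    # The show → clear → show sequence above is how these counter scripts are
+    # verified end to end. A minimal sketch of the helper contract (illustrative
+    # only; get_result_and_return_code comes from tests/utils.py):
+    #
+    #     return_code, result = get_result_and_return_code(['dropstat', '-c', 'show'])
+    #     assert return_code == 0       # script exited cleanly
+    #     assert 'IFACE' in result      # per-port counter table was rendered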
+
+    @classmethod
+    def teardown_class(cls):
+        os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1])
+        os.environ.pop("UTILITIES_UNIT_TESTING")
+        print("TEARDOWN")
diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py
index 10fe72cac1a..98db8879414 100644
--- a/tests/sonic_package_manager/conftest.py
+++ b/tests/sonic_package_manager/conftest.py
@@ -16,6 +16,7 @@
 from sonic_package_manager.registry import RegistryResolver
 from sonic_package_manager.version import Version
 from sonic_package_manager.service_creator.creator import *
+from sonic_package_manager.service_creator.creator import ETC_SYSTEMD_LOCATION
 
 
 @pytest.fixture
@@ -133,20 +134,20 @@ def __init__(self):
         self.add('Azure/docker-test-6', '2.0.0', 'test-package-6', '2.0.0')
         self.add('Azure/docker-test-6', 'latest', 'test-package-6', '1.5.0')
 
-    def from_registry(self, repository: str, reference: str):
+    def from_registry(self, repository: str, reference: str, use_local_manifest=None, name=None):
         manifest = Manifest.marshal(self.metadata_store[repository][reference]['manifest'])
         components = self.metadata_store[repository][reference]['components']
         yang = self.metadata_store[repository][reference]['yang']
         return Metadata(manifest, components, yang)
 
-    def from_local(self, image: str):
+    def from_local(self, image: str, use_local_manifest=None, name=None, use_edit=None):
         ref = Reference.parse(image)
         manifest = Manifest.marshal(self.metadata_store[ref['name']][ref['tag']]['manifest'])
         components = self.metadata_store[ref['name']][ref['tag']]['components']
         yang = self.metadata_store[ref['name']][ref['tag']]['yang']
         return Metadata(manifest, components, yang)
 
-    def from_tarball(self, filepath: str) -> Manifest:
+    def from_tarball(self, filepath: str, use_local_manifest=None, name=None) -> Manifest:
         path, ref = filepath.split(':')
         manifest = Manifest.marshal(self.metadata_store[path][ref]['manifest'])
         components = self.metadata_store[path][ref]['components']
@@ -405,6 +406,7 @@ def fake_db_for_migration(fake_metadata_resolver):
 def sonic_fs(fs):
     fs.create_file('/proc/1/root')
     fs.create_dir(ETC_SONIC_PATH)
+    fs.create_dir(ETC_SYSTEMD_LOCATION)
     fs.create_dir(SYSTEMD_LOCATION)
     fs.create_dir(DOCKER_CTL_SCRIPT_LOCATION)
     fs.create_dir(SERVICE_MGMT_SCRIPT_LOCATION)
diff --git a/tests/sonic_package_manager/test_cli.py b/tests/sonic_package_manager/test_cli.py
index 695d8cba58b..1b7556ae68f 100644
--- a/tests/sonic_package_manager/test_cli.py
+++ b/tests/sonic_package_manager/test_cli.py
@@ -4,6 +4,15 @@
 
 from sonic_package_manager import main
 
+from unittest.mock import patch, mock_open, MagicMock
+
+MANIFEST_LOCATION = 'fake_manifest_location'
+DMFILE_NAME = 'fake_dmfile_name'
+DEFAULT_MANIFEST_NAME = 'fake_default_manifest_name'
+LOCAL_JSON = 'fake_local_json'
+sample_manifest_json = '{"package": {"name": "test", "version": "1.0.0"}, "service": {"name": "test"}}'
+fake_manifest_name = 'test-manifest'
+MANIFEST_CONTENT = '{"package": {"name": "test", "version": "1.0.0"}, "service": {"name": "test"}}'
 
 def test_show_changelog(package_manager, fake_metadata_resolver):
     """ Test case for "sonic-package-manager package show changelog [NAME]" """
@@ -61,3 +70,217 @@ def test_show_changelog_no_changelog(package_manager):
 
     assert result.exit_code == 1
     assert result.output == 'Failed to print package changelog: No changelog for package test-package\n'
+
+
+def test_manifests_create_command_existing_manifest(package_manager):
+    """ Test case for "sonic-package-manager manifests create" with
an existing manifest file """ + + runner = CliRunner() + + with patch('os.path.exists', side_effect=[True, False]), \ + patch('sonic_package_manager.main.PackageManager.is_installed', return_value=False), \ + patch('builtins.open', new_callable=mock_open()), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['create'], + ['test-manifest'], + input=sample_manifest_json, + obj=package_manager) + + assert 'Error: Manifest file \'test-manifest\' already exists.' in result.output + assert result.exit_code == 0 + + +def test_manifests_create_command_existing_package(package_manager): + """ Test case for "sonic-package-manager manifests create" with an existing installed package """ + + runner = CliRunner() + + with patch('os.path.exists', return_value=False), \ + patch('sonic_package_manager.main.PackageManager.is_installed', return_value=True), \ + patch('builtins.open', new_callable=mock_open()), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['create'], + ['test-manifest'], + input=sample_manifest_json, + obj=package_manager) + + assert 'Error: A package with the same name test-manifest is already installed' in result.output + assert result.exit_code == 0 + + +def test_manifests_update_command_error_handling(package_manager): + + runner = CliRunner() + + with patch('os.path.exists', return_value=False), \ + patch('builtins.open', new_callable=mock_open(read_data='{"key": "value"}')), \ + patch('json.load', side_effect=lambda x: MagicMock(return_value='{"key": "value"}')), \ + patch('json.dump'), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['update'], + ['non-existent-manifest', '--from-json', 'fake_json_path'], + obj=package_manager) + assert 'Local Manifest file for non-existent-manifest does not exists to update\n' in result.output + assert result.exit_code == 0 + + +def test_manifests_delete_command_deletion_cancelled(package_manager): + runner = CliRunner() + + with patch('os.path.exists', return_value=True), \ + patch('click.prompt', return_value='n'), \ + patch('os.remove') as mock_remove, \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['delete'], ['test-manifest'], obj=package_manager) + + # Check if the cancellation message is present in the result output + assert 'Deletion cancelled.' 
in result.output + # Check if os.remove is not called when the deletion is cancelled + assert not mock_remove.called + + +def test_manifests_list_command_no_manifests(package_manager): + runner = CliRunner() + + with patch('os.listdir', return_value=[]), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['list'], [], obj=package_manager) + + # Check if the appropriate message is present in the result output + assert 'No custom local manifest files found.\n' in result.output + + +def test_manifests_command(): + """ Test case for "sonic-package-manager manifests" """ + + runner = CliRunner() + result = runner.invoke(main.manifests) + assert result.exit_code == 0 + + +def test_manifests_create_command_exception(package_manager): + """Test case for "sonic-package-manager manifests create" with an exception during manifest creation""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.create_package_manifest', + side_effect=Exception("Custom error")), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['create'], ['test-manifest'], obj=package_manager) + + assert 'Error: Manifest test-manifest creation failed - Custom error' in result.output + assert result.exit_code == 0 + + +def test_manifests_update_command_exception(package_manager): + """Test case for 'sonic-package-manager manifests update' with an exception during manifest update""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.update_package_manifest', + side_effect=Exception("Custom error")), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['update'], + ['test-manifest', '--from-json', 'new_manifest.json'], + obj=package_manager) + + assert 'Error occurred while updating manifest \'test-manifest\': Custom error' in result.output + assert result.exit_code == 0 + + +def test_manifests_delete_command_exception(package_manager): + """Test case for 'sonic-package-manager manifests delete' with an exception during manifest deletion""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.delete_package_manifest', + side_effect=Exception("Custom error")), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['delete'], + ['test-manifest'], obj=package_manager) + + assert "Error: Failed to delete manifest file 'test-manifest'. Custom error" in result.output + assert result.exit_code == 0 + + +def test_manifests_show_command_file_not_found(package_manager): + """Test case for 'sonic-package-manager manifests show' with a non-existent manifest file""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.show_package_manifest', + side_effect=FileNotFoundError()), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['show'], + ['nonexistent_manifest.json'], obj=package_manager) + + assert "Manifest file 'nonexistent_manifest.json' not found." 
in result.output + assert result.exit_code == 0 + + +def test_install_with_local_manifest(package_manager): + """Test case for 'install' command with use_local_manifest=True and name provided""" + + runner = CliRunner() + + with patch('os.path.exists', return_value=True), \ + patch('os.geteuid', return_value=0): + result = runner.invoke(main.install, + ['package_name', '--use-local-manifest', '-y'], + obj=package_manager) + + assert 'name argument is not provided to use local manifest' in result.output + assert result.exit_code == 0 + + +def test_install_with_nonexistent_manifest(package_manager): + """Test case for 'install' command with use_local_manifest=True and non-existent name provided""" + + runner = CliRunner() + + with patch('os.path.exists', return_value=False), \ + patch('os.geteuid', return_value=0): + result = runner.invoke( + main.install, + ['package_name', '--use-local-manifest', '--name', 'nonexistent_manifest', '-y'], + obj=package_manager) + + assert 'Local Manifest file for nonexistent_manifest does not exists to install' in result.output + assert result.exit_code == 0 + + +def test_update_command_exception(package_manager): + """Test case for 'update' command with an exception during package update""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.update', + side_effect=Exception("Custom error")), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.update, ['package_name'], obj=package_manager) + + assert 'Failed to update package package_name: Custom error' in result.output + + +def test_update_command_keyboard_interrupt(package_manager): + """Test case for 'update' command with a keyboard interrupt""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.update', + side_effect=KeyboardInterrupt()), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.update, ['package_name'], obj=package_manager) + + assert 'Operation canceled by user' in result.output diff --git a/tests/sonic_package_manager/test_manager.py b/tests/sonic_package_manager/test_manager.py index 46ea3f6acb0..a3a311ebb29 100644 --- a/tests/sonic_package_manager/test_manager.py +++ b/tests/sonic_package_manager/test_manager.py @@ -1,13 +1,14 @@ #!/usr/bin/env python import re -from unittest.mock import Mock, call, patch - +import unittest +from unittest.mock import Mock, call, patch, mock_open import pytest import sonic_package_manager from sonic_package_manager.errors import * from sonic_package_manager.version import Version +import json @pytest.fixture(autouse=True) def mock_run_command(): @@ -352,10 +353,10 @@ def test_manager_migration(package_manager, fake_db_for_migration): call('test-package-3=1.6.0'), # test-package-4 was not present in DB at all, but it is present and installed in # fake_db_for_migration, thus asserting that it is going to be installed. - call('test-package-4=1.5.0'), + call(None, 'Azure/docker-test-4:1.5.0', name='test-package-4'), # test-package-5 1.5.0 was installed in fake_db_for_migration but the default # in current db is 1.9.0, assert that migration will install the newer version. - call('test-package-5=1.9.0'), + call(None, 'Azure/docker-test-5:1.9.0', name='test-package-5'), # test-package-6 2.0.0 was installed in fake_db_for_migration but the default # in current db is 1.5.0, assert that migration will install the newer version. 
call('test-package-6=2.0.0')], @@ -389,3 +390,204 @@ def test_manager_migration_dockerd(package_manager, fake_db_for_migration, mock_ package_manager.migrate_packages(fake_db_for_migration, '/var/run/docker.sock') package_manager.get_docker_client.assert_has_calls([ call('/var/run/docker.sock')], any_order=True) + + +def test_create_package_manifest_default_manifest(package_manager): + """Test case for creating a default manifest.""" + + with patch('os.path.exists', return_value=False), \ + patch('os.mkdir'), \ + patch('builtins.open', new_callable=mock_open()), \ + patch('click.echo') as mock_echo: + + package_manager.create_package_manifest("default_manifest", from_json=None) + + mock_echo.assert_called_once_with("Default Manifest creation is not allowed by user") + + +def test_create_package_manifest_existing_package(package_manager): + """Test case for creating a manifest with an existing package.""" + + with patch('os.path.exists', side_effect=[False, True]), \ + patch('sonic_package_manager.main.PackageManager.is_installed', return_value=True), \ + patch('click.echo') as mock_echo: + + package_manager.create_package_manifest("test-package", from_json=None) + + mock_echo.assert_called_once_with("Error: A package with the same name test-package is already installed") + + +def test_create_package_manifest_existing_manifest(package_manager): + """Test case for creating a manifest with an existing manifest file.""" + + with patch('os.path.exists', return_value=True), \ + patch('click.echo') as mock_echo: + + package_manager.create_package_manifest("test-manifest", from_json=None) + + mock_echo.assert_called_once_with("Error: Manifest file 'test-manifest' already exists.") + + +def test_manifests_create_command(package_manager): + with patch('click.echo') as mock_echo, \ + patch('os.path.exists') as mock_exists, \ + patch('os.mkdir'), \ + patch('builtins.open', new_callable=mock_open()), \ + patch('json.dump'), \ + patch('json.load') as mock_json_load, \ + patch('sonic_package_manager.manifest.Manifest.marshal') as mock_marshal, \ + patch('sonic_package_manager.manager.PackageManager.is_installed') as mock_is_installed, \ + patch('sonic_package_manager.manager.PackageManager.download_file') as mock_download_file: + + dummy_json = {"package": {"name": "test", "version": "1.0.0"}, "service": {"name": "test"}} + # Setup mocks + mock_exists.return_value = False + mock_is_installed.return_value = False + mock_download_file.return_value = True + mock_marshal.return_value = None + mock_json_load.return_value = dummy_json + + # Run the function + package_manager.create_package_manifest("test_manifest", dummy_json) + + # Assertions + mock_echo.assert_called_with("Manifest 'test_manifest' created successfully.") + + +def test_manifests_update_command(package_manager): + with patch('click.echo') as mock_echo, \ + patch('os.path.exists') as mock_exists, \ + patch('os.mkdir'), \ + patch('builtins.open', new_callable=unittest.mock.mock_open), \ + patch('json.dump'), \ + patch('json.load') as mock_json_load, \ + patch('sonic_package_manager.manifest.Manifest.marshal') as mock_marshal, \ + patch('sonic_package_manager.manager.PackageManager.is_installed') as mock_is_installed, \ + patch('sonic_package_manager.manager.PackageManager.download_file') as mock_download_file: + + dummy_json = {"package": {"name": "test", "version": "2.0.0"}, "service": {"name": "test"}} + # Setup mocks + mock_exists.return_value = True + mock_is_installed.return_value = True + mock_download_file.return_value = True + 
mock_marshal.return_value = None + mock_json_load.return_value = dummy_json + + # Run the function + package_manager.update_package_manifest("test_manifest", "dummy_json") + + # Assertions + mock_echo.assert_called_with("Manifest 'test_manifest' updated successfully.") + + +def test_delete_package_manifest(package_manager): + with patch('click.echo') as mock_echo, \ + patch('click.prompt') as mock_prompt, \ + patch('os.path.exists') as mock_exists, \ + patch('os.remove'): + + # Test case 1: deleting default manifest + package_manager.delete_package_manifest("default_manifest") + mock_echo.assert_called_with("Default Manifest deletion is not allowed") + mock_echo.reset_mock() # Reset the mock for the next test case + + # Test case 2: manifest file doesn't exist + mock_exists.return_value = True + mock_exists.side_effect = lambda x: False if x.endswith("test_manifest") else True + package_manager.delete_package_manifest("test_manifest") + mock_echo.assert_called_with("Error: Manifest file 'test_manifest' not found.") + mock_echo.reset_mock() + + # Test case 3: user confirms deletion + mock_exists.side_effect = lambda x: True if x.endswith("test_manifest") else False + mock_prompt.return_value = "y" + package_manager.delete_package_manifest("test_manifest") + mock_echo.assert_called_with("Manifest 'test_manifest' deleted successfully.") + mock_echo.reset_mock() + + # Test case 4: user cancels deletion + mock_prompt.return_value = "n" + package_manager.delete_package_manifest("test_manifest") + mock_echo.assert_called_with("Deletion cancelled.") + mock_echo.reset_mock() + + +def test_show_package_manifest(package_manager): + with patch('click.echo') as mock_echo, \ + patch('os.path.exists') as mock_exists, \ + patch('builtins.open', unittest.mock.mock_open()), \ + patch('json.load') as mock_json_load: + + mock_exists.return_value = True + mock_exists.side_effect = lambda x: True if x.endswith("test_manifest") else False + + dummy_json = {"package": {"name": "test", "version": "2.0.0"}, "service": {"name": "test"}} + mock_json_load.return_value = dummy_json + + package_manager.show_package_manifest("test_manifest") + mock_echo.assert_called_with(json.dumps(dummy_json, indent=4)) + + +def test_list_package_manifest(package_manager): + with patch('click.echo') as mock_echo, \ + patch('os.path.exists') as mock_exists, \ + patch('os.listdir') as mock_listdir: + + # Test case 1: no custom local manifest files found + mock_exists.return_value = True + mock_listdir.return_value = [] + package_manager.list_package_manifest() + mock_echo.assert_called_with("No custom local manifest files found.") + + # Test case 2: custom local manifest files found + mock_listdir.return_value = ["manifest1.json", "manifest2.json"] + package_manager.list_package_manifest() + mock_echo.assert_any_call("Custom Local Manifest files:") + mock_echo.assert_any_call("- manifest1.json") + mock_echo.assert_any_call("- manifest2.json") + + +def test_download_file_http(package_manager): + fake_remote_url = "http://www.example.com/index.html" + fake_local_path = "local_path" + with patch("requests.get") as mock_requests_get: + with patch("builtins.open", mock_open()) as mock_file: + package_manager.download_file(fake_remote_url, fake_local_path) + mock_requests_get.assert_called_once_with(fake_remote_url, stream=True) + mock_file.assert_called_once_with("local_path", "wb") + + +def test_download_file_scp(package_manager): + fake_remote_url = "scp://admin@10.x.x.x:/home/admin/sec_update.json" + fake_local_path = "local_path" + + with 
patch("paramiko.SSHClient") as mock_ssh_client: + with patch("scp.SCPClient"): + with patch("getpass.getpass", return_value="test_password"): + package_manager.download_file(fake_remote_url, fake_local_path) + + mock_ssh_client.assert_called_once() + mock_ssh_client.return_value.set_missing_host_key_policy.assert_called_once() + mock_ssh_client.return_value.connect.assert_called_once_with( + "10.x.x.x", + username="admin", + password="test_password" + ) + + +def test_download_file_sftp(package_manager): + fake_remote_url = "sftp://admin@10.x.x.x:/home/admin/sec_update.json" + fake_local_path = "local_path" + + with patch("paramiko.SSHClient") as mock_ssh_client: + with patch("paramiko.SFTPClient.from_transport"): + with patch("getpass.getpass", return_value="test_password"): + package_manager.download_file(fake_remote_url, fake_local_path) + + mock_ssh_client.assert_called_once() + mock_ssh_client.return_value.set_missing_host_key_policy.assert_called_once() + mock_ssh_client.return_value.connect.assert_called_once_with( + "10.x.x.x", + username="admin", + password="test_password" + ) diff --git a/tests/sonic_package_manager/test_manifest.py b/tests/sonic_package_manager/test_manifest.py index 009895991a7..5eaa2f6053a 100644 --- a/tests/sonic_package_manager/test_manifest.py +++ b/tests/sonic_package_manager/test_manifest.py @@ -1,9 +1,11 @@ #!/usr/bin/env python import pytest +import json +from unittest.mock import patch, mock_open from sonic_package_manager.constraint import ComponentConstraints -from sonic_package_manager.manifest import Manifest, ManifestError +from sonic_package_manager.manifest import Manifest, ManifestError, MANIFESTS_LOCATION def test_manifest_v1_defaults(): @@ -85,3 +87,33 @@ def test_manifest_v1_unmarshal(): for key, section in manifest_json_input.items(): for field, value in section.items(): assert manifest_json[key][field] == value + + +@patch("sonic_package_manager.manifest.open", new_callable=mock_open) +def test_get_manifest_from_local_file_existing_manifest(mock_open, sonic_fs): + # Create a mock manifest file + manifest_name = "test_manifest.json" + manifest_content = {"package": {"name": "test_package", "version": "1.0.0"}, + "service": {"name": "test_service"}} + mock_open.return_value.__enter__.return_value.read.return_value = json.dumps(manifest_content) + sonic_fs.create_dir(MANIFESTS_LOCATION) + + # Call the function + desired_dict = Manifest.get_manifest_from_local_file(manifest_name) + + exp_manifest_content = {"package": {"name": "test_manifest.json", "version": "1.0.0"}, + "service": {"name": "test_manifest.json"}} + manifest_string = json.dumps(exp_manifest_content, indent=4) + desired_output = { + 'Tag': 'master', + 'com': { + 'azure': { + 'sonic': { + 'manifest': manifest_string + } + } + } + } + + # Check if the returned dictionary matches the expected structure + assert desired_dict == desired_output diff --git a/tests/sonic_package_manager/test_metadata.py b/tests/sonic_package_manager/test_metadata.py index 96f9bbc38dd..f386836a838 100644 --- a/tests/sonic_package_manager/test_metadata.py +++ b/tests/sonic_package_manager/test_metadata.py @@ -2,13 +2,14 @@ import json import contextlib -from unittest.mock import Mock, MagicMock - +from unittest.mock import Mock, MagicMock, patch +import tempfile +import os import pytest from sonic_package_manager.database import PackageEntry from sonic_package_manager.errors import MetadataError -from sonic_package_manager.manifest import Manifest +from sonic_package_manager.manifest import 
MANIFESTS_LOCATION, DEFAULT_MANIFEST_FILE from sonic_package_manager.metadata import MetadataResolver from sonic_package_manager.version import Version @@ -87,3 +88,125 @@ def test_metadata_construction(manifest_str): }) assert metadata.yang_modules == ['TEST', 'TEST 2'] + +@pytest.fixture +def temp_manifest_dir(): + with tempfile.TemporaryDirectory() as temp_dir: + yield temp_dir + + +@pytest.fixture +def temp_tarball(temp_manifest_dir): + tarball_path = os.path.join(temp_manifest_dir, 'image.tar') + # Create an empty tarball file for testing + open(tarball_path, 'w').close() + yield tarball_path + + +def test_metadata_resolver_local_with_name_and_use_local_manifest(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Patching the get_manifest_from_local_file method to avoid FileNotFoundError + with patch('sonic_package_manager.manifest.Manifest.get_manifest_from_local_file') as mock_get_manifest: + # Setting the side_effect to None to simulate the absence of a manifest file + mock_get_manifest.side_effect = None + with contextlib.suppress(MetadataError): + metadata_resolver.from_local('image', use_local_manifest=True, name='test_manifest', use_edit=False) + + +def test_metadata_resolver_local_manifest_file_not_exist(mock_registry_resolver, mock_docker_api, temp_manifest_dir): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Patching the get_manifest_from_local_file method to avoid FileNotFoundError + with patch('sonic_package_manager.manifest.Manifest.get_manifest_from_local_file') as mock_get_manifest: + # Setting the side_effect to None to simulate the absence of a manifest file + mock_get_manifest.side_effect = None + with pytest.raises(MetadataError): + metadata_resolver.from_local('image', use_local_manifest=True, name='test_manifest', use_edit=False) + + +def test_metadata_resolver_tarball_with_use_local_manifest(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Patching the get_manifest_from_local_file method to avoid FileNotFoundError + with patch('sonic_package_manager.manifest.Manifest.get_manifest_from_local_file') as mock_get_manifest: + # Setting the side_effect to None to simulate the absence of a manifest file + mock_get_manifest.side_effect = None + with pytest.raises(MetadataError): + metadata_resolver.from_tarball('image.tar', use_local_manifest=True, name='test_manifest') + + +def test_metadata_resolver_no_name_and_no_metadata_in_labels_for_remote(mock_registry_resolver, mock_docker_api): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Mocking the registry resolver's get_registry_for method to return a MagicMock + mock_registry_resolver.get_registry_for = MagicMock(return_value=Mock()) + with pytest.raises(TypeError): + metadata_resolver.from_registry('test-repository', '1.2.0') + + +def test_metadata_resolver_tarball_with_use_local_manifest_true(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Patching the get_manifest_from_local_file method to avoid FileNotFoundError + with patch('sonic_package_manager.manifest.Manifest.get_manifest_from_local_file') as mock_get_manifest: + # Setting the side_effect to None to simulate the absence of a manifest file + mock_get_manifest.side_effect = None + with 
pytest.raises(MetadataError): + metadata_resolver.from_tarball('image.tar', use_local_manifest=True) + + +def test_metadata_resolver_no_metadata_in_labels_for_tarball(mock_registry_resolver, mock_docker_api): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + with pytest.raises(FileNotFoundError): + metadata_resolver.from_tarball('image.tar') + + +def test_metadata_resolver_local_with_name_and_use_edit(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir, + sonic_fs): + with patch('builtins.open') as mock_open, \ + patch('json.loads') as mock_json_loads: + sonic_fs.create_dir(MANIFESTS_LOCATION) # Create the directory using sonic_fs fixture + mock_open.side_effect = FileNotFoundError # Simulate FileNotFoundError when opening the manifest file + mock_json_loads.side_effect = ValueError # Simulate ValueError when parsing JSON + + # Create the default manifest file + sonic_fs.create_file(DEFAULT_MANIFEST_FILE) + sonic_fs.create_file(os.path.join(MANIFESTS_LOCATION, "test_manifest.edit")) + + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + with pytest.raises(FileNotFoundError): + metadata_resolver.from_local('image', + use_local_manifest=True, + name='test_manifest', + use_edit=True) + + mock_open.assert_called_with(os.path.join(MANIFESTS_LOCATION, 'test_manifest.edit'), 'r') + mock_json_loads.assert_not_called() # Ensure json.loads is not called + + +def test_metadata_resolver_local_with_name_and_default_manifest(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir, + sonic_fs): + with patch('builtins.open') as mock_open, \ + patch('json.loads') as mock_json_loads: + sonic_fs.create_dir(MANIFESTS_LOCATION) # Create the directory using sonic_fs fixture + mock_open.side_effect = FileNotFoundError # Simulate FileNotFoundError when opening the manifest file + mock_json_loads.side_effect = ValueError # Simulate ValueError when parsing JSON + + # Create the default manifest file + sonic_fs.create_file(DEFAULT_MANIFEST_FILE) + + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + with pytest.raises(FileNotFoundError): + metadata_resolver.from_local('image', + use_local_manifest=False, + name='test_manifest', + use_edit=True) + + mock_open.assert_called_with(DEFAULT_MANIFEST_FILE, 'r') + mock_json_loads.assert_not_called() # Ensure json.loads is not called diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py index 8e6edcd0f06..8278a8da2b3 100644 --- a/tests/sonic_package_manager/test_service_creator.py +++ b/tests/sonic_package_manager/test_service_creator.py @@ -12,6 +12,7 @@ from sonic_package_manager.metadata import Metadata from sonic_package_manager.package import Package from sonic_package_manager.service_creator.creator import * +from sonic_package_manager.service_creator.creator import ETC_SYSTEMD_LOCATION from sonic_package_manager.service_creator.feature import FeatureRegistry @@ -106,6 +107,14 @@ def test_service_creator(sonic_fs, manifest, service_creator, package_manager): assert sonic_fs.exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, 'test.sh')) assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.service')) + # Create symlinks and directory featured creates + os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test.service')) + os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service')) + os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service')) + 
os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service.d')) + os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service.d')) + os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d')) + def read_file(name): with open(os.path.join(ETC_SONIC_PATH, name)) as file: return file.read() @@ -118,6 +127,15 @@ def read_file(name): assert generated_services_conf_content.endswith('\n') assert set(generated_services_conf_content.split()) == set(['test.service', 'test@.service']) + service_creator.remove(package) + + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service.d')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service.d')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d')) + def test_service_creator_with_timer_unit(sonic_fs, manifest, service_creator): entry = PackageEntry('test', 'azure/sonic-test') diff --git a/tests/ssdutil_test.py b/tests/ssdutil_test.py new file mode 100644 index 00000000000..bd57b0cbe7f --- /dev/null +++ b/tests/ssdutil_test.py @@ -0,0 +1,42 @@ +import sys +import argparse +from unittest.mock import patch, MagicMock +import sonic_platform_base # noqa: F401 + +sys.modules['sonic_platform'] = MagicMock() +sys.modules['sonic_platform_base.sonic_ssd.ssd_generic'] = MagicMock() + +import ssdutil.main as ssdutil # noqa: E402 + + +class Ssd(): + + def get_model(self): + return 'SkyNet' + + def get_firmware(self): + return 'ABC' + + def get_serial(self): + return 'T1000' + + def get_health(self): + return 5 + + def get_temperature(self): + return 3000 + + def get_vendor_output(self): + return 'SONiC Test' + + +class TestSsdutil: + + @patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', MagicMock(return_value=("test_path", ""))) # noqa: E501 + @patch('os.geteuid', MagicMock(return_value=0)) + def test_sonic_storage_path(self): + + with patch('argparse.ArgumentParser.parse_args', MagicMock()) as mock_args: # noqa: E501 + sys.modules['sonic_platform_base.sonic_storage.ssd'] = MagicMock(return_value=Ssd()) # noqa: E501 + mock_args.return_value = argparse.Namespace(device='/dev/sda', verbose=True, vendor=True) # noqa: E501 + ssdutil.ssdutil() diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py deleted file mode 100644 index 04064d306ed..00000000000 --- a/tests/suppress_pending_fib_test.py +++ /dev/null @@ -1,34 +0,0 @@ -from click.testing import CliRunner - -import config.main as config -import show.main as show -from utilities_common.db import Db - - -class TestSuppressFibPending: - def test_synchronous_mode(self): - runner = CliRunner() - - db = Db() - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['enabled'], obj=db) - print(result.output) - assert result.exit_code == 0 - assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'enabled' - - result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) - assert result.exit_code == 0 - assert result.output == 'Enabled\n' - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db) - print(result.output) - assert result.exit_code == 0 - assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'disabled' - - result 
= runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) - assert result.exit_code == 0 - assert result.output == 'Disabled\n' - - result = runner.invoke(config.config.commands['suppress-fib-pending'], ['invalid-input'], obj=db) - print(result.output) - assert result.exit_code != 0 diff --git a/tests/test_sonic_installer.py b/tests/test_sonic_installer.py index 9e8438a7fc6..66eb972fdf6 100644 --- a/tests/test_sonic_installer.py +++ b/tests/test_sonic_installer.py @@ -86,6 +86,9 @@ def rootfs_path_mock(path): call(["sh", "-c", f"echo 'DOCKER_OPTS=\"$DOCKER_OPTS {' '.join(dockerd_opts)}\"' >> {mounted_image_folder}/etc/default/docker"]), # dockerd started with added options as host dockerd call(["chroot", mounted_image_folder, "/usr/lib/docker/docker.sh", "start"]), call(["cp", "/var/lib/sonic-package-manager/packages.json", f"{mounted_image_folder}/tmp/packages.json"]), + call(["mkdir", "-p", "/var/lib/sonic-package-manager/manifests"]), + call(["cp", "-arf", "/var/lib/sonic-package-manager/manifests", + f"{mounted_image_folder}/var/lib/sonic-package-manager"]), call(["touch", f"{mounted_image_folder}/tmp/docker.sock"]), call(["mount", "--bind", "/var/run/docker.sock", f"{mounted_image_folder}/tmp/docker.sock"]), call(["cp", f"{mounted_image_folder}/etc/resolv.conf", "/tmp/resolv.conf.backup"]), diff --git a/tests/vlan_test.py b/tests/vlan_test.py index 2d3c1dcf1b6..fc3569b87d2 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -1426,7 +1426,7 @@ def test_config_set_router_port_on_member_interface(self): ["Ethernet4", "10.10.10.1/24"], obj=obj) print(result.exit_code, result.output) assert result.exit_code == 0 - assert 'Interface Ethernet4 is in trunk mode and needs to be in routed mode!' in result.output + assert 'Interface Ethernet4 is a member of vlan\nAborting!\n' in result.output def test_config_vlan_add_member_of_portchannel(self): runner = CliRunner() diff --git a/utilities_common/bgp.py b/utilities_common/bgp.py new file mode 100644 index 00000000000..640be87ee00 --- /dev/null +++ b/utilities_common/bgp.py @@ -0,0 +1,23 @@ +from swsscommon.swsscommon import CFG_BGP_DEVICE_GLOBAL_TABLE_NAME as CFG_BGP_DEVICE_GLOBAL # noqa + +# +# BGP constants ------------------------------------------------------------------------------------------------------- +# + +BGP_DEVICE_GLOBAL_KEY = "STATE" + +SYSLOG_IDENTIFIER = "bgp-cli" + + +# +# BGP helpers --------------------------------------------------------------------------------------------------------- +# + + +def to_str(state): + """ Convert boolean to string representation """ + if state == "true": + return "enabled" + elif state == "false": + return "disabled" + return state diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index 65f9a594963..cb49123c4bf 100644 --- a/utilities_common/bgp_util.py +++ b/utilities_common/bgp_util.py @@ -197,7 +197,8 @@ def get_neighbor_dict_from_table(db, table_name): return neighbor_dict -def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, vtysh_shell_cmd=constants.VTYSH_COMMAND): +def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, + vtysh_shell_cmd=constants.VTYSH_COMMAND, exit_on_fail=True): bgp_instance_id = [] output = None if bgp_namespace is not multi_asic.DEFAULT_NAMESPACE: @@ -208,16 +209,16 @@ def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, vtysh output, ret = clicommon.run_command(cmd, return_cmd=True) if ret != 0: click.echo(output.rstrip('\n')) - sys.exit(ret) + output = "" if not 
exit_on_fail else sys.exit(ret)
     except Exception:
         ctx = click.get_current_context()
-        ctx.fail("Unable to get summary from bgp {}".format(bgp_instance_id))
+        ctx.fail("Unable to get summary from bgp {}".format(bgp_instance_id)) if exit_on_fail else None
     return output
 
 
-def run_bgp_show_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE):
-    output = run_bgp_command(vtysh_cmd, bgp_namespace, constants.RVTYSH_COMMAND)
+def run_bgp_show_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, exit_on_fail=True):
+    output = run_bgp_command(vtysh_cmd, bgp_namespace, constants.RVTYSH_COMMAND, exit_on_fail)
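+    # exit_on_fail=False lets callers degrade gracefully instead of terminating
+    # the whole CLI process on a vtysh error. Illustrative sketch (not part of
+    # this change) of a caller iterating over namespaces:
+    #
+    #     output = run_bgp_show_command(vtysh_cmd, ns, exit_on_fail=False)
+    #     if not output:
+    #         continue  # this namespace's bgp instance was unreachable; skip it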
     # handle the alias mode in the following code
     if output is not None:
         if clicommon.get_interface_naming_mode() == "alias" and re.search("show ip|ipv6 route", vtysh_cmd):
@@ -298,6 +299,10 @@ def display_bgp_summary(bgp_summary, af):
         af: IPV4 or IPV6
     '''
+
+    # "Neighbhor" below is a known typo, but fixing it would break the many
+    # automation scripts that community users have built against this output
+    # over the years, so for now it is kept as is.
     headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down",
                "State/PfxRcd", "NeighborName"]
diff --git a/utilities_common/cli.py b/utilities_common/cli.py
index 63336377a8e..c8a314b7042 100644
--- a/utilities_common/cli.py
+++ b/utilities_common/cli.py
@@ -731,8 +731,7 @@ def run_command(command, display_cmd=False, ignore_error=False, return_cmd=False
     # with a list for next hops
     if (get_interface_naming_mode() == "alias" and not command_str.startswith("intfutil") and not re.search(
             "show ip|ipv6 route", command_str)):
-        run_command_in_alias_mode(command, shell=shell)
-        sys.exit(0)
+        return run_command_in_alias_mode(command, shell=shell)
 
     proc = subprocess.Popen(command, shell=shell, text=True, stdout=subprocess.PIPE)
diff --git a/utilities_common/general.py b/utilities_common/general.py
index 6ed70a46a11..97155532caf 100644
--- a/utilities_common/general.py
+++ b/utilities_common/general.py
@@ -2,8 +2,11 @@
 import importlib.util
 import sys
 
-from sonic_py_common.multi_asic import is_multi_asic
+from sonic_py_common import multi_asic
 from swsscommon import swsscommon
 
+FEATURE_TABLE = "FEATURE"
+FEATURE_HAS_PER_ASIC_SCOPE = 'has_per_asic_scope'
+FEATURE_HAS_GLOBAL_SCOPE = 'has_global_scope'
 
 def load_module_from_source(module_name, file_path):
     """
@@ -25,7 +28,7 @@ def load_db_config():
     - database_global.json for multi asic
     - database_config.json for single asic
     '''
-    if is_multi_asic():
+    if multi_asic.is_multi_asic():
         if not swsscommon.SonicDBConfig.isGlobalInit():
             swsscommon.SonicDBConfig.load_sonic_global_db_config()
     else:
@@ -39,6 +42,28 @@ def get_optional_value_for_key_in_config_tbl(config_db, port, key, table):
         return None
 
     value = info_dict.get(key, None)
     return value
+
+
+def get_feature_state_data(config_db, feature):
+    '''
+    Get the feature state from the FEATURE table in CONFIG_DB.
+    Returns (global_scope, per_asic_scope):
+    - if the feature state is disabled, return "False" for both global_scope and per_asic_scope
+    - if this is not a multi-asic platform, return the feature state for global_scope ("True"/"False")
+      and "False" for asic_scope
+    '''
+    global_scope = "False"
+    asic_scope = "False"
+    info_dict = config_db.get_entry(FEATURE_TABLE, feature)
+    if not info_dict:
+        return global_scope, asic_scope
+    if multi_asic.is_multi_asic():
+        if info_dict['state'].lower() == "enabled":
+            global_scope = info_dict[FEATURE_HAS_GLOBAL_SCOPE]
+            asic_scope = info_dict[FEATURE_HAS_PER_ASIC_SCOPE]
+    else:
+        if info_dict['state'].lower() == "enabled":
+            global_scope = "True"
+    return global_scope, asic_scope
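+
+
+# Illustrative usage sketch (not part of this change; "bgp" is an assumed
+# feature name). Callers are expected to pass a connected ConfigDBConnector:
+#
+#     from swsscommon.swsscommon import ConfigDBConnector
+#     config_db = ConfigDBConnector()
+#     config_db.connect()
+#     global_scope, asic_scope = get_feature_state_data(config_db, "bgp")
+#     if global_scope == "True":
+#         pass  # feature is enabled at the host (global) scope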