diff --git a/README.md b/README.md
index f63b0832a2..d6f9a5e25a 100644
--- a/README.md
+++ b/README.md
@@ -51,7 +51,7 @@ A convenient alternative is to let the SONiC build system configure a build envi
 2. Build the sonic-utilities Python wheel package inside the Bullseye slave container, and tell the build system to keep the container alive when finished
     ```
-    make NOSTRETCH=1 NOBUSTER=1 KEEP_SLAVE_ON=yes target/python-wheels/bullseye/sonic_utilities-1.2-py3-none-any.whl
+    make -f Makefile.work BLDENV=bookworm KEEP_SLAVE_ON=yes target/python-wheels/bookworm/sonic_utilities-1.2-py3-none-any.whl
     ```
 3. When the build finishes, your prompt will change to indicate you are inside the slave container. Change into the `src/sonic-utilities/` directory
@@ -66,6 +66,7 @@ A convenient alternative is to let the SONiC build system configure a build envi
 ```
 python3 setup.py bdist_wheel
 ```
+Note: By default, this command will not update the wheel package in target/. To specify the destination location of the wheel package, use the "-d" option.
 
 #### To run unit tests
@@ -73,6 +74,12 @@ python3 setup.py bdist_wheel
 python3 setup.py test
 ```
 
+#### To install the package on a SONiC machine
+```
+sudo pip uninstall sonic-utilities
+sudo pip install YOUR_WHEEL_PACKAGE
+```
+Note: Don't use "--force-reinstall".
 
 ### sonic-utilities-data
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index dec731eea4..5781be9436 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -27,7 +27,7 @@ stages:
     displayName: "Static Analysis"
     timeoutInMinutes: 10
     continueOnError: true
-    pool: ubuntu-20.04
+    pool: sonic-ubuntu-1c
     steps:
     - template: .azure-pipelines/pre-commit-check.yml
@@ -46,6 +46,13 @@ stages:
           image: sonicdev-microsoft.azurecr.io:443/sonic-slave-bullseye:$(BUILD_BRANCH)
 
     steps:
+    - script: |
+        set -ex
+        sudo apt-get update
+        sudo apt-get install -y python3-pip
+        sudo pip3 install requests==2.31.0
+      displayName: "Install dependencies"
+
     - script: |
         sourceBranch=$(Build.SourceBranchName)
         if [[ "$(Build.Reason)" == "PullRequest" ]];then
@@ -114,7 +121,7 @@ stages:
         curl -sSL https://packages.microsoft.com/keys/microsoft.asc | sudo apt-key add -
         sudo apt-add-repository https://packages.microsoft.com/debian/11/prod
         sudo apt-get update
-        sudo apt-get install -y dotnet-sdk-5.0
+        sudo apt-get install -y dotnet-sdk-8.0
      displayName: "Install .NET CORE"
 
     - script: |
diff --git a/clear/main.py b/clear/main.py
index 5ffcd2dba4..cb1e3243b7 100755
--- a/clear/main.py
+++ b/clear/main.py
@@ -12,7 +12,7 @@ from show.plugins.pbh import read_pbh_counters
 from config.plugins.pbh import serialize_pbh_counters
 from . import plugins
-
+from . import stp
 
 # This is from the aliases example:
 # https://github.com/pallets/click/blob/57c6f09611fc47ca80db0bd010f05998b3c0aa95/examples/aliases/aliases.py
 class Config(object):
@@ -145,6 +145,10 @@ def ipv6():
     pass
 
 
+# 'STP'
+#
+cli.add_command(stp.spanning_tree)
+
 #
 # Inserting BGP functionality into cli's clear parse-chain.
 # BGP commands are determined by the routing-stack being elected.
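The watermark hunks that follow add the same eight-line `--namespace` option to a dozen `clear` commands, one copy per command. A minimal sketch of how that repetition could be collapsed into a single reusable decorator — the `namespace_option` helper is hypothetical, but the option attributes and the validation callback are the ones the patch itself uses:

```python
import click
import utilities_common.multi_asic as multi_asic_util


def namespace_option(command):
    """Attach the shared '-n/--namespace' option to a click command."""
    return click.option(
        '--namespace', '-n', 'namespace',
        default=None, type=str, show_default=True,
        help='Namespace name or all',
        callback=multi_asic_util.multi_asic_namespace_validation_callback)(command)


# Usage sketch: replaces the repeated eight-line option block on each command.
@click.command('headroom')
@namespace_option
def clear_wm_pg_headroom(namespace):
    """Clear user headroom WM for pg"""
    command = ['watermarkstat', '-c', '-t', 'pg_headroom']
    if namespace:
        command += ['-n', str(namespace)]
```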
@@ -229,16 +233,38 @@ def watermark():
     if os.geteuid() != 0:
         sys.exit("Root privileges are required for this operation")
 
+
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
 @watermark.command('headroom')
-def clear_wm_pg_headroom():
+def clear_wm_pg_headroom(namespace):
     """Clear user headroom WM for pg"""
     command = ['watermarkstat', '-c', '-t', 'pg_headroom']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @watermark.command('shared')
-def clear_wm_pg_shared():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_wm_pg_shared(namespace):
     """Clear user shared WM for pg"""
     command = ['watermarkstat', '-c', '-t', 'pg_shared']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
 @priority_group.group()
@@ -261,16 +287,38 @@ def persistent_watermark():
     if os.geteuid() != 0:
         sys.exit("Root privileges are required for this operation")
 
+
 @persistent_watermark.command('headroom')
-def clear_pwm_pg_headroom():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_pwm_pg_headroom(namespace):
     """Clear persistent headroom WM for pg"""
     command = ['watermarkstat', '-c', '-p', '-t', 'pg_headroom']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @persistent_watermark.command('shared')
-def clear_pwm_pg_shared():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_pwm_pg_shared(namespace):
     """Clear persistent shared WM for pg"""
     command = ['watermarkstat', '-c', '-p', '-t', 'pg_shared']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
@@ -285,69 +333,159 @@ def watermark():
     if os.geteuid() != 0:
         sys.exit("Root privileges are required for this operation")
 
+
 @watermark.command('unicast')
-def clear_wm_q_uni():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_wm_q_uni(namespace):
     """Clear user WM for unicast queues"""
     command = ['watermarkstat', '-c', '-t', 'q_shared_uni']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @watermark.command('multicast')
-def clear_wm_q_multi():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_wm_q_multi(namespace):
     """Clear user WM for multicast queues"""
     command = ['watermarkstat', '-c', '-t', 'q_shared_multi']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @watermark.command('all')
-def clear_wm_q_all():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_wm_q_all(namespace):
     """Clear user WM for all queues"""
     command = ['watermarkstat', '-c', '-t', 'q_shared_all']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @queue.group(name='persistent-watermark')
 def persistent_watermark():
     """Clear queue persistent WM. One does not simply clear WM, root is required"""
     if os.geteuid() != 0:
         sys.exit("Root privileges are required for this operation")
 
+
 @persistent_watermark.command('unicast')
-def clear_pwm_q_uni():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_pwm_q_uni(namespace):
     """Clear persistent WM for persistent queues"""
     command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_uni']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @persistent_watermark.command('multicast')
-def clear_pwm_q_multi():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_pwm_q_multi(namespace):
     """Clear persistent WM for multicast queues"""
     command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_multi']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @persistent_watermark.command('all')
-def clear_pwm_q_all():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def clear_pwm_q_all(namespace):
     """Clear persistent WM for all queues"""
     command = ['watermarkstat', '-c', '-p', '-t', 'q_shared_all']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @cli.group(name='headroom-pool')
 def headroom_pool():
     """Clear headroom pool WM"""
     pass
 
+
 @headroom_pool.command('watermark')
-def watermark():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def watermark(namespace):
     """Clear headroom pool user WM. One does not simply clear WM, root is required"""
     if os.geteuid() != 0:
         sys.exit("Root privileges are required for this operation")
 
     command = ['watermarkstat', '-c', '-t', 'headroom_pool']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
+
 @headroom_pool.command('persistent-watermark')
-def persistent_watermark():
+@click.option('--namespace',
+              '-n',
+              'namespace',
+              default=None,
+              type=str,
+              show_default=True,
+              help='Namespace name or all',
+              callback=multi_asic_util.multi_asic_namespace_validation_callback)
+def persistent_watermark(namespace):
     """Clear headroom pool persistent WM. One does not simply clear WM, root is required"""
     if os.geteuid() != 0:
         sys.exit("Root privileges are required for this operation")
 
     command = ['watermarkstat', '-c', '-p', '-t', 'headroom_pool']
+    if namespace:
+        command += ['-n', str(namespace)]
     run_command(command)
 
 #
diff --git a/clear/stp.py b/clear/stp.py
new file mode 100644
index 0000000000..c3e3a4b098
--- /dev/null
+++ b/clear/stp.py
@@ -0,0 +1,46 @@
+import click
+import utilities_common.cli as clicommon
+
+#
+# This group houses Spanning_tree commands and subgroups
+#
+
+
+@click.group(cls=clicommon.AliasedGroup)
+@click.pass_context
+def spanning_tree(ctx):
+    '''Clear Spanning-tree counters'''
+    pass
+
+
+@spanning_tree.group('statistics', cls=clicommon.AliasedGroup, invoke_without_command=True)
+@click.pass_context
+def stp_clr_stats(ctx):
+    if ctx.invoked_subcommand is None:
+        command = 'sudo stpctl clrstsall'
+        clicommon.run_command(command)
+
+
+@stp_clr_stats.command('interface')
+@click.argument('interface_name', metavar='<interface_name>', required=True)
+@click.pass_context
+def stp_clr_stats_intf(ctx, interface_name):
+    command = 'sudo stpctl clrstsintf ' + interface_name
+    clicommon.run_command(command)
+
+
+@stp_clr_stats.command('vlan')
+@click.argument('vlan_id', metavar='<vlan_id>', required=True)
+@click.pass_context
+def stp_clr_stats_vlan(ctx, vlan_id):
+    command = 'sudo stpctl clrstsvlan ' + vlan_id
+    clicommon.run_command(command)
+
+
+@stp_clr_stats.command('vlan-interface')
+@click.argument('vlan_id', metavar='<vlan_id>', required=True)
+@click.argument('interface_name', metavar='<interface_name>', required=True)
+@click.pass_context
+def stp_clr_stats_vlan_intf(ctx, vlan_id, interface_name):
+    command = 'sudo stpctl clrstsvlanintf ' + vlan_id + ' ' + interface_name
+    clicommon.run_command(command)
diff --git a/config/aaa.py b/config/aaa.py
index 3c76187126..fdb784dc4a 100644
--- a/config/aaa.py
+++ b/config/aaa.py
@@ -114,9 +114,9 @@ def trace(option):
 
 
 @click.command()
-@click.argument('auth_protocol', nargs=-1, type=click.Choice(["radius", "tacacs+", "local", "default"]))
+@click.argument('auth_protocol', nargs=-1, type=click.Choice(["ldap", "radius", "tacacs+", "local", "default"]))
 def login(auth_protocol):
-    """Switch login authentication [ {radius, tacacs+, local} | default ]"""
+    """Switch login authentication [ {ldap, radius, tacacs+, local} | default ]"""
     if len(auth_protocol) is 0:
         click.echo('Argument "auth_protocol" is required')
         return
@@ -135,9 +135,9 @@ def login(auth_protocol):
         val2 = auth_protocol[1]
         good_ap = False
         if val == 'local':
-            if val2 == 'radius' or val2 == 'tacacs+':
+            if val2 == 'radius' or val2 == 'tacacs+' or val2 == 'ldap':
                 good_ap = True
-        elif val == 'radius' or val == 'tacacs+':
+        elif val == 'radius' or val == 'tacacs+' or val == 'ldap':
             if val2 == 'local':
                 good_ap = True
         if good_ap == True:
diff --git a/config/bgp_cli.py b/config/bgp_cli.py
new file mode 100644
index 0000000000..a5a565359a
--- /dev/null
+++ b/config/bgp_cli.py
@@ -0,0 +1,192 @@
+import click
+import utilities_common.cli as clicommon
+
+from sonic_py_common import logger
+from utilities_common.bgp import (
+    CFG_BGP_DEVICE_GLOBAL,
+    BGP_DEVICE_GLOBAL_KEY,
+    SYSLOG_IDENTIFIER,
+    to_str,
+)
+
+
+log = logger.Logger(SYSLOG_IDENTIFIER)
+log.set_min_log_priority_info()
+
+
+#
+# BGP DB interface ----------------------------------------------------------------------------------------------------
+#
+
+
+def update_entry_validated(db, table, key, data, create_if_not_exists=False):
+    """ Update entry in table and validate configuration.
+    If attribute value in data is None, the attribute is deleted.
+
+    Args:
+        db (swsscommon.ConfigDBConnector): Config DB connector object.
+        table (str): Table name to add new entry to.
+        key (Union[str, Tuple]): Key name in the table.
+        data (Dict): Entry data.
+        create_if_not_exists (bool):
+            If the entry does not already exist, it is created when this
+            flag is set to True; when set to False, the update fails instead.
+    Raises:
+        Exception: when cfg does not satisfy YANG schema.
+    """
+
+    cfg = db.get_config()
+    cfg.setdefault(table, {})
+
+    if not data:
+        raise click.ClickException(f"No field/values to update {key}")
+
+    if create_if_not_exists:
+        cfg[table].setdefault(key, {})
+
+    if key not in cfg[table]:
+        raise click.ClickException(f"{key} does not exist")
+
+    entry_changed = False
+    for attr, value in data.items():
+        if value == cfg[table][key].get(attr):
+            continue
+        entry_changed = True
+        if value is None:
+            cfg[table][key].pop(attr, None)
+        else:
+            cfg[table][key][attr] = value
+
+    if not entry_changed:
+        return
+
+    db.set_entry(table, key, cfg[table][key])
+
+
+#
+# BGP handlers --------------------------------------------------------------------------------------------------------
+#
+
+
+def tsa_handler(ctx, db, state):
+    """ Handle config updates for Traffic-Shift-Away (TSA) feature """
+
+    table = CFG_BGP_DEVICE_GLOBAL
+    key = BGP_DEVICE_GLOBAL_KEY
+    data = {
+        "tsa_enabled": state,
+    }
+
+    try:
+        update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True)
+        log.log_notice("Configured TSA state: {}".format(to_str(state)))
+    except Exception as e:
+        log.log_error("Failed to configure TSA state: {}".format(str(e)))
+        ctx.fail(str(e))
+
+
+def wcmp_handler(ctx, db, state):
+    """ Handle config updates for Weighted-Cost Multi-Path (W-ECMP) feature """
+
+    table = CFG_BGP_DEVICE_GLOBAL
+    key = BGP_DEVICE_GLOBAL_KEY
+    data = {
+        "wcmp_enabled": state,
+    }
+
+    try:
+        update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True)
+        log.log_notice("Configured W-ECMP state: {}".format(to_str(state)))
+    except Exception as e:
+        log.log_error("Failed to configure W-ECMP state: {}".format(str(e)))
+        ctx.fail(str(e))
+
+
+#
+# BGP device-global ---------------------------------------------------------------------------------------------------
+#
+
+
+@click.group(
+    name="device-global",
+    cls=clicommon.AliasedGroup
+)
+def DEVICE_GLOBAL():
+    """ Configure BGP device global state """
+
+    pass
+
+
+#
+# BGP device-global tsa -----------------------------------------------------------------------------------------------
+#
+
+
+@DEVICE_GLOBAL.group(
+    name="tsa",
+    cls=clicommon.AliasedGroup
+)
+def DEVICE_GLOBAL_TSA():
+    """ Configure Traffic-Shift-Away (TSA) feature """
+
+    pass
+
+
+@DEVICE_GLOBAL_TSA.command(
+    name="enabled"
+)
+@clicommon.pass_db
+@click.pass_context
+def DEVICE_GLOBAL_TSA_ENABLED(ctx, db):
+    """ Enable Traffic-Shift-Away (TSA) feature """
+
+    tsa_handler(ctx, db, "true")
+
+
+@DEVICE_GLOBAL_TSA.command(
+    name="disabled"
+)
+@clicommon.pass_db
+@click.pass_context
+def DEVICE_GLOBAL_TSA_DISABLED(ctx, db):
+    """ Disable Traffic-Shift-Away (TSA) feature """
+
+    tsa_handler(ctx, db, "false")
+
+
+#
+# BGP device-global w-ecmp --------------------------------------------------------------------------------------------
+#
+
+
+@DEVICE_GLOBAL.group(
+    name="w-ecmp",
+    cls=clicommon.AliasedGroup
+)
+def DEVICE_GLOBAL_WCMP():
+    """ Configure Weighted-Cost Multi-Path (W-ECMP) feature """
+
+    pass
+
+
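Each leaf command in this new module writes a single boolean field into the `BGP_DEVICE_GLOBAL` table through `update_entry_validated`. A rough sketch of driving the group the way the repository's existing CLI unit tests usually do, via Click's `CliRunner`; it assumes the unit-test environment where `Db()` is backed by mocked connectors, which is not shown here:

```python
from click.testing import CliRunner
from utilities_common.db import Db

import config.main as config

runner = CliRunner()
db = Db()  # assumes the test environment's mocked ConfigDB

# 'config bgp device-global tsa enabled' should set tsa_enabled="true"
result = runner.invoke(
    config.config.commands["bgp"].commands["device-global"]
          .commands["tsa"].commands["enabled"],
    obj=db)
assert result.exit_code == 0, result.output
```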
+@DEVICE_GLOBAL_WCMP.command(
+    name="enabled"
+)
+@clicommon.pass_db
+@click.pass_context
+def DEVICE_GLOBAL_WCMP_ENABLED(ctx, db):
+    """ Enable Weighted-Cost Multi-Path (W-ECMP) feature """
+
+    wcmp_handler(ctx, db, "true")
+
+
+@DEVICE_GLOBAL_WCMP.command(
+    name="disabled"
+)
+@clicommon.pass_db
+@click.pass_context
+def DEVICE_GLOBAL_WCMP_DISABLED(ctx, db):
+    """ Disable Weighted-Cost Multi-Path (W-ECMP) feature """
+
+    wcmp_handler(ctx, db, "false")
diff --git a/config/chassis_modules.py b/config/chassis_modules.py
old mode 100644
new mode 100755
index e640779d16..5f70ef404a
--- a/config/chassis_modules.py
+++ b/config/chassis_modules.py
@@ -1,9 +1,14 @@
 #!/usr/sbin/env python
 
 import click
-
+import time
+import re
+import subprocess
 import utilities_common.cli as clicommon
 
+TIMEOUT_SECS = 10
+
+
 #
 # 'chassis_modules' group ('config chassis_modules ...')
 #
@@ -17,6 +22,81 @@ def modules():
     """Configure chassis modules"""
     pass
 
+
+def get_config_module_state(db, chassis_module_name):
+    config_db = db.cfgdb
+    fvs = config_db.get_entry('CHASSIS_MODULE', chassis_module_name)
+    if not fvs:
+        return 'up'
+    else:
+        return fvs['admin_status']
+
+
+#
+# Name: check_config_module_state_with_timeout
+# return: True: timeout, False: not timeout
+#
+def check_config_module_state_with_timeout(ctx, db, chassis_module_name, state):
+    counter = 0
+    while get_config_module_state(db, chassis_module_name) != state:
+        time.sleep(1)
+        counter += 1
+        if counter >= TIMEOUT_SECS:
+            ctx.fail("get_config_module_state {} timeout".format(chassis_module_name))
+            return True
+    return False
+
+
+def get_asic_list_from_db(chassisdb, chassis_module_name):
+    asic_list = []
+    asics_keys_list = chassisdb.keys("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE*")
+    for asic_key in asics_keys_list:
+        name = chassisdb.get("CHASSIS_STATE_DB", asic_key, "name")
+        if name == chassis_module_name:
+            asic_id = int(re.search(r"(\d+)$", asic_key).group())
+            asic_list.append(asic_id)
+    return asic_list
+
+
+#
+# Syntax: fabric_module_set_admin_status <'up'/'down'>
+#
+def fabric_module_set_admin_status(db, chassis_module_name, state):
+    chassisdb = db.db
+    chassisdb.connect("CHASSIS_STATE_DB")
+    asic_list = get_asic_list_from_db(chassisdb, chassis_module_name)
+
+    if len(asic_list) == 0:
+        return
+
+    if state == "down":
+        for asic in asic_list:
+            click.echo("Stop swss@{} and peer services".format(asic))
+            clicommon.run_command(['sudo', 'systemctl', 'stop', 'swss@{}.service'.format(asic)])
+
+            is_active = subprocess.call(["systemctl", "is-active", "--quiet", "swss@{}.service".format(asic)])
+
+            if is_active == 0:  # zero means active, non-zero means inactive
+                click.echo("Stop swss@{} and peer services failed".format(asic))
+                return
+
+        click.echo("Delete related CHASSIS_FABRIC_ASIC_TABLE entries")
+
+        for asic in asic_list:
+            chassisdb.delete("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic" + str(asic))
+
+        # Start the services again, in case the user only ran "systemctl stop swss@/syncd@"
+        # without bringing down the hardware
+        for asic in asic_list:
+            # Reset the failure count to avoid hitting the systemd service restart limit
+            clicommon.run_command(['sudo', 'systemctl', 'reset-failed', 'swss@{}.service'.format(asic)])
+            click.echo("Start swss@{} and peer services".format(asic))
+            clicommon.run_command(['sudo', 'systemctl', 'start', 'swss@{}.service'.format(asic)])
+    elif state == "up":
+        for asic in asic_list:
+            click.echo("Start swss@{} and peer services".format(asic))
+            clicommon.run_command(['sudo', 'systemctl', 'start', 'swss@{}.service'.format(asic)])
+
 #
 # 'shutdown' subcommand ('config chassis_modules shutdown ...')
 #
@@ -33,8 +113,17 @@ def shutdown_chassis_module(db, chassis_module_name):
        not chassis_module_name.startswith("FABRIC-CARD"):
         ctx.fail("'module_name' has to begin with 'SUPERVISOR', 'LINE-CARD' or 'FABRIC-CARD'")
 
+    # To avoid duplicate operation
+    if get_config_module_state(db, chassis_module_name) == 'down':
+        click.echo("Module {} is already in down state".format(chassis_module_name))
+        return
+
+    click.echo("Shutting down chassis module {}".format(chassis_module_name))
     fvs = {'admin_status': 'down'}
     config_db.set_entry('CHASSIS_MODULE', chassis_module_name, fvs)
+    if chassis_module_name.startswith("FABRIC-CARD"):
+        if not check_config_module_state_with_timeout(ctx, db, chassis_module_name, 'down'):
+            fabric_module_set_admin_status(db, chassis_module_name, 'down')
 
 #
 # 'startup' subcommand ('config chassis_modules startup ...')
@@ -45,5 +134,15 @@ def shutdown_chassis_module(db, chassis_module_name):
 def startup_chassis_module(db, chassis_module_name):
     """Chassis-module startup of module"""
     config_db = db.cfgdb
+    ctx = click.get_current_context()
+
+    # To avoid duplicate operation
+    if get_config_module_state(db, chassis_module_name) == 'up':
+        click.echo("Module {} is already set to up state".format(chassis_module_name))
+        return
 
+    click.echo("Starting up chassis module {}".format(chassis_module_name))
     config_db.set_entry('CHASSIS_MODULE', chassis_module_name, None)
+    if chassis_module_name.startswith("FABRIC-CARD"):
+        if not check_config_module_state_with_timeout(ctx, db, chassis_module_name, 'up'):
+            fabric_module_set_admin_status(db, chassis_module_name, 'up')
diff --git a/config/main.py b/config/main.py
index 6474a181f8..665af5fd6f 100644
--- a/config/main.py
+++ b/config/main.py
@@ -1,6 +1,8 @@
 #!/usr/sbin/env python
 
+import threading
 import click
+import concurrent.futures
 import datetime
 import ipaddress
 import json
@@ -15,11 +17,13 @@
 import itertools
 import copy
 import tempfile
+import sonic_yang
 
 from jsonpatch import JsonPatchConflict
 from jsonpointer import JsonPointerException
 from collections import OrderedDict
 from generic_config_updater.generic_updater import GenericUpdater, ConfigFormat, extract_scope
+from generic_config_updater.gu_common import HOST_NAMESPACE, GenericConfigUpdaterError
 from minigraph import parse_device_desc_xml, minigraph_encoder
 from natsort import natsorted
 from portconfig import get_child_ports
@@ -27,9 +31,10 @@
 from sonic_py_common import device_info, multi_asic
 from sonic_py_common.general import getstatusoutput_noshell
 from sonic_py_common.interface import get_interface_table_name, get_port_table_name, get_intf_longname
+from sonic_yang_cfg_generator import SonicYangCfgDbGenerator
 from utilities_common import util_base
 from swsscommon import swsscommon
-from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector
+from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, ConfigDBPipeConnector
 from utilities_common.db import Db
 from utilities_common.intf_filter import parse_interface_in_filter
 from utilities_common import bgp_util
@@ -38,6 +43,7 @@
 from utilities_common.general import load_db_config, load_module_from_source
 from .validated_config_db_connector import ValidatedConfigDBConnector
 import utilities_common.multi_asic as multi_asic_util
+from utilities_common.flock import try_lock
 
 from .utils import log
 
@@ -54,12 +60,13 @@
 from . import vlan
 from . import vxlan
 from . import plugins
-from .config_mgmt import ConfigMgmtDPB, ConfigMgmt
+from .config_mgmt import ConfigMgmtDPB, ConfigMgmt, YANG_DIR
 from . import mclag
 from . import syslog
 from . import switchport
 from . import dns
-
+from . import bgp_cli
+from . import stp
 
 # mock masic APIs for unit test
 try:
@@ -119,6 +126,12 @@
 GRE_TYPE_RANGE = click.IntRange(min=0, max=65535)
 ADHOC_VALIDATION = True
 
+if os.environ.get("UTILITIES_UNIT_TESTING", "0") in ("1", "2"):
+    temp_system_reload_lockfile = tempfile.NamedTemporaryFile()
+    SYSTEM_RELOAD_LOCK = temp_system_reload_lockfile.name
+else:
+    SYSTEM_RELOAD_LOCK = "/etc/sonic/reload.lock"
+
 # Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension.
 sonic_cfggen = load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen')
 
@@ -246,7 +259,7 @@ def breakout_Ports(cm, delPorts=list(), portJson=dict(), force=False, \
         click.echo("*** Printing dependencies ***")
         for dep in deps:
             click.echo(dep)
-        sys.exit(0)
+        sys.exit(1)
     else:
         click.echo("[ERROR] Port breakout Failed!!! Opting Out")
         raise click.Abort()
@@ -895,10 +908,47 @@ def _reset_failed_services():
     for service in _get_sonic_services():
         clicommon.run_command(['systemctl', 'reset-failed', str(service)])
 
+
+def get_service_finish_timestamp(service):
+    out, _ = clicommon.run_command(['sudo',
+                                    'systemctl',
+                                    'show',
+                                    '--no-pager',
+                                    service,
+                                    '-p',
+                                    'ExecMainExitTimestamp',
+                                    '--value'],
+                                   return_cmd=True)
+    return out.strip(' \t\n\r')
+
+
+def wait_service_restart_finish(service, last_timestamp, timeout=30):
+    start_time = time.time()
+    elapsed_time = 0
+    while elapsed_time < timeout:
+        current_timestamp = get_service_finish_timestamp(service)
+        if current_timestamp and (current_timestamp != last_timestamp):
+            return
+
+        time.sleep(1)
+        elapsed_time = time.time() - start_time
+
+    log.log_warning("Service: {} does not restart in {} seconds, stop waiting".format(service, timeout))
+
+
 def _restart_services():
+    last_interface_config_timestamp = get_service_finish_timestamp('interfaces-config')
+    last_networking_timestamp = get_service_finish_timestamp('networking')
+
     click.echo("Restarting SONiC target ...")
     clicommon.run_command(['sudo', 'systemctl', 'restart', 'sonic.target'])
 
+    # These services restart eth0, which makes the device lose network connectivity for about 10 seconds.
+    # When TACACS is enabled, every remote user command is authorized by the TACACS service over the network,
+    # so if load_minigraph exits before eth0 is back up, commands issued after load_minigraph may fail.
+    wait_service_restart_finish('interfaces-config', last_interface_config_timestamp)
+    wait_service_restart_finish('networking', last_networking_timestamp)
+
     try:
         subprocess.check_call(['sudo', 'monit', 'status'], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
         click.echo("Enabling container monitoring ...")
@@ -1155,25 +1205,186 @@ def validate_gre_type(ctx, _, value):
         return gre_type_value
     except ValueError:
         raise click.UsageError("{} is not a valid GRE type".format(value))
-
+
+
+def multiasic_save_to_singlefile(db, filename):
+    """A function to save all ASICs' config to a single file
+    """
+    all_current_config = {}
+    cfgdb_clients = db.cfgdb_clients
+
+    for ns, config_db in cfgdb_clients.items():
+        current_config = config_db.get_config()
+        sonic_cfggen.FormatConverter.to_serialized(current_config)
+        asic_name = "localhost" if ns == DEFAULT_NAMESPACE else ns
+        all_current_config[asic_name] = sort_dict(current_config)
+    click.echo("Integrate each ASIC's config into a single JSON file {}.".format(filename))
+    with open(filename, 'w') as file:
+        json.dump(all_current_config, file, indent=4)
+
+
+def apply_patch_wrapper(args):
+    return apply_patch_for_scope(*args)
+
+
 # Function to apply patch for a single ASIC.
 def apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path):
     scope, changes = scope_changes
     # Replace localhost to DEFAULT_NAMESPACE which is db definition of Host
-    if scope.lower() == "localhost" or scope == "":
+    if scope.lower() == HOST_NAMESPACE or scope == "":
         scope = multi_asic.DEFAULT_NAMESPACE
-
-    scope_for_log = scope if scope else "localhost"
+
+    scope_for_log = scope if scope else HOST_NAMESPACE
+    thread_id = threading.get_ident()
+    log.log_notice(f"apply_patch_for_scope started for {scope_for_log} by {changes} in thread:{thread_id}")
+
     try:
         # Call apply_patch with the ASIC-specific changes and predefined parameters
-        GenericUpdater(namespace=scope).apply_patch(jsonpatch.JsonPatch(changes), config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path)
+        GenericUpdater(scope=scope).apply_patch(jsonpatch.JsonPatch(changes),
+                                                config_format,
+                                                verbose,
+                                                dry_run,
+                                                ignore_non_yang_tables,
+                                                ignore_path)
         results[scope_for_log] = {"success": True, "message": "Success"}
-        log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes}")
+        log.log_notice(f"'apply-patch' executed successfully for {scope_for_log} by {changes} in thread:{thread_id}")
     except Exception as e:
         results[scope_for_log] = {"success": False, "message": str(e)}
         log.log_error(f"'apply-patch' executed failed for {scope_for_log} by {changes} due to {str(e)}")
 
+
+def validate_patch(patch):
+    try:
+        command = ["show", "runningconfiguration", "all"]
+        proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE)
+        all_running_config, returncode = proc.communicate()
+        if returncode:
+            log.log_notice(f"Fetch all runningconfiguration failed as output:{all_running_config}")
+            return False
+
+        # Structure validation and simulate apply patch.
+        all_target_config = patch.apply(json.loads(all_running_config))
+
+        # Verify target config by YANG models
+        target_config = all_target_config.pop(HOST_NAMESPACE) if multi_asic.is_multi_asic() else all_target_config
+        target_config.pop("bgpraw", None)
+        if not SonicYangCfgDbGenerator().validate_config_db_json(target_config):
+            return False
+
+        if multi_asic.is_multi_asic():
+            for asic in multi_asic.get_namespace_list():
+                target_config = all_target_config.pop(asic)
+                target_config.pop("bgpraw", None)
+                if not SonicYangCfgDbGenerator().validate_config_db_json(target_config):
+                    return False
+
+        return True
+    except Exception as e:
+        raise GenericConfigUpdaterError(f"Validate json patch: {patch} failed due to:{e}")
+
+
+def multiasic_validate_single_file(filename):
+    ns_list = [DEFAULT_NAMESPACE, *multi_asic.get_namespace_list()]
+    file_input = read_json_file(filename)
+    file_ns_list = [DEFAULT_NAMESPACE if key == HOST_NAMESPACE else key for key in file_input]
+    if set(ns_list) != set(file_ns_list):
+        click.echo(
+            "Input file {} must contain all asics config. ns_list: {} file ns_list: {}".format(
+                filename, ns_list, file_ns_list)
+        )
+        raise click.Abort()
+
+
+def load_sysinfo_if_missing(asic_config):
+    device_metadata = asic_config.get('DEVICE_METADATA', {})
+    platform = device_metadata.get("localhost", {}).get("platform")
+    mac = device_metadata.get("localhost", {}).get("mac")
+    if not platform:
+        log.log_warning("platform is missing from the input file")
+        return True
+    elif not mac:
+        log.log_warning("mac is missing from the input file")
+        return True
+    else:
+        return False
+
+
+def flush_configdb(namespace=DEFAULT_NAMESPACE):
+    if namespace is DEFAULT_NAMESPACE:
+        config_db = ConfigDBConnector()
+    else:
+        config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
+
+    config_db.connect()
+    client = config_db.get_redis_client(config_db.CONFIG_DB)
+    client.flushdb()
+    return client, config_db
+
+
+def migrate_db_to_lastest(namespace=DEFAULT_NAMESPACE):
+    # Migrate DB contents to latest version
+    db_migrator = '/usr/local/bin/db_migrator.py'
+    if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK):
+        if namespace is DEFAULT_NAMESPACE:
+            command = [db_migrator, '-o', 'migrate']
+        else:
+            command = [db_migrator, '-o', 'migrate', '-n', namespace]
+        clicommon.run_command(command, display_cmd=True)
+
+
+def multiasic_write_to_db(filename, load_sysinfo):
+    file_input = read_json_file(filename)
+    for ns in [DEFAULT_NAMESPACE, *multi_asic.get_namespace_list()]:
+        asic_name = HOST_NAMESPACE if ns == DEFAULT_NAMESPACE else ns
+        asic_config = file_input[asic_name]
+
+        asic_load_sysinfo = True if load_sysinfo else False
+        if not asic_load_sysinfo:
+            asic_load_sysinfo = load_sysinfo_if_missing(asic_config)
+
+        if asic_load_sysinfo:
+            cfg_hwsku = asic_config.get("DEVICE_METADATA", {}).\
+                get("localhost", {}).get("hwsku")
+            if not cfg_hwsku:
+                click.secho("Could not get the HWSKU from config file, Exiting!", fg='magenta')
+                sys.exit(1)
+
+        client, _ = flush_configdb(ns)
+
+        if asic_load_sysinfo:
+            if ns is DEFAULT_NAMESPACE:
+                command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db']
+            else:
+                command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(ns), '--write-to-db']
+            clicommon.run_command(command, display_cmd=True)
+
+        if ns is DEFAULT_NAMESPACE:
+            config_db = ConfigDBPipeConnector(use_unix_socket_path=True)
+        else:
+            config_db = ConfigDBPipeConnector(use_unix_socket_path=True, namespace=ns)
+
+        config_db.connect(False)
+        sonic_cfggen.FormatConverter.to_deserialized(asic_config)
+        data = sonic_cfggen.FormatConverter.output_to_db(asic_config)
+        config_db.mod_config(sonic_cfggen.FormatConverter.output_to_db(data))
+        client.set(config_db.INIT_INDICATOR, 1)
+
+        migrate_db_to_lastest(ns)
+
+
+def config_file_yang_validation(filename):
+    config_to_check = read_json_file(filename)
+    sy = sonic_yang.SonicYang(YANG_DIR)
+    sy.loadYangModel()
+    try:
+        sy.loadData(configdbJson=config_to_check)
+        sy.validate_data_tree()
+    except sonic_yang.SonicYangException as e:
+        click.secho("{} fails YANG validation! Error: {}".format(filename, str(e)),
+                    fg='magenta')
+        raise click.Abort()
+
+
 # This is our main entrypoint - the main 'config' command
 @click.group(cls=clicommon.AbbreviationGroup, context_settings=CONTEXT_SETTINGS)
 @click.pass_context
@@ -1223,7 +1434,10 @@ def config(ctx):
 config.add_command(vlan.vlan)
 config.add_command(vxlan.vxlan)
 
-#add mclag commands
+# add stp commands
+config.add_command(stp.spanning_tree)
+
+# add mclag commands
 config.add_command(mclag.mclag)
 config.add_command(mclag.mclag_member)
 config.add_command(mclag.mclag_unique_ip)
@@ -1241,7 +1455,8 @@ def config(ctx):
 @click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False,
               prompt='Existing files will be overwritten, continue?')
 @click.argument('filename', required=False)
-def save(filename):
+@clicommon.pass_db
+def save(db, filename):
     """Export current config DB to a file on disk.\n
        <filename> : Names of configuration file(s) to save, separated by comma with no spaces in between
     """
@@ -1256,7 +1471,13 @@ def save(filename):
 
     if filename is not None:
         cfg_files = filename.split(',')
 
-        if len(cfg_files) != num_cfg_file:
+        # If only one filename is provided in multi-ASIC mode,
+        # save all ASIC configurations to that single file.
+        if len(cfg_files) == 1 and multi_asic.is_multi_asic():
+            filename = cfg_files[0]
+            multiasic_save_to_singlefile(db, filename)
+            return
+        elif len(cfg_files) != num_cfg_file:
             click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
             return
 
@@ -1362,11 +1583,12 @@ def print_dry_run_message(dry_run):
               help='format of config of the patch is either ConfigDb(ABNF) or SonicYang', show_default=True)
 @click.option('-d', '--dry-run', is_flag=True, default=False, help='test out the command without affecting config state')
+@click.option('-p', '--parallel', is_flag=True, default=False, help='apply the change to all ASICs in parallel')
 @click.option('-n', '--ignore-non-yang-tables', is_flag=True, default=False, help='ignore validation for tables without YANG models', hidden=True)
 @click.option('-i', '--ignore-path', multiple=True, help='ignore validation for config specified by given path which is a JsonPointer', hidden=True)
 @click.option('-v', '--verbose', is_flag=True, default=False, help='print additional details of what the operation is doing')
 @click.pass_context
-def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, ignore_path, verbose):
+def apply_patch(ctx, patch_file_path, format, dry_run, parallel, ignore_non_yang_tables, ignore_path, verbose):
     """Apply given patch of updates to Config. A patch is a JsonPatch which follows rfc6902.
        This command can be used do partial updates to the config with minimum disruption to running processes.
        It allows addition as well as deletion of configs. The patch file represents a diff of ConfigDb(ABNF)
@@ -1381,6 +1603,9 @@ def apply_patch(ctx, patch_file_path, format, dry_run, ignore_non_yang_tables, i
         patch_as_json = json.loads(text)
         patch = jsonpatch.JsonPatch(patch_as_json)
 
+        if not validate_patch(patch):
+            raise GenericConfigUpdaterError(f"Failed validating patch:{patch}")
+
         results = {}
         config_format = ConfigFormat[format.upper()]
         # Initialize a dictionary to hold changes categorized by scope
@@ -1403,20 +1628,39 @@
         # Empty case to force validate YANG model.
         if not changes_by_scope:
             asic_list = [multi_asic.DEFAULT_NAMESPACE]
-            asic_list.extend(multi_asic.get_namespace_list())
+            if multi_asic.is_multi_asic():
+                asic_list.extend(multi_asic.get_namespace_list())
             for asic in asic_list:
                 changes_by_scope[asic] = []
 
         # Apply changes for each scope
-        for scope_changes in changes_by_scope.items():
-            apply_patch_for_scope(scope_changes, results, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_path)
+        if parallel:
+            with concurrent.futures.ThreadPoolExecutor() as executor:
+                # Prepare the argument tuples
+                arguments = [(scope_changes, results, config_format,
+                              verbose, dry_run, ignore_non_yang_tables, ignore_path)
+                             for scope_changes in changes_by_scope.items()]
+
+                # Submit all tasks and wait for them to complete
+                futures = [executor.submit(apply_patch_wrapper, args) for args in arguments]
+
+                # Wait for all tasks to complete
+                concurrent.futures.wait(futures)
+        else:
+            for scope_changes in changes_by_scope.items():
+                apply_patch_for_scope(scope_changes,
+                                      results,
+                                      config_format,
+                                      verbose, dry_run,
+                                      ignore_non_yang_tables,
+                                      ignore_path)
 
         # Check if any updates failed
         failures = [scope for scope, result in results.items() if not result['success']]
 
         if failures:
             failure_messages = '\n'.join([f"- {failed_scope}: {results[failed_scope]['message']}" for failed_scope in failures])
-            raise Exception(f"Failed to apply patch on the following scopes:\n{failure_messages}")
+            raise GenericConfigUpdaterError(f"Failed to apply patch on the following scopes:\n{failure_messages}")
 
         log.log_notice(f"Patch applied successfully for {patch}.")
         click.secho("Patch applied successfully.", fg="cyan", underline=True)
@@ -1533,9 +1777,11 @@ def list_checkpoints(ctx, verbose):
 @click.option('-n', '--no_service_restart', default=False, is_flag=True, help='Do not restart docker services')
 @click.option('-f', '--force', default=False, is_flag=True, help='Force config reload without system checks')
 @click.option('-t', '--file_format', default='config_db',type=click.Choice(['config_yang', 'config_db']),show_default=True,help='specify the file format')
+@click.option('-b', '--bypass-lock', default=False, is_flag=True, help='Do reload without acquiring lock')
 @click.argument('filename', required=False)
 @clicommon.pass_db
-def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_format):
+@try_lock(SYSTEM_RELOAD_LOCK, timeout=0)
+def reload(db, filename, yes, load_sysinfo, no_service_restart, force, file_format, bypass_lock):
     """Clear current configuration and import a previous saved config DB dump file.
        <filename> : Names of configuration file(s) to load, separated by comma with no spaces in between
     """
@@ -1568,140 +1814,133 @@
     if multi_asic.is_multi_asic() and file_format == 'config_db':
         num_cfg_file += num_asic
 
+    multiasic_single_file_mode = False
     # If the user give the filename[s], extract the file names.
     if filename is not None:
         cfg_files = filename.split(',')
 
-        if len(cfg_files) != num_cfg_file:
+        if len(cfg_files) == 1 and multi_asic.is_multi_asic():
+            multiasic_validate_single_file(cfg_files[0])
+            multiasic_single_file_mode = True
+        elif len(cfg_files) != num_cfg_file:
             click.echo("Input {} config file(s) separated by comma for multiple files ".format(num_cfg_file))
             return
 
+    if filename is not None and filename != "/dev/stdin":
+        if multi_asic.is_multi_asic():
+            # Multi-ASIC configs are not fully YANG-validated yet, so skip the check here.
+            pass
+        else:
+            config_file_yang_validation(filename)
+
     #Stop services before config push
     if not no_service_restart:
         log.log_notice("'reload' stopping services...")
         _stop_services()
 
-    # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB
-    # service running in the host + DB services running in each ASIC namespace created per ASIC.
-    # In the below logic, we get all namespaces in this platform and add an empty namespace ''
-    # denoting the current namespace which we are in ( the linux host )
-    for inst in range(-1, num_cfg_file-1):
-        # Get the namespace name, for linux host it is None
-        if inst == -1:
-            namespace = None
-        else:
-            namespace = "{}{}".format(NAMESPACE_PREFIX, inst)
-
-        # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
-        if cfg_files:
-            file = cfg_files[inst+1]
-            # Save to tmpfile in case of stdin input which can only be read once
-            if file == "/dev/stdin":
-                file_input = read_json_file(file)
-                (_, tmpfname) = tempfile.mkstemp(dir="/tmp", suffix="_configReloadStdin")
-                write_json_file(file_input, tmpfname)
-                file = tmpfname
-        else:
-            if file_format == 'config_db':
-                if namespace is None:
-                    file = DEFAULT_CONFIG_DB_FILE
-                else:
-                    file = "/etc/sonic/config_db{}.json".format(inst)
+    if multiasic_single_file_mode:
+        multiasic_write_to_db(cfg_files[0], load_sysinfo)
+    else:
+        # In Single ASIC platforms we have single DB service. In multi-ASIC platforms we have a global DB
+        # service running in the host + DB services running in each ASIC namespace created per ASIC.
+        # In the below logic, we get all namespaces in this platform and add an empty namespace ''
+        # denoting the current namespace which we are in ( the linux host )
+        for inst in range(-1, num_cfg_file-1):
+            # Get the namespace name, for linux host it is DEFAULT_NAMESPACE
+            if inst == -1:
+                namespace = DEFAULT_NAMESPACE
             else:
-                file = DEFAULT_CONFIG_YANG_FILE
-
-
-        # Check the file exists before proceeding.
-        if not os.path.exists(file):
-            click.echo("The config file {} doesn't exist".format(file))
-            continue
-
-        if file_format == 'config_db':
-            file_input = read_json_file(file)
+                namespace = "{}{}".format(NAMESPACE_PREFIX, inst)
+
+            # Get the file from user input, else take the default file /etc/sonic/config_db{NS_id}.json
+            if cfg_files:
+                file = cfg_files[inst+1]
+                # Save to tmpfile in case of stdin input which can only be read once
+                if file == "/dev/stdin":
+                    file_input = read_json_file(file)
+                    (_, tmpfname) = tempfile.mkstemp(dir="/tmp", suffix="_configReloadStdin")
+                    write_json_file(file_input, tmpfname)
+                    file = tmpfname
+            else:
+                if file_format == 'config_db':
+                    if namespace is DEFAULT_NAMESPACE:
+                        file = DEFAULT_CONFIG_DB_FILE
+                    else:
+                        file = "/etc/sonic/config_db{}.json".format(inst)
+                else:
+                    file = DEFAULT_CONFIG_YANG_FILE
 
-            platform = file_input.get("DEVICE_METADATA", {}).\
-                get("localhost", {}).get("platform")
-            mac = file_input.get("DEVICE_METADATA", {}).\
-                get("localhost", {}).get("mac")
+            # Check the file exists before proceeding.
+            if not os.path.exists(file):
+                click.echo("The config file {} doesn't exist".format(file))
+                continue
 
-            if not platform or not mac:
-                log.log_warning("Input file does't have platform or mac. platform: {}, mac: {}"
-                                .format(None if platform is None else platform, None if mac is None else mac))
-                load_sysinfo = True
+            if file_format == 'config_db':
+                file_input = read_json_file(file)
+                if not load_sysinfo:
+                    load_sysinfo = load_sysinfo_if_missing(file_input)
 
-        if load_sysinfo:
-            try:
-                command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"]
-                proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE)
-                output, err = proc.communicate()
-
-            except FileNotFoundError as e:
-                click.echo("{}".format(str(e)), err=True)
-                raise click.Abort()
-            except Exception as e:
-                click.echo("{}\n{}".format(type(e), str(e)), err=True)
-                raise click.Abort()
-
-            if not output:
-                click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta')
-                sys.exit(1)
-
-            cfg_hwsku = output.strip()
+            if load_sysinfo:
+                try:
+                    command = [SONIC_CFGGEN_PATH, "-j", file, '-v', "DEVICE_METADATA.localhost.hwsku"]
+                    proc = subprocess.Popen(command, text=True, stdout=subprocess.PIPE)
+                    output, err = proc.communicate()
+
+                except FileNotFoundError as e:
+                    click.echo("{}".format(str(e)), err=True)
+                    raise click.Abort()
+                except Exception as e:
+                    click.echo("{}\n{}".format(type(e), str(e)), err=True)
+                    raise click.Abort()
+
+                if not output:
+                    click.secho("Could not get the HWSKU from config file, Exiting!!!", fg='magenta')
+                    sys.exit(1)
 
-        if namespace is None:
-            config_db = ConfigDBConnector()
-        else:
-            config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
-
-        config_db.connect()
-        client = config_db.get_redis_client(config_db.CONFIG_DB)
-        client.flushdb()
+                cfg_hwsku = output.strip()
 
-        if load_sysinfo:
-            if namespace is None:
-                command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db']
-            else:
-                command = [str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db']
-            clicommon.run_command(command, display_cmd=True)
+            client, config_db = flush_configdb(namespace)
 
-        # For the database service running in linux host we use the file user gives as input
-        # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace,
-        # the default config_db.json format is used.
+            if load_sysinfo:
+                if namespace is DEFAULT_NAMESPACE:
+                    command = [
+                        str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '--write-to-db']
+                else:
+                    command = [
+                        str(SONIC_CFGGEN_PATH), '-H', '-k', str(cfg_hwsku), '-n', str(namespace), '--write-to-db']
+                clicommon.run_command(command, display_cmd=True)
 
+            # For the database service running in linux host we use the file user gives as input
+            # or by default DEFAULT_CONFIG_DB_FILE. In the case of database service running in namespace,
+            # the default config_db.json format is used.
-
-
-        config_gen_opts = []
+            config_gen_opts = []
 
-        if os.path.isfile(INIT_CFG_FILE):
-            config_gen_opts += ['-j', str(INIT_CFG_FILE)]
+            if os.path.isfile(INIT_CFG_FILE):
+                config_gen_opts += ['-j', str(INIT_CFG_FILE)]
 
-        if file_format == 'config_db':
-            config_gen_opts += ['-j', str(file)]
-        else:
-            config_gen_opts += ['-Y', str(file)]
-
-        if namespace is not None:
-            config_gen_opts += ['-n', str(namespace)]
+            if file_format == 'config_db':
+                config_gen_opts += ['-j', str(file)]
+            else:
+                config_gen_opts += ['-Y', str(file)]
 
-        command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db']
+            if namespace is not DEFAULT_NAMESPACE:
+                config_gen_opts += ['-n', str(namespace)]
 
-        clicommon.run_command(command, display_cmd=True)
-        client.set(config_db.INIT_INDICATOR, 1)
+            command = [SONIC_CFGGEN_PATH] + config_gen_opts + ['--write-to-db']
 
-        if os.path.exists(file) and file.endswith("_configReloadStdin"):
-            # Remove tmpfile
-            try:
-                os.remove(file)
-            except OSError as e:
-                click.echo("An error occurred while removing the temporary file: {}".format(str(e)), err=True)
+            clicommon.run_command(command, display_cmd=True)
+            client.set(config_db.INIT_INDICATOR, 1)
 
-        # Migrate DB contents to latest version
-        db_migrator='/usr/local/bin/db_migrator.py'
-        if os.path.isfile(db_migrator) and os.access(db_migrator, os.X_OK):
-            if namespace is None:
-                command = [db_migrator, '-o', 'migrate']
-            else:
-                command = [db_migrator, '-o', 'migrate', '-n', str(namespace)]
-            clicommon.run_command(command, display_cmd=True)
+            if os.path.exists(file) and file.endswith("_configReloadStdin"):
+                # Remove tmpfile
+                try:
+                    os.remove(file)
+                except OSError as e:
+                    click.echo("An error occurred while removing the temporary file: {}".format(str(e)), err=True)
+
+            # Migrate DB contents to latest version
+            migrate_db_to_lastest(namespace)
 
     # Re-generate the environment variable in case config_db.json was edited
     update_sonic_environment()
@@ -1762,8 +2001,10 @@ def load_mgmt_config(filename):
 @click.option('-t', '--traffic_shift_away', default=False, is_flag=True, help='Keep device in maintenance with TSA')
 @click.option('-o', '--override_config', default=False, is_flag=True, help='Enable config override. Proceed with default path.')
 @click.option('-p', '--golden_config_path', help='Provide golden config path to override. Use with --override_config')
+@click.option('-b', '--bypass-lock', default=False, is_flag=True, help='Do load minigraph without acquiring lock')
 @clicommon.pass_db
-def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path):
+@try_lock(SYSTEM_RELOAD_LOCK, timeout=0)
+def load_minigraph(db, no_service_restart, traffic_shift_away, override_config, golden_config_path, bypass_lock):
     """Reconfigure based on minigraph."""
     argv_str = ' '.join(['config', *sys.argv[1:]])
     log.log_notice(f"'load_minigraph' executing with command: {argv_str}")
@@ -1777,6 +2018,20 @@ def load_minigraph(db, no_service_restart, traffic_shift_away, override_config,
                        fg='magenta')
             raise click.Abort()
 
+        config_to_check = read_json_file(golden_config_path)
+        if multi_asic.is_multi_asic():
+            # Multi-ASIC golden configs are not fully YANG-validated yet, so skip the check here.
+ pass + else: + config_file_yang_validation(golden_config_path) + + # Dependency check golden config json + if multi_asic.is_multi_asic(): + host_config = config_to_check.get('localhost', {}) + else: + host_config = config_to_check + table_hard_dependency_check(host_config) + #Stop services before config push if not no_service_restart: log.log_notice("'load_minigraph' stopping services...") @@ -1995,8 +2250,8 @@ def override_config_table(db, input_config_db, dry_run): if multi_asic.is_multi_asic() and len(config_input): # Golden Config will use "localhost" to represent host name if ns == DEFAULT_NAMESPACE: - if "localhost" in config_input.keys(): - ns_config_input = config_input["localhost"] + if HOST_NAMESPACE in config_input.keys(): + ns_config_input = config_input[HOST_NAMESPACE] else: click.secho("Wrong config format! 'localhost' not found in host config! cannot override.. abort") sys.exit(1) @@ -2097,7 +2352,7 @@ def aaa_table_hard_dependency_check(config_json): tacacs_enable = "tacacs+" in aaa_authentication_login.split(",") tacplus_passkey = TACPLUS_TABLE.get("global", {}).get("passkey", "") if tacacs_enable and len(tacplus_passkey) == 0: - click.secho("Authentication with 'tacacs+' is not allowed when passkey not exits.", fg="magenta") + click.secho("Authentication with 'tacacs+' is not allowed when passkey not exists.", fg="magenta") sys.exit(1) @@ -2151,6 +2406,7 @@ def synchronous_mode(sync_mode): config reload -y \n Option 2. systemctl restart swss""" % sync_mode) + # # 'suppress-fib-pending' command ('config suppress-fib-pending ...') # @@ -2158,10 +2414,11 @@ def synchronous_mode(sync_mode): @click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) @clicommon.pass_db def suppress_pending_fib(db, state): - ''' Enable or disable pending FIB suppression. Once enabled, BGP will not advertise routes that are not yet installed in the hardware ''' + ''' Enable or disable pending FIB suppression. 
Once enabled, + BGP will not advertise routes that are not yet installed in the hardware ''' config_db = db.cfgdb - config_db.mod_entry('DEVICE_METADATA' , 'localhost', {"suppress-fib-pending" : state}) + config_db.mod_entry('DEVICE_METADATA', 'localhost', {"suppress-fib-pending": state}) # # 'yang_config_validation' command ('config yang_config_validation ...') @@ -2914,7 +3171,7 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose) _, hwsku_path = device_info.get_paths_to_platform_and_hwsku_dirs() sonic_version_file = device_info.get_sonic_version_file() - from_db = ['-d', '--write-to-db'] + from_db = ['-d'] if dry_run: from_db = ['--additional-data'] + [str(json_data)] if json_data else [] @@ -2960,11 +3217,27 @@ def reload(ctx, no_dynamic_buffer, no_delay, dry_run, json_data, ports, verbose) ) if os.path.isfile(qos_template_file): cmd_ns = [] if ns is DEFAULT_NAMESPACE else ['-n', str(ns)] - fname = "{}{}".format(dry_run, asic_id_suffix) if dry_run else "config-db" - command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + ['-t', '{},{}'.format(buffer_template_file, fname), '-t', '{},{}'.format(qos_template_file, fname), '-y', sonic_version_file] - # Apply the configurations only when both buffer and qos - # configuration files are present + buffer_fname = "/tmp/cfg_buffer{}.json".format(asic_id_suffix) + qos_fname = "/tmp/cfg_qos{}.json".format(asic_id_suffix) + + command = [SONIC_CFGGEN_PATH] + cmd_ns + from_db + [ + '-t', '{},{}'.format(buffer_template_file, buffer_fname), + '-t', '{},{}'.format(qos_template_file, qos_fname), + '-y', sonic_version_file + ] clicommon.run_command(command, display_cmd=True) + + command = [SONIC_CFGGEN_PATH] + cmd_ns + ["-j", buffer_fname, "-j", qos_fname] + if dry_run: + out, rc = clicommon.run_command(command + ["--print-data"], display_cmd=True, return_cmd=True) + if rc != 0: + # clicommon.run_command does this by default when rc != 0 and return_cmd=False + sys.exit(rc) + with open("{}{}".format(dry_run, asic_id_suffix), 'w') as f: + json.dump(json.loads(out), f, sort_keys=True, indent=4) + else: + clicommon.run_command(command + ["--write-to-db"], display_cmd=True) + else: click.secho("QoS definition template not found at {}".format( qos_template_file @@ -3296,7 +3569,10 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): """Add the SNMP agent listening IP:Port%Vrf configuration""" #Construct SNMP_AGENT_ADDRESS_CONFIG table key in the format ip|| - if not clicommon.is_ipaddress(agentip): + # Link local IP address should be provided along with zone id + # % for ex fe80::1%eth0 + agent_ip_addr = agentip.split('%')[0] + if not clicommon.is_ipaddress(agent_ip_addr): click.echo("Invalid IP address") return False config_db = ctx.obj['db'] @@ -3306,7 +3582,7 @@ def add_snmp_agent_address(ctx, agentip, port, vrf): click.echo("ManagementVRF is Enabled. 
Provide vrf.") return False found = 0 - ip = ipaddress.ip_address(agentip) + ip = ipaddress.ip_address(agent_ip_addr) for intf in netifaces.interfaces(): ipaddresses = netifaces.ifaddresses(intf) if ip_family[ip.version] in ipaddresses: @@ -3975,6 +4251,105 @@ def del_user(db, user): click.echo("Restart service snmp failed with error {}".format(e)) raise click.Abort() + +# +# 'bmp' group ('config bmp ...') +# +@config.group() +@clicommon.pass_db +def bmp(db): + """BMP-related configuration""" + pass + + +# +# common function to update bmp config table +# +@clicommon.pass_db +def update_bmp_table(db, table_name, value): + log.log_info(f"'bmp {value} {table_name}' executing...") + bmp_table = db.cfgdb.get_table('BMP') + if not bmp_table: + bmp_table = {'table': {table_name: value}} + else: + bmp_table['table'][table_name] = value + db.cfgdb.mod_entry('BMP', 'table', bmp_table['table']) + + +# +# 'enable' subgroup ('config bmp enable ...') +# +@bmp.group() +@clicommon.pass_db +def enable(db): + """Enable BMP table dump """ + pass + + +# +# 'bgp-neighbor-table' command ('config bmp enable bgp-neighbor-table') +# +@enable.command('bgp-neighbor-table') +@clicommon.pass_db +def enable_bgp_neighbor_table(db): + update_bmp_table('bgp_neighbor_table', 'true') + + +# +# 'bgp-rib-out-table' command ('config bmp enable bgp-rib-out-table') +# +@enable.command('bgp-rib-out-table') +@clicommon.pass_db +def enable_bgp_rib_out_table(db): + update_bmp_table('bgp_rib_out_table', 'true') + + +# +# 'bgp-rib-in-table' command ('config bmp enable bgp-rib-in-table') +# +@enable.command('bgp-rib-in-table') +@clicommon.pass_db +def enable_bgp_rib_in_table(db): + update_bmp_table('bgp_rib_in_table', 'true') + + +# +# 'disable' subgroup ('config bmp disable ...') +# +@bmp.group() +@clicommon.pass_db +def disable(db): + """Disable BMP table dump """ + pass + + +# +# 'bgp-neighbor-table' command ('config bmp disable bgp-neighbor-table') +# +@disable.command('bgp-neighbor-table') +@clicommon.pass_db +def disable_bgp_neighbor_table(db): + update_bmp_table('bgp_neighbor_table', 'false') + + +# +# 'bgp-rib-out-table' command ('config bmp disable bgp-rib-out-table') +# +@disable.command('bgp-rib-out-table') +@clicommon.pass_db +def diable_bgp_rib_out_table(db): + update_bmp_table('bgp_rib_out_table', 'false') + + +# +# 'bgp-rib-in-table' command ('config bmp disable bgp-rib-in-table') +# +@disable.command('bgp-rib-in-table') +@clicommon.pass_db +def disable_bgp_rib_in_table(db): + update_bmp_table('bgp_rib_in_table', 'false') + + # # 'bgp' group ('config bgp ...') # @@ -3984,6 +4359,11 @@ def bgp(): """BGP-related configuration tasks""" pass + + +# BGP module extensions +config.commands['bgp'].add_command(bgp_cli.DEVICE_GLOBAL) + # # 'shutdown' subgroup ('config bgp shutdown ...') # @@ -4519,7 +4899,7 @@ def breakout(ctx, interface_name, mode, verbose, force_remove_dependencies, load except Exception as e: click.secho("Failed to break out Port. 
Error: {}".format(str(e)), fg='magenta') - sys.exit(0) + sys.exit(1) def _get_all_mgmtinterface_keys(): """Returns list of strings containing mgmt interface keys @@ -4630,12 +5010,14 @@ def validate_vlan_exists(db,text): # 'add' subcommand # -@ip.command() + +@ip.command('add') @click.argument('interface_name', metavar='', required=True) @click.argument("ip_addr", metavar="", required=True) @click.argument('gw', metavar='', required=False) +@click.option('--secondary', "-s", is_flag=True, default=False) @click.pass_context -def add(ctx, interface_name, ip_addr, gw): +def add_interface_ip(ctx, interface_name, ip_addr, gw, secondary): """Add an IP address towards the interface""" # Get the config_db connector config_db = ValidatedConfigDBConnector(ctx.obj['config_db']) @@ -4644,6 +5026,14 @@ def add(ctx, interface_name, ip_addr, gw): interface_name = interface_alias_to_name(config_db, interface_name) if interface_name is None: ctx.fail("'interface_name' is None!") + # Add a validation to check this interface is not a member in vlan before + # changing it to a router port mode + vlan_member_table = config_db.get_table('VLAN_MEMBER') + + if (interface_is_in_vlan(vlan_member_table, interface_name)): + click.echo("Interface {} is a member of vlan\nAborting!".format(interface_name)) + return + portchannel_member_table = config_db.get_table('PORTCHANNEL_MEMBER') @@ -4720,7 +5110,25 @@ def add(ctx, interface_name, ip_addr, gw): config_db.set_entry(table_name, interface_name, {"admin_status": "up"}) else: config_db.set_entry(table_name, interface_name, {"NULL": "NULL"}) - config_db.set_entry(table_name, (interface_name, str(ip_address)), {"NULL": "NULL"}) + + if secondary: + # We update the secondary flag only in case of VLAN Interface. + if table_name == "VLAN_INTERFACE": + vlan_interface_table = config_db.get_table(table_name) + contains_primary = False + for key, value in vlan_interface_table.items(): + if not isinstance(key, tuple): + continue + name, prefix = key + if name == interface_name and "secondary" not in value: + contains_primary = True + if contains_primary: + config_db.set_entry(table_name, (interface_name, str(ip_address)), {"secondary": "true"}) + else: + ctx.fail("Primary for the interface {} is not set, so skipping adding the interface" + .format(interface_name)) + else: + config_db.set_entry(table_name, (interface_name, str(ip_address)), {"NULL": "NULL"}) # # 'del' subcommand @@ -5605,22 +6013,869 @@ def disable_use_link_local_only(ctx, interface_name): interface_dict = db.get_table(interface_type) set_ipv6_link_local_only_on_interface(db, interface_dict, interface_type, interface_name, "disable") + +def is_vaild_intf_ip_addr(ip_addr) -> bool: + """Check whether the ip address is valid""" + try: + ip_address = ipaddress.ip_interface(ip_addr) + except ValueError as err: + click.echo("IP address {} is not valid: {}".format(ip_addr, err)) + return False + + if ip_address.version == 6: + if ip_address.is_unspecified: + click.echo("IPv6 address {} is unspecified".format(str(ip_address))) + return False + elif ip_address.version == 4: + if str(ip_address.ip) == "0.0.0.0": + click.echo("IPv4 address {} is Zero".format(str(ip_address))) + return False + + if ip_address.is_multicast: + click.echo("IP address {} is multicast".format(str(ip_address))) + return False + + ip = ip_address.ip + if ip.is_loopback: + click.echo("IP address {} is loopback address".format(str(ip_address))) + return False + + return True + + # -# 'vrf' group ('config vrf ...') +# 'vrrp' subgroup ('config interface 
# -# 'vrf' group ('config vrf ...') +# 'vrrp' subgroup ('config interface vrrp ...') # - -@config.group(cls=clicommon.AbbreviationGroup, name='vrf') +@interface.group(cls=clicommon.AbbreviationGroup) @click.pass_context -def vrf(ctx): - """VRF-related configuration tasks""" - config_db = ConfigDBConnector() - config_db.connect() - ctx.obj = {} - ctx.obj['config_db'] = config_db +def vrrp(ctx): + """Vrrp configuration""" + pass -@vrf.command('add') -@click.argument('vrf_name', metavar='', required=True) -@click.pass_context + +# +# ip subgroup ('config interface vrrp ip ...') +# +@vrrp.group(cls=clicommon.AbbreviationGroup, name='ip') +@click.pass_context +def ip(ctx): + """vrrp ip configuration """ + pass + + +@ip.command('add') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("ip_addr", metavar="", required=True) +@click.pass_context +def add_vrrp_ip(ctx, interface_name, vrrp_id, ip_addr): + """Add IPv4 address to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + if not is_valid_intf_ip_addr(ip_addr): + ctx.abort() + if check_vrrp_ip_exist(config_db, ip_addr): + ctx.abort() + + if "/" not in ip_addr: + ctx.fail("IP address {} is missing a mask. Such as xx.xx.xx.xx/yy or xx:xx::xx/yy".format(str(ip_addr))) + + # check vip exist + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + address_list = [] + if vrrp_entry: + # update vrrp + if "vip" in vrrp_entry: + address_list = vrrp_entry.get("vip") + # add ip address + if len(address_list) >= 4: + ctx.fail("The vrrp instance {} already has 4 IP addresses configured".format(vrrp_id)) + + else: + # create new vrrp + vrrp_entry = {} + vrrp_keys = config_db.get_keys("VRRP") + if len(vrrp_keys) >= 254: + ctx.fail("254 vrrp instances are already configured") + intf_cfg = 0 + for key in vrrp_keys: + if key[1] == str(vrrp_id): + ctx.fail("The vrrp instance {} is already configured!".format(vrrp_id)) + if key[0] == interface_name: + intf_cfg += 1 + if intf_cfg >= 16: + ctx.fail("{} already has 16 vrrp instances configured!".format(interface_name)) + vrrp_entry["vid"] = vrrp_id + + address_list.append(ip_addr) + vrrp_entry['vip'] = address_list + + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + +
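# Illustrative only (interface name and address are made up): a first
# "config interface vrrp ip add Ethernet0 8 10.10.10.1/24" ends up storing
#
#     config_db.set_entry("VRRP", ("Ethernet0", "8"),
#                         {"vid": 8, "vip": ["10.10.10.1/24"]})
#
# i.e. CONFIG_DB key "VRRP|Ethernet0|8" holding the virtual IP list; up to
# three more addresses may be appended to "vip" per the checks above.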
+@ip.command('remove') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("ip_addr", metavar="", required=True) +@click.pass_context +def remove_vrrp_ip(ctx, interface_name, vrrp_id, ip_addr): + """Remove IPv4 address from the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + try: + ipaddress.ip_interface(ip_addr) + except ValueError as err: + ctx.fail("IP address is not valid: {}".format(err)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("{} is not configured on the vrrp instance {}!".format(ip_addr, vrrp_id)) + + address_list = vrrp_entry.get("vip") + if not address_list: + ctx.fail("{} is not configured on the vrrp instance {}!".format(ip_addr, vrrp_id)) + + # del ip address + if ip_addr in address_list: + address_list.remove(ip_addr) + else: + ctx.fail("{} is not configured on the vrrp instance {}!".format(ip_addr, vrrp_id)) + vrrp_entry['vip'] = address_list + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# track interface subgroup ('config interface vrrp track_interface ...') +# +@vrrp.group(cls=clicommon.AbbreviationGroup, name='track_interface') +@click.pass_context +def track_interface(ctx): + """ vrrp track_interface configuration """ + pass + +
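# Illustrative only (all names are made up): tracking Ethernet4 from vrrp
# instance 8 on Ethernet0 with a priority increment of 20,
#
#     config interface vrrp track_interface add Ethernet0 8 Ethernet4 20
#
# is stored by add_track_interface below as
#
#     config_db.set_entry("VRRP_TRACK", ("Ethernet0", "8", "Ethernet4"),
#                         {"priority_increment": 20})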
+@track_interface.command('add') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("track_interface", metavar="", required=True) +@click.argument('priority_increment', metavar='', required=True, type=click.IntRange(10, 50), + default=20) +@click.pass_context +def add_track_interface(ctx, interface_name, vrrp_id, track_interface, priority_increment): + """Add track_interface to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + track_interface = interface_alias_to_name(config_db, track_interface) + if interface_name is None: + ctx.fail("'interface_name' is None!") + if track_interface is None: + ctx.fail("'track_interface' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + table_name_t = get_interface_table_name(track_interface) + if table_name_t == "" or table_name_t == "LOOPBACK_INTERFACE": + ctx.fail("'track_interface' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if track_interface not in config_db.get_table(table_name_t): + ctx.fail("Router Interface '{}' not found".format(track_interface)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + track_entry = config_db.get_entry("VRRP_TRACK", (interface_name, str(vrrp_id), track_interface)) + if track_entry: + track_entry['priority_increment'] = priority_increment + else: + track_entry = {} + track_entry["priority_increment"] = priority_increment + + vrrp_track_keys = config_db.get_keys("VRRP_TRACK") + if vrrp_track_keys: + track_key = (interface_name, str(vrrp_id)) + count = 0 + for item in vrrp_track_keys: + subtuple1 = item[:2] + if subtuple1 == track_key: + count += 1 + + if count >= 8: + ctx.fail("The vrrp instance {} already has 8 track interfaces configured".format(vrrp_id)) + + config_db.set_entry("VRRP_TRACK", (interface_name, str(vrrp_id), track_interface), track_entry) + + +@track_interface.command('remove') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("track_interface", metavar="", required=True) +@click.pass_context +def remove_track_interface(ctx, interface_name, vrrp_id, track_interface): + """Remove track_interface from the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + track_interface = interface_alias_to_name(config_db, track_interface) + if interface_name is None: + ctx.fail("'interface_name' is None!") + if track_interface is None: + ctx.fail("'track_interface' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + table_name_t = get_interface_table_name(track_interface) + if table_name_t == "" or table_name_t == "LOOPBACK_INTERFACE": + ctx.fail("'track_interface' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + track_entry = config_db.get_entry("VRRP_TRACK", (interface_name, str(vrrp_id), track_interface)) + if not track_entry: + ctx.fail("{} is not configured on the vrrp instance {}!".format(track_interface, vrrp_id)) + config_db.set_entry('VRRP_TRACK', (interface_name, str(vrrp_id), track_interface), None) + + +# +# 'vrrp' subcommand ('config interface vrrp priority ...') +# +@vrrp.command("priority") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('priority', metavar='', required=True, type=click.IntRange(1, 254), default=100) +@click.pass_context +def priority(ctx, interface_name, vrrp_id, priority): + """config priority to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp_entry['priority'] = priority + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp adv_interval ...') +# +@vrrp.command("adv_interval") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('interval', metavar='', required=True, type=click.IntRange(1, 255), default=1) +@click.pass_context +def adv_interval(ctx, interface_name, vrrp_id, interval): + """config adv_interval to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp_entry['adv_interval'] = interval + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp pre_empt ...') +# +@vrrp.command("pre_empt") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('mode', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) +@click.pass_context +def pre_empt(ctx, interface_name, vrrp_id, mode): + """Config pre_empt mode to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp_entry['preempt'] = mode + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp version...') +# +@vrrp.command("version") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('version', metavar='', required=True, type=click.Choice(["2", "3"]), default=3) +@click.pass_context +def version(ctx, interface_name, vrrp_id, version): + """Config vrrp packet version to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp_entry['version'] = version + config_db.set_entry("VRRP", (interface_name, str(vrrp_id)), vrrp_entry) + + +# +# 'vrrp' subcommand +# +@vrrp.command("add") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.pass_context +def add_vrrp(ctx, interface_name, vrrp_id): + """Add vrrp instance to the interface""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if vrrp_entry: + ctx.fail("{} has already configured the vrrp instance {}!".format(interface_name, vrrp_id)) + else: + vrrp_keys = config_db.get_keys("VRRP") + if len(vrrp_keys) >= 254: + ctx.fail("Has already configured 254 vrrp instances!") + intf_cfg = 0 + for key in vrrp_keys: + if key[1] == str(vrrp_id): + ctx.fail("The vrrp instance {} has already configured!".format(vrrp_id)) + if key[0] == interface_name: + intf_cfg += 1 + if intf_cfg >= 16: + ctx.fail("{} has already configured 16 vrrp instances!".format(interface_name)) + + config_db.set_entry('VRRP', (interface_name, str(vrrp_id)), {"vid": vrrp_id}) + + +@vrrp.command("remove") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.pass_context +def remove_vrrp(ctx, interface_name, vrrp_id): + """Remove vrrp instance to the interface""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp_entry = config_db.get_entry("VRRP", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("{} does not have vrrp instance {} configured!".format(interface_name, vrrp_id)) + config_db.set_entry('VRRP', (interface_name, str(vrrp_id)), None) + + +# +# 'vrrp6' subgroup ('config interface vrrp6 ...') +# +@interface.group(cls=clicommon.AbbreviationGroup) +@click.pass_context +def vrrp6(ctx): + """Vrrpv6 configuration""" + pass + + +# +# ip subgroup ('config interface vrrp6 ipv6 ...') +# +@vrrp6.group(cls=clicommon.AbbreviationGroup, name='ipv6') +@click.pass_context +def ipv6(ctx): + """Vrrpv6 ipv6 configuration """ + pass + + +@ipv6.command('add') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("ipv6_addr", metavar="", required=True) +@click.pass_context +def add_vrrp6_ipv6(ctx, interface_name, vrrp_id, ipv6_addr): + """Add IPv6 address to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + if not is_valid_intf_ip_addr(ipv6_addr): + ctx.abort() + if check_vrrp_ip_exist(config_db, ipv6_addr): + ctx.abort() + + if "/" not in ipv6_addr: + ctx.fail("IPv6 address {} is missing a mask. 
Such as xx:xx::xx/yy".format(str(ipv6_addr))) + + # check vip exist + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + address_list = [] + if vrrp6_entry: + # update vrrp + if "vip" in vrrp6_entry: + address_list = vrrp6_entry.get("vip") + # add ip address + if len(address_list) >= 4: + ctx.fail("The vrrp instance {} has already configured 4 IPv6 addresses".format(vrrp_id)) + + else: + # create new vrrp + vrrp6_entry = {} + vrrp6_keys = config_db.get_keys("VRRP6") + if len(vrrp6_keys) >= 254: + ctx.fail("Has already configured 254 Vrrpv6 instances.") + intf_cfg = 0 + for key in vrrp6_keys: + if key[1] == str(vrrp_id): + ctx.fail("The Vrrpv6 instance {} has already configured!".format(vrrp_id)) + if key[0] == interface_name: + intf_cfg += 1 + if intf_cfg >= 16: + ctx.fail("{} has already configured 16 Vrrpv6 instances!".format(interface_name)) + vrrp6_entry["vid"] = vrrp_id + + address_list.append(ipv6_addr) + vrrp6_entry['vip'] = address_list + + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +@ipv6.command('remove') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("ipv6_addr", metavar="", required=True) +@click.pass_context +def remove_vrrp_ipv6(ctx, interface_name, vrrp_id, ipv6_addr): + """Remove IPv6 address to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + try: + ipaddress.ip_interface(ipv6_addr) + except ValueError as err: + ctx.fail("IPv6 address is not valid: {}".format(err)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("{} is not configured on the Vrrpv6 instance {}!".format(ipv6_addr, vrrp_id)) + + address_list = vrrp6_entry.get("vip") + # address_list = vrrp6_entry.get("vip") + if not address_list: + ctx.fail("{} is not configured on the Vrrpv6 instance {}!".format(ipv6_addr, vrrp_id)) + + # del ip address + if ipv6_addr in address_list: + address_list.remove(ipv6_addr) + else: + ctx.fail("{} is not configured on the Vrrpv6 instance {}!".format(ipv6_addr, vrrp_id)) + vrrp6_entry['vip'] = address_list + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +def check_vrrp_ip_exist(config_db, ip_addr) -> bool: + addr_type = ipaddress.ip_interface(ip_addr).version + vrrp_table = "VRRP" if addr_type == 4 else "VRRP6" + vrrp_keys = config_db.get_keys(vrrp_table) + for vrrp_key in vrrp_keys: + vrrp_entry = config_db.get_entry(vrrp_table, vrrp_key) + if "vip" not in vrrp_entry: + continue + if ip_addr in vrrp_entry["vip"]: + click.echo("{} has already configured on the {} vrrp instance {}!".format(ip_addr, vrrp_key[0], + vrrp_key[1])) + return True + return False + + +# +# track interface subgroup ('config interface vrrp6 track_interface ...') +# +@vrrp6.group(cls=clicommon.AbbreviationGroup, name='track_interface') +@click.pass_context +def vrrp6_track_interface(ctx): + """ Vrrpv6 track_interface configuration """ + pass + + +@vrrp6_track_interface.command('add') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("track_interface", metavar="", required=True) +@click.argument('priority_increment', metavar='', required=True, type=click.IntRange(10, 50), + default=20) +@click.pass_context +def add_track_interface_v6(ctx, interface_name, vrrp_id, track_interface, priority_increment): + """add track_interface to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + track_interface = interface_alias_to_name(config_db, track_interface) + if interface_name is None: + ctx.fail("'interface_name' is None!") + if track_interface is None: + ctx.fail("'track_interface' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + table_name_t = get_interface_table_name(track_interface) + if table_name_t == "" or table_name_t == "LOOPBACK_INTERFACE": + ctx.fail("'track_interface' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if track_interface not in config_db.get_table(table_name_t): + ctx.fail("Router Interface '{}' not found".format(track_interface)) + + vrrp_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + # track_intf_key = track_interface + "|weight|" + str(weight) + vrrp6_track_keys = config_db.get_keys("VRRP6_TRACK") + if vrrp6_track_keys: + track_key = (interface_name, str(vrrp_id)) + count = 0 + for item in vrrp6_track_keys: + subtuple1 = item[:2] + if subtuple1 == track_key: + count += 1 + + if count >= 8: + ctx.fail("The Vrrpv6 instance {} has already configured 8 track interfaces".format(vrrp_id)) + + # create a new entry + track6_entry = {} + track6_entry["priority_increment"] = priority_increment + config_db.set_entry("VRRP6_TRACK", (interface_name, str(vrrp_id), track_interface), track6_entry) + + +@vrrp6_track_interface.command('remove') +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument("track_interface", metavar="", required=True) +@click.pass_context +def remove_track_interface_v6(ctx, interface_name, vrrp_id, track_interface): + """Remove track_interface to the vrrp instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + track_interface = interface_alias_to_name(config_db, track_interface) + if interface_name is None: + ctx.fail("'interface_name' is None!") + if track_interface is None: + ctx.fail("'track_interface' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + table_name_t = get_interface_table_name(track_interface) + if table_name_t == "" or table_name_t == "LOOPBACK_INTERFACE": + ctx.fail("'track_interface' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + + vrrp_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp_entry: + ctx.fail("vrrp6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + track6_entry = config_db.get_entry("VRRP6_TRACK", (interface_name, str(vrrp_id), track_interface)) + if not track6_entry: + ctx.fail("{} is not configured on the vrrp6 instance {}!".format(track_interface, vrrp_id)) + config_db.set_entry('VRRP6_TRACK', (interface_name, str(vrrp_id), track_interface), None) + + +# +# 'vrrp6' subcommand ('config interface vrrp6 priority ...') +# +@vrrp6.command("priority") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('priority', metavar='', required=True, type=click.IntRange(1, 254), default=100) +@click.pass_context +def priority_v6(ctx, interface_name, vrrp_id, priority): + """config priority to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("Vrrpv6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp6_entry['priority'] = priority + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp6 adv_interval ...') +# +@vrrp6.command("adv_interval") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('interval', metavar='', required=True, type=click.IntRange(1, 255), default=1000) +@click.pass_context +def adv_interval_v6(ctx, interface_name, vrrp_id, interval): + """config adv_interval to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("Vrrpv6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp6_entry['adv_interval'] = interval + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +# +# 'vrrp' subcommand ('config interface vrrp6 pre_empt ...') +# +@vrrp6.command("pre_empt") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.argument('mode', metavar='', required=True, type=click.Choice(["enabled", "disabled"])) +@click.pass_context +def pre_empt_v6(ctx, interface_name, vrrp_id, mode): + """Config pre_empt mode to the Vrrpv6 instance""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("Vrrpv6 instance {} not found on interface {}".format(vrrp_id, interface_name)) + + vrrp6_entry['preempt'] = mode + config_db.set_entry("VRRP6", (interface_name, str(vrrp_id)), vrrp6_entry) + + +# +# 'vrrp6' subcommand +# +@vrrp6.command("add") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.pass_context +def add_vrrp_v6(ctx, interface_name, vrrp_id): + """Add Vrrpv6 instance to the interface""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. 
Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if vrrp6_entry: + ctx.fail("{} already has Vrrpv6 instance {} configured!".format(interface_name, vrrp_id)) + else: + vrrp6_keys = config_db.get_keys("VRRP6") + if len(vrrp6_keys) >= 254: + ctx.fail("254 Vrrpv6 instances are already configured!") + intf_cfg = 0 + for key in vrrp6_keys: + if key[1] == str(vrrp_id): + ctx.fail("The Vrrpv6 instance {} is already configured!".format(vrrp_id)) + if key[0] == interface_name: + intf_cfg += 1 + if intf_cfg >= 16: + ctx.fail("{} already has 16 Vrrpv6 instances configured!".format(interface_name)) + + config_db.set_entry('VRRP6', (interface_name, str(vrrp_id)), {"vid": vrrp_id}) + + +@vrrp6.command("remove") +@click.argument('interface_name', metavar='', required=True) +@click.argument('vrrp_id', metavar='', required=True, type=click.IntRange(1, 255)) +@click.pass_context +def remove_vrrp_v6(ctx, interface_name, vrrp_id): + """Remove Vrrpv6 instance from the interface""" + config_db = ctx.obj["config_db"] + + if clicommon.get_interface_naming_mode() == "alias": + interface_name = interface_alias_to_name(config_db, interface_name) + if interface_name is None: + ctx.fail("'interface_name' is None!") + + table_name = get_interface_table_name(interface_name) + if table_name == "" or table_name == "LOOPBACK_INTERFACE": + ctx.fail("'interface_name' is not valid. Valid names [Ethernet/PortChannel/Vlan]") + if interface_name not in config_db.get_table(table_name): + ctx.fail("Router Interface '{}' not found".format(interface_name)) + + vrrp6_entry = config_db.get_entry("VRRP6", (interface_name, str(vrrp_id))) + if not vrrp6_entry: + ctx.fail("{} does not have Vrrpv6 instance {} configured!".format(interface_name, vrrp_id)) + config_db.set_entry('VRRP6', (interface_name, str(vrrp_id)), None) + + +# +# 'vrf' group ('config vrf ...') +# + +@config.group(cls=clicommon.AbbreviationGroup, name='vrf') +@click.pass_context +def vrf(ctx): + """VRF-related configuration tasks""" + config_db = ConfigDBConnector() + config_db.connect() + ctx.obj = {} + ctx.obj['config_db'] = config_db + +@vrf.command('add') +@click.argument('vrf_name', metavar='', required=True) +@click.pass_context def add_vrf(ctx, vrf_name): """Add vrf""" config_db = ValidatedConfigDBConnector(ctx.obj['config_db']) @@ -6150,7 +7405,8 @@ def remove_reasons(counter_name, reasons, verbose): @click.option('-ydrop', metavar='', type=click.IntRange(0, 100), help="Set yellow drop probability") @click.option('-gdrop', metavar='', type=click.IntRange(0, 100), help="Set green drop probability") @click.option('-v', '--verbose', is_flag=True, help="Enable verbose output") -def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose): +@multi_asic_util.multi_asic_click_option_namespace +def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbose, namespace): """ECN-related configuration tasks""" log.log_info("'ecn -profile {}' executing...".format(profile)) command = ['ecnconfig', '-p', str(profile)] @@ -6164,6 +7420,8 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbos if ydrop is not None: command += ['-ydrop', str(ydrop)] if gdrop is not None: command += ['-gdrop', str(gdrop)] if verbose: command += ["-vv"] + if namespace is not None: + command += ['-n', str(namespace)] 
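# Illustrative only (profile name and value are made up): with a namespace,
# "config ecn -profile AZURE_LOSSLESS -rmax 2097152 -n asic0" builds
# ['ecnconfig', '-p', 'AZURE_LOSSLESS', '-rmax', '2097152', '-n', 'asic0']
# before the run_command call below.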
clicommon.run_command(command, display_cmd=verbose) @@ -6173,13 +7431,26 @@ def ecn(profile, rmax, rmin, ymax, ymin, gmax, gmin, rdrop, ydrop, gdrop, verbos @config.command() @click.option('-p', metavar='', type=str, required=True, help="Profile name") @click.option('-a', metavar='', type=click.IntRange(-8,8), help="Set alpha for profile type dynamic") -@click.option('-s', metavar='', type=int, help="Set staticth for profile type static") -def mmu(p, a, s): +@click.option('-s', metavar='', type=click.IntRange(min=0), help="Set staticth for profile type static") +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def mmu(p, a, s, namespace, verbose): """mmuconfig configuration tasks""" log.log_info("'mmuconfig -p {}' executing...".format(p)) command = ['mmuconfig', '-p', str(p)] if a is not None: command += ['-a', str(a)] if s is not None: command += ['-s', str(s)] + if namespace is not None: + command += ['-n', str(namespace)] + if verbose: + command += ['-vv'] clicommon.run_command(command) @@ -6201,8 +7472,9 @@ def pfc(ctx): @pfc.command() @click.argument('interface_name', metavar='', required=True) @click.argument('status', type=click.Choice(['on', 'off'])) +@multi_asic_util.multi_asic_click_option_namespace @click.pass_context -def asymmetric(ctx, interface_name, status): +def asymmetric(ctx, interface_name, status, namespace): """Set asymmetric PFC configuration.""" # Get the config_db connector config_db = ctx.obj['config_db'] @@ -6212,7 +7484,11 @@ def asymmetric(ctx, interface_name, status): if interface_name is None: ctx.fail("'interface_name' is None!") - clicommon.run_command(['pfc', 'config', 'asymmetric', str(status), str(interface_name)]) + cmd = ['pfc', 'config', 'asymmetric', str(status), str(interface_name)] + if namespace is not None: + cmd += ['-n', str(namespace)] + + clicommon.run_command(cmd) # # 'pfc priority' command ('config interface pfc priority ...') @@ -6222,8 +7498,9 @@ def asymmetric(ctx, interface_name, status): @click.argument('interface_name', metavar='', required=True) @click.argument('priority', type=click.Choice([str(x) for x in range(8)])) @click.argument('status', type=click.Choice(['on', 'off'])) +@multi_asic_util.multi_asic_click_option_namespace @click.pass_context -def priority(ctx, interface_name, priority, status): +def priority(ctx, interface_name, priority, status, namespace): """Set PFC priority configuration.""" # Get the config_db connector config_db = ctx.obj['config_db'] @@ -6233,7 +7510,11 @@ def priority(ctx, interface_name, priority, status): if interface_name is None: ctx.fail("'interface_name' is None!") - clicommon.run_command(['pfc', 'config', 'priority', str(status), str(interface_name), str(priority)]) + cmd = ['pfc', 'config', 'priority', str(status), str(interface_name), str(priority)] + if namespace is not None: + cmd += ['-n', str(namespace)] + + clicommon.run_command(cmd) # # 'buffer' group ('config buffer ...') @@ -7680,5 +8961,123 @@ def state(db, state): config_db.mod_entry(swsscommon.CFG_LOCAL_USERS_PASSWORDS_RESET, 'global', {'state': state}) +# 'serial_console' group ('config serial_console') +# +@config.group(cls=clicommon.AbbreviationGroup, name='serial_console') +def serial_console(): + """Configuring system serial-console behavior""" + pass + + 
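# A hypothetical session with the serial_console group above (values are
# arbitrary; the command handlers follow below):
#
#     config serial_console inactivity-timeout 30
#     config serial_console sysrq-capabilities enabled
#
# Both write to the CONFIG_DB entry SERIAL_CONSOLE|POLICIES, e.g.
#     {"inactivity_timeout": "30", "sysrq_capabilities": "enabled"}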
+@serial_console.command('sysrq-capabilities') +@click.argument('sysrq_capabilities', metavar='', required=True, + type=click.Choice(['enabled', 'disabled'])) +def sysrq_capabilities(sysrq_capabilities): + """Set serial console sysrq-capabilities state""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SERIAL_CONSOLE", 'POLICIES', + {'sysrq_capabilities': sysrq_capabilities}) + + +@serial_console.command('inactivity-timeout') +@click.argument('inactivity_timeout', metavar='', required=True, + type=click.IntRange(0, 35000)) +def inactivity_timeout_serial(inactivity_timeout): + """Set serial console inactivity timeout""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SERIAL_CONSOLE", 'POLICIES', + {'inactivity_timeout': inactivity_timeout}) + + +# +# 'ssh' group ('config ssh') +# +@config.group(cls=clicommon.AbbreviationGroup, name='ssh') +def ssh(): + """Configuring system ssh behavior""" + pass + + +@ssh.command('inactivity-timeout') +@click.argument('inactivity_timeout', metavar='', required=True, + type=click.IntRange(0, 35000)) +def inactivity_timeout_ssh(inactivity_timeout): + """Set ssh inactivity timeout""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SSH_SERVER", 'POLICIES', + {'inactivity_timeout': inactivity_timeout}) + + +@ssh.command('max-sessions') +@click.argument('max-sessions', metavar='', required=True, + type=click.IntRange(0, 100)) +def max_sessions(max_sessions): + """Set max number of concurrent logins""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry("SSH_SERVER", 'POLICIES', + {'max_sessions': max_sessions}) + + +# +# 'banner' group ('config banner ...') +# +@config.group() +def banner(): + """Configuring system banner messages""" + pass + + +@banner.command() +@click.argument('state', metavar='', required=True, type=click.Choice(['enabled', 'disabled'])) +def state(state): + """Set banner feature state""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_BANNER_MESSAGE_TABLE_NAME, 'global', + {'state': state}) + + +@banner.command() +@click.argument('message', metavar='', required=True) +def login(message): + """Set login message""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_BANNER_MESSAGE_TABLE_NAME, 'global', + {'login': message}) + + +@banner.command() +@click.argument('message', metavar='', required=True) +def logout(message): + """Set logout message""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_BANNER_MESSAGE_TABLE_NAME, 'global', + {'logout': message}) + + +@banner.command() +@click.argument('message', metavar='', required=True) +def motd(message): + """Set message of the day""" + + config_db = ConfigDBConnector() + config_db.connect() + config_db.mod_entry(swsscommon.CFG_BANNER_MESSAGE_TABLE_NAME, 'global', + {'motd': message}) + + if __name__ == '__main__': config() diff --git a/config/plugins/mlnx.py b/config/plugins/mlnx.py index accf944ce6..115b310f69 100644 --- a/config/plugins/mlnx.py +++ b/config/plugins/mlnx.py @@ -1,6 +1,6 @@ #!/usr/bin/env python3 # -# Copyright (c) 2017-2021 NVIDIA CORPORATION & AFFILIATES. +# Copyright (c) 2017-2024 NVIDIA CORPORATION & AFFILIATES. 
# Apache-2.0 # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -164,40 +164,6 @@ def mlnx(): """ Mellanox platform configuration tasks """ pass - -# 'sniffer' group -@mlnx.group() -def sniffer(): - """ Utility for managing Mellanox SDK/PRM sniffer """ - pass - - -# 'sdk' subgroup -@sniffer.group() -def sdk(): - """SDK Sniffer - Command Line to enable/disable SDK sniffer""" - pass - - -@sdk.command() -@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, - prompt='Swss service will be restarted, continue?') -def enable(): - """Enable SDK Sniffer""" - click.echo("Enabling SDK sniffer") - sdk_sniffer_enable() - click.echo("Note: the sniffer file may exhaust the space on /var/log, please disable it when you are done with this sniffering.") - - -@sdk.command() -@click.option('-y', '--yes', is_flag=True, callback=_abort_if_false, expose_value=False, - prompt='Swss service will be restarted, continue?') -def disable(): - """Disable SDK Sniffer""" - click.echo("Disabling SDK sniffer") - sdk_sniffer_disable() - - def sdk_sniffer_enable(): """Enable SDK Sniffer""" sdk_sniffer_filename = sniffer_filename_generate(SDK_SNIFFER_TARGET_PATH, diff --git a/config/plugins/sonic-system-ldap_yang.py b/config/plugins/sonic-system-ldap_yang.py new file mode 100644 index 0000000000..cc211cdb90 --- /dev/null +++ b/config/plugins/sonic-system-ldap_yang.py @@ -0,0 +1,393 @@ +""" +Autogenerated config CLI plugin. + + +""" + +import copy +import click +import utilities_common.cli as clicommon +import utilities_common.general as general +from config import config_mgmt + +# Load sonic-cfggen from source since /usr/local/bin/sonic-cfggen does not have .py extension. +sonic_cfggen = general.load_module_from_source('sonic_cfggen', '/usr/local/bin/sonic-cfggen') + + +def exit_with_error(*args, **kwargs): + """ Print a message with click.secho and abort CLI. + + Args: + args: Positional arguments to pass to click.secho + kwargs: Keyword arguments to pass to click.secho + """ + + click.secho(*args, **kwargs) + raise click.Abort() + + +def validate_config_or_raise(cfg): + """ Validate config db data using ConfigMgmt. + + Args: + cfg (Dict): Config DB data to validate. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + try: + cfg = sonic_cfggen.FormatConverter.to_serialized(copy.deepcopy(cfg)) + config_mgmt.ConfigMgmt().loadData(cfg) + except Exception as err: + raise Exception('Failed to validate configuration: {}'.format(err)) + + +def add_entry_validated(db, table, key, data): + """ Add new entry in table and validate configuration. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector obect. + table (str): Table name to add new entry to. + key (Union[str, Tuple]): Key name in the table. + data (Dict): Entry data. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key in cfg[table]: + raise Exception(f"{key} already exists") + + cfg[table][key] = data + + validate_config_or_raise(cfg) + db.set_entry(table, key, data) + + +def update_entry_validated(db, table, key, data, create_if_not_exists=False): + """ Update entry in table and validate configuration. + If attribute value in data is None, the attribute is deleted. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector obect. + table (str): Table name to add new entry to. + key (Union[str, Tuple]): Key name in the table. + data (Dict): Entry data. 
+ create_if_not_exists (bool): + If the entry does not already exist, a new entry + is created when this flag is set to True; + otherwise a missing entry raises an exception. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + + if not data: + raise Exception(f"No field/values to update {key}") + + if create_if_not_exists: + cfg[table].setdefault(key, {}) + + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + entry_changed = False + for attr, value in data.items(): + if value == cfg[table][key].get(attr): + continue + entry_changed = True + if value is None: + cfg[table][key].pop(attr, None) + else: + cfg[table][key][attr] = value + + if not entry_changed: + return + + validate_config_or_raise(cfg) + db.set_entry(table, key, cfg[table][key]) + + +def del_entry_validated(db, table, key): + """ Delete entry in table and validate configuration. + + Args: + db (swsscommon.ConfigDBConnector): Config DB connector object. + table (str): Table name to delete entry from. + key (Union[str, Tuple]): Key name in the table. + Raises: + Exception: when cfg does not satisfy YANG schema. + """ + + cfg = db.get_config() + cfg.setdefault(table, {}) + if key not in cfg[table]: + raise Exception(f"{key} does not exist") + + cfg[table].pop(key) + + validate_config_or_raise(cfg) + db.set_entry(table, key, None) + + +@click.group(name="ldap-server", + cls=clicommon.AliasedGroup) +def LDAP_SERVER(): + """ """ + + pass + + +@LDAP_SERVER.command(name="add") +@click.argument( + "hostname", + nargs=1, + required=True, +) +@click.option( + "--priority", + help="Server priority", +) +@clicommon.pass_db +def LDAP_SERVER_add(db, hostname, priority): + """ Add object in LDAP_SERVER. """ + + table = "LDAP_SERVER" + key = hostname + data = {} + if priority is not None: + data["priority"] = priority + + try: + add_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_SERVER.command(name="update") +@click.argument( + "hostname", + nargs=1, + required=True, +) +@click.option( + "--priority", + help="Server priority", +) +@clicommon.pass_db +def LDAP_SERVER_update(db, hostname, priority): + """ Update object in LDAP_SERVER. """ + + table = "LDAP_SERVER" + key = hostname + data = {} + if priority is not None: + data["priority"] = priority + + try: + update_entry_validated(db.cfgdb, table, key, data) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_SERVER.command(name="delete") +@click.argument( + "hostname", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_SERVER_delete(db, hostname): + """ Delete object in LDAP_SERVER. 
""" + + table = "LDAP_SERVER" + key = hostname + try: + del_entry_validated(db.cfgdb, table, key) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@click.group(name="ldap", + cls=clicommon.AliasedGroup) +def LDAP(): + """ """ + + pass + + +@LDAP.group(name="global", cls=clicommon.AliasedGroup) +@clicommon.pass_db +def LDAP_global(db): + """ """ + + pass + + +@LDAP_global.command(name="bind-dn") +@click.argument( + "bind-dn", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_bind_dn(db, bind_dn): + """ LDAP global bind dn """ + + table = "LDAP" + key = "global" + data = { + "bind_dn": bind_dn, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="bind-password") +@click.argument( + "bind-password", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_bind_password(db, bind_password): + """ Shared secret used for encrypting the communication """ + + table = "LDAP" + key = "global" + data = { + "bind_password": bind_password, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="bind-timeout") +@click.argument( + "bind-timeout", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_bind_timeout(db, bind_timeout): + """ Ldap bind timeout """ + + table = "LDAP" + key = "global" + data = { + "bind_timeout": bind_timeout, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="version") +@click.argument( + "version", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_version(db, version): + """ Ldap version """ + + table = "LDAP" + key = "global" + data = { + "version": version, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="base-dn") +@click.argument( + "base-dn", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_base_dn(db, base_dn): + """ Ldap user base dn """ + + table = "LDAP" + key = "global" + data = { + "base_dn": base_dn, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="port") +@click.argument( + "port", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_port(db, port): + """ TCP port to communicate with LDAP server """ + + table = "LDAP" + key = "global" + data = { + "port": port, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +@LDAP_global.command(name="timeout") +@click.argument( + "timeout", + nargs=1, + required=True, +) +@clicommon.pass_db +def LDAP_global_timeout(db, timeout): + """ Ldap timeout duration in sec """ + + table = "LDAP" + key = "global" + data = { + "timeout": timeout, + } + try: + update_entry_validated(db.cfgdb, table, key, data, create_if_not_exists=True) + except Exception as err: + exit_with_error(f"Error: {err}", fg="red") + + +def register(cli): + """ Register new CLI nodes in root CLI. 
+ + Args: + cli: Root CLI node. + Raises: + Exception: when root CLI already has a command + we are trying to register. + """ + cli_node = LDAP_SERVER + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(LDAP_SERVER) + cli_node = LDAP + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(LDAP) diff --git a/config/stp.py b/config/stp.py new file mode 100644 index 0000000000..85d7041847 --- /dev/null +++ b/config/stp.py @@ -0,0 +1,917 @@ + +# +# 'spanning-tree' group ('config spanning-tree ...') +# + +import click +import utilities_common.cli as clicommon +from natsort import natsorted +import logging + +STP_MIN_ROOT_GUARD_TIMEOUT = 5 +STP_MAX_ROOT_GUARD_TIMEOUT = 600 +STP_DEFAULT_ROOT_GUARD_TIMEOUT = 30 + +STP_MIN_FORWARD_DELAY = 4 +STP_MAX_FORWARD_DELAY = 30 +STP_DEFAULT_FORWARD_DELAY = 15 + +STP_MIN_HELLO_INTERVAL = 1 +STP_MAX_HELLO_INTERVAL = 10 +STP_DEFAULT_HELLO_INTERVAL = 2 + +STP_MIN_MAX_AGE = 6 +STP_MAX_MAX_AGE = 40 +STP_DEFAULT_MAX_AGE = 20 + +STP_MIN_BRIDGE_PRIORITY = 0 +STP_MAX_BRIDGE_PRIORITY = 61440 +STP_DEFAULT_BRIDGE_PRIORITY = 32768 + +PVST_MAX_INSTANCES = 255 + + +def get_intf_list_in_vlan_member_table(config_db): + """ + Get info from REDIS ConfigDB and create interface to vlan mapping + """ + get_int_vlan_configdb_info = config_db.get_table('VLAN_MEMBER') + int_list = [] + for key in get_int_vlan_configdb_info: + interface = key[1] + if interface not in int_list: + int_list.append(interface) + return int_list + +################################## +# STP parameter validations +################################## + + +def is_valid_root_guard_timeout(ctx, root_guard_timeout): + if root_guard_timeout not in range(STP_MIN_ROOT_GUARD_TIMEOUT, STP_MAX_ROOT_GUARD_TIMEOUT + 1): + ctx.fail("STP root guard timeout must be in range 5-600") + + +def is_valid_forward_delay(ctx, forward_delay): + if forward_delay not in range(STP_MIN_FORWARD_DELAY, STP_MAX_FORWARD_DELAY + 1): + ctx.fail("STP forward delay value must be in range 4-30") + + +def is_valid_hello_interval(ctx, hello_interval): + if hello_interval not in range(STP_MIN_HELLO_INTERVAL, STP_MAX_HELLO_INTERVAL + 1): + ctx.fail("STP hello timer must be in range 1-10") + + +def is_valid_max_age(ctx, max_age): + if max_age not in range(STP_MIN_MAX_AGE, STP_MAX_MAX_AGE + 1): + ctx.fail("STP max age value must be in range 6-40") + + +def is_valid_bridge_priority(ctx, priority): + if priority % 4096 != 0: + ctx.fail("STP bridge priority must be multiple of 4096") + if priority not in range(STP_MIN_BRIDGE_PRIORITY, STP_MAX_BRIDGE_PRIORITY + 1): + ctx.fail("STP bridge priority must be in range 0-61440") + + +def validate_params(forward_delay, max_age, hello_time): + if (2 * (int(forward_delay) - 1)) >= int(max_age) >= (2 * (int(hello_time) + 1)): + return True + else: + return False + + +def is_valid_stp_vlan_parameters(ctx, db, vlan_name, param_type, new_value): + stp_vlan_entry = db.get_entry('STP_VLAN', vlan_name) + cfg_vlan_forward_delay = stp_vlan_entry.get("forward_delay") + cfg_vlan_max_age = stp_vlan_entry.get("max_age") + cfg_vlan_hello_time = stp_vlan_entry.get("hello_time") + ret_val = False + if param_type == "forward_delay": + ret_val = validate_params(new_value, cfg_vlan_max_age, cfg_vlan_hello_time) + elif param_type == "max_age": + ret_val = validate_params(cfg_vlan_forward_delay, new_value, cfg_vlan_hello_time) + elif param_type == "hello_time": + ret_val = validate_params(cfg_vlan_forward_delay, 
cfg_vlan_max_age, new_value) + + if ret_val is not True: + ctx.fail("2*(forward_delay-1) >= max_age >= 2*(hello_time +1 ) not met for VLAN") + + +def is_valid_stp_global_parameters(ctx, db, param_type, new_value): + stp_global_entry = db.get_entry('STP', "GLOBAL") + cfg_forward_delay = stp_global_entry.get("forward_delay") + cfg_max_age = stp_global_entry.get("max_age") + cfg_hello_time = stp_global_entry.get("hello_time") + ret_val = False + if param_type == "forward_delay": + ret_val = validate_params(new_value, cfg_max_age, cfg_hello_time) + elif param_type == "max_age": + ret_val = validate_params(cfg_forward_delay, new_value, cfg_hello_time) + elif param_type == "hello_time": + ret_val = validate_params(cfg_forward_delay, cfg_max_age, new_value) + + if ret_val is not True: + ctx.fail("2*(forward_delay-1) >= max_age >= 2*(hello_time +1 ) not met") + + +def get_max_stp_instances(): + return PVST_MAX_INSTANCES + + +def update_stp_vlan_parameter(ctx, db, param_type, new_value): + stp_global_entry = db.get_entry('STP', "GLOBAL") + + allowed_params = {"priority", "max_age", "hello_time", "forward_delay"} + if param_type not in allowed_params: + ctx.fail("Invalid parameter") + + current_global_value = stp_global_entry.get("forward_delay") + + vlan_dict = db.get_table('STP_VLAN') + for vlan in vlan_dict.keys(): + vlan_entry = db.get_entry('STP_VLAN', vlan) + current_vlan_value = vlan_entry.get(param_type) + if current_global_value == current_vlan_value: + db.mod_entry('STP_VLAN', vlan, {param_type: new_value}) + + +def check_if_vlan_exist_in_db(db, ctx, vid): + vlan_name = 'Vlan{}'.format(vid) + vlan = db.get_entry('VLAN', vlan_name) + if len(vlan) == 0: + ctx.fail("{} doesn't exist".format(vlan_name)) + + +def enable_stp_for_vlans(db): + vlan_count = 0 + fvs = {'enabled': 'true', + 'forward_delay': get_global_stp_forward_delay(db), + 'hello_time': get_global_stp_hello_time(db), + 'max_age': get_global_stp_max_age(db), + 'priority': get_global_stp_priority(db) + } + vlan_dict = natsorted(db.get_table('VLAN')) + max_stp_instances = get_max_stp_instances() + for vlan_key in vlan_dict: + if vlan_count >= max_stp_instances: + logging.warning("Exceeded maximum STP configurable VLAN instances for {}".format(vlan_key)) + break + db.set_entry('STP_VLAN', vlan_key, fvs) + vlan_count += 1 + + +def get_stp_enabled_vlan_count(db): + count = 0 + stp_vlan_keys = db.get_table('STP_VLAN').keys() + for key in stp_vlan_keys: + if db.get_entry('STP_VLAN', key).get('enabled') == 'true': + count += 1 + return count + + +def vlan_enable_stp(db, vlan_name): + fvs = {'enabled': 'true', + 'forward_delay': get_global_stp_forward_delay(db), + 'hello_time': get_global_stp_hello_time(db), + 'max_age': get_global_stp_max_age(db), + 'priority': get_global_stp_priority(db) + } + if is_global_stp_enabled(db): + if get_stp_enabled_vlan_count(db) < get_max_stp_instances(): + db.set_entry('STP_VLAN', vlan_name, fvs) + else: + logging.warning("Exceeded maximum STP configurable VLAN instances for {}".format(vlan_name)) + + +def interface_enable_stp(db, interface_name): + fvs = {'enabled': 'true', + 'root_guard': 'false', + 'bpdu_guard': 'false', + 'bpdu_guard_do_disable': 'false', + 'portfast': 'false', + 'uplink_fast': 'false' + } + if is_global_stp_enabled(db): + db.set_entry('STP_PORT', interface_name, fvs) + + +def is_vlan_configured_interface(db, interface_name): + intf_to_vlan_list = get_vlan_list_for_interface(db, interface_name) + if intf_to_vlan_list: # if empty + return True + else: + return False + + +def 
is_interface_vlan_member(db, vlan_name, interface_name): + ctx = click.get_current_context() + key = vlan_name + '|' + interface_name + entry = db.get_entry('VLAN_MEMBER', key) + if len(entry) == 0: # if empty + ctx.fail("{} is not member of {}".format(interface_name, vlan_name)) + + +def get_vlan_list_for_interface(db, interface_name): + vlan_intf_info = db.get_table('VLAN_MEMBER') + vlan_list = [] + for line in vlan_intf_info: + if interface_name == line[1]: + vlan_name = line[0] + vlan_list.append(vlan_name) + return vlan_list + + +def get_pc_member_port_list(db): + pc_member_info = db.get_table('PORTCHANNEL_MEMBER') + pc_member_port_list = [] + for line in pc_member_info: + intf_name = line[1] + pc_member_port_list.append(intf_name) + return pc_member_port_list + + +def get_vlan_list_from_stp_vlan_intf_table(db, intf_name): + stp_vlan_intf_info = db.get_table('STP_VLAN_PORT') + vlan_list = [] + for line in stp_vlan_intf_info: + if line[1] == intf_name: + vlan_list.append(line[0]) + return vlan_list + + +def get_intf_list_from_stp_vlan_intf_table(db, vlan_name): + stp_vlan_intf_info = db.get_table('STP_VLAN_PORT') + intf_list = [] + for line in stp_vlan_intf_info: + if line[0] == vlan_name: + intf_list.append(line[1]) + return intf_list + + +def is_portchannel_member_port(db, interface_name): + return interface_name in get_pc_member_port_list(db) + + +def enable_stp_for_interfaces(db): + fvs = {'enabled': 'true', + 'root_guard': 'false', + 'bpdu_guard': 'false', + 'bpdu_guard_do_disable': 'false', + 'portfast': 'false', + 'uplink_fast': 'false' + } + port_dict = natsorted(db.get_table('PORT')) + intf_list_in_vlan_member_table = get_intf_list_in_vlan_member_table(db) + + for port_key in port_dict: + if port_key in intf_list_in_vlan_member_table: + db.set_entry('STP_PORT', port_key, fvs) + + po_ch_dict = natsorted(db.get_table('PORTCHANNEL')) + for po_ch_key in po_ch_dict: + if po_ch_key in intf_list_in_vlan_member_table: + db.set_entry('STP_PORT', po_ch_key, fvs) + + +def is_global_stp_enabled(db): + stp_entry = db.get_entry('STP', "GLOBAL") + mode = stp_entry.get("mode") + if mode: + return True + else: + return False + + +def check_if_global_stp_enabled(db, ctx): + if not is_global_stp_enabled(db): + ctx.fail("Global STP is not enabled - first configure STP mode") + + +def get_global_stp_mode(db): + stp_entry = db.get_entry('STP', "GLOBAL") + mode = stp_entry.get("mode") + return mode + + +def get_global_stp_forward_delay(db): + stp_entry = db.get_entry('STP', "GLOBAL") + forward_delay = stp_entry.get("forward_delay") + return forward_delay + + +def get_global_stp_hello_time(db): + stp_entry = db.get_entry('STP', "GLOBAL") + hello_time = stp_entry.get("hello_time") + return hello_time + + +def get_global_stp_max_age(db): + stp_entry = db.get_entry('STP', "GLOBAL") + max_age = stp_entry.get("max_age") + return max_age + + +def get_global_stp_priority(db): + stp_entry = db.get_entry('STP', "GLOBAL") + priority = stp_entry.get("priority") + return priority + + +@click.group() +@clicommon.pass_db +def spanning_tree(_db): + """STP command line""" + pass + + +############################################### +# STP Global commands implementation +############################################### + +# cmd: STP enable +@spanning_tree.command('enable') +@click.argument('mode', metavar='', required=True, type=click.Choice(["pvst"])) +@clicommon.pass_db +def spanning_tree_enable(_db, mode): + """enable STP """ + ctx = click.get_current_context() + db = _db.cfgdb + if mode == "pvst" and 
get_global_stp_mode(db) == "pvst": + ctx.fail("PVST is already configured") + fvs = {'mode': mode, + 'rootguard_timeout': STP_DEFAULT_ROOT_GUARD_TIMEOUT, + 'forward_delay': STP_DEFAULT_FORWARD_DELAY, + 'hello_time': STP_DEFAULT_HELLO_INTERVAL, + 'max_age': STP_DEFAULT_MAX_AGE, + 'priority': STP_DEFAULT_BRIDGE_PRIORITY + } + db.set_entry('STP', "GLOBAL", fvs) + # Enable STP for VLAN by default + enable_stp_for_interfaces(db) + enable_stp_for_vlans(db) + + +# cmd: STP disable +@spanning_tree.command('disable') +@click.argument('mode', metavar='', required=True, type=click.Choice(["pvst"])) +@clicommon.pass_db +def stp_disable(_db, mode): + """disable STP """ + db = _db.cfgdb + db.set_entry('STP', "GLOBAL", None) + # Disable STP for all VLANs and interfaces + db.delete_table('STP_VLAN') + db.delete_table('STP_PORT') + db.delete_table('STP_VLAN_PORT') + if get_global_stp_mode(db) == "pvst": + print("Error PVST disable failed") + + +# cmd: STP global root guard timeout +@spanning_tree.command('root_guard_timeout') +@click.argument('root_guard_timeout', metavar='<5-600 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_global_root_guard_timeout(_db, root_guard_timeout): + """Configure STP global root guard timeout value""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_root_guard_timeout(ctx, root_guard_timeout) + db.mod_entry('STP', "GLOBAL", {'rootguard_timeout': root_guard_timeout}) + + +# cmd: STP global forward delay +@spanning_tree.command('forward_delay') +@click.argument('forward_delay', metavar='<4-30 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_global_forward_delay(_db, forward_delay): + """Configure STP global forward delay""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_forward_delay(ctx, forward_delay) + is_valid_stp_global_parameters(ctx, db, "forward_delay", forward_delay) + update_stp_vlan_parameter(ctx, db, "forward_delay", forward_delay) + db.mod_entry('STP', "GLOBAL", {'forward_delay': forward_delay}) + + +# cmd: STP global hello interval +@spanning_tree.command('hello') +@click.argument('hello_interval', metavar='<1-10 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_global_hello_interval(_db, hello_interval): + """Configure STP global hello interval""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_hello_interval(ctx, hello_interval) + is_valid_stp_global_parameters(ctx, db, "hello_time", hello_interval) + update_stp_vlan_parameter(ctx, db, "hello_time", hello_interval) + db.mod_entry('STP', "GLOBAL", {'hello_time': hello_interval}) + + +# cmd: STP global max age +@spanning_tree.command('max_age') +@click.argument('max_age', metavar='<6-40 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_global_max_age(_db, max_age): + """Configure STP global max_age""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_max_age(ctx, max_age) + is_valid_stp_global_parameters(ctx, db, "max_age", max_age) + update_stp_vlan_parameter(ctx, db, "max_age", max_age) + db.mod_entry('STP', "GLOBAL", {'max_age': max_age}) + + +# cmd: STP global bridge priority +@spanning_tree.command('priority') +@click.argument('priority', metavar='<0-61440>', required=True, type=int) +@clicommon.pass_db +def stp_global_priority(_db, priority): + """Configure STP global bridge priority""" + ctx = click.get_current_context() + db = 
_db.cfgdb + check_if_global_stp_enabled(db, ctx) + is_valid_bridge_priority(ctx, priority) + update_stp_vlan_parameter(ctx, db, "priority", priority) + db.mod_entry('STP', "GLOBAL", {'priority': priority}) + + +############################################### +# STP VLAN commands implementation +############################################### +@spanning_tree.group('vlan') +@clicommon.pass_db +def spanning_tree_vlan(_db): + """Configure STP for a VLAN""" + pass + + +def is_stp_enabled_for_vlan(db, vlan_name): + stp_entry = db.get_entry('STP_VLAN', vlan_name) + stp_enabled = stp_entry.get("enabled") + if stp_enabled == "true": + return True + else: + return False + + +def check_if_stp_enabled_for_vlan(ctx, db, vlan_name): + if not is_stp_enabled_for_vlan(db, vlan_name): + ctx.fail("STP is not enabled for VLAN") + + +@spanning_tree_vlan.command('enable') +@click.argument('vid', metavar='', required=True, type=int) +@clicommon.pass_db +def stp_vlan_enable(_db, vid): + """Enable STP for a VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + if is_stp_enabled_for_vlan(db, vlan_name): + ctx.fail("STP is already enabled for " + vlan_name) + if get_stp_enabled_vlan_count(db) >= get_max_stp_instances(): + ctx.fail("Exceeded maximum STP configurable VLAN instances") + check_if_global_stp_enabled(db, ctx) + # when enabled for first time, create VLAN entry with + # global values - else update only VLAN STP state + stp_vlan_entry = db.get_entry('STP_VLAN', vlan_name) + if len(stp_vlan_entry) == 0: + fvs = {'enabled': 'true', + 'forward_delay': get_global_stp_forward_delay(db), + 'hello_time': get_global_stp_hello_time(db), + 'max_age': get_global_stp_max_age(db), + 'priority': get_global_stp_priority(db) + } + db.set_entry('STP_VLAN', vlan_name, fvs) + else: + db.mod_entry('STP_VLAN', vlan_name, {'enabled': 'true'}) + # Refresh stp_vlan_intf entry for vlan + for vlan, intf in db.get_table('STP_VLAN_PORT'): + if vlan == vlan_name: + vlan_intf_key = "{}|{}".format(vlan_name, intf) + vlan_intf_entry = db.get_entry('STP_VLAN_PORT', vlan_intf_key) + db.mod_entry('STP_VLAN_PORT', vlan_intf_key, vlan_intf_entry) + + +@spanning_tree_vlan.command('disable') +@click.argument('vid', metavar='', required=True, type=int) +@clicommon.pass_db +def stp_vlan_disable(_db, vid): + """Disable STP for a VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + db.mod_entry('STP_VLAN', vlan_name, {'enabled': 'false'}) + + +@spanning_tree_vlan.command('forward_delay') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('forward_delay', metavar='<4-30 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_forward_delay(_db, vid, forward_delay): + """Configure STP forward delay for VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + is_valid_forward_delay(ctx, forward_delay) + is_valid_stp_vlan_parameters(ctx, db, vlan_name, "forward_delay", forward_delay) + db.mod_entry('STP_VLAN', vlan_name, {'forward_delay': forward_delay}) + + +@spanning_tree_vlan.command('hello') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('hello_interval', metavar='<1-10 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_hello_interval(_db, vid, 
hello_interval): + """Configure STP hello interval for VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + is_valid_hello_interval(ctx, hello_interval) + is_valid_stp_vlan_parameters(ctx, db, vlan_name, "hello_time", hello_interval) + db.mod_entry('STP_VLAN', vlan_name, {'hello_time': hello_interval}) + + +@spanning_tree_vlan.command('max_age') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('max_age', metavar='<6-40 seconds>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_max_age(_db, vid, max_age): + """Configure STP max age for VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + is_valid_max_age(ctx, max_age) + is_valid_stp_vlan_parameters(ctx, db, vlan_name, "max_age", max_age) + db.mod_entry('STP_VLAN', vlan_name, {'max_age': max_age}) + + +@spanning_tree_vlan.command('priority') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('priority', metavar='<0-61440>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_priority(_db, vid, priority): + """Configure STP bridge priority for VLAN""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_vlan_exist_in_db(db, ctx, vid) + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + is_valid_bridge_priority(ctx, priority) + db.mod_entry('STP_VLAN', vlan_name, {'priority': priority}) + + +############################################### +# STP interface commands implementation +############################################### + + +def is_stp_enabled_for_interface(db, intf_name): + stp_entry = db.get_entry('STP_PORT', intf_name) + stp_enabled = stp_entry.get("enabled") + if stp_enabled == "true": + return True + else: + return False + + +def check_if_stp_enabled_for_interface(ctx, db, intf_name): + if not is_stp_enabled_for_interface(db, intf_name): + ctx.fail("STP is not enabled for interface {}".format(intf_name)) + + +def check_if_interface_is_valid(ctx, db, interface_name): + from config.main import interface_name_is_valid + if interface_name_is_valid(db, interface_name) is False: + ctx.fail("Interface name is invalid. 
Please enter a valid interface name!!")
+    for key in db.get_table('INTERFACE'):
+        if not isinstance(key, tuple):
+            continue
+        if key[0] == interface_name:
+            ctx.fail(" {} has ip address {} configured - It's not a L2 interface".format(interface_name, key[1]))
+    if is_portchannel_member_port(db, interface_name):
+        ctx.fail(" {} is a portchannel member port - STP can't be configured".format(interface_name))
+    if not is_vlan_configured_interface(db, interface_name):
+        ctx.fail(" {} has no VLAN configured - It's not a L2 interface".format(interface_name))
+
+
+@spanning_tree.group('interface')
+@clicommon.pass_db
+def spanning_tree_interface(_db):
+    """Configure STP for interface"""
+    pass
+
+
+@spanning_tree_interface.command('enable')
+@click.argument('interface_name', metavar='', required=True)
+@clicommon.pass_db
+def stp_interface_enable(_db, interface_name):
+    """Enable STP for interface"""
+    ctx = click.get_current_context()
+    db = _db.cfgdb
+    check_if_global_stp_enabled(db, ctx)
+    if is_stp_enabled_for_interface(db, interface_name):
+        ctx.fail("STP is already enabled for " + interface_name)
+    check_if_interface_is_valid(ctx, db, interface_name)
+    stp_intf_entry = db.get_entry('STP_PORT', interface_name)
+    if len(stp_intf_entry) == 0:
+        fvs = {'enabled': 'true',
+               'root_guard': 'false',
+               'bpdu_guard': 'false',
+               'bpdu_guard_do_disable': 'false',
+               'portfast': 'false',
+               'uplink_fast': 'false'}
+        db.set_entry('STP_PORT', interface_name, fvs)
+    else:
+        db.mod_entry('STP_PORT', interface_name, {'enabled': 'true'})
+
+
+@spanning_tree_interface.command('disable')
+@click.argument('interface_name', metavar='', required=True)
+@clicommon.pass_db
+def stp_interface_disable(_db, interface_name):
+    """Disable STP for interface"""
+    ctx = click.get_current_context()
+    db = _db.cfgdb
+    check_if_global_stp_enabled(db, ctx)
+    check_if_interface_is_valid(ctx, db, interface_name)
+    db.mod_entry('STP_PORT', interface_name, {'enabled': 'false'})
+
+
+# STP interface port priority
+STP_INTERFACE_MIN_PRIORITY = 0
+STP_INTERFACE_MAX_PRIORITY = 240
+STP_INTERFACE_DEFAULT_PRIORITY = 128
+
+
+def is_valid_interface_priority(ctx, intf_priority):
+    if intf_priority not in range(STP_INTERFACE_MIN_PRIORITY, STP_INTERFACE_MAX_PRIORITY + 1):
+        ctx.fail("STP interface priority must be in range 0-240")
+
+
+@spanning_tree_interface.command('priority')
+@click.argument('interface_name', metavar='', required=True)
+@click.argument('priority', metavar='<0-240>', required=True, type=int)
+@clicommon.pass_db
+def stp_interface_priority(_db, interface_name, priority):
+    """Configure STP port priority for interface"""
+    ctx = click.get_current_context()
+    db = _db.cfgdb
+    check_if_stp_enabled_for_interface(ctx, db, interface_name)
+    check_if_interface_is_valid(ctx, db, interface_name)
+    is_valid_interface_priority(ctx, priority)
+    curr_intf_priority = db.get_entry('STP_PORT', interface_name).get('priority')
+    db.mod_entry('STP_PORT', interface_name, {'priority': priority})
+    # update interface priority in all stp_vlan_intf entries if entry exists
+    for vlan, intf in db.get_table('STP_VLAN_PORT'):
+        if intf == interface_name:
+            vlan_intf_key = "{}|{}".format(vlan, interface_name)
+            vlan_intf_entry = db.get_entry('STP_VLAN_PORT', vlan_intf_key)
+            if len(vlan_intf_entry) != 0:
+                vlan_intf_priority = vlan_intf_entry.get('priority')
+                if curr_intf_priority == vlan_intf_priority:
+                    db.mod_entry('STP_VLAN_PORT', vlan_intf_key, {'priority': priority})
+    # end
+
+
+# STP interface port path cost
+STP_INTERFACE_MIN_PATH_COST = 1
+STP_INTERFACE_MAX_PATH_COST = 200000000 + + +def is_valid_interface_path_cost(ctx, intf_path_cost): + if intf_path_cost < STP_INTERFACE_MIN_PATH_COST or intf_path_cost > STP_INTERFACE_MAX_PATH_COST: + ctx.fail("STP interface path cost must be in range 1-200000000") + + +@spanning_tree_interface.command('cost') +@click.argument('interface_name', metavar='', required=True) +@click.argument('cost', metavar='<1-200000000>', required=True, type=int) +@clicommon.pass_db +def stp_interface_path_cost(_db, interface_name, cost): + """Configure STP path cost for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + is_valid_interface_path_cost(ctx, cost) + curr_intf_cost = db.get_entry('STP_PORT', interface_name).get('path_cost') + db.mod_entry('STP_PORT', interface_name, {'path_cost': cost}) + # update interface path_cost in all stp_vlan_intf entries if entry exists + for vlan, intf in db.get_table('STP_VLAN_PORT'): + if intf == interface_name: + vlan_intf_key = "{}|{}".format(vlan, interface_name) + vlan_intf_entry = db.get_entry('STP_VLAN_PORT', vlan_intf_key) + if len(vlan_intf_entry) != 0: + vlan_intf_cost = vlan_intf_entry.get('path_cost') + if curr_intf_cost == vlan_intf_cost: + db.mod_entry('STP_VLAN_PORT', vlan_intf_key, {'path_cost': cost}) + # end + + +# STP interface root guard +@spanning_tree_interface.group('root_guard') +@clicommon.pass_db +def spanning_tree_interface_root_guard(_db): + """Configure STP root guard for interface""" + pass + + +@spanning_tree_interface_root_guard.command('enable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_root_guard_enable(_db, interface_name): + """Enable STP root guard for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'root_guard': 'true'}) + + +@spanning_tree_interface_root_guard.command('disable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_root_guard_disable(_db, interface_name): + """Disable STP root guard for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'root_guard': 'false'}) + + +# STP interface bpdu guard +@spanning_tree_interface.group('bpdu_guard') +@clicommon.pass_db +def spanning_tree_interface_bpdu_guard(_db): + """Configure STP bpdu guard for interface""" + pass + + +@spanning_tree_interface_bpdu_guard.command('enable') +@click.argument('interface_name', metavar='', required=True) +@click.option('-s', '--shutdown', is_flag=True) +@clicommon.pass_db +def stp_interface_bpdu_guard_enable(_db, interface_name, shutdown): + """Enable STP bpdu guard for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + if shutdown is True: + bpdu_guard_do_disable = 'true' + else: + bpdu_guard_do_disable = 'false' + fvs = {'bpdu_guard': 'true', + 'bpdu_guard_do_disable': bpdu_guard_do_disable} + db.mod_entry('STP_PORT', interface_name, fvs) + + +@spanning_tree_interface_bpdu_guard.command('disable') 
+@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_bpdu_guard_disable(_db, interface_name): + """Disable STP bpdu guard for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'bpdu_guard': 'false'}) + + +# STP interface portfast +@spanning_tree_interface.group('portfast') +@clicommon.pass_db +def spanning_tree_interface_portfast(_db): + """Configure STP portfast for interface""" + pass + + +@spanning_tree_interface_portfast.command('enable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_portfast_enable(_db, interface_name): + """Enable STP portfast for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'portfast': 'true'}) + + +@spanning_tree_interface_portfast.command('disable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_portfast_disable(_db, interface_name): + """Disable STP portfast for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'portfast': 'false'}) + + +# STP interface root uplink_fast +@spanning_tree_interface.group('uplink_fast') +@clicommon.pass_db +def spanning_tree_interface_uplink_fast(_db): + """Configure STP uplink fast for interface""" + pass + + +@spanning_tree_interface_uplink_fast.command('enable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_uplink_fast_enable(_db, interface_name): + """Enable STP uplink fast for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'uplink_fast': 'true'}) + + +@spanning_tree_interface_uplink_fast.command('disable') +@click.argument('interface_name', metavar='', required=True) +@clicommon.pass_db +def stp_interface_uplink_fast_disable(_db, interface_name): + """Disable STP uplink fast for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_interface_is_valid(ctx, db, interface_name) + db.mod_entry('STP_PORT', interface_name, {'uplink_fast': 'false'}) + + +############################################### +# STP interface per VLAN commands implementation +############################################### +@spanning_tree_vlan.group('interface') +@clicommon.pass_db +def spanning_tree_vlan_interface(_db): + """Configure STP parameters for interface per VLAN""" + pass + + +# STP interface per vlan port priority +def is_valid_vlan_interface_priority(ctx, priority): + if priority not in range(STP_INTERFACE_MIN_PRIORITY, STP_INTERFACE_MAX_PRIORITY + 1): + ctx.fail("STP per vlan port priority must be in range 0-240") + + +@spanning_tree_vlan_interface.command('priority') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('interface_name', metavar='', required=True) +@click.argument('priority', metavar='<0-240>', required=True, 
type=int) +@clicommon.pass_db +def stp_vlan_interface_priority(_db, vid, interface_name, priority): + """Configure STP per vlan port priority for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_vlan_exist_in_db(db, ctx, vid) + is_interface_vlan_member(db, vlan_name, interface_name) + is_valid_vlan_interface_priority(ctx, priority) + vlan_interface = str(vlan_name) + "|" + interface_name + db.mod_entry('STP_VLAN_PORT', vlan_interface, {'priority': priority}) + + +@spanning_tree_vlan_interface.command('cost') +@click.argument('vid', metavar='', required=True, type=int) +@click.argument('interface_name', metavar='', required=True) +@click.argument('cost', metavar='<1-200000000>', required=True, type=int) +@clicommon.pass_db +def stp_vlan_interface_cost(_db, vid, interface_name, cost): + """Configure STP per vlan path cost for interface""" + ctx = click.get_current_context() + db = _db.cfgdb + vlan_name = 'Vlan{}'.format(vid) + check_if_stp_enabled_for_vlan(ctx, db, vlan_name) + check_if_stp_enabled_for_interface(ctx, db, interface_name) + check_if_vlan_exist_in_db(db, ctx, vid) + is_interface_vlan_member(db, vlan_name, interface_name) + is_valid_interface_path_cost(ctx, cost) + vlan_interface = str(vlan_name) + "|" + interface_name + db.mod_entry('STP_VLAN_PORT', vlan_interface, {'path_cost': cost}) + + +# Invoke main() +# if __name__ == '__main__': +# spanning_tree() diff --git a/config/syslog.py b/config/syslog.py index a5d520d9cf..7228e365c8 100644 --- a/config/syslog.py +++ b/config/syslog.py @@ -642,3 +642,57 @@ def disable_rate_limit_feature(db, service_name, namespace): if not failed: click.echo(f'Disabled syslog rate limit feature for {feature_name}') + + +@syslog.command('level') +@click.option("-i", "--identifier", + required=True, + help="Log identifier in DB for which loglevel is applied (provided with -l)") +@click.option("-l", "--level", + required=True, + help="Loglevel value", + type=click.Choice(['DEBUG', 'INFO', 'NOTICE', 'WARN', 'ERROR'])) +@click.option("--container", + help="Container name to which the SIGHUP is sent (provided with --pid or --program)") +@click.option("--program", + help="Program name to which the SIGHUP is sent (provided with --container)") +@click.option("--pid", + help="Process ID to which the SIGHUP is sent (provided with --container if PID is from container)") +@click.option('--namespace', '-n', 'namespace', default=None, + type=click.Choice(multi_asic_util.multi_asic_ns_choices()), + show_default=True, help='Namespace name') +@clicommon.pass_db +def level(db, identifier, level, container, program, pid, namespace): + """ Configure log level """ + if program and not container: + raise click.UsageError('--program must be specified with --container') + + if container and not program and not pid: + raise click.UsageError('--container must be specified with --pid or --program') + + if not namespace: + cfg_db = db.cfgdb + else: + asic_id = multi_asic.get_asic_id_from_name(namespace) + container = f'{container}{asic_id}' + cfg_db = db.cfgdb_clients[namespace] + + cfg_db.mod_entry('LOGGER', identifier, {'LOGLEVEL': level}) + if not container and not program and not pid: + return + + log_config = cfg_db.get_entry('LOGGER', identifier) + require_manual_refresh = log_config.get('require_manual_refresh') + if not require_manual_refresh: + return + + if container: + if program: + command 
= ['docker', 'exec', '-i', container, 'supervisorctl', 'signal', 'HUP', program] + else: + command = ['docker', 'exec', '-i', container, 'kill', '-s', 'SIGHUP', pid] + else: + command = ['kill', '-s', 'SIGHUP', pid] + output, ret = clicommon.run_command(command, return_cmd=True) + if ret != 0: + raise click.ClickException(f'Failed: {output}') diff --git a/config/vlan.py b/config/vlan.py index 98cc95757e..eae51eb312 100644 --- a/config/vlan.py +++ b/config/vlan.py @@ -6,6 +6,7 @@ from time import sleep from .utils import log from .validated_config_db_connector import ValidatedConfigDBConnector +from . import stp ADHOC_VALIDATION = True DHCP_RELAY_TABLE = "DHCP_RELAY" @@ -76,6 +77,9 @@ def add_vlan(db, vid, multiple): if clicommon.check_if_vlanid_exist(db.cfgdb, vlan, "DHCP_RELAY"): ctx.fail("DHCPv6 relay config for {} already exists".format(vlan)) + # Enable STP on VLAN if PVST is enabled globally + stp.vlan_enable_stp(db.cfgdb, vlan) + # set dhcpv4_relay table set_dhcp_relay_table('VLAN', config_db, vlan, {'vlanid': str(vid)}) @@ -97,6 +101,29 @@ def delete_db_entry(entry_name, db_connector, db_name): db_connector.delete(db_name, entry_name) +def enable_stp_on_port(db, port): + if stp.is_global_stp_enabled(db) is True: + vlan_list_for_intf = stp.get_vlan_list_for_interface(db, port) + if len(vlan_list_for_intf) == 0: + stp.interface_enable_stp(db, port) + + +def disable_stp_on_vlan_port(db, vlan, port): + if stp.is_global_stp_enabled(db) is True: + vlan_interface = str(vlan) + "|" + port + db.set_entry('STP_VLAN_PORT', vlan_interface, None) + vlan_list_for_intf = stp.get_vlan_list_for_interface(db, port) + if len(vlan_list_for_intf) == 0: + db.set_entry('STP_PORT', port, None) + + +def disable_stp_on_vlan(db, vlan_interface): + db.set_entry('STP_VLAN', vlan_interface, None) + stp_intf_list = stp.get_intf_list_from_stp_vlan_intf_table(db, vlan_interface) + for intf_name in stp_intf_list: + key = vlan_interface + "|" + intf_name + db.set_entry('STP_VLAN_PORT', key, None) + @vlan.command('del') @click.argument('vid', metavar='', required=True) @click.option('-m', '--multiple', is_flag=True, help="Add Multiple Vlan(s) in Range or in Comma separated list") @@ -154,7 +181,8 @@ def del_vlan(db, vid, multiple, no_restart_dhcp_relay): for vxmap_key, vxmap_data in vxlan_table.items(): if vxmap_data['vlan'] == 'Vlan{}'.format(vid): ctx.fail("vlan: {} can not be removed. " - "First remove vxlan mapping '{}' assigned to VLAN".format(vid, '|'.join(vxmap_key))) + "First remove vxlan mapping '{}' assigned to VLAN".format( + vid, '|'.join(vxmap_key))) # set dhcpv4_relay table set_dhcp_relay_table('VLAN', config_db, vlan, None) @@ -169,6 +197,9 @@ def del_vlan(db, vid, multiple, no_restart_dhcp_relay): delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) delete_db_entry("DHCP_COUNTER_TABLE|{}".format(vlan), db.db, db.db.STATE_DB) + # Delete STP_VLAN & STP_VLAN_PORT entries when VLAN is deleted. + disable_stp_on_vlan(db.cfgdb, 'Vlan{}'.format(vid)) + vlans = db.cfgdb.get_keys('VLAN') if not vlans: docker_exec_cmd = ['docker', 'exec', '-i', 'swss'] @@ -312,6 +343,10 @@ def add_vlan_member(db, vid, port, untagged, multiple, except_flag): ctx.fail("{} is in access mode! 
Tagged Members cannot be added".format(port)) elif existing_mode == mode_type or (existing_mode == "trunk" and mode_type == "access"): pass + + # If port is being made L2 port, enable STP + enable_stp_on_port(db.cfgdb, port) + try: config_db.set_entry('VLAN_MEMBER', (vlan, port), {'tagging_mode': "untagged" if untagged else "tagged"}) except ValueError: @@ -356,6 +391,9 @@ def del_vlan_member(db, vid, port, multiple, except_flag): if not clicommon.is_port_vlan_member(db.cfgdb, port, vlan): # TODO: MISSING CONSTRAINT IN YANG MODEL ctx.fail("{} is not a member of {}".format(port, vlan)) + # If port is being made non-L2 port, disable STP + disable_stp_on_vlan_port(db.cfgdb, vlan, port) + try: config_db.set_entry('VLAN_MEMBER', (vlan, port), None) delete_db_entry("DHCPv6_COUNTER_TABLE|{}".format(port), db.db, db.db.STATE_DB) diff --git a/consutil/lib.py b/consutil/lib.py index 1d7f967bd3..e597e3b643 100644 --- a/consutil/lib.py +++ b/consutil/lib.py @@ -277,7 +277,7 @@ def init_device_prefix(): @staticmethod def list_console_ttys(): """Lists all console tty devices""" - cmd = ["ls", SysInfoProvider.DEVICE_PREFIX + "*"] + cmd = ["bash", "-c", "ls " + SysInfoProvider.DEVICE_PREFIX + "*"] output, _ = SysInfoProvider.run_command(cmd, abort=False) ttys = output.split('\n') ttys = list([dev for dev in ttys if re.match(SysInfoProvider.DEVICE_PREFIX + r"\d+", dev) != None]) diff --git a/counterpoll/main.py b/counterpoll/main.py index ad15c8c248..2bfcd7377c 100644 --- a/counterpoll/main.py +++ b/counterpoll/main.py @@ -3,17 +3,29 @@ from flow_counter_util.route import exit_if_route_flow_counter_not_support from swsscommon.swsscommon import ConfigDBConnector from tabulate import tabulate +from sonic_py_common import device_info BUFFER_POOL_WATERMARK = "BUFFER_POOL_WATERMARK" PORT_BUFFER_DROP = "PORT_BUFFER_DROP" PG_DROP = "PG_DROP" ACL = "ACL" +ENI = "ENI" DISABLE = "disable" ENABLE = "enable" DEFLT_60_SEC= "default (60000)" DEFLT_10_SEC= "default (10000)" DEFLT_1_SEC = "default (1000)" + +def is_dpu(db): + """ Check if the device is DPU """ + platform_info = device_info.get_platform_info(db) + if platform_info.get('switch_type') == 'dpu': + return True + else: + return False + + @click.group() def cli(): """ SONiC Static Counter Poll configurations """ @@ -126,6 +138,7 @@ def disable(): port_info['FLEX_COUNTER_STATUS'] = DISABLE configdb.mod_entry("FLEX_COUNTER_TABLE", PORT_BUFFER_DROP, port_info) + # Ingress PG drop packet stat @cli.group() @click.pass_context @@ -382,6 +395,47 @@ def disable(ctx): fc_info['FLEX_COUNTER_STATUS'] = 'disable' ctx.obj.mod_entry("FLEX_COUNTER_TABLE", "FLOW_CNT_ROUTE", fc_info) + +# ENI counter commands +@cli.group() +@click.pass_context +def eni(ctx): + """ ENI counter commands """ + ctx.obj = ConfigDBConnector() + ctx.obj.connect() + if not is_dpu(ctx.obj): + click.echo("ENI counters are not supported on non DPU platforms") + exit(1) + + +@eni.command(name='interval') +@click.argument('poll_interval', type=click.IntRange(1000, 30000)) +@click.pass_context +def eni_interval(ctx, poll_interval): + """ Set eni counter query interval """ + eni_info = {} + eni_info['POLL_INTERVAL'] = poll_interval + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + +@eni.command(name='enable') +@click.pass_context +def eni_enable(ctx): + """ Enable eni counter query """ + eni_info = {} + eni_info['FLEX_COUNTER_STATUS'] = 'enable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + +@eni.command(name='disable') +@click.pass_context +def eni_disable(ctx): + """ Disable eni 
counter query """ + eni_info = {} + eni_info['FLEX_COUNTER_STATUS'] = 'disable' + ctx.obj.mod_entry("FLEX_COUNTER_TABLE", ENI, eni_info) + + @cli.command() def show(): """ Show the counter configuration """ @@ -399,6 +453,7 @@ def show(): tunnel_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'TUNNEL') trap_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_TRAP') route_info = configdb.get_entry('FLEX_COUNTER_TABLE', 'FLOW_CNT_ROUTE') + eni_info = configdb.get_entry('FLEX_COUNTER_TABLE', ENI) header = ("Type", "Interval (in ms)", "Status") data = [] @@ -428,6 +483,10 @@ def show(): data.append(["FLOW_CNT_ROUTE_STAT", route_info.get("POLL_INTERVAL", DEFLT_10_SEC), route_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + if is_dpu(configdb) and eni_info: + data.append(["ENI_STAT", eni_info.get("POLL_INTERVAL", DEFLT_10_SEC), + eni_info.get("FLEX_COUNTER_STATUS", DISABLE)]) + click.echo(tabulate(data, headers=header, tablefmt="simple", missingval="")) def _update_config_db_flex_counter_table(status, filename): diff --git a/debug/main.py b/debug/main.py index 069159fc75..1c12dffe85 100755 --- a/debug/main.py +++ b/debug/main.py @@ -4,6 +4,7 @@ import subprocess from shlex import join + def run_command(command, pager=False): command_str = join(command) click.echo(click.style("Command: ", fg='cyan') + click.style(command_str, fg='green')) @@ -25,6 +26,7 @@ def cli(): """SONiC command line - 'debug' command""" pass + prefix_pattern = '^[A-Za-z0-9.:/]*$' p = subprocess.check_output(['sudo', 'vtysh', '-c', 'show version'], text=True) if 'FRRouting' in p: diff --git a/debug/stp.py b/debug/stp.py new file mode 100644 index 0000000000..c154537e2a --- /dev/null +++ b/debug/stp.py @@ -0,0 +1,92 @@ +import click +import utilities_common.cli as clicommon + + +# +# This group houses Spanning_tree commands and subgroups +# +@click.group(cls=clicommon.AliasedGroup, default_if_no_args=False, invoke_without_command=True) +@click.pass_context +def spanning_tree(ctx): + '''debug spanning_tree commands''' + if ctx.invoked_subcommand is None: + command = 'sudo stpctl dbg enable' + clicommon.run_command(command) + + +@spanning_tree.group('dump', cls=clicommon.AliasedGroup, default_if_no_args=False, invoke_without_command=True) +def stp_debug_dump(): + pass + + +@stp_debug_dump.command('global') +def stp_debug_dump_global(): + command = 'sudo stpctl global' + clicommon.run_command(command) + + +@stp_debug_dump.command('vlan') +@click.argument('vlan_id', metavar='', required=True) +def stp_debug_dump_vlan(vlan_id): + command = 'sudo stpctl vlan ' + vlan_id + clicommon.run_command(command) + + +@stp_debug_dump.command('interface') +@click.argument('vlan_id', metavar='', required=True) +@click.argument('interface_name', metavar='', required=True) +def stp_debug_dump_vlan_intf(vlan_id, interface_name): + command = 'sudo stpctl port ' + vlan_id + " " + interface_name + clicommon.run_command(command) + + +@spanning_tree.command('show') +def stp_debug_show(): + command = 'sudo stpctl dbg show' + clicommon.run_command(command) + + +@spanning_tree.command('reset') +def stp_debug_reset(): + command = 'sudo stpctl dbg disable' + clicommon.run_command(command) + + +@spanning_tree.command('bpdu') +@click.argument('mode', metavar='{rx|tx}', required=False) +@click.option('-d', '--disable', is_flag=True) +def stp_debug_bpdu(mode, disable): + command = 'sudo stpctl dbg bpdu {}{}'.format( + ('rx-' if mode == 'rx' else 'tx-' if mode == 'tx' else ''), + ('off' if disable else 'on')) + clicommon.run_command(command) + + 
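The nested conditional expressions in `stp_debug_bpdu` above are compact but easy to misread. The following standalone sketch (pure Python, nothing device-specific assumed) mirrors the same string construction and enumerates the `stpctl` invocations it can emit:

```python
# Mirrors the format-string logic of stp_debug_bpdu above: an optional
# 'rx-'/'tx-' prefix is combined with 'on'/'off' derived from the -d flag.
def bpdu_command(mode, disable):
    return 'sudo stpctl dbg bpdu {}{}'.format(
        ('rx-' if mode == 'rx' else 'tx-' if mode == 'tx' else ''),
        ('off' if disable else 'on'))


assert bpdu_command(None, False) == 'sudo stpctl dbg bpdu on'     # debug spanning_tree bpdu
assert bpdu_command(None, True) == 'sudo stpctl dbg bpdu off'     # debug spanning_tree bpdu -d
assert bpdu_command('rx', False) == 'sudo stpctl dbg bpdu rx-on'  # debug spanning_tree bpdu rx
assert bpdu_command('tx', True) == 'sudo stpctl dbg bpdu tx-off'  # debug spanning_tree bpdu tx -d
```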
+@spanning_tree.command('verbose') +@click.option('-d', '--disable', is_flag=True) +def stp_debug_verbose(disable): + command = 'sudo stpctl dbg verbose {}'.format("off" if disable else "on") + clicommon.run_command(command) + + +@spanning_tree.command('event') +@click.option('-d', '--disable', is_flag=True) +def stp_debug_event(disable): + command = 'sudo stpctl dbg event {}'.format("off" if disable else "on") + clicommon.run_command(command) + + +@spanning_tree.command('vlan') +@click.argument('vlan_id', metavar='', required=True) +@click.option('-d', '--disable', is_flag=True) +def stp_debug_vlan(vlan_id, disable): + command = 'sudo stpctl dbg vlan {} {}'.format(vlan_id, "off" if disable else "on") + clicommon.run_command(command) + + +@spanning_tree.command('interface') +@click.argument('interface_name', metavar='', required=True) +@click.option('-d', '--disable', is_flag=True) +def stp_debug_intf(interface_name, disable): + command = 'sudo stpctl dbg port {} {}'.format(interface_name, "off" if disable else "on") + clicommon.run_command(command) diff --git a/doc/Command-Reference.md b/doc/Command-Reference.md index 766e432f48..b7217bb721 100644 --- a/doc/Command-Reference.md +++ b/doc/Command-Reference.md @@ -43,10 +43,13 @@ * [Console config commands](#console-config-commands) * [Console connect commands](#console-connect-commands) * [Console clear commands](#console-clear-commands) + * [DPU serial console utility](#dpu-serial-console-utility) * [CMIS firmware upgrade](#cmis-firmware-upgrade) * [CMIS firmware version show commands](#cmis-firmware-version-show-commands) * [CMIS firmware upgrade commands](#cmis-firmware-upgrade-commands) * [CMIS firmware target mode commands](#cmis-firmware-target-mode-commands) +* [CMIS debug](#cmis-debug) +* [CMIS debug loopback](#cmis-debug-loopback) * [DHCP Relay](#dhcp-relay) * [DHCP Relay show commands](#dhcp-relay-show-commands) * [DHCP Relay clear commands](#dhcp-relay-clear-commands) @@ -96,6 +99,11 @@ * [Linux Kernel Dump](#linux-kernel-dump) * [Linux Kernel Dump show commands](#Linux-Kernel-Dump-show-commands) * [Linux Kernel Dump config commands](#Linux-Kernel-Dump-config-commands) +* [LDAP](#LDAP) + * [show LDAP global commands](#LDAP-global-show-commands) + * [LDAP global config commands](#LDAP-global-config-commands) + * [show LDAP server commands](#LDAP-server-show-commands) + * [LDAP server config commands](#LDAP-server-config-commands) * [LLDP](#lldp) * [LLDP show commands](#lldp-show-commands) * [Loading, Reloading And Saving Configuration](#loading-reloading-and-saving-configuration) @@ -220,10 +228,15 @@ * [Local Users' Passwords Reset Commands](#local-users-passwords-reset-commands) * [Local Users' Passwords Config Command](#local-users-passwords-reset-config-command) * [Reset Local Users' Passwords Show command](#local-users-passwords-reset-show-command) +* [Banner Commands](#banner-commands) + * [Banner config commands](#banner-config-commands) + * [Banner show command](#banner-show-command) + ## Document History | Version | Modification Date | Details | | --- | --- | --- | +| v9 | Sep-19-2024 | Add DPU serial console utility | | v8 | Oct-09-2023 | Add CMIS firmware upgrade commands | | v7 | Jun-22-2023 | Add static DNS show and config commands | | v6 | May-06-2021 | Add SNMP show and config commands | @@ -2627,6 +2640,26 @@ When enabled, BGP will not advertise routes which aren't yet offloaded. Disabled ``` +**show bgp device-global** + +This command displays BGP device global configuration. 
+
+- Usage:
+  ```bash
+  show bgp device-global
+  ```
+
+- Options:
+  - _-j,--json_: display in JSON format
+
+- Example:
+  ```bash
+  admin@sonic:~$ show bgp device-global
+  TSA      W-ECMP
+  -------  -------
+  enabled  enabled
+  ```
+
 Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp)
 
 ### BGP config commands
 
@@ -2737,6 +2770,26 @@ Once enabled, BGP will not advertise routes which aren't yet offloaded.
   admin@sonic:~$ sudo config suppress-fib-pending disabled
   ```
 
+**config bgp device-global tsa/w-ecmp**
+
+This command is used to manage BGP device global configuration.
+
+Feature list:
+1. TSA - Traffic-Shift-Away
+2. W-ECMP - Weighted-Cost Multi-Path
+
+- Usage:
+  ```bash
+  config bgp device-global tsa
+  config bgp device-global w-ecmp
+  ```
+
+- Examples:
+  ```bash
+  admin@sonic:~$ config bgp device-global tsa enabled
+  admin@sonic:~$ config bgp device-global w-ecmp enabled
+  ```
+
 Go Back To [Beginning of the document](#) or [Beginning of this section](#bgp)
 
 ## Console
 
@@ -2780,7 +2833,7 @@ Optionally, you can display configured console ports only by specifying the `-b`
   1      9600  Enabled        -       -  switch1
   ```
 
-## Console config commands
+### Console config commands
 
 This sub-section explains the list of configuration options available for console management module.
 
@@ -2956,6 +3009,88 @@ Optionally, you can clear with a remote device name by specifying the `-d` or `-
 
 Go Back To [Beginning of the document](#) or [Beginning of this section](#console)
 
+### DPU serial console utility
+
+**dpu-tty.py**
+
+This command allows the user to connect to a DPU serial console via a TTY device,
+using the interactive CLI program picocom. The configuration is read from
+platform.json. The utility works only on smart switches that provide DPU UART
+connections through /dev/ttyS* devices.
+
+- Usage:
+  ```
+  dpu-tty.py (-n|--name) [(-b|-baud) ] [(-t|-tty) ]
+  ```
+
+- Example:
+  ```
+  root@MtFuji:/home/cisco# dpu-tty.py -n dpu0
+  picocom v3.1
+
+  port is         : /dev/ttyS4
+  flowcontrol     : none
+  baudrate is     : 115200
+  parity is       : none
+  databits are    : 8
+  stopbits are    : 1
+  escape is       : C-a
+  local echo is   : no
+  noinit is       : no
+  noreset is      : no
+  hangup is       : no
+  nolock is       : no
+  send_cmd is     : sz -vv
+  receive_cmd is  : rz -vv -E
+  imap is         :
+  omap is         :
+  emap is         : crcrlf,delbs,
+  logfile is      : none
+  initstring      : none
+  exit_after is   : not set
+  exit is         : no
+
+  Type [C-a] [C-h] to see available commands
+  Terminal ready
+
+  sonic login: admin
+  Password:
+  Linux sonic 6.1.0-11-2-arm64 #1 SMP Debian 6.1.38-4 (2023-08-08) aarch64
+  You are on
+    ____   ___  _   _ _  ____
+   / ___| / _ \| \ | (_)/ ___|
+   \___ \| | | |  \| | | |
+    ___) | |_| | |\  | | |___
+   |____/ \___/|_| \_|_|\____|
+
+  -- Software for Open Networking in the Cloud --
+
+  Unauthorized access and/or use are prohibited.
+  All access and/or use are subject to monitoring.
+
+  Help: https://sonic-net.github.io/SONiC/
+
+  Last login: Mon Sep  9 21:39:44 UTC 2024 on ttyS0
+  admin@sonic:~$
+  Terminating...
+  Thanks for using picocom
+  root@MtFuji:/home/cisco#
+  ```
+
+Optionally, the user may override the baud rate for an experiment.
+
+- Example:
+  ```
+  root@MtFuji:/home/cisco# dpu-tty.py -n dpu1 -b 9600
+  ```
+
+Optionally, the user may override the TTY device for an experiment.
+
+- Example:
+  ```
+  root@MtFuji:/home/cisco# dpu-tty.py -n dpu2 -t ttyS4
+  ```
+
 ## CMIS firmware upgrade
 
 ### CMIS firmware version show commands
@@ -3089,6 +3224,31 @@ Example of the module supporting target mode
   Target Mode set to 1
   ```
 
+## CMIS debug
+
+### CMIS debug loopback
+
+This command is the standard CMIS diagnostic control used for troubleshooting link and performance issues between the host switch and transceiver module.
+
+**sfputil debug loopback**
+
+- Usage:
+  ```
+  sfputil debug loopback PORT_NAME LOOPBACK_MODE
+
+  Valid values for loopback mode
+      host-side-input: host side input loopback mode
+      host-side-output: host side output loopback mode
+      media-side-input: media side input loopback mode
+      media-side-output: media side output loopback mode
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ sfputil debug loopback Ethernet88 host-side-input enable
+  admin@sonic:~$ sfputil debug loopback Ethernet88 media-side-output disable
+  ```
+
 ## DHCP Relay
 
 ### DHCP Relay show commands
@@ -4736,6 +4896,8 @@ Optional argument "-p" specify a period (in seconds) with which to gather counte
     show interfaces counters errors
     show interfaces counters rates
     show interfaces counters rif [-p|--period ] [-i ]
+    show interfaces counters fec-histogram [-i ]
+    show interfaces counters fec-stats
     ```
 
 - Example:
@@ -4853,6 +5015,47 @@ Optionally, you can specify a period (in seconds) with which to gather counters
   admin@sonic:~$ sonic-clear rifcounters
   ```
 
+The "fec-histogram" subcommand is used to display the FEC histogram for the port.
+
+When data is transmitted, it's broken down into units called codewords. FEC algorithms add extra data to each codeword that can be used to detect and correct errors in transmission.
+In a FEC histogram, "bins" represent ranges of errors or specific categories of errors. For instance, Bin0 might represent codewords with no errors, while Bin1 could represent codewords with a single bit error, and so on. The histogram shows how many codewords fell into each bin. A high number in the higher bins might indicate a problem with the transmission link, such as signal degradation.
+
+- Example:
+  ```
+  admin@str-s6000-acs-11:/usr/bin$ show interface counters fec-histogram -i
+  Symbol Errors Per Codeword  Codewords
+  --------------------------  ---------
+  BIN0:                         1000000
+  BIN1:                          900000
+  BIN2:                          800000
+  BIN3:                          700000
+  BIN4:                          600000
+  BIN5:                          500000
+  BIN6:                          400000
+  BIN7:                          300000
+  BIN8:                               0
+  BIN9:                               0
+  BIN10:                              0
+  BIN11:                              0
+  BIN12:                              0
+  BIN13:                              0
+  BIN14:                              0
+  BIN15:                              0
+  ```
+
+The "fec-stats" subcommand is used to display the interface FEC-related statistics.
+
+- Example:
+  ```
+  admin@ctd615:~$ show interfaces counters fec-stats
+        IFACE    STATE    FEC_CORR    FEC_UNCORR    FEC_SYMBOL_ERR    FEC_PRE_BER    FEC_POST_BER
+  -----------  -------  ----------  ------------  ----------------  -------------  --------------
+    Ethernet0        U           0             0                 0       1.48e-20        0.00e+00
+    Ethernet8        U           0             0                 0       1.98e-19        0.00e+00
+   Ethernet16        U           0             0                 0       1.77e-20        0.00e+00
+  ```
+
+
 **show interfaces description**
 
 This command displays the key fields of the interfaces such as Operational Status, Administrative Status, Alias and Description.
@@ -6300,6 +6503,86 @@ This command displays the kubernetes server status.
 ```
 Go Back To [Beginning of the document](#) or [Beginning of this section](#Kubernetes)
 
+## LDAP
+
+### show LDAP global commands
+
+This command displays the global LDAP configuration that includes the following parameters: base_dn, bind_dn, bind_password, bind_timeout, version, port, timeout.
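Per `config/ldap.py` earlier in this diff, each of these globals is one field of the single `LDAP|global` entry in CONFIG_DB, and `show ldap global` simply renders that entry. A minimal sketch of the lookup (an assumption: it runs on the switch itself, where CONFIG_DB is reachable; the field names are exactly the ones written by the config commands in this PR):

```python
# Read back the LDAP global entry that `config ldap global ...` populates.
from swsscommon.swsscommon import ConfigDBConnector

config_db = ConfigDBConnector()
config_db.connect()

entry = config_db.get_entry('LDAP', 'global')  # e.g. {'bind_dn': '...', 'port': '389'}
for field, value in entry.items():
    print(f'{field} = {value}')
```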
+
+- Usage:
+  ```
+  show ldap global
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ show ldap global
+  base-dn        Ldap user base dn
+  bind-dn        LDAP global bind dn
+  bind-password  Shared secret used for encrypting the communication
+  bind-timeout   Ldap bind timeout <0-120>
+  port           TCP port to communicate with LDAP server <1-65535>
+  timeout        Ldap timeout duration in sec <1-60>
+  version        Ldap version <1-3>
+  ```
+
+### LDAP global config commands
+
+These commands are used to configure the LDAP global parameters.
+
+- Usage:
+  ```
+  config ldap global
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ config ldap global
+
+  host
+         --prio <1 - 8>
+  base-dn        Ldap user base dn
+  bind-dn        LDAP global bind dn
+  bind-password  Shared secret used for encrypting the communication
+  bind-timeout   Ldap bind timeout <0-120>
+  port           TCP port to communicate with LDAP server <1-65535>
+  timeout        Ldap timeout duration in sec <1-60>
+  version        Ldap version <1-3>
+  ```
+
+### show LDAP server commands
+
+This command displays the LDAP servers configured in the system, including each server's hostname (or IP address) and priority.
+
+- Usage:
+  ```
+  show ldap-server
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ show ldap-server
+  hostname  Ldap hostname or IP of the configured LDAP server
+  priority  priority for the relevant LDAP server <1-8>
+  ```
+
+### LDAP server config commands
+
+These commands are used to manage the LDAP servers in the system; they are created in correspondence with the global configuration parameters mentioned earlier.
+
+- Usage:
+  ```
+  config ldap-server
+  ```
+
+- Example:
+  ```
+  admin@sonic:~$ config ldap-server
+
+  add     Add a new LDAP server --priority <1-8>
+  delete  Delete an existing LDAP server from the list --priority <1-8>
+  update  Update an existing LDAP server
+  ```
+
+Go Back To [Beginning of the document](#) or [Beginning of this section](#LDAP)
+
 ## Linux Kernel Dump
 
 This section demonstrates the show commands and configuration commands of Linux kernel dump mechanism in SONiC.
@@ -8336,74 +8619,11 @@ Go Back To [Beginning of the document](#) or [Beginning of this section](#platfo
 
 ### Mellanox Platform Specific Commands
 
-There are few commands that are platform specific. Mellanox has used this feature and implemented Mellanox specific commands as follows.
-
-**show platform mlnx sniffer**
-
-This command shows the SDK sniffer status
-
-- Usage:
-  ```
-  show platform mlnx sniffer
-  ```
-
-- Example:
-  ```
-  admin@sonic:~$ show platform mlnx sniffer
-  sdk sniffer is disabled
-  ```
-
-**show platform mlnx sniffer**
-
-Another show command available on ‘show platform mlnx’ which is the issu status.
-This means if ISSU is enabled on this SKU or not. A warm boot command can be executed only when ISSU is enabled on the SKU.
-
-- Usage:
-  ```
-  show platform mlnx issu
-  ```
-
-- Example:
-  ```
-  admin@sonic:~$ show platform mlnx issu
-  ISSU is enabled
-  ```
-
-In the case ISSU is disabled and warm-boot is called, the user will get a notification message explaining that the command cannot be invoked.
-
-- Example:
-  ```
-  admin@sonic:~$ sudo warm-reboot
-  ISSU is not enabled on this HWSKU
-  Warm reboot is not supported
-  ```
-
-**config platform mlnx**
-
-This command is valid only on mellanox devices. The sub-commands for "config platform" gets populated only on mellanox platforms.
-There are no other subcommands on non-Mellanox devices and hence this command appears empty and useless in other platforms.
-The platform mellanox command currently includes a single sub command which is the SDK sniffer.
-The SDK sniffer is a troubleshooting tool which records the RPC calls from the Mellanox SDK user API library to the sx_sdk task into a .pcap file.
-This .pcap file can be replayed afterward to get the exact same configuration state on SDK and FW to reproduce and investigate issues.
+config platform mlnx
-A new folder will be created to store the sniffer files: "/var/log/mellanox/sniffer/".
The result file will be stored in a .pcap file, which includes a time stamp of the starting time in the file name, for example, "sx_sdk_sniffer_20180224081306.pcap"
-In order to have a complete .pcap file with all the RPC calls, the user should disable the SDK sniffer. Swss service will be restarted and no capturing is taken place from that moment.
-It is recommended to review the .pcap file while sniffing is disabled.
-Once SDK sniffer is enabled/disabled, the user is requested to approve that swss service will be restarted.
-For example: To change SDK sniffer status, swss service will be restarted, continue? [y/N]:
-In order to avoid that confirmation the -y / --yes option should be used.
-
-- Usage:
-  ```
-  config platform mlnx sniffer sdk [-y|--yes]
-  ```
-
-- Example:
-  ```
-  admin@sonic:~$ config platform mlnx sniffer sdk
-  To change SDK sniffer status, swss service will be restarted, continue? [y/N]: y
-  NOTE: In order to avoid that confirmation the -y / --yes option should be used.
-  ```
+This command is valid only on Mellanox devices. The sub-commands for "config platform" get populated only on Mellanox platforms. There are no other subcommands on non-Mellanox devices and hence this command appears empty and useless on other platforms.
+
+The platform mellanox command currently includes no sub-commands.
 
 ### Barefoot Platform Specific Commands
 
@@ -10590,6 +10810,35 @@ This command is used to disable syslog rate limit feature.
   config syslog rate-limit-feature disable database -n asci0
   ```
 
+**config syslog level**
+
+This command is used to configure the log level for a given log identifier.
+
+- Usage:
+  ```
+  config syslog level -i -l --container [] --program []
+
+  config syslog level -i -l --container [] --pid []
+
+  config syslog level -i -l --pid []
+  ```
+
+- Example:
+
+  ```
+  # Update the log level without refreshing the configuration
+  config syslog level -i xcvrd -l DEBUG
+
+  # Update the log level and send SIGHUP to xcvrd running in PMON
+  config syslog level -i xcvrd -l DEBUG --container pmon --program xcvrd
+
+  # Update the log level and send SIGHUP to PID 20 running in PMON
+  config syslog level -i xcvrd -l DEBUG --container pmon --pid 20
+
+  # Update the log level and send SIGHUP to PID 20 running on the host
+  config syslog level -i xcvrd -l DEBUG --pid 20
+  ```
+
 Go Back To [Beginning of the document](#) or [Beginning of this section](#syslog)
 
 ## System State
 
@@ -13624,4 +13873,90 @@ Please note, The commands will not have any effect if the feature is disabled in
    state
    -------
    enabled
-   ```
\ No newline at end of file
+   ```
+
+# Banner Commands
+
+This sub-section explains the list of the configuration options available for the Banner feature.
+
+## Banner config commands
+
+- Set banner feature state
+
+```
+admin@sonic:~$ config banner state
+Usage: config banner state
+
+  Set banner feature state
+
+Options:
+  -?, -h, --help  Show this message and exit.
+```
+
+- Set login message
+
+```
+admin@sonic:~$ config banner login
+Usage: config banner login
+
+  Set login message
+
+Options:
+  -?, -h, --help  Show this message and exit.
+```
+
+- Set logout message
+
+```
+admin@sonic:~$ config banner logout
+Usage: config banner logout
+
+  Set logout message
+
+Options:
+  -?, -h, --help  Show this message and exit.
+```
+
+- Set message of the day
+
+```
+admin@sonic:~$ config banner motd
+Usage: config banner motd
+
+  Set message of the day
+
+Options:
+  -?, -h, --help  Show this message and exit.
```

## Banner show command

- Show banner messages

```
admin@sonic:~$ show banner
Usage: show banner

  Show banner messages

Options:
  -h, -?, --help  Show this message and exit.
```
```
admin@sonic:~$ show banner
state    login    motd                                              logout
-------  -------  ------------------------------------------------  --------
enabled  Login    You are on
         Message    ____   ___  _   _ _  ____
                   / ___| / _ \| \ | (_)/ ___|
                   \___ \| | | |  \| | | |
                    ___) | |_| | |\  | | |___
                   |____/ \___/|_| \_|_|\____|

                   -- Software for Open Networking in the Cloud --

                   Unauthorized access and/or use are prohibited.
                   All access and/or use are subject to monitoring.

                   Help: https://sonic-net.github.io/SONiC/
```
diff --git a/generic_config_updater/change_applier.py b/generic_config_updater/change_applier.py
index 32a356bf9a..b5712d024f 100644
--- a/generic_config_updater/change_applier.py
+++ b/generic_config_updater/change_applier.py
@@ -9,6 +9,7 @@
 from swsscommon.swsscommon import ConfigDBConnector
 from sonic_py_common import multi_asic
 from .gu_common import GenericConfigUpdaterError, genericUpdaterLogging
+from .gu_common import get_config_db_as_json

 SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
 UPDATER_CONF_FILE = f"{SCRIPT_DIR}/gcu_services_validator.conf.json"
@@ -16,6 +17,7 @@

 print_to_console = False

+
 def set_verbose(verbose=False):
     global print_to_console, logger

@@ -34,11 +36,12 @@ def log_error(m):
     logger.log(logger.LOG_PRIORITY_ERROR, m, print_to_console)


-def get_config_db(namespace=multi_asic.DEFAULT_NAMESPACE):
-    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace)
+def get_config_db(scope=multi_asic.DEFAULT_NAMESPACE):
+    config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=scope)
     config_db.connect()
     return config_db

+
 def set_config(config_db, tbl, key, data):
     config_db.set_entry(tbl, key, data)

@@ -61,11 +64,9 @@ class DryRunChangeApplier:
     def __init__(self, config_wrapper):
         self.config_wrapper = config_wrapper

-
     def apply(self, change):
         self.config_wrapper.apply_change_to_config_db(change)

-
     def remove_backend_tables_from_config(self, data):
         return data

@@ -74,9 +75,9 @@ class ChangeApplier:

     updater_conf = None

-    def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE):
-        self.namespace = namespace
-        self.config_db = get_config_db(self.namespace)
+    def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE):
+        self.scope = scope
+        self.config_db = get_config_db(self.scope)
         self.backend_tables = [
             "BUFFER_PG",
             "BUFFER_PROFILE",
@@ -86,7 +87,6 @@ def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE):
         with open(UPDATER_CONF_FILE, "r") as s:
             ChangeApplier.updater_conf = json.load(s)

-
     def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys):
         # cmd is in the format as <package/module name>.<method name>
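        #
        # (Editor's sketch, not part of the patch): given that format, the
        # dispatch below amounts to importing the named module and calling the
        # named function with (old config, updated config, affected keys).
        # Assuming a hypothetical conf entry "services_validator.vlan_validator":
        #
        #   import importlib
        #   method_name = cmd.split(".")[-1]
        #   module_name = ".".join(cmd.split(".")[0:-1])
        #   module = importlib.import_module(module_name, package=None)
        #   method_to_call = getattr(module, method_name)
        #   ok = method_to_call(old_cfg, upd_cfg, keys)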
# @@ -98,7 +98,6 @@ def _invoke_cmd(self, cmd, old_cfg, upd_cfg, keys): return method_to_call(old_cfg, upd_cfg, keys) - def _services_validate(self, old_cfg, upd_cfg, keys): lst_svcs = set() lst_cmds = set() @@ -124,7 +123,6 @@ def _services_validate(self, old_cfg, upd_cfg, keys): log_debug("service invoked: {}".format(cmd)) return 0 - def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys): for key in set(run_tbl.keys()).union(set(upd_tbl.keys())): run_data = run_tbl.get(key, None) @@ -135,24 +133,21 @@ def _upd_data(self, tbl, run_tbl, upd_tbl, upd_keys): upd_keys[tbl][key] = {} log_debug("Patch affected tbl={} key={}".format(tbl, key)) - def _report_mismatch(self, run_data, upd_data): log_error("run_data vs expected_data: {}".format( str(jsondiff.diff(run_data, upd_data))[0:40])) - def apply(self, change): - run_data = self._get_running_config() + run_data = get_config_db_as_json(self.scope) upd_data = prune_empty_table(change.apply(copy.deepcopy(run_data))) upd_keys = defaultdict(dict) for tbl in sorted(set(run_data.keys()).union(set(upd_data.keys()))): - self._upd_data(tbl, run_data.get(tbl, {}), - upd_data.get(tbl, {}), upd_keys) + self._upd_data(tbl, run_data.get(tbl, {}), upd_data.get(tbl, {}), upd_keys) ret = self._services_validate(run_data, upd_data, upd_keys) if not ret: - run_data = self._get_running_config() + run_data = get_config_db_as_json(self.scope) self.remove_backend_tables_from_config(upd_data) self.remove_backend_tables_from_config(run_data) if upd_data != run_data: @@ -165,29 +160,3 @@ def apply(self, change): def remove_backend_tables_from_config(self, data): for key in self.backend_tables: data.pop(key, None) - - def _get_running_config(self): - _, fname = tempfile.mkstemp(suffix="_changeApplier") - - if self.namespace: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] - else: - cmd = ['sonic-cfggen', '-d', '--print-data'] - - with open(fname, "w") as file: - result = subprocess.Popen(cmd, stdout=file, stderr=subprocess.PIPE, text=True) - _, err = result.communicate() - - return_code = result.returncode - if return_code: - os.remove(fname) - raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") - - run_data = {} - try: - with open(fname, "r") as file: - run_data = json.load(file) - finally: - if os.path.isfile(fname): - os.remove(fname) - return run_data diff --git a/generic_config_updater/gcu_field_operation_validators.conf.json b/generic_config_updater/gcu_field_operation_validators.conf.json index 68e49b6c03..9084a5ee96 100644 --- a/generic_config_updater/gcu_field_operation_validators.conf.json +++ b/generic_config_updater/gcu_field_operation_validators.conf.json @@ -20,15 +20,20 @@ "spc1": [ "ACS-MSN2700", "ACS-MSN2740", "ACS-MSN2100", "ACS-MSN2410", "ACS-MSN2010", "Mellanox-SN2700", "Mellanox-SN2700-C28D8", "Mellanox-SN2700-D40C8S8", "Mellanox-SN2700-D44C10", "Mellanox-SN2700-D48C8", "ACS-MSN2700-A1", "Mellanox-SN2700-A1", "Mellanox-SN2700-A1-C28D8", "Mellanox-SN2700-A1-D40C8S8", "Mellanox-SN2700-A1-D44C10", "Mellanox-SN2700-A1-D48C8" ], "spc2": [ "ACS-MSN3800", "Mellanox-SN3800-D112C8", "ACS-MSN3420", "ACS-MSN3700C", "ACS-MSN3700", "Mellanox-SN3800-C64", "Mellanox-SN3800-D100C12S2", "Mellanox-SN3800-D24C52", "Mellanox-SN3800-D28C49S1", "Mellanox-SN3800-D28C50" ], - "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", 
"Mellanox-SN4600C-D48C40", - "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32"], - "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "ACS-SN5400" ] + "spc3": [ "ACS-MSN4700", "ACS-MSN4600", "ACS-MSN4600C", "ACS-MSN4410", "ACS-SN4280", "Mellanox-SN4600C-D112C8", "Mellanox-SN4600C-C64", "Mellanox-SN4700-O8C48", "Mellanox-SN4600C-D100C12S2", "Mellanox-SN4600C-D48C40","Mellanox-SN4700-O32","Mellanox-SN4700-V64", + "Mellanox-SN4700-A96C8V8", "Mellanox-SN4700-C128", "Mellanox-SN4700-O28", "Mellanox-SN4700-O8V48", "Mellanox-SN4700-V48C32", "Mellanox-SN4280-O28"], + "spc4": [ "ACS-SN5600", "Mellanox-SN5600-O128", "Mellanox-SN5600-V256", "Mellanox-SN5600-C256S1", "ACS-SN5400", "Mellanox-SN5600-C224O8" ], + "spc5": ["ACS-SN5640"] }, "broadcom_asics": { "th": [ "Force10-S6100", "Arista-7060CX-32S-C32", "Arista-7060CX-32S-C32-T1", "Arista-7060CX-32S-D48C8", "Celestica-DX010-C32", "Seastone-DX010" ], "th2": [ "Arista-7260CX3-D108C8", "Arista-7260CX3-C64", "Arista-7260CX3-Q64" ], + "th3": [ "Nokia-IXR7220-H3" ], + "th4": [ "Nokia-IXR7220-H4-64D", "Nokia-IXR7220-H4-32D" ], + "th5": [ "Nokia-IXR7220-H5-64D" ], "td2": [ "Force10-S6000", "Force10-S6000-Q24S32", "Arista-7050-QX32", "Arista-7050-QX-32S", "Nexus-3164", "Arista-7050QX32S-Q32" ], - "td3": [ "Arista-7050CX3-32S-C32", "Arista-7050CX3-32S-D48C8" ] + "td3": [ "Arista-7050CX3-32S-C32", "Arista-7050CX3-32S-D48C8" ], + "j2c+": [ "Nokia-IXR7250E-36x100G", "Nokia-IXR7250E-36x400G" ] } } }, @@ -54,7 +59,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -80,7 +89,11 @@ "td2": "", "th": "20221100", "th2": "20221100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20221100", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -104,7 +117,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } }, @@ -121,7 +138,11 @@ "td2": "", "th": "20221100", "th2": "20221100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20221100", + "j2c+": "20220500", "cisco-8000": "20201200" } } @@ -147,7 +168,11 @@ "td2": "20181100", "th": "20181100", "th2": "20181100", + "th3": "20240500", + "th4": "20240500", + "th5": "20240500", "td3": "20201200", + "j2c+": "20220500", "cisco-8000": "20201200" } } diff --git a/generic_config_updater/generic_updater.py b/generic_config_updater/generic_updater.py index b75939749c..e8bb021808 100644 --- a/generic_config_updater/generic_updater.py +++ b/generic_config_updater/generic_updater.py @@ -1,43 +1,76 @@ import json import jsonpointer import os +import subprocess + from enum import Enum -from .gu_common import GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ - DryRunConfigWrapper, PatchWrapper, genericUpdaterLogging +from .gu_common import HOST_NAMESPACE, GenericConfigUpdaterError, EmptyTableError, ConfigWrapper, \ + DryRunConfigWrapper, PatchWrapper, genericUpdaterLogging from .patch_sorter import StrictPatchSorter, NonStrictPatchSorter, ConfigSplitter, \ - TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter + TablesWithoutYangConfigSplitter, IgnorePathsFromYangConfigSplitter from .change_applier import ChangeApplier, DryRunChangeApplier from sonic_py_common import multi_asic CHECKPOINTS_DIR = 
"/etc/sonic/checkpoints" CHECKPOINT_EXT = ".cp.json" + def extract_scope(path): if not path: - raise Exception("Wrong patch with empty path.") - - try: - pointer = jsonpointer.JsonPointer(path) - parts = pointer.parts - except Exception as e: - raise Exception(f"Error resolving path: '{path}' due to {e}") + raise GenericConfigUpdaterError("Wrong patch with empty path.") + pointer = jsonpointer.JsonPointer(path) + # Re-escapes + parts = [jsonpointer.escape(part) for part in pointer.parts] if not parts: - raise Exception("Wrong patch with empty path.") + raise GenericConfigUpdaterError("Wrong patch with empty path.") if parts[0].startswith("asic"): if not parts[0][len("asic"):].isnumeric(): - raise Exception(f"Error resolving path: '{path}' due to incorrect ASIC number.") + raise GenericConfigUpdaterError(f"Error resolving path: '{path}' due to incorrect ASIC number.") scope = parts[0] remainder = "/" + "/".join(parts[1:]) - elif parts[0] == "localhost": - scope = "localhost" + elif parts[0] == HOST_NAMESPACE: + scope = HOST_NAMESPACE remainder = "/" + "/".join(parts[1:]) else: + if multi_asic.is_multi_asic(): + raise GenericConfigUpdaterError(f"Multi ASIC must have namespace prefix in path: '{path}'.") + scope = "" remainder = path - return scope, remainder + +def get_cmd_output(cmd): + proc = subprocess.Popen(cmd, text=True, stdout=subprocess.PIPE) + return proc.communicate()[0], proc.returncode + + +def get_config_json(): + scope_list = [multi_asic.DEFAULT_NAMESPACE] + all_running_config = {} + if multi_asic.is_multi_asic(): + scope_list.extend(multi_asic.get_namespace_list()) + for scope in scope_list: + command = ["sonic-cfggen", "-d", "--print-data"] + if scope != multi_asic.DEFAULT_NAMESPACE: + command += ["-n", scope] + + running_config_text, returncode = get_cmd_output(command) + if returncode: + raise GenericConfigUpdaterError( + f"Fetch all runningconfiguration failed as output:{running_config_text}") + running_config = json.loads(running_config_text) + + if multi_asic.is_multi_asic(): + if scope == multi_asic.DEFAULT_NAMESPACE: + scope = HOST_NAMESPACE + all_running_config[scope] = running_config + else: + all_running_config = running_config + return all_running_config + + class ConfigLock: def acquire_lock(self): # TODO: Implement ConfigLock @@ -52,22 +85,23 @@ class ConfigFormat(Enum): CONFIGDB = 1 SONICYANG = 2 + class PatchApplier: def __init__(self, patchsorter=None, changeapplier=None, config_wrapper=None, patch_wrapper=None, - namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Patch Applier", print_all_to_console=True) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(scope=self.scope) self.patchsorter = patchsorter if patchsorter is not None else StrictPatchSorter(self.config_wrapper, self.patch_wrapper) - self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(namespace=self.namespace) + self.changeapplier = changeapplier if changeapplier is not None else ChangeApplier(scope=self.scope) def apply(self, patch, sort=True): - scope = self.namespace if 
self.namespace else 'localhost' + scope = self.scope if self.scope else HOST_NAMESPACE self.logger.log_notice(f"{scope}: Patch application starting.") self.logger.log_notice(f"{scope}: Patch: {patch}") @@ -84,15 +118,14 @@ def apply(self, patch, sort=True): self.config_wrapper.validate_field_operation(old_config, target_config) # Validate target config does not have empty tables since they do not show up in ConfigDb - self.logger.log_notice(f"{scope}: alidating target config does not have empty tables, " \ - "since they do not show up in ConfigDb.") + self.logger.log_notice(f"""{scope}: validating target config does not have empty tables, + since they do not show up in ConfigDb.""") empty_tables = self.config_wrapper.get_empty_tables(target_config) - if empty_tables: # if there are empty tables + if empty_tables: # if there are empty tables empty_tables_txt = ", ".join(empty_tables) - raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables " \ - "which is not allowed in ConfigDb. " \ - f"Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") - + raise EmptyTableError(f"{scope}: given patch is not valid because it will result in empty tables \ + which is not allowed in ConfigDb. \ + Table{'s' if len(empty_tables) != 1 else ''}: {empty_tables_txt}") # Generate list of changes to apply if sort: self.logger.log_notice(f"{scope}: sorting patch updates.") @@ -105,9 +138,6 @@ def apply(self, patch, sort=True): self.logger.log_notice(f"The {scope} patch was converted into {changes_len} " \ f"change{'s' if changes_len != 1 else ''}{':' if changes_len > 0 else '.'}") - for change in changes: - self.logger.log_notice(f" * {change}") - # Apply changes in order self.logger.log_notice(f"{scope}: applying {changes_len} change{'s' if changes_len != 1 else ''} " \ f"in order{':' if changes_len > 0 else '.'}") @@ -120,19 +150,19 @@ def apply(self, patch, sort=True): new_config = self.config_wrapper.get_config_db_as_json() self.changeapplier.remove_backend_tables_from_config(target_config) self.changeapplier.remove_backend_tables_from_config(new_config) - if not(self.patch_wrapper.verify_same_json(target_config, new_config)): + if not (self.patch_wrapper.verify_same_json(target_config, new_config)): raise GenericConfigUpdaterError(f"{scope}: after applying patch to config, there are still some parts not updated") self.logger.log_notice(f"{scope} patch application completed.") class ConfigReplacer: - def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, patch_applier=None, config_wrapper=None, patch_wrapper=None, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Config Replacer", print_all_to_console=True) - self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(namespace=self.namespace) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) - self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(namespace=self.namespace) + self.patch_applier = patch_applier if patch_applier is not None else PatchApplier(scope=self.scope) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) + self.patch_wrapper = patch_wrapper if patch_wrapper is not None else PatchWrapper(scope=self.scope) def replace(self, target_config): 
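        # (Editor's note, a hedged sketch): conceptually, replace() diffs the
        # current running config against target_config and applies the result
        # through the patch applier, roughly:
        #
        #   old_config = self.config_wrapper.get_config_db_as_json()
        #   patch = self.patch_wrapper.generate_patch(old_config, target_config)
        #   self.patch_applier.apply(patch)
        #
        # with the verification step logged further below.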
self.logger.log_notice("Config replacement starting.") @@ -150,7 +180,7 @@ def replace(self, target_config): self.logger.log_notice("Verifying config replacement is reflected on ConfigDB.") new_config = self.config_wrapper.get_config_db_as_json() - if not(self.patch_wrapper.verify_same_json(target_config, new_config)): + if not (self.patch_wrapper.verify_same_json(target_config, new_config)): raise GenericConfigUpdaterError(f"After replacing config, there is still some parts not updated") self.logger.log_notice("Config replacement completed.") @@ -161,23 +191,24 @@ def __init__(self, checkpoints_dir=CHECKPOINTS_DIR, config_replacer=None, config_wrapper=None, - namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.logger = genericUpdaterLogging.get_logger(title="Config Rollbacker", print_all_to_console=True) + self.util = Util(checkpoints_dir=checkpoints_dir) self.checkpoints_dir = checkpoints_dir - self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer(namespace=self.namespace) - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(namespace=self.namespace) + self.config_replacer = config_replacer if config_replacer is not None else ConfigReplacer(scope=self.scope) + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(scope=self.scope) def rollback(self, checkpoint_name): self.logger.log_notice("Config rollbacking starting.") self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice(f"Verifying '{checkpoint_name}' exists.") - if not self._check_checkpoint_exists(checkpoint_name): + if not self.util.check_checkpoint_exists(checkpoint_name): raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") self.logger.log_notice(f"Loading checkpoint into memory.") - target_config = self._get_checkpoint_content(checkpoint_name) + target_config = self.util.get_checkpoint_content(checkpoint_name) self.logger.log_notice(f"Replacing config using 'Config Replacer'.") self.config_replacer.replace(target_config) @@ -189,16 +220,16 @@ def checkpoint(self, checkpoint_name): self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice("Getting current config db.") - json_content = self.config_wrapper.get_config_db_as_json() + json_content = get_config_json() self.logger.log_notice("Getting checkpoint full-path.") - path = self._get_checkpoint_full_path(checkpoint_name) + path = self.util.get_checkpoint_full_path(checkpoint_name) self.logger.log_notice("Ensuring checkpoint directory exist.") - self._ensure_checkpoints_dir_exists() + self.util.ensure_checkpoints_dir_exists() self.logger.log_notice(f"Saving config db content to {path}.") - self._save_json_file(path, json_content) + self.util.save_json_file(path, json_content) self.logger.log_notice("Config checkpoint completed.") @@ -206,12 +237,12 @@ def list_checkpoints(self): self.logger.log_info("Listing checkpoints starting.") self.logger.log_info(f"Verifying checkpoints directory '{self.checkpoints_dir}' exists.") - if not self._checkpoints_dir_exist(): + if not self.util.checkpoints_dir_exist(): self.logger.log_info("Checkpoints directory is empty, returning empty checkpoints list.") return [] self.logger.log_info("Getting checkpoints in checkpoints directory.") - checkpoint_names = self._get_checkpoint_names() + checkpoint_names = self.util.get_checkpoint_names() checkpoints_len = len(checkpoint_names) 
self.logger.log_info(f"Found {checkpoints_len} checkpoint{'s' if checkpoints_len != 1 else ''}{':' if checkpoints_len > 0 else '.'}") @@ -227,59 +258,139 @@ def delete_checkpoint(self, checkpoint_name): self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") self.logger.log_notice(f"Checking checkpoint exists.") - if not self._check_checkpoint_exists(checkpoint_name): + if not self.util.check_checkpoint_exists(checkpoint_name): raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") self.logger.log_notice(f"Deleting checkpoint.") - self._delete_checkpoint(checkpoint_name) + self.util.delete_checkpoint(checkpoint_name) self.logger.log_notice("Deleting checkpoint completed.") - def _ensure_checkpoints_dir_exists(self): + +class MultiASICConfigReplacer(ConfigReplacer): + def __init__(self, + patch_applier=None, + config_wrapper=None, + patch_wrapper=None, + scope=multi_asic.DEFAULT_NAMESPACE): + self.logger = genericUpdaterLogging.get_logger(title="MultiASICConfigReplacer", + print_all_to_console=True) + self.scopelist = [HOST_NAMESPACE, *multi_asic.get_namespace_list()] + super().__init__(patch_applier, config_wrapper, patch_wrapper, scope) + + def replace(self, target_config): + config_keys = set(target_config.keys()) + missing_scopes = set(self.scopelist) - config_keys + if missing_scopes: + raise GenericConfigUpdaterError(f"To be replace config is missing scope: {missing_scopes}") + + for scope in self.scopelist: + scope_config = target_config.pop(scope) + if scope.lower() == HOST_NAMESPACE: + scope = multi_asic.DEFAULT_NAMESPACE + ConfigReplacer(scope=scope).replace(scope_config) + + +class MultiASICConfigRollbacker(FileSystemConfigRollbacker): + def __init__(self, + checkpoints_dir=CHECKPOINTS_DIR, + config_replacer=None, + config_wrapper=None): + self.logger = genericUpdaterLogging.get_logger(title="MultiASICConfigRollbacker", + print_all_to_console=True) + self.scopelist = [HOST_NAMESPACE, *multi_asic.get_namespace_list()] + self.checkpoints_dir = checkpoints_dir + self.util = Util(checkpoints_dir=checkpoints_dir) + super().__init__(config_wrapper=config_wrapper, config_replacer=config_replacer) + + def rollback(self, checkpoint_name): + self.logger.log_notice("Config rollbacking starting.") + self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") + self.logger.log_notice(f"Verifying '{checkpoint_name}' exists.") + + if not self.util.check_checkpoint_exists(checkpoint_name): + raise ValueError(f"Checkpoint '{checkpoint_name}' does not exist") + + self.logger.log_notice(f"Loading checkpoint '{checkpoint_name}' into memory.") + target_config = self.util.get_checkpoint_content(checkpoint_name) + self.logger.log_notice(f"Replacing config '{checkpoint_name}' using 'Config Replacer'.") + + for scope in self.scopelist: + config = target_config.pop(scope) + if scope.lower() == HOST_NAMESPACE: + scope = multi_asic.DEFAULT_NAMESPACE + ConfigReplacer(scope=scope).replace(config) + + self.logger.log_notice("Config rollbacking completed.") + + def checkpoint(self, checkpoint_name): + all_configs = get_config_json() + self.logger.log_notice("Config checkpoint starting.") + self.logger.log_notice(f"Checkpoint name: {checkpoint_name}.") + + self.logger.log_notice("Getting checkpoint full-path.") + path = self.util.get_checkpoint_full_path(checkpoint_name) + + self.logger.log_notice("Ensuring checkpoint directory exist.") + self.util.ensure_checkpoints_dir_exists() + + self.logger.log_notice(f"Saving config db content to {path}.") + self.util.save_json_file(path, all_configs) 
+ + self.logger.log_notice("Config checkpoint completed.") + + +class Util: + def __init__(self, checkpoints_dir=CHECKPOINTS_DIR): + self.checkpoints_dir = checkpoints_dir + + def ensure_checkpoints_dir_exists(self): os.makedirs(self.checkpoints_dir, exist_ok=True) - def _save_json_file(self, path, json_content): + def save_json_file(self, path, json_content): with open(path, "w") as fh: fh.write(json.dumps(json_content)) - def _get_checkpoint_content(self, checkpoint_name): - path = self._get_checkpoint_full_path(checkpoint_name) + def get_checkpoint_content(self, checkpoint_name): + path = self.get_checkpoint_full_path(checkpoint_name) with open(path) as fh: text = fh.read() return json.loads(text) - def _get_checkpoint_full_path(self, name): + def get_checkpoint_full_path(self, name): return os.path.join(self.checkpoints_dir, f"{name}{CHECKPOINT_EXT}") - def _get_checkpoint_names(self): + def get_checkpoint_names(self): file_names = [] for file_name in os.listdir(self.checkpoints_dir): if file_name.endswith(CHECKPOINT_EXT): # Remove extension from file name. # Example assuming ext is '.cp.json', then 'checkpoint1.cp.json' becomes 'checkpoint1' file_names.append(file_name[:-len(CHECKPOINT_EXT)]) - return file_names - def _checkpoints_dir_exist(self): + def checkpoints_dir_exist(self): return os.path.isdir(self.checkpoints_dir) - def _check_checkpoint_exists(self, name): - path = self._get_checkpoint_full_path(name) + def check_checkpoint_exists(self, name): + path = self.get_checkpoint_full_path(name) return os.path.isfile(path) - def _delete_checkpoint(self, name): - path = self._get_checkpoint_full_path(name) + def delete_checkpoint(self, name): + path = self.get_checkpoint_full_path(name) return os.remove(path) class Decorator(PatchApplier, ConfigReplacer, FileSystemConfigRollbacker): - def __init__(self, decorated_patch_applier=None, decorated_config_replacer=None, decorated_config_rollbacker=None, namespace=multi_asic.DEFAULT_NAMESPACE): + def __init__(self, + decorated_patch_applier=None, + decorated_config_replacer=None, + decorated_config_rollbacker=None, + scope=multi_asic.DEFAULT_NAMESPACE): # initing base classes to make LGTM happy - PatchApplier.__init__(self, namespace=namespace) - ConfigReplacer.__init__(self, namespace=namespace) - FileSystemConfigRollbacker.__init__(self, namespace=namespace) - + PatchApplier.__init__(self, scope=scope) + ConfigReplacer.__init__(self, scope=scope) + FileSystemConfigRollbacker.__init__(self, scope=scope) self.decorated_patch_applier = decorated_patch_applier self.decorated_config_replacer = decorated_config_replacer self.decorated_config_rollbacker = decorated_config_rollbacker @@ -304,10 +415,14 @@ def delete_checkpoint(self, checkpoint_name): class SonicYangDecorator(Decorator): - def __init__(self, patch_wrapper, config_wrapper, decorated_patch_applier=None, decorated_config_replacer=None, namespace=multi_asic.DEFAULT_NAMESPACE): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, namespace=namespace) - - self.namespace = namespace + def __init__(self, + patch_wrapper, + config_wrapper, + decorated_patch_applier=None, + decorated_config_replacer=None, + scope=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, scope=scope) + self.scope = scope self.patch_wrapper = patch_wrapper self.config_wrapper = config_wrapper @@ -326,9 +441,12 @@ def __init__(self, decorated_config_replacer=None, decorated_config_rollbacker=None, config_lock=ConfigLock(), - 
namespace=multi_asic.DEFAULT_NAMESPACE): - Decorator.__init__(self, decorated_patch_applier, decorated_config_replacer, decorated_config_rollbacker, namespace=namespace) - + scope=multi_asic.DEFAULT_NAMESPACE): + Decorator.__init__(self, + decorated_patch_applier, + decorated_config_replacer, + decorated_config_rollbacker, + scope=scope) self.config_lock = config_lock def apply(self, patch, sort=True): @@ -350,20 +468,20 @@ def execute_write_action(self, action, *args): class GenericUpdateFactory: - def __init__(self, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) + scope=self.scope) if config_format == ConfigFormat.CONFIGDB: pass @@ -371,62 +489,75 @@ def create_patch_applier(self, config_format, verbose, dry_run, ignore_non_yang_ patch_applier = SonicYangDecorator(decorated_patch_applier=patch_applier, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper, - namespace=self.namespace) + scope=self.scope) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, namespace=self.namespace) + patch_applier = ConfigLockDecorator(decorated_patch_applier=patch_applier, scope=self.scope) return patch_applier def create_config_replacer(self, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths): self.init_verbose_logging(verbose) - config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) + scope=self.scope) + if multi_asic.is_multi_asic(): + config_replacer = MultiASICConfigReplacer(patch_applier=patch_applier, + config_wrapper=config_wrapper) + else: + config_replacer = ConfigReplacer(patch_applier=patch_applier, + config_wrapper=config_wrapper, + scope=self.scope) - config_replacer = ConfigReplacer(patch_applier=patch_applier, config_wrapper=config_wrapper, namespace=self.namespace) if config_format == ConfigFormat.CONFIGDB: pass elif config_format == ConfigFormat.SONICYANG: config_replacer = SonicYangDecorator(decorated_config_replacer=config_replacer, patch_wrapper=patch_wrapper, config_wrapper=config_wrapper, - namespace=self.namespace) + scope=self.scope) else: raise ValueError(f"config-format '{config_format}' is not supported") if not dry_run: - config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, 
namespace=self.namespace) + config_replacer = ConfigLockDecorator(decorated_config_replacer=config_replacer, scope=self.scope) return config_replacer def create_config_rollbacker(self, verbose, dry_run=False, ignore_non_yang_tables=False, ignore_paths=[]): self.init_verbose_logging(verbose) - config_wrapper = self.get_config_wrapper(dry_run) change_applier = self.get_change_applier(dry_run, config_wrapper) - patch_wrapper = PatchWrapper(config_wrapper, namespace=self.namespace) + patch_wrapper = PatchWrapper(config_wrapper, scope=self.scope) patch_sorter = self.get_patch_sorter(ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper) patch_applier = PatchApplier(config_wrapper=config_wrapper, patchsorter=patch_sorter, patch_wrapper=patch_wrapper, changeapplier=change_applier, - namespace=self.namespace) - - config_replacer = ConfigReplacer(config_wrapper=config_wrapper, patch_applier=patch_applier, namespace=self.namespace) - config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, config_replacer=config_replacer, namespace=self.namespace) + scope=self.scope) + if multi_asic.is_multi_asic(): + config_replacer = MultiASICConfigReplacer(config_wrapper=config_wrapper, + patch_applier=patch_applier) + config_rollbacker = MultiASICConfigRollbacker(config_wrapper=config_wrapper, + config_replacer=config_replacer) + else: + config_replacer = ConfigReplacer(config_wrapper=config_wrapper, + patch_applier=patch_applier, + scope=self.scope) + config_rollbacker = FileSystemConfigRollbacker(config_wrapper=config_wrapper, + config_replacer=config_replacer, + scope=self.scope) if not dry_run: - config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, namespace=self.namespace) + config_rollbacker = ConfigLockDecorator(decorated_config_rollbacker=config_rollbacker, scope=self.scope) return config_rollbacker @@ -435,15 +566,15 @@ def init_verbose_logging(self, verbose): def get_config_wrapper(self, dry_run): if dry_run: - return DryRunConfigWrapper(namespace=self.namespace) + return DryRunConfigWrapper(scope=self.scope) else: - return ConfigWrapper(namespace=self.namespace) + return ConfigWrapper(scope=self.scope) def get_change_applier(self, dry_run, config_wrapper): if dry_run: return DryRunChangeApplier(config_wrapper) else: - return ChangeApplier(namespace=self.namespace) + return ChangeApplier(scope=self.scope) def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, patch_wrapper): if not ignore_non_yang_tables and not ignore_paths: @@ -462,9 +593,9 @@ def get_patch_sorter(self, ignore_non_yang_tables, ignore_paths, config_wrapper, class GenericUpdater: - def __init__(self, generic_update_factory=None, namespace=multi_asic.DEFAULT_NAMESPACE): + def __init__(self, generic_update_factory=None, scope=multi_asic.DEFAULT_NAMESPACE): self.generic_update_factory = \ - generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(namespace=namespace) + generic_update_factory if generic_update_factory is not None else GenericUpdateFactory(scope=scope) def apply_patch(self, patch, config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths, sort=True): patch_applier = self.generic_update_factory.create_patch_applier(config_format, verbose, dry_run, ignore_non_yang_tables, ignore_paths) diff --git a/generic_config_updater/gu_common.py b/generic_config_updater/gu_common.py index 974c540c07..7821557e71 100644 --- a/generic_config_updater/gu_common.py +++ b/generic_config_updater/gu_common.py @@ 
-16,6 +16,8 @@ SYSLOG_IDENTIFIER = "GenericConfigUpdater" SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) GCU_FIELD_OP_CONF_FILE = f"{SCRIPT_DIR}/gcu_field_operation_validators.conf.json" +HOST_NAMESPACE = "localhost" + class GenericConfigUpdaterError(Exception): pass @@ -51,30 +53,39 @@ def __eq__(self, other): return self.patch == other.patch return False + +def get_config_db_as_json(scope=None): + text = get_config_db_as_text(scope=scope) + config_db_json = json.loads(text) + config_db_json.pop("bgpraw", None) + return config_db_json + + +def get_config_db_as_text(scope=None): + if scope is not None and scope != multi_asic.DEFAULT_NAMESPACE: + cmd = ['sonic-cfggen', '-d', '--print-data', '-n', scope] + else: + cmd = ['sonic-cfggen', '-d', '--print-data'] + result = subprocess.Popen(cmd, shell=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + text, err = result.communicate() + return_code = result.returncode + if return_code: + raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {scope}," + f" Return code: {return_code}, Error: {err}") + return text + + class ConfigWrapper: - def __init__(self, yang_dir=YANG_DIR, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace + def __init__(self, yang_dir=YANG_DIR, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope self.yang_dir = YANG_DIR self.sonic_yang_with_loaded_models = None def get_config_db_as_json(self): - text = self._get_config_db_as_text() - config_db_json = json.loads(text) - config_db_json.pop("bgpraw", None) - return config_db_json + return get_config_db_as_json(self.scope) def _get_config_db_as_text(self): - if self.namespace is not None and self.namespace != multi_asic.DEFAULT_NAMESPACE: - cmd = ['sonic-cfggen', '-d', '--print-data', '-n', self.namespace] - else: - cmd = ['sonic-cfggen', '-d', '--print-data'] - - result = subprocess.Popen(cmd, shell=False, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - text, err = result.communicate() - return_code = result.returncode - if return_code: # non-zero means failure - raise GenericConfigUpdaterError(f"Failed to get running config for namespace: {self.namespace}, Return code: {return_code}, Error: {err}") - return text + return get_config_db_as_text(self.scope) def get_sonic_yang_as_json(self): config_db_json = self.get_config_db_as_json() @@ -300,8 +311,8 @@ def create_sonic_yang_with_loaded_models(self): class DryRunConfigWrapper(ConfigWrapper): # This class will simulate all read/write operations to ConfigDB on a virtual storage unit. 
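    # (Editor's note, hedged): "virtual storage unit" means an in-memory deep
    # copy of the config; reads are served from, and writes applied to,
    # self.imitated_config_db instead of the real ConfigDB, so a dry run can
    # report what would change without touching the device, e.g. a read path
    # shaped like:
    #
    #   def get_config_db_as_json(self):
    #       self._init_imitated_config_db_if_none()
    #       return self.imitated_config_db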
- def __init__(self, initial_imitated_config_db = None, namespace=multi_asic.DEFAULT_NAMESPACE): - super().__init__(namespace=namespace) + def __init__(self, initial_imitated_config_db=None, scope=multi_asic.DEFAULT_NAMESPACE): + super().__init__(scope=scope) self.logger = genericUpdaterLogging.get_logger(title="** DryRun", print_all_to_console=True) self.imitated_config_db = copy.deepcopy(initial_imitated_config_db) @@ -321,9 +332,9 @@ def _init_imitated_config_db_if_none(self): class PatchWrapper: - def __init__(self, config_wrapper=None, namespace=multi_asic.DEFAULT_NAMESPACE): - self.namespace = namespace - self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.namespace) + def __init__(self, config_wrapper=None, scope=multi_asic.DEFAULT_NAMESPACE): + self.scope = scope + self.config_wrapper = config_wrapper if config_wrapper is not None else ConfigWrapper(self.scope) self.path_addressing = PathAddressing(self.config_wrapper) def validate_config_db_patch_has_yang_models(self, patch): diff --git a/mmuconfig b/mmuconfig deleted file mode 100755 index f9dc178625..0000000000 --- a/mmuconfig +++ /dev/null @@ -1,199 +0,0 @@ -#!/usr/bin/python3 - -""" -mmuconfig is the utility to show and change mmu configuration - -usage: mmuconfig [-h] [-v] [-l] [-p PROFILE] [-a ALPHA] [-s staticth] [-vv] - -optional arguments: - -h --help show this help message and exit - -v --version show program's version number and exit - -vv --verbose verbose output - -l --list show mmu configuration - -p --profile specify buffer profile name - -a --alpha set n for dyanmic threshold alpha 2^(n) - -s --staticth set static threshold - -""" - -import os -import sys -import argparse -import tabulate -import traceback - -BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" -BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" -DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME = "DEFAULT_LOSSLESS_BUFFER_PARAMETER" - -DYNAMIC_THRESHOLD = "dynamic_th" -STATIC_THRESHOLD = "static_th" -BUFFER_PROFILE_FIELDS = { - "alpha": DYNAMIC_THRESHOLD, - "staticth": STATIC_THRESHOLD -} - -# mock the redis for unit test purposes # -try: - if os.environ["UTILITIES_UNIT_TESTING"] == "2": - modules_path = os.path.join(os.path.dirname(__file__), "..") - tests_path = os.path.join(modules_path, "tests") - sys.path.insert(0, modules_path) - sys.path.insert(0, tests_path) - import mock_tables.dbconnector - -except KeyError: - pass - -from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector - -BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" -BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" - -''' -DYNAMIC_THRESHOLD = "dynamic_th" -BUFFER_PROFILE_FIELDS = { - "alpha": DYNAMIC_THRESHOLD -} -''' - -class MmuConfig(object): - def __init__(self, verbose, config): - self.verbose = verbose - self.config = config - - # Set up db connections - if self.config: - self.db = ConfigDBConnector() - self.db.connect() - else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.STATE_DB, False) - - def get_table(self, tablename): - if self.config: - return self.db.get_table(tablename) - - entries = {} - keys = self.db.keys(self.db.STATE_DB, tablename + '*') - - if not keys: - return None - - for key in keys: - entries[key.split('|')[1]] = self.db.get_all(self.db.STATE_DB, key) - - return entries - - def list(self): - lossless_traffic_pattern = self.get_table(DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME) - if lossless_traffic_pattern: - for _, pattern in lossless_traffic_pattern.items(): - config = [] - - print("Lossless traffic pattern:") - 
for field, value in pattern.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - - buf_pools = self.get_table(BUFFER_POOL_TABLE_NAME) - if buf_pools: - for pool_name, pool_data in buf_pools.items(): - config = [] - - print("Pool: " + pool_name) - for field, value in pool_data.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - if self.verbose: - print("Total pools: %d\n\n" % len(buf_pools)) - else: - print("No buffer pool information available") - - buf_profs = self.get_table(BUFFER_PROFILE_TABLE_NAME) - if buf_profs: - for prof_name, prof_data in buf_profs.items(): - config = [] - - print("Profile: " + prof_name) - for field, value in prof_data.items(): - config.append([field, value]) - print(tabulate.tabulate(config) + "\n") - if self.verbose: - print("Total profiles: %d" % len(buf_profs)) - else: - print("No buffer profile information available") - - def set(self, profile, field_alias, value): - if os.geteuid() != 0: - sys.exit("Root privileges required for this operation") - - field = BUFFER_PROFILE_FIELDS[field_alias] - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - v = int(value) - if field == DYNAMIC_THRESHOLD: - if v < -8 or v > 8: - sys.exit("Invalid alpha value: 2^(%s)" % (value)) - - if profile in buf_profs and DYNAMIC_THRESHOLD not in buf_profs[profile]: - sys.exit("%s not using dynamic thresholding" % (profile)) - elif field == STATIC_THRESHOLD: - if v < 0: - sys.exit("Invalid static threshold value: (%s)" % (value)) - - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - if profile in buf_profs and STATIC_THRESHOLD not in buf_profs[profile]: - sys.exit("%s not using static threshold" % (profile)) - else: - sys.exit("Set field %s not supported" % (field)) - - if self.verbose: - print("Setting %s %s value to %s" % (profile, field, value)) - self.db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) - - -def main(config): - if config: - parser = argparse.ArgumentParser(description='Show and change: mmu configuration', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show mmu configuration') - parser.add_argument('-p', '--profile', type=str, help='specify buffer profile name', default=None) - parser.add_argument('-a', '--alpha', type=str, help='set n for dyanmic threshold alpha 2^(n)', default=None) - parser.add_argument('-s', '--staticth', type=str, help='set n for static threshold', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - else: - parser = argparse.ArgumentParser(description='Show buffer state', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show buffer state') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - - parser.add_argument('-vv', '--verbose', action='store_true', help='verbose output', default=False) - - args = parser.parse_args() - - try: - mmu_cfg = MmuConfig(args.verbose, config) - if args.list: - mmu_cfg.list() - elif config and args.profile: - import pdb; pdb.set_trace() - if args.alpha: - mmu_cfg.set(args.profile, "alpha", args.alpha) - elif args.staticth: - mmu_cfg.set(args.profile, "staticth", args.staticth) - else: - parser.print_help() - sys.exit(1) - - except Exception as e: - print("Exception caught: ", str(e), file=sys.stderr) - traceback.print_exc() - sys.exit(1) - -if __name__ == "__main__": - if sys.argv[0].split('/')[-1] == 
"mmuconfig": - main(True) - else: - main(False) diff --git a/pfc/main.py b/pfc/main.py index b31d3c755e..f894a5d7c5 100644 --- a/pfc/main.py +++ b/pfc/main.py @@ -1,160 +1,197 @@ #!/usr/bin/env python3 - +import os import click -from swsscommon.swsscommon import ConfigDBConnector +import json +from sonic_py_common import multi_asic from tabulate import tabulate from natsort import natsorted +from utilities_common import multi_asic as multi_asic_util +# Constants ALL_PRIORITIES = [str(x) for x in range(8)] PRIORITY_STATUS = ['on', 'off'] +PORT_TABLE_NAME = "PORT" +PORT_QOS_MAP_TABLE_NAME = "PORT_QOS_MAP" + + +class Pfc(object): + def __init__(self, namespace=None): + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + + # For unit testing + self.updated_port_tables = {} + self.test_filename = '/tmp/pfc_testdata.json' + + def dump_config_to_json(self, table_name, namespace): + """ + This function dumps the current config in a JSON file for unit testing. + """ + # Only dump files in unit testing mode + if os.getenv("UTILITIES_UNIT_TESTING") != "2": + return + + if namespace not in self.updated_port_tables.keys(): + self.updated_port_tables[namespace] = {} + + self.updated_port_tables[namespace][table_name] = self.config_db.get_table(table_name) + with open(self.test_filename, "w") as fd: + json.dump(self.updated_port_tables, fd) + + @multi_asic_util.run_on_multi_asic + def configPfcAsym(self, interface, pfc_asym): + """ + PFC handler to configure asymmetric PFC. + """ + self.config_db.mod_entry(PORT_TABLE_NAME, interface, {'pfc_asym': pfc_asym}) + self.dump_config_to_json(PORT_TABLE_NAME, self.multi_asic.current_namespace) + + @multi_asic_util.run_on_multi_asic + def showPfcAsym(self, interface): + """ + PFC handler to display asymmetric PFC information. 
+ """ + namespace_str = f"Namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + header = ('Interface', 'Asymmetric') + + if interface: + db_keys = self.config_db.keys(self.config_db.CONFIG_DB, 'PORT|{0}'.format(interface)) + else: + db_keys = self.config_db.keys(self.config_db.CONFIG_DB, 'PORT|*') + + table = [] + + for i in db_keys or [None]: + key = None + if i: + key = i.split('|')[-1] + + if key and key.startswith('Ethernet'): + entry = self.config_db.get_entry(PORT_TABLE_NAME, key) + table.append([key, entry.get('pfc_asym', 'N/A')]) + + sorted_table = natsorted(table) + + click.echo(namespace_str) + click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) + click.echo() + + @multi_asic_util.run_on_multi_asic + def configPfcPrio(self, status, interface, priority): + if interface not in self.config_db.get_keys(PORT_QOS_MAP_TABLE_NAME): + click.echo('Cannot find interface {0}'.format(interface)) + return + + """Current lossless priorities on the interface""" + entry = self.config_db.get_entry('PORT_QOS_MAP', interface) + enable_prio = entry.get('pfc_enable').split(',') + + """Avoid '' in enable_prio""" + enable_prio = [x.strip() for x in enable_prio if x.strip()] + + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + if status == 'on' and priority in enable_prio: + click.echo('Priority {0} has already been enabled on {1}{2}'.format(priority, interface, namespace_str)) + return + + if status == 'off' and priority not in enable_prio: + click.echo('Priority {0} is not enabled on {1}{2}'.format(priority, interface, namespace_str)) + return + + if status == 'on': + enable_prio.append(priority) + + else: + enable_prio.remove(priority) + + enable_prio.sort() + self.config_db.mod_entry(PORT_QOS_MAP_TABLE_NAME, interface, {'pfc_enable': ','.join(enable_prio)}) + self.dump_config_to_json(PORT_QOS_MAP_TABLE_NAME, self.multi_asic.current_namespace) + + @multi_asic_util.run_on_multi_asic + def showPfcPrio(self, interface): + """ + PFC handler to display PFC enabled priority information. + """ + header = ('Interface', 'Lossless priorities') + table = [] + + """Get all the interfaces with QoS map information""" + intfs = self.config_db.get_keys('PORT_QOS_MAP') + + """The user specifies an interface but we cannot find it""" + namespace_str = f"Namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + if interface and interface not in intfs: + if multi_asic.is_multi_asic(): + click.echo('Cannot find interface {0} for {1}'.format(interface, namespace_str)) + else: + click.echo('Cannot find interface {0}'.format(interface)) + return + + if interface: + intfs = [interface] + + for intf in intfs: + entry = self.config_db.get_entry('PORT_QOS_MAP', intf) + table.append([intf, entry.get('pfc_enable', 'N/A')]) + + sorted_table = natsorted(table) + click.echo(namespace_str) + click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) + click.echo() + -def configPfcAsym(interface, pfc_asym): - """ - PFC handler to configure asymmentric PFC. - """ - configdb = ConfigDBConnector() - configdb.connect() - - configdb.mod_entry("PORT", interface, {'pfc_asym': pfc_asym}) - - -def showPfcAsym(interface): - """ - PFC handler to display asymmetric PFC information. 
- """ - header = ('Interface', 'Asymmetric') - - configdb = ConfigDBConnector() - configdb.connect() - - if interface: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|{0}'.format(interface)) - else: - db_keys = configdb.keys(configdb.CONFIG_DB, 'PORT|*') - - table = [] - - for i in db_keys or [None]: - key = None - if i: - key = i.split('|')[-1] - - if key and key.startswith('Ethernet'): - entry = configdb.get_entry('PORT', key) - table.append([key, entry.get('pfc_asym', 'N/A')]) - - sorted_table = natsorted(table) - - click.echo() - click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) - click.echo() - -def configPfcPrio(status, interface, priority): - configdb = ConfigDBConnector() - configdb.connect() - - if interface not in configdb.get_keys('PORT_QOS_MAP'): - click.echo('Cannot find interface {0}'.format(interface)) - return - - """Current lossless priorities on the interface""" - entry = configdb.get_entry('PORT_QOS_MAP', interface) - enable_prio = entry.get('pfc_enable').split(',') - - """Avoid '' in enable_prio""" - enable_prio = [x.strip() for x in enable_prio if x.strip()] - - if status == 'on' and priority in enable_prio: - click.echo('Priority {0} has already been enabled on {1}'.format(priority, interface)) - return - - if status == 'off' and priority not in enable_prio: - click.echo('Priority {0} is not enabled on {1}'.format(priority, interface)) - return - - if status == 'on': - enable_prio.append(priority) - - else: - enable_prio.remove(priority) - - enable_prio.sort() - configdb.mod_entry("PORT_QOS_MAP", interface, {'pfc_enable': ','.join(enable_prio)}) - - """Show the latest PFC configuration""" - showPfcPrio(interface) - -def showPfcPrio(interface): - """ - PFC handler to display PFC enabled priority information. 
- """ - header = ('Interface', 'Lossless priorities') - table = [] - - configdb = ConfigDBConnector() - configdb.connect() - - """Get all the interfaces with QoS map information""" - intfs = configdb.get_keys('PORT_QOS_MAP') - - """The user specifies an interface but we cannot find it""" - if interface and interface not in intfs: - click.echo('Cannot find interface {0}'.format(interface)) - return - - if interface: - intfs = [interface] - - for intf in intfs: - entry = configdb.get_entry('PORT_QOS_MAP', intf) - table.append([intf, entry.get('pfc_enable', 'N/A')]) - - sorted_table = natsorted(table) - click.echo() - click.echo(tabulate(sorted_table, headers=header, tablefmt="simple", missingval="")) - click.echo() - @click.group() def cli(): """PFC Command Line""" - pass + @cli.group() def config(): """Config PFC""" pass + @cli.group() def show(): """Show PFC information""" pass + @click.command() @click.argument('status', type=click.Choice(PRIORITY_STATUS)) @click.argument('interface', type=click.STRING) -def configAsym(status, interface): +@multi_asic_util.multi_asic_click_option_namespace +def configAsym(status, interface, namespace): """Configure asymmetric PFC on a given port.""" - configPfcAsym(interface, status) + Pfc(namespace).configPfcAsym(interface, status) + @click.command() @click.argument('status', type=click.Choice(PRIORITY_STATUS)) @click.argument('interface', type=click.STRING) @click.argument('priority', type=click.Choice(ALL_PRIORITIES)) -def configPrio(status, interface, priority): +@multi_asic_util.multi_asic_click_option_namespace +def configPrio(status, interface, priority, namespace): """Configure PFC on a given priority.""" - configPfcPrio(status, interface, priority) - + Pfc(namespace).configPfcPrio(status, interface, priority) + + @click.command() @click.argument('interface', type=click.STRING, required=False) -def showAsym(interface): +@multi_asic_util.multi_asic_click_option_namespace +def showAsym(interface, namespace): """Show asymmetric PFC information""" - showPfcAsym(interface) + Pfc(namespace).showPfcAsym(interface) + @click.command() @click.argument('interface', type=click.STRING, required=False) -def showPrio(interface): +@multi_asic_util.multi_asic_click_option_namespace +def showPrio(interface, namespace): """Show PFC priority information""" - showPfcPrio(interface) + Pfc(namespace).showPfcPrio(interface) + config.add_command(configAsym, "asymmetric") config.add_command(configPrio, "priority") diff --git a/rcli/linecard.py b/rcli/linecard.py index 73c13a73ef..f893428a42 100644 --- a/rcli/linecard.py +++ b/rcli/linecard.py @@ -8,7 +8,7 @@ import termios import tty -from .utils import get_linecard_ip +from .utils import get_linecard_ip, get_linecard_hostname_from_module_name, get_linecard_module_name_from_hostname from paramiko.py3compat import u from paramiko import Channel @@ -31,7 +31,17 @@ def __init__(self, linecard_name, username, password): if not self.ip: sys.exit(1) - self.linecard_name = linecard_name + # if the user passes linecard hostname, then try to get the module name for that linecard + module_name = get_linecard_module_name_from_hostname(linecard_name) + if module_name is None: + # if the module name cannot be found from host, assume the user has passed module name + self.module_name = linecard_name + self.hostname = get_linecard_hostname_from_module_name(linecard_name) + else: + # the user has passed linecard hostname + self.hostname = linecard_name + self.module_name = module_name + self.username = username self.password = password 
diff --git a/rcli/rexec.py b/rcli/rexec.py index 8831d5585f..21929c8012 100644 --- a/rcli/rexec.py +++ b/rcli/rexec.py @@ -30,20 +30,22 @@ def cli(linecard_names, command, username): if list(linecard_names) == ["all"]: # Get all linecard names using autocompletion helper - linecard_names = rcli_utils.get_all_linecards(None, None, "") + module_names = sorted(rcli_utils.get_all_linecards(None, None, "")) + else: + module_names = linecard_names linecards = [] # Iterate through each linecard, check if the login was successful - for linecard_name in linecard_names: - linecard = Linecard(linecard_name, username, password) + for module_name in module_names: + linecard = Linecard(module_name, username, password) if not linecard.connection: - click.echo(f"Failed to connect to {linecard_name} with username {username}") + click.echo(f"Failed to connect to {module_name} with username {username}") sys.exit(1) linecards.append(linecard) for linecard in linecards: if linecard.connection: - click.echo(f"======== {linecard.linecard_name} output: ========") + click.echo(f"======== {linecard.module_name}|{linecard.hostname} output: ========") click.echo(linecard.execute_cmd(command)) diff --git a/rcli/rshell.py b/rcli/rshell.py index bac02d42d8..b22187a0f3 100644 --- a/rcli/rshell.py +++ b/rcli/rshell.py @@ -28,14 +28,14 @@ def cli(linecard_name, username): try: linecard = Linecard(linecard_name, username, password) if linecard.connection: - click.echo(f"Connecting to {linecard.linecard_name}") + click.echo(f"Connecting to {linecard.module_name}") # If connection was created, connection exists. # Otherwise, user will see an error message. linecard.start_shell() click.echo("Connection Closed") except paramiko.ssh_exception.AuthenticationException: click.echo( - f"Login failed on '{linecard.linecard_name}' with username '{linecard.username}'") + f"Login failed on '{linecard.module_name}' with username '{linecard.username}'") if __name__=="__main__": diff --git a/rcli/utils.py b/rcli/utils.py index 510e360581..7563eafdcd 100644 --- a/rcli/utils.py +++ b/rcli/utils.py @@ -1,7 +1,7 @@ import click -from getpass import getpass +import getpass import os -import sys +import signal from swsscommon.swsscommon import SonicV2Connector @@ -19,6 +19,8 @@ CHASSIS_MODULE_HOSTNAME_TABLE = 'CHASSIS_MODULE_HOSTNAME_TABLE' CHASSIS_MODULE_HOSTNAME = 'module_hostname' +GET_PASSWORD_TIMEOUT = 10 + def connect_to_chassis_state_db(): chassis_state_db = SonicV2Connector(host="127.0.0.1") chassis_state_db.connect(chassis_state_db.CHASSIS_STATE_DB) @@ -43,6 +45,20 @@ def get_linecard_module_name_from_hostname(linecard_name: str): return None + +def get_linecard_hostname_from_module_name(linecard_name: str): + + chassis_state_db = connect_to_chassis_state_db() + keys = chassis_state_db.keys(chassis_state_db.CHASSIS_STATE_DB, '{}|{}'.format(CHASSIS_MODULE_HOSTNAME_TABLE, '*')) + for key in keys: + module_name = key.split('|')[1] + if module_name.replace('-', '').lower() == linecard_name.replace('-', '').lower(): + hostname = chassis_state_db.get(chassis_state_db.CHASSIS_STATE_DB, key, CHASSIS_MODULE_HOSTNAME) + return hostname + + return None + + def get_linecard_ip(linecard_name: str): """ Given a linecard name, lookup its IP address in the midplane table @@ -69,6 +85,7 @@ def get_linecard_ip(linecard_name: str): return None return module_ip + def get_module_ip_and_access_from_state_db(module_name): state_db = connect_state_db() data_dict = state_db.get_all( @@ -136,8 +153,17 @@ def get_password(username=None): if username is None: 
        username = os.getlogin()
 
-    return getpass(
+    def get_password_timeout(*args):
+        print("\nAborted! Timeout when waiting for password input.")
+        exit(1)
+
+    signal.signal(signal.SIGALRM, get_password_timeout)
+    signal.alarm(GET_PASSWORD_TIMEOUT)  # Abort the prompt after GET_PASSWORD_TIMEOUT (10) seconds
+    password = getpass.getpass(
         "Password for username '{}': ".format(username),
         # Pass in click stdout stream - this is similar to using click.echo
         stream=click.get_text_stream('stdout')
     )
+    signal.alarm(0)  # Cancel the alarm
+
+    return password
diff --git a/scripts/config_validator.py b/scripts/config_validator.py
new file mode 100755
index 0000000000..ee5789e95a
--- /dev/null
+++ b/scripts/config_validator.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python3
+import json
+import argparse
+import sonic_yang
+
+from sonic_py_common import logger
+
+YANG_MODELS_DIR = "/usr/local/yang-models"
+SYSLOG_IDENTIFIER = 'config_validator'
+
+# Global logger instance
+log = logger.Logger(SYSLOG_IDENTIFIER)
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument('-c',
+                        dest='config',
+                        metavar='config file',
+                        type=str,
+                        required=True,
+                        help='the config file to be validated',
+                        default=None)
+
+    args = parser.parse_args()
+    config_file = args.config
+    with open(config_file) as fp:
+        config = json.load(fp)
+    # Run yang validation
+    yang_parser = sonic_yang.SonicYang(YANG_MODELS_DIR)
+    yang_parser.loadYangModel()
+    try:
+        yang_parser.loadData(configdbJson=config)
+        yang_parser.validate_data_tree()
+    except sonic_yang.SonicYangException as e:
+        log.log_error("Yang validation failed: " + str(e))
+        raise
+    if len(yang_parser.tablesWithOutYang):
+        log.log_error("Tables without yang models: " + str(yang_parser.tablesWithOutYang))
+        raise Exception("Tables without yang models: " + str(yang_parser.tablesWithOutYang))
+
+
+if __name__ == "__main__":
+    main()
diff --git a/scripts/db_migrator.py b/scripts/db_migrator.py
index afd5e638de..3dbbb0b4f9 100755
--- a/scripts/db_migrator.py
+++ b/scripts/db_migrator.py
@@ -6,6 +6,7 @@
 import sys
 import traceback
 import re
+import subprocess
 
 from sonic_py_common import device_info, logger
 from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector, SonicDBConfig
@@ -58,7 +59,7 @@ def __init__(self, namespace, socket=None):
               none-zero values.
         build: sequentially increase within a minor version domain.
         """
-        self.CURRENT_VERSION = 'version_202405_01'
+        self.CURRENT_VERSION = 'version_202411_01'
 
         self.TABLE_NAME      = 'VERSIONS'
         self.TABLE_KEY       = 'DATABASE'
@@ -1228,10 +1229,18 @@ def version_202311_03(self):
 
     def version_202405_01(self):
         """
-        Version 202405_01, this version should be the final version for
-        master branch until 202405 branch is created.
+        Version 202405_01.
         """
         log.log_info('Handling version_202405_01')
+        self.set_version('version_202411_01')
+        return 'version_202411_01'
+
+    def version_202411_01(self):
+        """
+        Version 202411_01, this version should be the final version for
+        master branch until 202411 branch is created.
+ """ + log.log_info('Handling version_202411_01') return None def get_version(self): @@ -1296,6 +1305,34 @@ def migrate(self): version = next_version # Perform common migration ops self.common_migration_ops() + # Perform yang validation + self.validate() + + def validate(self): + config = self.configDB.get_config() + # Fix table key in tuple + for table_name, table in config.items(): + new_table = {} + hit = False + for table_key, table_val in table.items(): + if isinstance(table_key, tuple): + new_key = "|".join(table_key) + new_table[new_key] = table_val + hit = True + else: + new_table[table_key] = table_val + if hit: + config[table_name] = new_table + config_file = "/tmp/validate.json" + with open(config_file, 'w') as fp: + json.dump(config, fp) + process = subprocess.Popen(["config_validator.py", "-c", config_file]) + # Check validation result for unit test + # Check validation result for end to end test + mark_file = "/etc/sonic/mgmt_test_mark" + if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2" or os.path.exists(mark_file): + ret = process.wait() + assert ret == 0, "Yang validation failed" def main(): try: diff --git a/scripts/debug_voq_chassis_packet_drops.sh b/scripts/debug_voq_chassis_packet_drops.sh new file mode 100755 index 0000000000..53e21c6f09 --- /dev/null +++ b/scripts/debug_voq_chassis_packet_drops.sh @@ -0,0 +1,371 @@ +#!/usr/bin/bash +# defaults for env vars +sleep_period=${sleep_period:-0} +maxiter=${maxiter:-25} # all but 4 iterations will be polling Egress drops +log=${log:-/dev/stdout} +time_format="%D %T.%6N" +delim="END" +# options +ing_check_mc=${ing_check_mc:-1} +ing_check_macsec=${ing_check_macsec:-1} +egr_check_mc=${egr_check_mc:-1} +egr_check_pmf_hit_bits=${egr_check_pmf_hit_bits:-1} +egr_diag_counter_g=${egr_diag_counter_g:-1} + +declare -a cores=("0" "1") +declare -a asics=("0" "1") +queue_pair_mask_a=(0 0 0 0) +dsp_map_a=(0 0 0 0) + +timestamp(){ + curr_time=$(date +"$time_format") + echo "$curr_time $logmsg" >> $log +} + +print_pqp_reasons() { + disc_reasons=$((16#${disc_reasons})) + if [ $disc_reasons -eq 0 ]; then echo "none" >> $log ; fi + if [ $(($disc_reasons & 1)) -ne 0 ] ; then echo "0- Total PDs threshold violated" >> $log ; fi + if [ $(($disc_reasons & 2)) -ne 0 ] ; then echo "1- Total PDs UC pool size threshold violated" >> $log ; fi + if [ $(($disc_reasons & 4)) -ne 0 ] ; then echo "2- Per port UC PDs threshold" >> $log ; fi + if [ $(($disc_reasons & 8)) -ne 0 ] ; then echo "3- Per queue UC PDs thresholds">> $log ; fi + if [ $(($disc_reasons & 16)) -ne 0 ] ; then echo "4- Per port UC DBs threshold">> $log ; fi + if [ $(($disc_reasons & 32)) -ne 0 ] ; then echo "5- Per queue UC DBs threshold">> $log ; fi + if [ $(($disc_reasons & 64)) -ne 0 ] ; then echo "6- Per queue disable bit">> $log ; fi + if [ $(($disc_reasons & 128)) -ne 0 ] ; then echo "7- Undefined">> $log ; fi + if [ $(($disc_reasons & 256)) -ne 0 ] ; then echo "8- Total PDs MC pool size threshold">> $log ; fi + if [ $(($disc_reasons & 512)) -ne 0 ] ; then echo "9- Per interface PDs threhold">> $log; fi + if [ $(($disc_reasons & 1024)) -ne 0 ] ; then echo "10- MC SP threshold">> $log ; fi + if [ $(($disc_reasons & 2048)) -ne 0 ] ; then echo "11- per MC-TC threshold">> $log ; fi + if [ $(($disc_reasons & 4096)) -ne 0 ] ; then echo "12- MC PDs per port threshold">> $log ; fi + if [ $(($disc_reasons & 8192)) -ne 0 ] ; then echo "13- MC PDs per queue threshold">> $log ; fi + if [ $(($disc_reasons & 16384)) -ne 0 ] ; then echo "14- MC per port size (bytes) threshold">> $log ; fi + 
if [ $(($disc_reasons & 32768)) -ne 0 ] ; then echo "15- MC per queue size(bytes) thresholds">> $log ; fi +} +print_rqp_reasons(){ + disc_reasons=$((16#${disc_reasons})) + if [ $disc_reasons -eq 0 ]; then echo "none" >> $log ; fi + if [ $(($disc_reasons & 1)) -ne 0 ] ; then echo "0- Total DBs threshold violated" >> $log ; fi + if [ $(($disc_reasons & 2)) -ne 0 ] ; then echo "1- Total UC DBs pool size threshold violated" >> $log ; fi + if [ $(($disc_reasons & 4)) -ne 0 ] ; then echo "2- UC packet discarded in EMR because UC FIFO is full" >> $log ; fi + if [ $(($disc_reasons & 8)) -ne 0 ] ; then echo "3- MC HP packetd discarded in EMR because MC FIFO is full">> $log ; fi + if [ $(($disc_reasons & 16)) -ne 0 ] ; then echo "4- MC LP packetd discarded in EMR because MC FIFO is full">> $log ; fi + if [ $(($disc_reasons & 32)) -ne 0 ] ; then echo "5- Total MC DBs pool size threshold violated">> $log ; fi + if [ $(($disc_reasons & 64)) -ne 0 ] ; then echo "6- Packet-DP is not eligible to take from shared DBs resources">> $log ; fi + if [ $(($disc_reasons & 128)) -ne 0 ] ; then echo "7- USP DBs threshold violated">> $log ; fi + if [ $(($disc_reasons & 256)) -ne 0 ] ; then echo "8- Discrete-Partitioning method: MC-TC DBs threshold violated">> $log ; fi + if [ $(($disc_reasons & 512)) -ne 0 ] ; then echo "9- Strict-priority method: MC-TC mapped to SP0 DBs threshold violated">> $log; fi + if [ $(($disc_reasons & 1024)) -ne 0 ] ; then echo "10- Strict-Priority method: MC-TC mapped to SP1 DBs threshold violated">> $log ; fi +} + +# whenever port_disabled mask change, print the up ports +# (according to the queue-pair mask and DSP port mapping, which is what matters ) + +check_new_port_state() { + last_queue_pair_mask=${queue_pair_mask_a[$index]} + queue_pair_mask=$(bcmcmd -n $asic "g hex ECGM_CGM_QUEUE_PAIR_DISABLED.ECGM${core}" | head -n +2 | tail -1) + if [ "$queue_pair_mask" == "$last_queue_pair_mask" ] ; then + return + fi + queue_pair_mask_a[$index]=$queue_pair_mask + logmsg="EGRESS_QPAIR asic $asic core $core new disabled mask: $queue_pair_mask" + timestamp + + start_dsp=$core + let amt=255-$core + dsp_map_a[$index]=$(bcmcmd -n $asic "d SCH_DSP_2_PORT_MAP_DSPP.SCH${core} $start_dsp $amt") + + hr_num=0 + for pos in {-3..-129..-2}; do # todo + byte=${queue_pair_mask:pos:2} + if [ $hr_num -le 8 ] ; then + hr_num_hex="HR_NUM=${hr_num}" + else + hr_num_hex=$(printf "HR_NUM=0x%x" $hr_num) + fi + hr_num=$(( hr_num + 8)) + entry=$(echo ${dsp_map_a[$index]} | sed -e "s/\r/\r\n/g" | grep -m 1 "$hr_num_hex") + found=$? 
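+        # grep exits 1 when the DSP map has no entry for this HR number; skip those queue pairs.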
+        if [ $found -eq 1 ] ; then
+            continue
+        fi
+        dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]')
+        if [ "$byte" = "ff" ]; then
+            printf "DOWN %3d ${entry}\n" $dsp_port >> $log
+        else
+            printf "UP %3d ${entry}\n" $dsp_port >> $log
+        fi
+    done
+    echo >> $log
+}
+
+decode_last_rqp_drop() {
+    rqp_disc=$(bcmcmd -n $asic "g hex ECGM_RQP_DISCARD_REASONS.ECGM${core}" | head -n -1 | tail -1)
+    prefix=${rqp_disc: 0: 2}
+    if [ "$prefix" != "0x" ]; then
+        return; # empty (0) or a failed read
+    fi
+    logmsg="EGRESS_DROP RQP_DISCARD_REASONS asic $asic core $core index $index: $rqp_disc"
+    timestamp
+    disc_reasons=${rqp_disc: -4: 3}
+    print_rqp_reasons
+}
+
+decode_last_pqp_drop() {
+    pqp_disc=$(bcmcmd -n $asic "g hex ECGM_PQP_DISCARD_REASONS.ECGM${core}" | head -n -1 | tail -1 )
+    prefix=${pqp_disc: 0: 2}
+    if [ "$prefix" != "0x" ]; then
+        return; # empty (0) or a failed read
+    fi
+    logmsg="EGRESS_DROP PQP_DISCARD_REASONS asic $asic core $core: $pqp_disc"
+    timestamp
+    check_new_port_state # in case the DSP map has changed
+    disc_reasons=${pqp_disc: -5: 4}
+    last_reason=${pqp_disc: -9: 4}
+    drop_cmd=${pqp_disc: -19: 10}
+    queue=${drop_cmd: -8: 3}
+    queue=$((16#${queue}))
+    queue=$(($queue / 4 ))
+    queue=$(($queue & 248))
+    hr_num_hex=$(printf "%02x" $queue)
+    entry=$(echo ${dsp_map_a[$index]} | sed -e "s/\r/\r\n/g" | grep -m 1 "$hr_num_hex")
+    found=$?
+    if [ $found -eq 1 ] ; then
+        echo "drop_reason 0x${disc_reasons} queue 0x${hr_num_hex} dsp_port not_found" >> $log
+    else
+        dsp_port=$(echo $entry |grep -o "\[.*\]"| tr -dc '[:alnum:]')
+        echo "drop_reason 0x${disc_reasons} queue 0x${hr_num_hex} dsp port $dsp_port" >> $log
+    fi
+    echo "pqp discard reasons (cumulative since last read):" >> $log
+    print_pqp_reasons
+    echo "pqp last packet discard reasons:" >> $log
+    disc_reasons=$last_reason
+    print_pqp_reasons
+    echo >> $log
+}
+
+
+clear_tcam_hit_bits() {
+    cint_filename="/tmp/hitbits"
+    cint=';print bcm_field_entry_hit_flush(0, BCM_FIELD_ENTRY_HIT_FLUSH_ALL, 0); exit;'
+    bcmcmd -n $asic "log off; rm $cint_filename;log file=$cint_filename quiet=yes; echo '$cint';log off;cint $cint_filename" >> /dev/null
+}
+
+dump_tcam_drop_action_hits() {
+    echo "SAI_FG_TRAP hits:" >> $log
+    bcmcmd -n $asic "dbal table dump Table=SAI_FG_TRAP" | grep "CORE" | awk -F'|' '{print $2,$34}' >> $log
+    echo "EPMF_Cascade hits:" >> $log
+    # entries 51,52,53,54,55,56 have drop action
+    bcmcmd -n $asic "dbal table dump Table=EPMF_Cascade" | grep "CORE" | awk -F'|' '{print $2,$10}'>> $log
+    clear_tcam_hit_bits
+}
+
+check_egress_drops() {
+    hit=0
+    pqp_uc_discard=$(bcmcmd -n $asic "g hex PQP_PQP_DISCARD_UNICAST_PACKET_COUNTER.PQP${core}"| head -n -1 | tail -n +2 | tr -dc '[:alnum:]')
+    erpp_discard=$(bcmcmd -n $asic "g hex PQP_ERPP_DISCARDED_PACKET_COUNTER.PQP${core}"| head -n -1 | tail -n +2 | tr -dc '[:alnum:]')
+    rqp_debug_counters=$(bcmcmd -n $asic "g RQP_PRP_DEBUG_COUNTERS.RQP${core}" | head -n -1 | tail -n +2 | sed -e 's/=/ /g'| sed -e 's/,/ /g'|tr -dc "[:alnum:] =_" )
+
+    pqp_uc_discard=$(printf "%d" $pqp_uc_discard)
+    erpp_discard=$(printf "%d" $erpp_discard)
+
+    if [ $pqp_uc_discard -ne 0 ]; then
+        logmsg="EGRESS_DROP UC_DROP on ASIC $asic CORE $core : PQP_DISCARD_UNICAST_PACKET_COUNTER = $pqp_uc_discard"
+        timestamp
+        hit=1;
+    fi
+    if [ $erpp_discard -ne 0 ]; then
+        logmsg="EGRESS_DROP ERPP_DROP on ASIC $asic CORE $core : PQP_ERPP_DISCARDED_PACKET_COUNTER = $erpp_discard"
+        timestamp
+        hit=1;
+    fi
+
+    sop_discard_uc=$(echo $rqp_debug_counters |
awk {'print $4'}) + prp_discard_uc=$(echo $rqp_debug_counters | awk {'print $14'}) + dbf_err_cnt=$(echo $rqp_debug_counters | awk {'print $18'}) + + sop_discard_uc=$(printf "%d" $sop_discard_uc) + prp_discard_uc=$(printf "%d" $prp_discard_uc) + dbf_err_cnt=$(printf "%d" $dbf_err_cnt) + + if [ $sop_discard_uc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_SOP_UC_DISCARD on ASIC $asic CORE $core : $sop_discard_uc" + timestamp + hit=1; + fi + if [ $prp_discard_uc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_PRP_UC_DISCARD on ASIC $asic CORE $core : $prp_discard_uc" + timestamp + hit=1; + fi + if [ $dbf_err_cnt -ne 0 ]; then + logmsg="EGRESS_DROP RQP_DBF_ERR on ASIC $asic CORE $core : $dbf_err_cnt" + timestamp + hit=1; + fi + if [ $egr_check_mc -ne 0 ]; then + sop_discard_mc=$(echo $rqp_debug_counters | awk {'print $6'}) + prp_discard_mc=$(echo $rqp_debug_counters | awk {'print $16'}) + sop_discard_mc=$(printf "%d" $sop_discard_mc) + prp_discard_mc=$(printf "%d" $prp_discard_mc) + + pqp_mc_discard=$(bcmcmd -n $asic "g hex PQP_PQP_DISCARD_MULTICAST_PACKET_COUNTER.PQP${core}" | head -n -1 | tail -n +2 | tr -dc '[:alnum:]') + pqp_mc_discard=$(printf "%d" $pqp_mc_discard) + if [ $pqp_mc_discard -ne 0 ]; then + logmsg="EGRESS_DROP MC_DROP ASIC $asic CORE $core : PQP_DISCARD_MULTICAST_PACKET_COUNTER = $pqp_mc_discard" + timestamp + hit=1; + fi + if [ $sop_discard_mc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_SOP_MC_DISCARD on ASIC $asic CORE $core : $sop_discard_mc" + timestamp + hit=1; + fi + if [ $prp_discard_mc -ne 0 ]; then + logmsg="EGRESS_DROP RQP_PRP_MC_DISCARD on ASIC $asic CORE $core : $prp_discard_mc" + timestamp + hit=1; + fi + fi + if [ $hit -eq 0 ] ; then + return + fi + + decode_last_pqp_drop + # bcmcmd -n $asic "g chg ECGM_RQP_DISCARD_REASONS.ECGM${core}" | grep "=" >> $log + decode_last_rqp_drop + bcmcmd -n $asic "g chg PQP_INTERRUPT_REGISTER.PQP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg RQP_INTERRUPT_REGISTER.RQP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s PQP_INTERRUPT_REGISTER.PQP${core} -1" > /dev/null + bcmcmd -n $asic "s RQP_INTERRUPT_REGISTER.RQP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg RQP_PACKET_REASSEMBLY_INTERRUPT_REGISTER.RQP${core}"| tail -2 | head -n -1 >> $log + bcmcmd -n $asic "s RQP_PACKET_REASSEMBLY_INTERRUPT_REGISTER.RQP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg FDR_INTERRUPT_REGISTER.FDR${core}"| head -n -1 | tail -n +2 >> $log + # FDA0 block is shared by both cores + bcmcmd -n $asic "g chg FDA_INTERRUPT_REGISTER.FDA0"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s FDR_INTERRUPT_REGISTER.FDR${core} -1" > /dev/null + bcmcmd -n $asic "s FDA_INTERRUPT_REGISTER.FDA0 -1" > /dev/null + + bcmcmd -n $asic "g chg ERPP_INTERRUPT_REGISTER.ERPP${core}"| head -n -1 | tail -n +2>> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARD_INTERRUPT_REGISTER.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARD_INTERRUPT_REGISTER_2.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s ERPP_ERPP_DISCARD_INTERRUPT_REGISTER.ERPP${core} -1" > /dev/null + bcmcmd -n $asic "s ERPP_ERPP_DISCARD_INTERRUPT_REGISTER_2.ERPP${core} -1" > /dev/null + bcmcmd -n $asic "s ERPP_INTERRUPT_REGISTER.ERPP${core} -1" > /dev/null + + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARDS_INTERRUPT_REGISTER_MASK.ERPP${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g chg ERPP_ERPP_DISCARDS_INTERRUPT_REGISTER_MASK_2.ERPP${core}"| head -n -1 | tail -n +2 >> $log + + #bcmcmd -n $asic "g chg 
IPT_FLOW_CONTROL_DEBUG.IPT${core}" >> $log + bcmcmd -n $asic "tm egr con"| head -n -1 | tail -n +2 >> $log + + if [ $egr_check_pmf_hit_bits -eq 1 ]; then + dump_tcam_drop_action_hits + fi + if [ $egr_diag_counter_g -eq 1 ]; then + bcmcmd -n $asic "diag counter g nz core=${core}"| head -n -1 | tail -n +2 >> $log + fi + echo "$delim" >> $log + echo >> $log +} + +dump_ingress_traps() { + bcmcmd -n $asic "g IPPB_DBG_FLP_DATA_PATH_TRAP.IPPB${core}" | head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s IPPB_DBG_FLP_DATA_PATH_TRAP.IPPB${core} -1"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g IPPE_DBG_LLR_TRAP_0.IPPE${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "s IPPE_DBG_LLR_TRAP_0.IPPE${core} -1"| head -n -1 | tail -n +2 >> $log +} +dump_macsec() { + bcmcmd -n $asic "sec stat show; sec stat clear" >> $log +} + +rjct_filename=rjct_status.txt + +check_ingress_drops() { + hit=0 + bcmcmd -n $asic "getreg chg CGM_REJECT_STATUS_BITMAP.CGM${core}" | awk '{split($0,a,":"); print a[2]}' > $rjct_filename + while read -r line; do + [ -z $line ] && continue + res=$(echo $line | grep -v "," | grep "<>") + if [ -z $res ]; then + hit=1 + fi + done < "$rjct_filename" + + if [ $hit == 1 ]; then + logmsg="INGRESS_DROP asic $asic core $core" + timestamp + cat $rjct_filename >> $log + bcmcmd -n $asic "g CGM_MAX_VOQ_WORDS_QSIZE_TRACK.CGM${core}" | head -n -1 | tail -n +2 >> $log + #bcmcmd -n $asic "g chg IPT_FLOW_CONTROL_DEBUG.IPT${core}"| head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "tm ing cong core=$core" >> $log + bcmcmd -n $asic "trap last info core=$core" >> $log + bcmcmd -n $asic "pp vis ppi core=$core" >> $log + bcmcmd -n $asic "pp vis fdt core=$core" >> $log + bcmcmd -n $asic "pp vis ikleap core=$core" >> $log + #bcmcmd -n $asic "pp vis last" >> $log + if [ $ing_check_mc -eq 1 ] ; then + bcmcmd -n $asic "dbal table dump table=mcdb" >> $log + bcmcmd -n $asic "g MTM_ING_MCDB_OFFSET" | head -n -1 | tail -n +2 >> $log + bcmcmd -n $asic "g MTM_EGR_MCDB_OFFSET" | head -n -1 | tail -n +2 >> $log + fi + bcmcmd -n $asic "diag counter g nz core=${core}" >> $log + echo "" >> $log + dump_ingress_traps + echo "" >> $log + if [ $ing_check_macsec -eq 1 ] ; then + dump_macsec + fi + echo "$delim" >> $log + fi +} + +# clear stats +for asic in "${asics[@]}" +do + bcmcmd -n $asic "sec stat clear; clear counter; clear interrupt all" >> /dev/null +done + +iter_a=(0 0 0 0) +while true; +do + for asic in "${asics[@]}" + do + for core in "${cores[@]}" + do + index=$(($asic*2+$core)) + iter=$((${iter_a[$index]}+1)) + if [ $iter -eq $maxiter ] ; then + iter_a[$index]=0; + sleep $sleep_period + continue + fi + iter_a[$index]=$iter + # for majority of polling cycles, check the PQP drop reason and queue + if [ $iter -gt 4 ] ; then + decode_last_pqp_drop + continue + fi + # check for any change in pqp disabled port mask + if [ $iter -eq 1 ] ; then + check_new_port_state + continue + fi + if [ $iter -eq 2 ] ; then + check_egress_drops + continue + fi + if [ $iter -eq 3 ]; then + check_ingress_drops + continue + fi + if [ $iter -eq 4 ]; then + decode_last_rqp_drop + fi + done + done +done + diff --git a/scripts/decode-syseeprom b/scripts/decode-syseeprom index 3d0b8d1db9..5812f38190 100755 --- a/scripts/decode-syseeprom +++ b/scripts/decode-syseeprom @@ -17,13 +17,15 @@ import sys import sonic_platform from sonic_platform_base.sonic_eeprom.eeprom_tlvinfo import TlvInfoDecoder -from sonic_py_common import device_info +from sonic_py_common import device_info, logger from swsscommon.swsscommon import 
SonicV2Connector from tabulate import tabulate EEPROM_INFO_TABLE = 'EEPROM_INFO' +SYSLOG_IDENTIFIER = 'decode-syseeprom' +log = logger.Logger(SYSLOG_IDENTIFIER) def instantiate_eeprom_object(): eeprom = None diff --git a/scripts/dpu-tty.py b/scripts/dpu-tty.py new file mode 100755 index 0000000000..ff0b041b01 --- /dev/null +++ b/scripts/dpu-tty.py @@ -0,0 +1,73 @@ +#!/usr/bin/env python3 +# +# Copyright (c) 2024 Cisco Systems, Inc. +# + +import argparse +import json +import os +import subprocess +from sonic_py_common import device_info + +UART_CON = '/usr/bin/picocom' + + +def get_dpu_tty(dpu, tty, baud): + + platform = device_info.get_platform() + if not platform: + print("No platform") + return None + + # Get platform path. + platform_path = device_info.get_path_to_platform_dir() + + if os.path.isfile(os.path.join(platform_path, device_info.PLATFORM_JSON_FILE)): + json_file = os.path.join(platform_path, device_info.PLATFORM_JSON_FILE) + + try: + with open(json_file, 'r') as file: + platform_data = json.load(file) + except (json.JSONDecodeError, IOError, TypeError, ValueError): + print("No platform.json") + return None + + dpus = platform_data.get('DPUS', None) + if dpus is None: + print("No DPUs in platform.json") + return None + + if tty is None: + dev = dpus[dpu]["serial-console"]["device"] + else: + # overwrite tty device in platform.json + dev = tty + + if baud is None: + baud = dpus[dpu]["serial-console"]["baud-rate"] + return dev, baud + + +def main(): + + parser = argparse.ArgumentParser(description='DPU TTY Console Utility') + parser.add_argument('-n', '--name', required=True) + parser.add_argument('-t', '--tty') + parser.add_argument('-b', '--baud') + args = parser.parse_args() + + dpu_tty, dpu_baud = get_dpu_tty(args.name, args.tty, args.baud) + # Use UART console utility for error checking of dpu_tty and dpu_baud. 
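+    # Note: stdout/stderr are not captured here, so picocom attaches directly to the user's terminal.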
+ + p = subprocess.run([UART_CON, '-b', dpu_baud, '/dev/%s' % dpu_tty]) + if p.returncode: + print('{} failed'.format(p.args)) + if p.stdout: + print(p.stdout) + if p.stderr: + print(p.stderr) + return p.returncode + + +if __name__ == "__main__": + exit(main()) diff --git a/scripts/dropconfig b/scripts/dropconfig index 180c6166c6..1fc812a474 100755 --- a/scripts/dropconfig +++ b/scripts/dropconfig @@ -105,7 +105,7 @@ class DropConfig(object): if supported_reasons and int(capabilities.get('count', 0)) > 0: print('\n{}'.format(counter)) for reason in supported_reasons: - print('\t{}'.format(reason)) + print(' {}'.format(reason)) def create_counter(self, counter_name, alias, group, counter_type, description, reasons): diff --git a/scripts/dropstat b/scripts/dropstat index 4e9f5bb4d0..219ad2b494 100755 --- a/scripts/dropstat +++ b/scripts/dropstat @@ -11,8 +11,8 @@ # - Refactor calls to COUNTERS_DB to reduce redundancy # - Cache DB queries to reduce # of expensive queries +import click import json -import argparse import os import socket import sys @@ -20,6 +20,9 @@ import sys from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +from utilities_common.general import load_db_config +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -28,9 +31,14 @@ try: test_path = os.path.join(modules_path, "tests") sys.path.insert(0, modules_path) sys.path.insert(0, test_path) - import mock_tables.dbconnector + from tests.mock_tables import dbconnector socket.gethostname = lambda: 'sonic_drops_test' os.getuid = lambda: 27 + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + else: + dbconnector.load_database_config() except KeyError: pass @@ -43,6 +51,7 @@ DEBUG_COUNTER_PORT_STAT_MAP = 'COUNTERS_DEBUG_NAME_PORT_STAT_MAP' DEBUG_COUNTER_SWITCH_STAT_MAP = 'COUNTERS_DEBUG_NAME_SWITCH_STAT_MAP' COUNTERS_PORT_NAME_MAP = 'COUNTERS_PORT_NAME_MAP' COUNTER_TABLE_PREFIX = 'COUNTERS:' +SWITCH_LEVEL_COUNTER_PREFIX = 'SWITCH_STD_DROP_COUNTER-' # ASIC_DB Tables ASIC_SWITCH_INFO_PREFIX = 'ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:' @@ -79,34 +88,43 @@ std_port_headers_map = { # Standard Switch-Level Headers std_switch_description_header = ['DEVICE'] +std_switch_dflt_drop_headers= [ 'SWITCH-ID'] +std_switch_drop_headers_map = { + 'SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP': 'PKT_INTEGRITY_ERR' +} def get_dropstat_dir(): return UserCache().get_directory() class DropStat(object): - def __init__(self): - self.config_db = ConfigDBConnector() - self.config_db.connect() - - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.COUNTERS_DB) - self.db.connect(self.db.ASIC_DB) - self.db.connect(self.db.APPL_DB) + def __init__(self, namespace): + self.namespaces = multi_asic.get_namespace_list(namespace) + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.db = None + self.config_db = None + self.cached_namespace = None dropstat_dir = get_dropstat_dir() self.port_drop_stats_file = os.path.join(dropstat_dir, 'port-stats') - self.switch_drop_stats_file = os.path.join(dropstat_dir + 'switch-stats') + self.switch_drop_stats_file = os.path.join(dropstat_dir, 'switch-stats') + self.switch_std_drop_stats_file = os.path.join(dropstat_dir, 'switch-std-drop-stats') self.stat_lookup = {} self.reverse_stat_lookup = {} + @multi_asic_util.run_on_multi_asic def 
show_drop_counts(self, group, counter_type):
         """
             Prints out the current drop counts at the port-level and
             switch-level.
         """
+        if os.environ.get("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE", "0") == "1":
+            # The temp cache needs to be cleared to avoid interference from previous test cases
+            UserCache().remove()
+
+        self.show_switch_std_drop_counts(group, counter_type)
         self.show_port_drop_counts(group, counter_type)
         print('')
         self.show_switch_drop_counts(group, counter_type)
@@ -116,16 +134,91 @@ class DropStat(object):
             Clears the current drop counts.
         """
 
+        counters_port_drop = {}
+        counters_switch_drop = {}
+        counters_switch_std_drop = {}
+        for ns in self.namespaces:
+            self.config_db = multi_asic.connect_config_db_for_ns(ns)
+            self.db = multi_asic.connect_to_all_dbs_for_ns(ns)
+
+            counts = self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP)
+            if counts:
+                counters_port_drop.update(counts)
+
+            counters = self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP)
+            if counters:
+                counts = self.get_counts(counters, self.get_switch_id())
+                counters_switch_drop.update(counts)
+
+            counters = self.get_configured_counters(DEBUG_COUNTER_SWITCH_STAT_MAP, True)
+            if counters:
+                counts = self.get_counts(counters, self.get_switch_id())
+                counters_switch_std_drop.update(counts)
+
         try:
-            json.dump(self.get_counts_table(self.gather_counters(std_port_rx_counters + std_port_tx_counters, DEBUG_COUNTER_PORT_STAT_MAP), COUNTERS_PORT_NAME_MAP),
-                      open(self.port_drop_stats_file, 'w+'))
-            json.dump(self.get_counts(self.gather_counters([], DEBUG_COUNTER_SWITCH_STAT_MAP), self.get_switch_id()),
-                      open(self.switch_drop_stats_file, 'w+'))
+            if counters_port_drop:
+                json.dump(counters_port_drop, open(self.port_drop_stats_file, 'w+'))
+
+            if counters_switch_drop:
+                json.dump(counters_switch_drop, open(self.switch_drop_stats_file, 'w+'))
+
+            if counters_switch_std_drop:
+                json.dump(counters_switch_std_drop, open(self.switch_std_drop_stats_file, 'w+'))
         except IOError as e:
             print(e)
             sys.exit(e.errno)
         print("Cleared drop counters")
 
+    def show_switch_std_drop_counts(self, group, counter_type):
+        """
+            Prints out the standard drop counts (packet integrity drop, etc.) at the switch level, if such counts exist.
+        """
+
+        if group is not None or counter_type is not None:
+            return
+
+        # Currently the switch drop counter (packet integrity) is supported only on chassis.
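+        # These counters are maintained per fabric ASIC, so only the "voq" and "fabric" switch types are handled below.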
+ if os.environ.get("VOQ_DROP_COUNTER_TESTING", "0") == "1": + #fake the switch_type for mock-test code coverage + switch_type = "voq" + else: + switch_type = self.db.get(self.db.CONFIG_DB, "DEVICE_METADATA|localhost", "switch_type") + + if switch_type is None: + return + if switch_type != "fabric" and switch_type != "voq": + return + + switch_std_drop_ckpt = {} + + # Grab the latest clear checkpoint, if it exists + if os.path.isfile(self.switch_std_drop_stats_file): + switch_std_drop_ckpt = json.load(open(self.switch_std_drop_stats_file, 'r')) + + counters = self.get_configured_counters(DEBUG_COUNTER_SWITCH_STAT_MAP, True) + if not counters: + return + switch_id = self.get_switch_id() + switch_std_stats = self.get_counts(counters, switch_id) + + if not switch_std_stats: + return + + if os.environ.get("VOQ_DROP_COUNTER_TESTING", "0") == "1": + row = [socket.gethostname()] + else: + cfg_switch_id = self.db.get(self.db.CONFIG_DB, "DEVICE_METADATA|localhost", "switch_id") + row = [cfg_switch_id] + + headers = std_switch_dflt_drop_headers + for cntr in counters: + if cntr in std_switch_drop_headers_map: + row.append(switch_std_stats.get(cntr, 0) - switch_std_drop_ckpt.get(cntr, 0)) + headers.append(std_switch_drop_headers_map[cntr]) + if row: + print(tabulate([row], headers, tablefmt='simple', stralign='right')) + print('') + def show_port_drop_counts(self, group, counter_type): """ Prints out the drop counts at the port level, if such counts exist. @@ -189,7 +282,7 @@ class DropStat(object): the group or not the right counter type. """ - configured_counters = self.get_configured_counters(object_stat_map) + configured_counters = self.get_configured_counters(object_stat_map, False) counters = std_counters + configured_counters return [ctr for ctr in counters if self.in_group(ctr, object_stat_map, group) and @@ -258,12 +351,13 @@ class DropStat(object): the given object type. """ + if self.cached_namespace != self.multi_asic.current_namespace: + self.stat_lookup = {} + self.cached_namespace = self.multi_asic.current_namespace + if not self.stat_lookup.get(object_stat_map, None): stats_map = self.db.get_all(self.db.COUNTERS_DB, object_stat_map) - if stats_map: - self.stat_lookup[object_stat_map] = stats_map - else: - self.stat_lookup[object_stat_map] = None + self.stat_lookup[object_stat_map] = stats_map if stats_map else None return self.stat_lookup[object_stat_map] @@ -282,7 +376,7 @@ class DropStat(object): return self.reverse_stat_lookup[object_stat_map] - def get_configured_counters(self, object_stat_map): + def get_configured_counters(self, object_stat_map, std_switch_cntr=False): """ Returns the list of counters that have been configured to track packet drops. 
@@ -294,6 +388,15 @@ class DropStat(object):
         if not counters:
             return configured_counters
 
+        # Switch-level standard drop counters are added to the DEBUG_COUNTER_SWITCH_STAT_MAP table by default,
+        # so remove them from the configured counters
+        if object_stat_map == DEBUG_COUNTER_SWITCH_STAT_MAP:
+            if std_switch_cntr:
+                new_cntrs = {k:counters[k] for k in counters if SWITCH_LEVEL_COUNTER_PREFIX in k}
+            else:
+                new_cntrs = {k:counters[k] for k in counters if SWITCH_LEVEL_COUNTER_PREFIX not in k}
+            return list(new_cntrs.values())
+
         return list(counters.values())
 
     def get_counter_name(self, object_stat_map, counter_stat):
@@ -385,39 +488,22 @@ class DropStat(object):
         else:
             return PORT_STATE_NA
 
-
-def main():
-    parser = argparse.ArgumentParser(description='Display drop counters',
-                                     formatter_class=argparse.RawTextHelpFormatter,
-                                     epilog="""
-Examples:
-    dropstat
-""")
-
-    # Version
-    parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0')
-
-    # Actions
-    parser.add_argument('-c', '--command', type=str, help='Desired action to perform')
-
-    # Variables
-    parser.add_argument('-g', '--group', type=str, help='The group of the target drop counter', default=None)
-    parser.add_argument('-t', '--type', type=str, help='The type of the target drop counter', default=None)
-
-    args = parser.parse_args()
-
-    command = args.command
-
-    group = args.group
-    counter_type = args.type
-
-    dcstat = DropStat()
+@click.command(help='Display drop counters')
+@click.option('-c', '--command', required=True, help='Desired action to perform',
+              type=click.Choice(['clear', 'show'], case_sensitive=False))
+@click.option('-g', '--group', default=None, help='The group of the target drop counter')
+@click.option('-t', '--type', 'counter_type', default=None, help='The type of the target drop counter')
+@click.option('-n', '--namespace', help='Namespace name', default=None,
+              type=click.Choice(multi_asic.get_namespace_list()))
+@click.version_option(version='1.0')
+def main(command, group, counter_type, namespace):
+    load_db_config()
+
+    dcstat = DropStat(namespace)
     if command == 'clear':
         dcstat.clear_drop_counts()
-    elif command == 'show':
-        dcstat.show_drop_counts(group, counter_type)
     else:
-        print("Command not recognized")
+        dcstat.show_drop_counts(group, counter_type)
 
 
 if __name__ == '__main__':
diff --git a/scripts/ecnconfig b/scripts/ecnconfig
index e3b08d2bd3..9b2deab4dc 100755
--- a/scripts/ecnconfig
+++ b/scripts/ecnconfig
@@ -5,7 +5,7 @@ ecnconfig is the utility to
 
 1) show and change ECN configuration
 
-usage: ecnconfig [-h] [-v] [-l] [-p PROFILE] [-gmin GREEN_MIN]
+usage: ecnconfig [-h] [-v] [-l] [-p PROFILE] [-gmin GREEN_MIN] [-n NAMESPACE]
                  [-gmax GREEN_MAX] [-ymin YELLOW_MIN] [-ymax YELLOW_MAX]
                  [-rmin RED_MIN] [-rmax RED_MAX] [-gdrop GREEN_DROP_PROB]
                  [-ydrop YELLOW_DROP_PROB] [-rdrop RED_DROP_PROB] [-vv]
@@ -16,6 +16,7 @@ optional arguments:
     -vv   --verbose              verbose output
     -l    --list                 show ECN WRED configuration
     -p    --profile              specify WRED profile name
+    -n    --namespace            show ECN configuration for specified namespace
     -gmin --green-min            set min threshold for packets marked green
     -gmax --green-max            set max threshold for packets marked green
     -ymin --yellow-min           set min threshold for packets marked yellow
@@ -47,7 +48,7 @@ $ecnconfig -q 3
 ECN status:
 queue 3: on
 """
-import argparse
+import click
 import json
 import os
 import sys
@@ -62,12 +63,17 @@ try:
         sys.path.insert(0, modules_path)
         sys.path.insert(0, tests_path)
         import mock_tables.dbconnector
-
+        if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] ==
"multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector +from sonic_py_common import multi_asic +from utilities_common import multi_asic as multi_asic_util +from utilities_common.general import load_db_config WRED_PROFILE_TABLE_NAME = "WRED_PROFILE" WRED_CONFIG_FIELDS = { @@ -82,7 +88,6 @@ WRED_CONFIG_FIELDS = { "rdrop": "red_drop_probability" } -PORT_TABLE_NAME = "PORT" QUEUE_TABLE_NAME = "QUEUE" DEVICE_NEIGHBOR_TABLE_NAME = "DEVICE_NEIGHBOR" FIELD = "wred_profile" @@ -96,18 +101,25 @@ class EcnConfig(object): """ Process ecnconfig """ - def __init__(self, filename, verbose): + def __init__(self, test_filename, verbose, namespace): self.ports = [] self.queues = [] - self.filename = filename self.verbose = verbose + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.num_wred_profiles = 0 - # Set up db connections - self.db = ConfigDBConnector() - self.db.connect() + # For unit testing + self.test_filename = test_filename + self.updated_profile_tables = {} + @multi_asic_util.run_on_multi_asic def list(self): - wred_profiles = self.db.get_table(WRED_PROFILE_TABLE_NAME) + """ + List all WRED profiles. + """ + wred_profiles = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) for name, data in wred_profiles.items(): profile_name = name profile_data = data @@ -117,12 +129,18 @@ class EcnConfig(object): line = [field, value] config.append(line) print(tabulate(config) + "\n") - if self.verbose: - print("Total profiles: %d" % len(wred_profiles)) + self.num_wred_profiles += len(wred_profiles) - # get parameters of a WRED profile def get_profile_data(self, profile): - wred_profiles = self.db.get_table(WRED_PROFILE_TABLE_NAME) + """ + Get parameters of a WRED profile + """ + if self.namespace or not multi_asic.is_multi_asic(): + db = ConfigDBConnector(namespace=self.namespace) + db.connect() + wred_profiles = db.get_table(WRED_PROFILE_TABLE_NAME) + else: + wred_profiles = multi_asic.get_table(WRED_PROFILE_TABLE_NAME) for profile_name, profile_data in wred_profiles.items(): if profile_name == profile: @@ -131,6 +149,9 @@ class EcnConfig(object): return None def validate_profile_data(self, profile_data): + """ + Validate threshold, probability and color values. + """ result = True # check if thresholds are non-negative integers @@ -168,73 +189,116 @@ class EcnConfig(object): return result + @multi_asic_util.run_on_multi_asic def set_wred_threshold(self, profile, threshold, value): + """ + Single asic behaviour: + Set threshold value on default namespace + + Multi asic behaviour: + Set threshold value on the specified namespace. + If no namespace is provided, set on all namespaces. 
+ """ chk_exec_privilege() + # Modify the threshold field = WRED_CONFIG_FIELDS[threshold] if self.verbose: - print("Setting %s value to %s" % (field, value)) - self.db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) - if self.filename is not None: - prof_table = self.db.get_table(WRED_PROFILE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump(prof_table, fd) + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + print("Setting %s value to %s%s" % (field, value, namespace_str)) + self.config_db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) + + # Record the change for unit testing + if self.test_filename: + profile_table = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) + if self.multi_asic.current_namespace in self.updated_profile_tables.keys(): + self.updated_profile_tables[self.multi_asic.current_namespace][profile][threshold] = value + else: + self.updated_profile_tables[self.multi_asic.current_namespace] = profile_table + @multi_asic_util.run_on_multi_asic def set_wred_prob(self, profile, drop_color, value): + """ + Single asic behaviour: + Set drop probability on default namespace + + Multi asic behaviour: + Set drop probability value on the specified namespace. + If no namespace is provided, set on all namespaces. + """ chk_exec_privilege() + # Modify the drop probability field = WRED_CONFIG_FIELDS[drop_color] if self.verbose: - print("Setting %s value to %s%%" % (field, value)) - self.db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) - if self.filename is not None: - prof_table = self.db.get_table(WRED_PROFILE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump(prof_table, fd) + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' + print("Setting %s value to %s%%%s" % (field, value, namespace_str)) + self.config_db.mod_entry(WRED_PROFILE_TABLE_NAME, profile, {field: value}) + + # Record the change for unit testing + if self.test_filename: + profile_table = self.config_db.get_table(WRED_PROFILE_TABLE_NAME) + if self.multi_asic.current_namespace in self.updated_profile_tables.keys(): + self.updated_profile_tables[self.multi_asic.current_namespace][profile][field] = value + else: + self.updated_profile_tables[self.multi_asic.current_namespace] = profile_table class EcnQ(object): """ Process ecn on/off on queues """ - def __init__(self, queues, filename, verbose): + def __init__(self, queues, test_filename, verbose, namespace): self.ports_key = [] self.queues = queues.split(',') - self.filename = filename self.verbose = verbose + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.db = None - # Set up db connections - self.config_db = ConfigDBConnector() - self.config_db.connect() - - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.CONFIG_DB) - - self.gen_ports_key() + # For unit testing + self.test_filename = test_filename + self.updated_q_table = {} def gen_ports_key(self): - if self.ports_key is not None: - port_table = self.config_db.get_table(DEVICE_NEIGHBOR_TABLE_NAME) - self.ports_key = list(port_table.keys()) + port_table = self.config_db.get_table(DEVICE_NEIGHBOR_TABLE_NAME) + self.ports_key = list(port_table.keys()) - # Verify at least one port is available - if len(self.ports_key) == 0: - raise Exception("No active ports detected in table '{}'".format(DEVICE_NEIGHBOR_TABLE_NAME)) + # Verify 
at least one port is available + if len(self.ports_key) == 0: + raise Exception("No active ports detected in table '{}'".format(DEVICE_NEIGHBOR_TABLE_NAME)) - # In multi-ASIC platforms backend ethernet ports are identified as - # 'Ethernet-BPxy'. Add 1024 to sort backend ports to the end. - self.ports_key.sort( - key = lambda k: int(k[8:]) if "BP" not in k else int(k[11:]) + 1024 - ) + # In multi-ASIC platforms backend ethernet ports are identified as + # 'Ethernet-BPxy'. Add 1024 to sort backend ports to the end. + self.ports_key.sort( + key = lambda k: int(k[8:]) if "BP" not in k else int(k[11:]) + 1024 + ) def dump_table_info(self): - if self.filename is not None: + """ + A function to dump updated queue tables. + These JSON dumps are used exclusively by unit tests. + The tables are organized by namespaces for multi-asic support. + """ + if self.test_filename is not None: q_table = self.config_db.get_table(QUEUE_TABLE_NAME) - with open(self.filename, "w") as fd: - json.dump({repr(x):y for x, y in q_table.items()}, fd) + with open(self.test_filename, "w") as fd: + self.updated_q_table[self.multi_asic.current_namespace] = {repr(x):y for x, y in q_table.items()} + json.dump(self.updated_q_table, fd) + @multi_asic_util.run_on_multi_asic def set(self, enable): + """ + Single asic behaviour: + Enable or disable queues on default namespace + + Multi asic behaviour: + Enable or disable queues on a specified namespace. + If no namespace is provided, set on all namespaces. + """ chk_exec_privilege() + self.gen_ports_key() for queue in self.queues: if self.verbose: print("%s ECN on %s queue %s" % ("Enable" if enable else "Disable", ','.join(self.ports_key), queue)) @@ -252,10 +316,24 @@ class EcnQ(object): self.config_db.mod_entry(QUEUE_TABLE_NAME, key, None) else: self.config_db.set_entry(QUEUE_TABLE_NAME, key, entry) + # For unit testing self.dump_table_info() + @multi_asic_util.run_on_multi_asic def get(self): - print("ECN status:") + """ + Single asic behaviour: + Get status of queues on default namespace + + Multi asic behaviour: + Get status of queues on a specified namespace. + If no namespace is provided, get queue status on all namespaces. 
+ """ + self.gen_ports_key() + namespace = self.multi_asic.current_namespace + namespace_str = f" for namespace {namespace}" if namespace else '' + print(f"ECN status{namespace_str}:") + for queue in self.queues: out = ' '.join(['queue', queue]) if self.verbose: @@ -270,81 +348,77 @@ class EcnQ(object): print("%s: on" % (out)) else: print("%s: off" % (out)) + # For unit testing self.dump_table_info() -def main(): - parser = argparse.ArgumentParser(description='Show and change:\n' - '1) ECN WRED configuration\n' - '2) ECN on/off status on queues', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show ECN WRED configuration') - parser.add_argument('-p', '--profile', type=str, help='specify WRED profile name', default=None) - parser.add_argument('-gmin', '--green-min', type=str, help='set min threshold for packets marked \'green\'', default=None) - parser.add_argument('-gmax', '--green-max', type=str, help='set max threshold for packets marked \'green\'', default=None) - parser.add_argument('-ymin', '--yellow-min', type=str, help='set min threshold for packets marked \'yellow\'', default=None) - parser.add_argument('-ymax', '--yellow-max', type=str, help='set max threshold for packets marked \'yellow\'', default=None) - parser.add_argument('-rmin', '--red-min', type=str, help='set min threshold for packets marked \'red\'', default=None) - parser.add_argument('-rmax', '--red-max', type=str, help='set max threshold for packets marked \'red\'', default=None) - parser.add_argument('-gdrop', '--green-drop-prob', type=str, help='set max drop/mark probability for packets marked \'green\'', default=None) - parser.add_argument('-ydrop', '--yellow-drop-prob', type=str, help='set max drop/mark probability for packets marked \'yellow\'', default=None) - parser.add_argument('-rdrop', '--red-drop-prob', type=str, help='set max drop/mark probability for packets marked \'red\'', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - parser.add_argument('-vv', '--verbose', action='store_true', help='Verbose output', default=False) - - parser.add_argument('command', nargs='?', choices=['on', 'off'], type=str, help='turn on/off ecn', default=None) - parser.add_argument('-q', '--queue', type=str, help='specify queue index list: 3,4', default=None) - parser.add_argument('-f', '--filename', help='file used by mock tests', type=str, default=None) - +@click.command(help='Show and change: ECN WRED configuration\nECN on/off status on queues') +@click.argument('command', type=click.Choice(['on', 'off'], case_sensitive=False), required=False, default=None) +@click.option('-l', '--list', 'show_config', is_flag=True, help='show ECN WRED configuration') +@click.option('-p', '--profile', type=str, help='specify WRED profile name', default=None) +@click.option('-gmin', '--green-min', type=str, help='set min threshold for packets marked \'green\'', default=None) +@click.option('-gmax', '--green-max', type=str, help='set max threshold for packets marked \'green\'', default=None) +@click.option('-ymin', '--yellow-min', type=str, help='set min threshold for packets marked \'yellow\'', default=None) +@click.option('-ymax', '--yellow-max', type=str, help='set max threshold for packets marked \'yellow\'', default=None) +@click.option('-rmin', '--red-min', type=str, help='set min threshold for packets marked \'red\'', default=None) +@click.option('-rmax', '--red-max', type=str, help='set max threshold for packets marked 
\'red\'', default=None) +@click.option('-gdrop', '--green-drop-prob', type=str, help='set max drop/mark probability for packets marked \'green\'', default=None) +@click.option('-ydrop', '--yellow-drop-prob', type=str, help='set max drop/mark probability for packets marked \'yellow\'', default=None) +@click.option('-rdrop', '--red-drop-prob', type=str, help='set max drop/mark probability for packets marked \'red\'', default=None) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.option('-vv', '--verbose', is_flag=True, help='Verbose output', default=False) +@click.option('-q', '--queue', type=str, help='specify queue index list: 3,4', default=None) +@click.version_option(version='1.0') +def main(command, show_config, profile, green_min, + green_max, yellow_min, yellow_max, red_min, + red_max, green_drop_prob, yellow_drop_prob, + red_drop_prob, namespace, verbose, queue): + test_filename = None if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2": - sys.argv.extend(['-f', '/tmp/ecnconfig']) - - args = parser.parse_args() + test_filename = '/tmp/ecnconfig' try: - if args.list or args.profile: - prof_cfg = EcnConfig(args.filename, args.verbose) - if args.list: - arg_len_max = 2 - if args.verbose: - arg_len_max += 1 - if args.filename: - arg_len_max += 2 - if len(sys.argv) > arg_len_max: + load_db_config() + if show_config or profile: + # Check if a set option has been provided + setOption = (green_min or green_max or yellow_min or yellow_max or red_min or red_max + or green_drop_prob or yellow_drop_prob or red_drop_prob) + + prof_cfg = EcnConfig(test_filename, verbose, namespace) + if show_config: + if setOption: raise Exception("Input arguments error. No set options allowed when -l[ist] specified") + prof_cfg.list() - elif args.profile: - arg_len_min = 4 - if args.verbose: - arg_len_min += 1 - if args.filename: - arg_len_min += 2 - if len(sys.argv) < arg_len_min: + if verbose: + print("Total profiles: %d" % prof_cfg.num_wred_profiles) + + elif profile: + if not setOption: raise Exception("Input arguments error. Specify at least one threshold parameter to set") # get current configuration data - wred_profile_data = prof_cfg.get_profile_data(args.profile) + wred_profile_data = prof_cfg.get_profile_data(profile) if wred_profile_data is None: - raise Exception("Input arguments error. Invalid WRED profile %s" % (args.profile)) - - if args.green_max: - wred_profile_data[WRED_CONFIG_FIELDS["gmax"]] = args.green_max - if args.green_min: - wred_profile_data[WRED_CONFIG_FIELDS["gmin"]] = args.green_min - if args.yellow_max: - wred_profile_data[WRED_CONFIG_FIELDS["ymax"]] = args.yellow_max - if args.yellow_min: - wred_profile_data[WRED_CONFIG_FIELDS["ymin"]] = args.yellow_min - if args.red_max: - wred_profile_data[WRED_CONFIG_FIELDS["rmax"]] = args.red_max - if args.red_min: - wred_profile_data[WRED_CONFIG_FIELDS["rmin"]] = args.red_min - if args.green_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["gdrop"]] = args.green_drop_prob - if args.yellow_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["ydrop"]] = args.yellow_drop_prob - if args.red_drop_prob: - wred_profile_data[WRED_CONFIG_FIELDS["rdrop"]] = args.red_drop_prob + raise Exception("Input arguments error. 
Invalid WRED profile %s for namespace %s" % (profile, namespace)) + + if green_max: + wred_profile_data[WRED_CONFIG_FIELDS["gmax"]] = green_max + if green_min: + wred_profile_data[WRED_CONFIG_FIELDS["gmin"]] = green_min + if yellow_max: + wred_profile_data[WRED_CONFIG_FIELDS["ymax"]] = yellow_max + if yellow_min: + wred_profile_data[WRED_CONFIG_FIELDS["ymin"]] = yellow_min + if red_max: + wred_profile_data[WRED_CONFIG_FIELDS["rmax"]] = red_max + if red_min: + wred_profile_data[WRED_CONFIG_FIELDS["rmin"]] = red_min + if green_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["gdrop"]] = green_drop_prob + if yellow_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["ydrop"]] = yellow_drop_prob + if red_drop_prob: + wred_profile_data[WRED_CONFIG_FIELDS["rdrop"]] = red_drop_prob # validate new configuration data if prof_cfg.validate_profile_data(wred_profile_data) == False: @@ -352,41 +426,39 @@ def main(): # apply new configuration # the following parameters can be combined in one run - if args.green_max: - prof_cfg.set_wred_threshold(args.profile, "gmax", args.green_max) - if args.green_min: - prof_cfg.set_wred_threshold(args.profile, "gmin", args.green_min) - if args.yellow_max: - prof_cfg.set_wred_threshold(args.profile, "ymax", args.yellow_max) - if args.yellow_min: - prof_cfg.set_wred_threshold(args.profile, "ymin", args.yellow_min) - if args.red_max: - prof_cfg.set_wred_threshold(args.profile, "rmax", args.red_max) - if args.red_min: - prof_cfg.set_wred_threshold(args.profile, "rmin", args.red_min) - if args.green_drop_prob: - prof_cfg.set_wred_prob(args.profile, "gdrop", args.green_drop_prob) - if args.yellow_drop_prob: - prof_cfg.set_wred_prob(args.profile, "ydrop", args.yellow_drop_prob) - if args.red_drop_prob: - prof_cfg.set_wred_prob(args.profile, "rdrop", args.red_drop_prob) - - elif args.queue: - arg_len_min = 3 - if args.filename: - arg_len_min += 1 - if args.verbose: - arg_len_min += 1 - if len(sys.argv) < arg_len_min: + if green_max: + prof_cfg.set_wred_threshold(profile, "gmax", green_max) + if green_min: + prof_cfg.set_wred_threshold(profile, "gmin", green_min) + if yellow_max: + prof_cfg.set_wred_threshold(profile, "ymax", yellow_max) + if yellow_min: + prof_cfg.set_wred_threshold(profile, "ymin", yellow_min) + if red_max: + prof_cfg.set_wred_threshold(profile, "rmax", red_max) + if red_min: + prof_cfg.set_wred_threshold(profile, "rmin", red_min) + if green_drop_prob: + prof_cfg.set_wred_prob(profile, "gdrop", green_drop_prob) + if yellow_drop_prob: + prof_cfg.set_wred_prob(profile, "ydrop", yellow_drop_prob) + if red_drop_prob: + prof_cfg.set_wred_prob(profile, "rdrop", red_drop_prob) + + # Dump the current config in the file for unit tests + if test_filename: + with open(test_filename, "w") as fd: + json.dump(prof_cfg.updated_profile_tables, fd) + + elif queue: + if queue.split(',') == ['']: raise Exception("Input arguments error. 
Specify at least one queue by index")
-
-        q_ecn = EcnQ(args.queue, args.filename, args.verbose)
-        if not args.command:
+            q_ecn = EcnQ(queue, test_filename, verbose, namespace)
+            if command is None:
                 q_ecn.get()
             else:
-            q_ecn.set(enable = True if args.command == 'on' else False)
+                q_ecn.set(enable = True if command == 'on' else False)
         else:
-            parser.print_help()
             sys.exit(1)
 
     except Exception as e:
diff --git a/scripts/fabricstat b/scripts/fabricstat
index cf3d14bf5e..6f1893c9db 100755
--- a/scripts/fabricstat
+++ b/scripts/fabricstat
@@ -399,6 +399,49 @@ class FabricIsolation(FabricStat):
         print(tabulate(body, header, tablefmt='simple', stralign='right'))
         return
 
+class FabricRate(FabricStat):
+    def rate_print(self):
+        # Connect to database
+        self.db = multi_asic.connect_to_all_dbs_for_ns(self.namespace)
+        # Get the set of all fabric ports
+        port_keys = self.db.keys(self.db.STATE_DB, FABRIC_PORT_STATUS_TABLE_PREFIX + '*')
+        # Create a new dictionary. The keys are the local port values in integer format.
+        # Only fabric ports that have remote port data are added.
+        port_dict = {}
+        for port_key in port_keys:
+            port_data = self.db.get_all(self.db.STATE_DB, port_key)
+            port_number = int(port_key.replace("FABRIC_PORT_TABLE|PORT", ""))
+            port_dict.update({port_number: port_data})
+        # Create ordered table of fabric ports.
+        rxRate = 0
+        rxData = 0
+        txRate = 0
+        txData = 0
+        time = 0
+        local_time = ""
+        # The Rx data, Tx data and time fields are kept for testing
+        asic = "asic0"
+        if self.namespace:
+            asic = self.namespace
+        header = ["ASIC", "Link ID", "Rx Data Mbps", "Tx Data Mbps"]
+        body = []
+        for port_number in sorted(port_dict.keys()):
+            port_data = port_dict[port_number]
+            if "OLD_RX_RATE_AVG" in port_data:
+                rxRate = port_data["OLD_RX_RATE_AVG"]
+            if "OLD_RX_DATA" in port_data:
+                rxData = port_data["OLD_RX_DATA"]
+            if "OLD_TX_RATE_AVG" in port_data:
+                txRate = port_data["OLD_TX_RATE_AVG"]
+            if "OLD_TX_DATA" in port_data:
+                txData = port_data["OLD_TX_DATA"]
+            if "LAST_TIME" in port_data:
+                time = int(port_data["LAST_TIME"])
+                local_time = datetime.fromtimestamp(time)
+            body.append((asic, port_number, rxRate, txRate))
+        click.echo()
+        click.echo(tabulate(body, header, tablefmt='simple', stralign='right'))
+
 def main():
     global cnstat_dir
     global cnstat_fqn_file_port
@@ -415,6 +458,8 @@ Examples:
   fabricstat -q -n asic0
   fabricstat -c
   fabricstat -c -n asic0
+  fabricstat -s
+  fabricstat -s -n asic0
  fabricstat -C
   fabricstat -D
 """)
@@ -425,6 +470,7 @@ Examples:
     parser.add_argument('-e', '--errors', action='store_true', help='Display errors')
     parser.add_argument('-c','--capacity',action='store_true', help='Display fabric capacity')
     parser.add_argument('-i','--isolation', action='store_true', help='Display fabric ports isolation status')
+    parser.add_argument('-s','--rate', action='store_true', help='Display fabric counters rate')
     parser.add_argument('-C','--clear', action='store_true', help='Copy & clear fabric counters')
     parser.add_argument('-D','--delete', action='store_true', help='Delete saved stats')
 
@@ -433,6 +479,7 @@ Examples:
     reachability = args.reachability
     capacity_status = args.capacity
     isolation_status = args.isolation
+    rate = args.rate
     namespace = args.namespace
     errors_only = args.errors
 
@@ -455,17 +502,21 @@ Examples:
 
     def nsStat(ns, errors_only):
         if queue:
-            stat = FabricQueueStat(ns)
+           stat = FabricQueueStat(ns)
         elif reachability:
-            stat = FabricReachability(ns)
-            stat.reachability_print()
-            return
+           stat = FabricReachability(ns)
+           stat.reachability_print()
+           return
         elif isolation_status:
-            stat =
FabricIsolation(ns) - stat.isolation_print() - return + stat = FabricIsolation(ns) + stat.isolation_print() + return + elif rate: + stat = FabricRate(ns) + stat.rate_print() + return else: - stat = FabricPortStat(ns) + stat = FabricPortStat(ns) cnstat_dict = stat.get_cnstat_dict() if save_fresh_stats: stat.save_fresh_stats() @@ -489,7 +540,10 @@ Examples: stat = FabricCapacity(namespace, table_cnt, threshold) stat.capacity_print() - click.echo("Monitored fabric capacity threshold: {}".format(threshold[0])) + print_th = "" + if threshold: + print_th = threshold[0] + click.echo("Monitored fabric capacity threshold: {}".format(print_th)) click.echo() click.echo(tabulate(table_cnt, capacity_header, tablefmt='simple', stralign='right')) else: diff --git a/scripts/fast-reboot b/scripts/fast-reboot index 91791b3771..5c1a32f50e 100755 --- a/scripts/fast-reboot +++ b/scripts/fast-reboot @@ -50,6 +50,7 @@ EXIT_NO_CONTROL_PLANE_ASSISTANT=20 EXIT_SONIC_INSTALLER_VERIFY_REBOOT=21 EXIT_PLATFORM_FW_AU_FAILURE=22 EXIT_TEAMD_RETRY_COUNT_FAILURE=23 +EXIT_NO_MIRROR_SESSION_ACLS=24 function error() { @@ -146,7 +147,7 @@ function clear_boot() # common_clear debug "${REBOOT_TYPE} failure ($?) cleanup ..." - /sbin/kexec -u || /bin/true + /sbin/kexec -u -a || /bin/true teardown_control_plane_assistant @@ -243,18 +244,42 @@ function wait_for_pre_shutdown_complete_or_fail() function backup_database() { debug "Backing up database ..." + + if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then + # Advanced reboot: dump state to host disk + sonic-db-cli ASIC_DB FLUSHDB > /dev/null + sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null + sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null + fi + + if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then + # Flush RESTAP_DB in fast-reboot to avoid stale status + sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null + fi + # Dump redis content to a file 'dump.rdb' in warmboot directory mkdir -p $WARM_DIR # Delete keys in stateDB except FDB_TABLE|*, MIRROR_SESSION_TABLE|*, WARM_RESTART_ENABLE_TABLE|*, FG_ROUTE_TABLE|* sonic-db-cli STATE_DB eval " for _, k in ipairs(redis.call('keys', '*')) do - if not string.match(k, 'FDB_TABLE|') and not string.match(k, 'WARM_RESTART_TABLE|') \ + if string.match(k, 'PORT_TABLE|Ethernet') then + for i, f in ipairs(redis.call('hgetall', k)) do + if i % 2 == 1 then + if not string.match(f, 'host_tx_ready') \ + and not string.match(f, 'NPU_SI_SETTINGS_SYNC_STATUS') \ + and not string.match(f, 'CMIS_REINIT_REQUIRED') then + redis.call('hdel', k, f) + end + end + end + elseif not string.match(k, 'FDB_TABLE|') and not string.match(k, 'WARM_RESTART_TABLE|') \ and not string.match(k, 'MIRROR_SESSION_TABLE|') \ and not string.match(k, 'FG_ROUTE_TABLE|') \ and not string.match(k, 'WARM_RESTART_ENABLE_TABLE|') \ and not string.match(k, 'TRANSCEIVER_INFO|') \ and not string.match(k, 'VXLAN_TUNNEL_TABLE|') \ and not string.match(k, 'BUFFER_MAX_PARAM_TABLE|') \ + and not string.match(k, 'STORAGE_INFO|') \ and not string.match(k, 'FAST_RESTART_ENABLE_TABLE|') then redis.call('del', k) end @@ -272,6 +297,53 @@ function backup_database() fi } +function check_mirror_session_acls() +{ + debug "Checking if mirror session ACLs (arp, nd) programmed to ASIC successfully" + ACL_ARP="missing" + ACL_ND="missing" + start_time=${SECONDS} + elapsed_time=$((${SECONDS} - ${start_time})) + retry_count=0 + while [[ ${elapsed_time} -lt 10 ]]; do + CHECK_ACL_ENTRIES=0 + retry_count=$((retry_count + 1)) + ACL_OUTPUT=$(sonic-db-cli ASIC_DB KEYS "*" | grep 
SAI_OBJECT_TYPE_ACL_ENTRY) || CHECK_ACL_ENTRIES=$? + if [[ ${CHECK_ACL_ENTRIES} -ne 0 ]]; then + debug "Failed to retrieve SAI_OBJECT_TYPE_ACL_ENTRY from redis, retrying... (Attempt: ${retry_count})" + sleep 0.1 + elapsed_time=$((${SECONDS} - ${start_time})) + continue + fi + ACL_ENTRIES=( ${ACL_OUTPUT} ) + if [[ ${#ACL_ENTRIES[@]} -eq 0 ]]; then + debug "NO SAI_OBJECT_TYPE_ACL_ENTRY objects found, retrying... (Attempt: ${retry_count})" + sleep 0.1 + elapsed_time=$((${SECONDS} - ${start_time})) + continue + fi + for ACL_ENTRY in ${ACL_ENTRIES[@]}; do + ACL_PRIORITY=$(sonic-db-cli ASIC_DB HGET ${ACL_ENTRY} SAI_ACL_ENTRY_ATTR_PRIORITY) + if [[ ${ACL_PRIORITY} -eq 8888 ]]; then + ACL_ARP="found" + fi + if [[ ${ACL_PRIORITY} -eq 8887 ]]; then + ACL_ND="found" + fi + done + if [[ "${ACL_ARP}" = "found" && "${ACL_ND}" = "found" ]]; then + break + fi + sleep 0.1 + elapsed_time=$((${SECONDS} - ${start_time})) + done + if [[ "${ACL_ARP}" != "found" || "${ACL_ND}" != "found" ]]; then + error "Failed to program mirror session ACLs on ASIC. ACLs: ARP=${ACL_ARP} ND=${ACL_ND}" + exit ${EXIT_NO_MIRROR_SESSION_ACLS} + fi + debug "Mirror session ACLs (arp, nd) programmed to ASIC successfully" +} + function setup_control_plane_assistant() { if [[ -n "${ASSISTANT_IP_LIST}" && -x ${ASSISTANT_SCRIPT} ]]; then @@ -279,6 +351,7 @@ function setup_control_plane_assistant() if [[ "${HWSKU}" != "DellEMC-Z9332f-M-O16C64" && "${HWSKU}" != "DellEMC-Z9332f-M-O16C64-lab" ]]; then debug "Setting up control plane assistant: ${ASSISTANT_IP_LIST} ..." ${ASSISTANT_SCRIPT} -s ${ASSISTANT_IP_LIST} -m set + check_mirror_session_acls else debug "${HWSKU} Not capable to support CPA. Skipping gracefully ..." fi @@ -307,27 +380,58 @@ function setup_reboot_variables() { # Kernel and initrd image HWSKU=$(show platform summary --json | python -c 'import sys, json; print(json.load(sys.stdin)["hwsku"])') + CURR_SONIC_IMAGE=$(sonic-installer list | grep "Current: " | cut -d ' ' -f 2) NEXT_SONIC_IMAGE=$(sonic-installer list | grep "Next: " | cut -d ' ' -f 2) IMAGE_PATH="/host/image-${NEXT_SONIC_IMAGE#SONiC-OS-}" + if [ "$NEXT_SONIC_IMAGE" = "$CURR_SONIC_IMAGE" ]; then + if [[ -f ${DEVPATH}/${PLATFORM}/installer.conf ]]; then + . ${DEVPATH}/${PLATFORM}/installer.conf + fi + else + tmp_dir=`mktemp -d` + mount -o ro $IMAGE_PATH/fs.squashfs $tmp_dir + if [[ -f $tmp_dir/${DEVPATH}/${PLATFORM}/installer.conf ]]; then + . 
$tmp_dir/${DEVPATH}/${PLATFORM}/installer.conf + fi + umount $tmp_dir + rm -rf $tmp_dir + fi + if grep -q aboot_platform= /host/machine.conf; then if is_secureboot; then KERNEL_IMAGE="" BOOT_OPTIONS="SONIC_BOOT_TYPE=${BOOT_TYPE_ARG} secure_boot_enable=1" else KERNEL_IMAGE="$(ls $IMAGE_PATH/boot/vmlinuz-*)" - BOOT_OPTIONS="$(cat "$IMAGE_PATH/kernel-cmdline" | tr '\n' ' ') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + BOOT_OPTIONS="$(cat "$IMAGE_PATH/kernel-cmdline" | tr '\n' ' ') ${KEXEC_LOAD_EXTRA_CMDLINE_LINUX} SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" fi INITRD=$(echo $KERNEL_IMAGE | sed 's/vmlinuz/initrd.img/g') elif grep -q onie_platform= /host/machine.conf; then if [ -r /host/grub/grub.cfg ]; then KERNEL_OPTIONS=$(cat /host/grub/grub.cfg | sed "/$NEXT_SONIC_IMAGE'/,/}/"'!'"g" | grep linux) KERNEL_IMAGE="/host$(echo $KERNEL_OPTIONS | cut -d ' ' -f 2)" - BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + BOOT_OPTIONS="$(echo $KERNEL_OPTIONS | sed -e 's/\s*linux\s*/BOOT_IMAGE=/') ${KEXEC_LOAD_EXTRA_CMDLINE_LINUX} SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" INITRD=$(echo $KERNEL_IMAGE | sed 's/vmlinuz/initrd.img/g') # Handle architectures supporting Device Tree elif [ -f /sys/firmware/devicetree/base/chosen/bootargs ]; then KERNEL_IMAGE="$(ls $IMAGE_PATH/boot/vmlinuz-*)" - BOOT_OPTIONS="$(cat /sys/firmware/devicetree/base/chosen/bootargs | sed 's/.$//') SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + # Fetch next_boot variable + SONIC_IMAGE_NAME="$( fw_printenv boot_next | cut -d '=' -f 2- )" + SUFFIX="" + if [[ ${SONIC_IMAGE_NAME} == "run sonic_image_2" ]]; then + SUFFIX="_old" + fi + SONIC_BOOTARGS="$(fw_printenv sonic_bootargs${SUFFIX} | cut -d '=' -f 2- )" + if [[ ! -z "${SONIC_BOOTARGS}" ]]; then + LINUX_BOOTARGS="$( fw_printenv linuxargs${SUFFIX} | cut -d '=' -f 2- )" + BAUDRATE="$( fw_printenv baudrate | cut -d '=' -f 2- )" + BOOT_OPTIONS="$(echo $SONIC_BOOTARGS | sed -e "s/\${baudrate}/$BAUDRATE/g")" + BOOT_OPTIONS="$(echo $BOOT_OPTIONS | sed -e "s@\${linuxargs$SUFFIX}@$LINUX_BOOTARGS@g")" + BOOT_OPTIONS="$(echo $BOOT_OPTIONS | sed -e 's/.$//') ${KEXEC_LOAD_EXTRA_CMDLINE_LINUX} SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + else + # Fetch bootargs from device tree of the current image + BOOT_OPTIONS="$(cat /sys/firmware/devicetree/base/chosen/bootargs | sed 's/.$//') ${KEXEC_LOAD_EXTRA_CMDLINE_LINUX} SONIC_BOOT_TYPE=${BOOT_TYPE_ARG}" + fi INITRD=$(echo $KERNEL_IMAGE | sed 's/vmlinuz/initrd.img/g') # If initrd is a U-Boot uImage, remove the uImage header @@ -349,6 +453,12 @@ function setup_reboot_variables() local fstype=$(blkid -o value -s TYPE ${sonic_dev}) BOOT_OPTIONS="${BOOT_OPTIONS} ssd-upgrader-part=${sonic_dev},${fstype}" fi + + if [[ "$sonic_asic_type" == "mellanox" ]]; then + # Set governor to performance to speed up boot process. + # The governor is reset back to kernel default in warmboot-finalizer script. + BOOT_OPTIONS="${BOOT_OPTIONS} cpufreq.default_governor=performance" + fi } function check_docker_exec() @@ -452,7 +562,7 @@ function unload_kernel() { # Unload the previously loaded kernel if any loaded if [[ "$(cat /sys/kernel/kexec_loaded)" -eq 1 ]]; then - /sbin/kexec -u + /sbin/kexec -u -a fi } @@ -606,7 +716,7 @@ if is_secureboot && grep -q aboot_machine= /host/machine.conf; then else # check if secure boot is enable in UEFI CHECK_SECURE_UPGRADE_ENABLED=0 - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? 
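+    # mokutil --sb-state reports whether UEFI Secure Boot is enabled; grep -c counts the "enabled" match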
+ SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then debug "Loading kernel without secure boot" load_kernel @@ -752,23 +862,11 @@ for service in ${SERVICES_TO_STOP}; do wait_for_pre_shutdown_complete_or_fail fi - if [[ "$REBOOT_TYPE" = "fastfast-reboot" || "$REBOOT_TYPE" = "fast-reboot" ]]; then - # Advanced reboot: dump state to host disk - sonic-db-cli ASIC_DB FLUSHDB > /dev/null - sonic-db-cli COUNTERS_DB FLUSHDB > /dev/null - sonic-db-cli FLEX_COUNTER_DB FLUSHDB > /dev/null - fi - - if [[ "$REBOOT_TYPE" = "fast-reboot" ]]; then - # Flush RESTAP_DB in fast-reboot to avoid stale status - sonic-db-cli RESTAPI_DB FLUSHDB > /dev/null - fi - - backup_database - fi done +backup_database + # Stop the docker container engine. Otherwise we will have a broken docker storage systemctl stop docker.service || debug "Ignore stopping docker service error $?" @@ -810,7 +908,6 @@ if [[ -x ${DEVPATH}/${PLATFORM}/${PLATFORM_FWUTIL_AU_REBOOT_HANDLE} ]]; then fi fi - # Enable Watchdog Timer if [ -x ${WATCHDOG_UTIL} ]; then debug "Enabling Watchdog before ${REBOOT_TYPE}" diff --git a/scripts/generate_dump b/scripts/generate_dump index 06d163a45e..098f75e9f9 100755 --- a/scripts/generate_dump +++ b/scripts/generate_dump @@ -1120,7 +1120,6 @@ save_file() { find_files() { trap 'handle_error $? $LINENO' ERR local -r directory=$1 - $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" local -r find_command="find -L $directory -type f -newer ${REFERENCE_FILE}" echo $($find_command) @@ -1210,6 +1209,16 @@ collect_mellanox() { local timeout_cmd="timeout --foreground ${TIMEOUT_MIN}m" local sai_dump_folder="/tmp/saisdkdump" local sai_dump_filename="${sai_dump_folder}/sai_sdk_dump_$(date +"%m_%d_%Y_%I_%M_%p")" + local platform=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_platform())") + local platform_folder="/usr/share/sonic/device/${platform}" + local hwsku=$(python3 -c "from sonic_py_common import device_info; print(device_info.get_hwsku())") + local sku_folder="/usr/share/sonic/device/${platform}/${hwsku}" + local cmis_host_mgmt_files=( + "/tmp/nv-syncd-shared/sai.profile" + "${sku_folder}/pmon_daemon_control.json" + "${sku_folder}/media_settings.json" + "${sku_folder}/optics_si_settings.json" + ) if [[ "$( docker container inspect -f '{{.State.Running}}' syncd )" == "true" ]]; then if [[ x"$(sonic-db-cli APPL_DB EXISTS PORT_TABLE:PortInitDone)" == x"1" ]]; then @@ -1232,6 +1241,18 @@ collect_mellanox() { fi fi + # collect the sdk dump + local sdk_dbg_folder="/var/log/sdk_dbg" + for file in $(find $sdk_dbg_folder -name "sx_sdk_*") + do + if [[ $file != *.gz ]] + then + save_file $file sai_sdk_dump true + else + save_file $file sai_sdk_dump false + fi + done + # run 'hw-management-generate-dump.sh' script and save the result file HW_DUMP_FILE=/usr/bin/hw-management-generate-dump.sh if [ -f "$HW_DUMP_FILE" ]; then @@ -1252,6 +1273,21 @@ collect_mellanox() { fi save_cmd "get_component_versions.py" "component_versions" + + # Save CMIS-host-management related files + local cmis_host_mgmt_path="cmis-host-mgmt" + + for file in "${cmis_host_mgmt_files[@]}"; do + if [[ -f "${file}" ]]; then + ${CMD_PREFIX}save_file "${file}" "$cmis_host_mgmt_path" false true + fi + done + + if [[ ! 
-f "${sku_folder}/pmon_daemon_control.json" && -f "${platform_folder}/pmon_daemon_control.json" ]]; then + ${CMD_PREFIX}save_file "${platform_folder}/pmon_daemon_control.json" "$cmis_host_mgmt_path" false true + fi + + save_cmd "show interfaces autoneg status" "autoneg.status" } ############################################################################### @@ -1589,7 +1625,7 @@ collect_cisco_8000() { } ############################################################################## -# collect_innovium +# collect_marvell_teralynx # Globals: # None # Arguments: @@ -1597,7 +1633,7 @@ collect_cisco_8000() { # Retuens: # None ############################################################################## -collect_innovium() { +collect_marvell_teralynx() { save_cmd "ivmcmd 'show techsupport -i /innovium/show_techsupport_infile'" "show_techsupport_op_ifcs.log" save_cmd "ivmcmd 'show techsupport -i /innovium/show_techsupport_infile_iSAI'" "show_techsupport_op_iSAI.log" } @@ -1652,6 +1688,45 @@ collect_nvidia_bluefield() { fi } +############################################################################### +# Collect Pensando specific information +# Globals: +# MKDIR +# V +# NOOP +# RM +# Arguments: +# None +# Returns: +# None +############################################################################### +collect_pensando() { + trap 'handle_error $? $LINENO' ERR + platform=$(grep 'onie_platform=' /host/machine.conf | cut -d '=' -f 2) + pipeline=`cat /usr/share/sonic/device/${platform}/default_pipeline` + if [ ${pipeline} = "polaris" ]; then + dpu_container_name="polaris" + else + dpu_container_name="dpu" + fi + local dpu_dump_folder="/root/dpu_dump" + $MKDIR $V -p $dpu_dump_folder + if $NOOP; then + echo "docker exec ${dpu_container_name} /nic/tools/collect_techsupport.sh" + else + output=$(docker exec ${dpu_container_name} /nic/tools/collect_techsupport.sh 2>&1) + if echo "${output}" | grep -q "Techsupport collected at"; then + file_path=$(echo "${output}" | grep -oP '(?<=Techsupport collected at ).*') + file_name=$(basename "${file_path}") + copy_from_docker ${dpu_container_name} ${file_path} ${dpu_dump_folder} + save_file ${dpu_dump_folder}/${file_name} ${dpu_container_name}_techsupport false + else + echo "Failed to collect ${dpu_container_name} container techsupport..." + fi + fi + $RM $V -rf $dpu_dump_folder +} + ############################################################################### # Save log file # Globals: @@ -1914,6 +1989,8 @@ main() { ${CMD_PREFIX}renice +5 -p $$ >> /dev/null ${CMD_PREFIX}ionice -c 2 -n 5 -p $$ >> /dev/null + # Created file as a reference to compare modification time + $TOUCH --date="${SINCE_DATE}" "${REFERENCE_FILE}" $MKDIR $V -p $TARDIR # Start with this script so its obvious what code is responsible @@ -2008,6 +2085,12 @@ main() { fi wait + save_cmd "stpctl all" "stp.log" + save_cmd "show spanning_tree" "stp.show" + save_cmd "show spanning_tree statistics" "stp.stats" + save_cmd "show spanning_tree bpdu_guard" "stp.bg" + save_cmd "show spanning_tree root_guard" "stp.rg" + save_cmd "ps aux" "ps.aux" & save_cmd "top -b -n 1" "top" & save_cmd "free" "free" & @@ -2076,14 +2159,18 @@ main() { collect_nvidia_bluefield fi - if [ "$asic" = "innovium" ]; then - collect_innovium + if [ "$asic" = "marvell-teralynx" ]; then + collect_marvell_teralynx fi if [ "$asic" = "marvell" ]; then collect_marvell fi + if [ "$asic" = "pensando" ]; then + collect_pensando + fi + # 2nd counter snapshot late. Need 2 snapshots to make sense of counters trend. 
save_counter_snapshot $asic 2 @@ -2142,6 +2229,9 @@ finalize() { else echo "WARNING: gzip operation appears to have failed." >&2 fi + # sometimes gzip takes more than 20 sec to finish, causing file time create validation + # to fail. touching the tarfile created to refresh modify time. + touch ${TARFILE} fi # Invoke the TechSupport Cleanup Hook @@ -2155,7 +2245,7 @@ finalize() { ############################################################################### -# Remove secret from pipeline inout and output result to pipeline. +# Remove secret from pipeline input and output result to pipeline. # Globals: # None # Arguments: @@ -2168,6 +2258,18 @@ remove_secret_from_config_db_dump() { sed -E 's/\"passkey\"\s*:\s*\"([^\"]*)\"/\"passkey\":\"****\"/g; /SNMP_COMMUNITY/,/\s{2,4}\},/d' } + +############################################################################### +# Remove secret from file. +############################################################################### +remove_secret_from_config_db_dump_file() { + local dumpfile=$1 + if [ -e ${dumpfile} ]; then + cat $dumpfile | remove_secret_from_config_db_dump > $dumpfile.temp + mv $dumpfile.temp $dumpfile + fi +} + ############################################################################### # Remove secret from dump files. # Globals: @@ -2201,8 +2303,24 @@ remove_secret_from_etc_files() { sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $dumppath/etc/sonic/snmp.yml # Remove secret from /etc/sonic/config_db.json - cat $dumppath/etc/sonic/config_db.json | remove_secret_from_config_db_dump > $dumppath/etc/sonic/config_db.json.temp - mv $dumppath/etc/sonic/config_db.json.temp $dumppath/etc/sonic/config_db.json + remove_secret_from_config_db_dump_file $dumppath/etc/sonic/config_db.json + + # Remove secret from /etc/sonic/golden_config_db.json + remove_secret_from_config_db_dump_file $dumppath/etc/sonic/golden_config_db.json + + # Remove secret from /etc/sonic/old_config/ + + # Remove snmp community string from old_config/snmp.yml + local oldsnmp=${dumppath}/etc/sonic/old_config/snmp.yml + if [ -e ${oldsnmp} ]; then + sed -i -E 's/(\s*snmp_\S*community\s*:\s*)(\S*)/\1****/g' $oldsnmp + fi + + # Remove secret from /etc/sonic/config_db.json + remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/config_db.json + + # Remove secret from /etc/sonic/golden_config_db.json + remove_secret_from_config_db_dump_file ${dumppath}/etc/sonic/old_config/golden_config_db.json } ############################################################################### diff --git a/scripts/lldpshow b/scripts/lldpshow index e09176cf3c..fe40296f91 100755 --- a/scripts/lldpshow +++ b/scripts/lldpshow @@ -26,8 +26,9 @@ import sys from lxml import etree as ET from sonic_py_common import device_info +from utilities_common import constants from swsscommon.swsscommon import ConfigDBConnector -from utilities_common.general import load_db_config +from utilities_common.general import load_db_config, get_feature_state_data from tabulate import tabulate BACKEND_ASIC_INTERFACE_NAME_PREFIX = 'Ethernet-BP' @@ -69,8 +70,12 @@ class Lldpshow(object): self.lldp_interface[instance_num] += key + SPACE_TOKEN # LLDP running in host namespace - self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE) - self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE) + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=constants.DEFAULT_NAMESPACE) + config_db.connect() + global_scope, asic_scope = get_feature_state_data(config_db, "lldp") + if 
global_scope == "True": + self.lldp_instance.append(LLDP_INSTANCE_IN_HOST_NAMESPACE) + self.lldp_interface.append(LLDP_INTERFACE_LIST_IN_HOST_NAMESPACE) def get_info(self, lldp_detail_info, lldp_port): """ @@ -85,7 +90,7 @@ class Lldpshow(object): elif lldp_interface_list == '': lldp_args = [] else: - lldp_args = [lldp_interface_list] + lldp_args = lldp_interface_list.split(' ') lldp_cmd = ['sudo', 'docker', 'exec', '-i', 'lldp{}'.format(self.lldp_instance[lldp_instace_num]), 'lldpctl'] + lldp_args p = subprocess.Popen(lldp_cmd, stdout=subprocess.PIPE, text=True) (output, err) = p.communicate() diff --git a/scripts/mmuconfig b/scripts/mmuconfig index ebeb74fdaf..3986f3ba1b 100755 --- a/scripts/mmuconfig +++ b/scripts/mmuconfig @@ -18,17 +18,23 @@ optional arguments: import os import sys -import argparse +import click import tabulate import traceback import json +from utilities_common.general import load_db_config +from sonic_py_common import multi_asic +from utilities_common import multi_asic as multi_asic_util BUFFER_POOL_TABLE_NAME = "BUFFER_POOL" BUFFER_PROFILE_TABLE_NAME = "BUFFER_PROFILE" DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME = "DEFAULT_LOSSLESS_BUFFER_PARAMETER" DYNAMIC_THRESHOLD = "dynamic_th" +DYNAMIC_THRESHOLD_MIN = -8 +DYNAMIC_THRESHOLD_MAX = 8 STATIC_THRESHOLD = "static_th" +STATIC_THRESHOLD_MIN = 0 BUFFER_PROFILE_FIELDS = { "alpha": DYNAMIC_THRESHOLD, "staticth" : STATIC_THRESHOLD @@ -42,6 +48,11 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() + else: + mock_tables.dbconnector.load_database_config() except KeyError: pass @@ -49,22 +60,21 @@ except KeyError: from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector class MmuConfig(object): - def __init__(self, verbose, config, filename): + def __init__(self, verbose, config, filename, namespace): self.verbose = verbose self.config = config self.filename = filename + self.namespace = namespace + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.config_db = None + self.db = None - # Set up db connections - if self.config: - self.db = ConfigDBConnector() - self.db.connect() - else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.STATE_DB, False) + # For unit testing + self.updated_profile_table = {} def get_table(self, tablename): if self.config: - return self.db.get_table(tablename) + return self.config_db.get_table(tablename) entries = {} keys = self.db.keys(self.db.STATE_DB, tablename + '*') @@ -77,13 +87,15 @@ class MmuConfig(object): return entries + @multi_asic_util.run_on_multi_asic def list(self): + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' lossless_traffic_pattern = self.get_table(DEFAULT_LOSSLESS_BUFFER_PARAMETER_NAME) if lossless_traffic_pattern: for _, pattern in lossless_traffic_pattern.items(): config = [] - print("Lossless traffic pattern:") + print(f"Lossless traffic pattern{namespace_str}:") for field, value in pattern.items(): config.append([field, value]) print(tabulate.tabulate(config) + "\n") @@ -93,97 +105,88 @@ class MmuConfig(object): for pool_name, pool_data in buf_pools.items(): config = [] - print("Pool: " + pool_name) + print(f"Pool{namespace_str}: " + pool_name) for field, value in pool_data.items(): config.append([field, value]) 
print(tabulate.tabulate(config) + "\n") if self.verbose: print("Total pools: %d\n\n" % len(buf_pools)) else: - print("No buffer pool information available") + print(f"No buffer pool information available{namespace_str}") buf_profs = self.get_table(BUFFER_PROFILE_TABLE_NAME) if buf_profs: for prof_name, prof_data in buf_profs.items(): config = [] - print("Profile: " + prof_name) + print(f"Profile{namespace_str}: " + prof_name) for field, value in prof_data.items(): config.append([field, value]) print(tabulate.tabulate(config) + "\n") if self.verbose: print("Total profiles: %d" % len(buf_profs)) else: - print("No buffer profile information available") + print(f"No buffer profile information available{namespace_str}") + @multi_asic_util.run_on_multi_asic def set(self, profile, field_alias, value): + namespace_str = f" for namespace {self.multi_asic.current_namespace}" if multi_asic.is_multi_asic() else '' if os.geteuid() != 0 and os.environ.get("UTILITIES_UNIT_TESTING", "0") != "2": sys.exit("Root privileges required for this operation") field = BUFFER_PROFILE_FIELDS[field_alias] - buf_profs = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) - v = int(value) + buf_profs = self.config_db.get_table(BUFFER_PROFILE_TABLE_NAME) if field == DYNAMIC_THRESHOLD: - if v < -8 or v > 8: - sys.exit("Invalid alpha value: 2^(%s)" % (value)) - if profile in buf_profs and DYNAMIC_THRESHOLD not in buf_profs[profile]: sys.exit("%s not using dynamic thresholding" % (profile)) elif field == STATIC_THRESHOLD: - if v < 0: - sys.exit("Invalid static threshold value: (%s)" % (value)) - if profile in buf_profs and STATIC_THRESHOLD not in buf_profs[profile]: sys.exit("%s not using static threshold" % (profile)) else: sys.exit("Set field %s not supported" % (field)) if self.verbose: - print("Setting %s %s value to %s" % (profile, field, value)) - self.db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) + print("Setting %s %s value to %s%s" % (profile, field, value, namespace_str)) + self.config_db.mod_entry(BUFFER_PROFILE_TABLE_NAME, profile, {field: value}) if self.filename is not None: - prof_table = self.db.get_table(BUFFER_PROFILE_TABLE_NAME) + self.updated_profile_table[self.multi_asic.current_namespace] = self.config_db.get_table(BUFFER_PROFILE_TABLE_NAME) with open(self.filename, "w") as fd: - json.dump(prof_table, fd) - - -def main(config): - if config: - parser = argparse.ArgumentParser(description='Show and change: mmu configuration', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show mmu configuration') - parser.add_argument('-p', '--profile', type=str, help='specify buffer profile name', default=None) - parser.add_argument('-a', '--alpha', type=str, help='set n for dyanmic threshold alpha 2^(n)', default=None) - parser.add_argument('-s', '--staticth', type=str, help='set static threshold', default=None) - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - else: - parser = argparse.ArgumentParser(description='Show buffer state', - formatter_class=argparse.RawTextHelpFormatter) - - parser.add_argument('-l', '--list', action='store_true', help='show buffer state') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - - parser.add_argument('-vv', '--verbose', action='store_true', help='verbose output', default=False) - parser.add_argument('-f', '--filename', help='file used by mock tests', type=str, default=None) - + json.dump(self.updated_profile_table, fd) + 
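+# CLI entry point shared by mmuconfig and buffershow. click validates option
+# values up front: IntRange bounds -a/--alpha to [DYNAMIC_THRESHOLD_MIN,
+# DYNAMIC_THRESHOLD_MAX] and -s/--staticth to at least STATIC_THRESHOLD_MIN,
+# which is why set() above no longer range-checks its inputs.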
+@click.command(help='Show and change: mmu configuration')
+@click.option('-l', '--list', 'show_config', is_flag=True, help='show mmu configuration')
+@click.option('-p', '--profile', type=str, help='specify buffer profile name', default=None)
+@click.option('-a', '--alpha', type=click.IntRange(DYNAMIC_THRESHOLD_MIN, DYNAMIC_THRESHOLD_MAX), help='set n for dynamic threshold alpha 2^(n)', default=None)
+@click.option('-s', '--staticth', type=click.IntRange(min=STATIC_THRESHOLD_MIN), help='set static threshold', default=None)
+@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None)
+@click.option('-vv', '--verbose', is_flag=True, help='verbose output', default=False)
+@click.version_option(version='1.0')
+def main(show_config, profile, alpha, staticth, namespace, verbose):
+    # A test file created for unit test purposes
+    filename = None
     if os.environ.get("UTILITIES_UNIT_TESTING", "0") == "2":
-        sys.argv.extend(['-f', '/tmp/mmuconfig'])
+        filename = '/tmp/mmuconfig'
 
-
-    args = parser.parse_args()
+    # Buffershow and mmuconfig cmds share this script
+    # Buffershow cmd cannot modify configs, hence config is set to False
+    config = True if sys.argv[0].split('/')[-1] == "mmuconfig" else False
 
     try:
-        mmu_cfg = MmuConfig(args.verbose, config, args.filename)
-        if args.list:
+        load_db_config()
+        mmu_cfg = MmuConfig(verbose, config, filename, namespace)
+
+        # Both mmuconfig and buffershow have access to show_config option
+        if show_config:
             mmu_cfg.list()
-        elif config and args.profile:
-            if args.alpha:
-                mmu_cfg.set(args.profile, "alpha", args.alpha)
-            elif args.staticth:
-                mmu_cfg.set(args.profile, "staticth", args.staticth)
+        # Buffershow cannot modify profiles
+        elif config and profile:
+            if alpha:
+                mmu_cfg.set(profile, "alpha", alpha)
+            elif staticth:
+                mmu_cfg.set(profile, "staticth", staticth)
         else:
-            parser.print_help()
+            ctx = click.get_current_context()
+            click.echo(ctx.get_help())
             sys.exit(1)
 
     except Exception as e:
@@ -192,7 +195,4 @@ def main(config):
         sys.exit(1)
 
 if __name__ == "__main__":
-    if sys.argv[0].split('/')[-1] == "mmuconfig":
-        main(True)
-    else:
-        main(False)
+    main()
diff --git a/scripts/pg-drop b/scripts/pg-drop
index 7741593081..9078d28ad6 100755
--- a/scripts/pg-drop
+++ b/scripts/pg-drop
@@ -5,6 +5,7 @@
 # pg-drop is a tool for show/clear ingress pg dropped packet stats.
# ##################################################################### +from importlib import reload import json import argparse import os @@ -13,6 +14,8 @@ from collections import OrderedDict from natsort import natsorted from tabulate import tabulate +from utilities_common.general import load_db_config +from sonic_py_common import multi_asic # mock the redis for unit test purposes # try: @@ -22,7 +25,9 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector - + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import mock_tables.mock_multi_asic + mock_tables.dbconnector.load_namespace_config() except KeyError: pass @@ -43,13 +48,11 @@ def get_dropstat_dir(): class PgDropStat(object): - def __init__(self): - self.counters_db = SonicV2Connector(host='127.0.0.1') - self.counters_db.connect(self.counters_db.COUNTERS_DB) - - self.configdb = ConfigDBConnector() + def __init__(self, namespace): + self.namespace = namespace + self.ns_list = multi_asic.get_namespace_list(namespace) + self.configdb = ConfigDBConnector(namespace=namespace) self.configdb.connect() - dropstat_dir = get_dropstat_dir() self.port_drop_stats_file = os.path.join(dropstat_dir, 'pg_drop_stats') @@ -57,14 +60,14 @@ class PgDropStat(object): """ Get port ID using object ID """ - port_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, oid) + port_id = self.get_counters_mapdata(COUNTERS_PG_PORT_MAP, oid) if not port_id: print("Port is not available for oid '{}'".format(oid)) sys.exit(1) return port_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.get_counters_mapall(COUNTERS_PORT_NAME_MAP) if not self.counter_port_name_map: print("COUNTERS_PORT_NAME_MAP is empty!") sys.exit(1) @@ -77,7 +80,7 @@ class PgDropStat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.get_counters_mapall(COUNTERS_PG_NAME_MAP) if not counter_pg_name_map: print("COUNTERS_PG_NAME_MAP is empty!") sys.exit(1) @@ -94,13 +97,32 @@ class PgDropStat(object): "header_prefix": "PG"}, } + def get_counters_mapdata(self, tablemap, index): + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + data = counters_db.get(counters_db.COUNTERS_DB, tablemap, index) + if data: + return data + return None + + def get_counters_mapall(self, tablemap): + mapdata = {} + for ns in self.ns_list: + counters_db = SonicV2Connector(namespace=ns) + counters_db.connect(counters_db.COUNTERS_DB) + map_result = counters_db.get_all(counters_db.COUNTERS_DB, tablemap) + if map_result: + mapdata.update(map_result) + return mapdata + def get_pg_index(self, oid): """ return PG index (0-7) oid - object ID for entry in redis """ - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, oid) + pg_index = self.get_counters_mapdata(COUNTERS_PG_INDEX_MAP, oid) if not pg_index: print("Priority group index is not available for oid '{}'".format(oid)) sys.exit(1) @@ -154,7 +176,7 @@ class PgDropStat(object): old_collected_data = port_drop_ckpt.get(name,{})[full_table_id] if len(port_drop_ckpt) > 0 else 0 idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, 
full_table_id, counter_name)
+            counter_data = self.get_counters_mapdata(full_table_id, counter_name)
             if counter_data is None:
                 fields[pos] = STATUS_NA
             elif fields[pos] != STATUS_NA:
@@ -180,18 +202,18 @@ class PgDropStat(object):
         print(tabulate(table, self.header_list, tablefmt='simple', stralign='right'))
 
     def get_counts(self, counters, oid):
-        """
-        Get the PG drop counts for an individual counter.
-        """
-        counts = {}
-        table_id = COUNTER_TABLE_PREFIX + oid
-        for counter in counters:
-            counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, table_id, counter)
-            if counter_data is None:
-                counts[table_id] = 0
-            else:
-                counts[table_id] = int(counter_data)
-        return counts
+        """
+        Get the PG drop counts for an individual counter.
+        """
+        counts = {}
+        table_id = COUNTER_TABLE_PREFIX + oid
+        for counter in counters:
+            counter_data = self.get_counters_mapdata(table_id, counter)
+            if counter_data is None:
+                counts[table_id] = 0
+            else:
+                counts[table_id] = int(counter_data)
+        return counts
 
     def get_counts_table(self, counters, object_table):
         """
@@ -199,10 +221,10 @@ class PgDropStat(object):
         to its PG drop counts. Counts are contained in a dictionary that
         maps counter oid to its counts.
         """
-        counter_object_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, object_table)
+        counter_object_name_map = self.get_counters_mapall(object_table)
         current_stat_dict = OrderedDict()
 
-        if counter_object_name_map is None:
+        if not counter_object_name_map:
             return current_stat_dict
 
         for obj in natsorted(counter_object_name_map):
@@ -239,10 +261,12 @@ def main():
                                      epilog="""
Examples:
pg-drop -c show
+pg-drop -c show --namespace asic0
pg-drop -c clear
""")
 
     parser.add_argument('-c', '--command', type=str, help='Desired action to perform')
+    parser.add_argument('-n', '--namespace', type=str, help='Namespace name or skip for all', default=None)
 
     args = parser.parse_args()
     command = args.command
@@ -256,7 +280,16 @@ pg-drop -c clear
             print(e)
             sys.exit(e.errno)
 
-    pgdropstat = PgDropStat()
+    # Load database config files
+    load_db_config()
+    namespaces = multi_asic.get_namespace_list()
+    if args.namespace and args.namespace not in namespaces:
+        namespacelist = ', '.join(namespaces)
+        print(f"Invalid value for '--namespace' / '-n'. 
Choose from one of ({namespacelist})") + sys.exit(1) + + # For 'clear' command force applying to all namespaces + pgdropstat = PgDropStat(args.namespace if command != 'clear' else None) if command == 'clear': pgdropstat.clear_drop_counts() diff --git a/scripts/portstat b/scripts/portstat index 6294ba57a9..58cc9aefd6 100755 --- a/scripts/portstat +++ b/scripts/portstat @@ -8,16 +8,10 @@ import json import argparse -import datetime import os.path import sys import time -from collections import OrderedDict, namedtuple - -from natsort import natsorted -from tabulate import tabulate -from sonic_py_common import multi_asic -from sonic_py_common import device_info +from collections import OrderedDict # mock the redis for unit test purposes # try: @@ -27,6 +21,13 @@ try: sys.path.insert(0, modules_path) sys.path.insert(0, tests_path) import mock_tables.dbconnector + + if os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] == "1": + import mock + import sonic_py_common + from swsscommon.swsscommon import SonicV2Connector + sonic_py_common.device_info.is_supervisor = mock.MagicMock(return_value=True) + SonicV2Connector.delete_all_by_pattern = mock.MagicMock() if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": import mock_tables.mock_multi_asic mock_tables.dbconnector.load_namespace_config() @@ -34,530 +35,11 @@ try: except KeyError: pass -from swsscommon.swsscommon import CounterTable, PortCounter from utilities_common import constants from utilities_common.intf_filter import parse_interface_in_filter -import utilities_common.multi_asic as multi_asic_util -from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, format_util, format_number_with_comma from utilities_common.cli import json_serial, UserCache - -""" -The order and count of statistics mentioned below needs to be in sync with the values in portstat script -So, any fields added/deleted in here should be reflected in portstat script also -""" -NStats = namedtuple("NStats", "rx_ok, rx_err, rx_drop, rx_ovr, tx_ok,\ - tx_err, tx_drop, tx_ovr, rx_byt, tx_byt,\ - rx_64, rx_65_127, rx_128_255, rx_256_511, rx_512_1023, rx_1024_1518, rx_1519_2047, rx_2048_4095, rx_4096_9216, rx_9217_16383,\ - rx_uca, rx_mca, rx_bca, rx_all,\ - tx_64, tx_65_127, tx_128_255, tx_256_511, tx_512_1023, tx_1024_1518, tx_1519_2047, tx_2048_4095, tx_4096_9216, tx_9217_16383,\ - tx_uca, tx_mca, tx_bca, tx_all,\ - rx_jbr, rx_frag, rx_usize, rx_ovrrun,\ - fec_corr, fec_uncorr, fec_symbol_err") -header_all = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', - 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_std = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', - 'TX_OK', 'TX_BPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_errors_only = ['IFACE', 'STATE', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_ERR', 'TX_DRP', 'TX_OVR'] -header_fec_only = ['IFACE', 'STATE', 'FEC_CORR', 'FEC_UNCORR', 'FEC_SYMBOL_ERR'] -header_rates_only = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] - -rates_key_list = [ 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_BPS', 'TX_PPS', 'TX_UTIL' ] -ratestat_fields = ("rx_bps", "rx_pps", "rx_util", "tx_bps", "tx_pps", "tx_util") -RateStats = namedtuple("RateStats", ratestat_fields) - -""" -The order and count of statistics mentioned below needs to be in sync with the values in portstat script -So, any fields added/deleted in here should be reflected in portstat script also -""" -BUCKET_NUM 
= 45 -counter_bucket_dict = { - 0:['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS'], - 1:['SAI_PORT_STAT_IF_IN_ERRORS'], - 2:['SAI_PORT_STAT_IF_IN_DISCARDS'], - 3:['SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS'], - 4:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS'], - 5:['SAI_PORT_STAT_IF_OUT_ERRORS'], - 6:['SAI_PORT_STAT_IF_OUT_DISCARDS'], - 7:['SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS'], - 8:['SAI_PORT_STAT_IF_IN_OCTETS'], - 9:['SAI_PORT_STAT_IF_OUT_OCTETS'], - 10:['SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS'], - 11:['SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS'], - 12:['SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS'], - 13:['SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS'], - 14:['SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS'], - 15:['SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS'], - 16:['SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS'], - 17:['SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS'], - 18:['SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS'], - 19:['SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS'], - 20:['SAI_PORT_STAT_IF_IN_UCAST_PKTS'], - 21:['SAI_PORT_STAT_IF_IN_MULTICAST_PKTS'], - 22:['SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], - 23:['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_MULTICAST_PKTS', 'SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], - 24:['SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS'], - 25:['SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS'], - 26:['SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS'], - 27:['SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS'], - 28:['SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS'], - 29:['SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS'], - 30:['SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS'], - 31:['SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS'], - 32:['SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS'], - 33:['SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS'], - 34:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS'], - 35:['SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS'], - 36:['SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], - 37:['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS', 'SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], - 38:['SAI_PORT_STAT_ETHER_STATS_JABBERS'], - 39:['SAI_PORT_STAT_ETHER_STATS_FRAGMENTS'], - 40:['SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS'], - 41:['SAI_PORT_STAT_IP_IN_RECEIVES'], - 42:['SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES'], - 43:['SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES'], - 44:['SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS'] -} - -STATUS_NA = 'N/A' - -RATES_TABLE_PREFIX = "RATES:" - -COUNTER_TABLE_PREFIX = "COUNTERS:" -COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" - -PORT_STATUS_TABLE_PREFIX = "PORT_TABLE:" -PORT_STATE_TABLE_PREFIX = "PORT_TABLE|" -PORT_OPER_STATUS_FIELD = "oper_status" -PORT_ADMIN_STATUS_FIELD = "admin_status" -PORT_STATUS_VALUE_UP = 'UP' -PORT_STATUS_VALUE_DOWN = 'DOWN' -PORT_SPEED_FIELD = "speed" - -PORT_STATE_UP = 'U' -PORT_STATE_DOWN = 'D' -PORT_STATE_DISABLED = 'X' - - -class Portstat(object): - def __init__(self, namespace, display_option): - self.db = None - self.multi_asic = multi_asic_util.MultiAsic(display_option, namespace) - - def get_cnstat_dict(self): - self.cnstat_dict = OrderedDict() - self.cnstat_dict['time'] = datetime.datetime.now() - self.ratestat_dict = OrderedDict() - self.collect_stat() - return self.cnstat_dict, self.ratestat_dict - - @multi_asic_util.run_on_multi_asic - def collect_stat(self): - """ - Collect the statisitics from all the asics present on the - device and store in a dict - """ - - cnstat_dict, ratestat_dict = 
self.get_cnstat() - self.cnstat_dict.update(cnstat_dict) - self.ratestat_dict.update(ratestat_dict) - - def get_cnstat(self): - """ - Get the counters info from database. - """ - def get_counters(port): - """ - Get the counters from specific table. - """ - fields = ["0"]*BUCKET_NUM - - _, fvs = counter_table.get(PortCounter(), port) - fvs = dict(fvs) - for pos, cntr_list in counter_bucket_dict.items(): - for counter_name in cntr_list: - if counter_name not in fvs: - fields[pos] = STATUS_NA - elif fields[pos] != STATUS_NA: - fields[pos] = str(int(fields[pos]) + int(fvs[counter_name])) - - cntr = NStats._make(fields)._asdict() - return cntr - - def get_rates(table_id): - """ - Get the rates from specific table. - """ - fields = ["0","0","0","0","0","0"] - for pos, name in enumerate(rates_key_list): - full_table_id = RATES_TABLE_PREFIX + table_id - counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name) - if counter_data is None: - fields[pos] = STATUS_NA - elif fields[pos] != STATUS_NA: - fields[pos] = float(counter_data) - cntr = RateStats._make(fields) - return cntr - - # Get the info from database - counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP); - # Build a dictionary of the stats - cnstat_dict = OrderedDict() - cnstat_dict['time'] = datetime.datetime.now() - ratestat_dict = OrderedDict() - counter_table = CounterTable(self.db.get_redis_client(self.db.COUNTERS_DB)) - if counter_port_name_map is None: - return cnstat_dict, ratestat_dict - for port in natsorted(counter_port_name_map): - port_name = port.split(":")[0] - if self.multi_asic.skip_display(constants.PORT_OBJ, port_name): - continue - cnstat_dict[port] = get_counters(port) - ratestat_dict[port] = get_rates(counter_port_name_map[port]) - return cnstat_dict, ratestat_dict - - def get_port_speed(self, port_name): - """ - Get the port speed - """ - # Get speed from APPL_DB - state_db_table_id = PORT_STATE_TABLE_PREFIX + port_name - app_db_table_id = PORT_STATUS_TABLE_PREFIX + port_name - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = multi_asic.connect_to_all_dbs_for_ns(ns) - speed = self.db.get(self.db.STATE_DB, state_db_table_id, PORT_SPEED_FIELD) - oper_status = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_OPER_STATUS_FIELD) - if speed is None or speed == STATUS_NA or oper_status != "up": - speed = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_SPEED_FIELD) - if speed is not None: - return int(speed) - return STATUS_NA - - def get_port_state(self, port_name): - """ - Get the port state - """ - full_table_id = PORT_STATUS_TABLE_PREFIX + port_name - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = multi_asic.connect_to_all_dbs_for_ns(ns) - admin_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_ADMIN_STATUS_FIELD) - oper_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_OPER_STATUS_FIELD) - - if admin_state is None or oper_state is None: - continue - if admin_state.upper() == PORT_STATUS_VALUE_DOWN: - return PORT_STATE_DISABLED - elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_UP: - return PORT_STATE_UP - elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_DOWN: - return PORT_STATE_DOWN - else: - return STATUS_NA - return STATUS_NA - - - def cnstat_print(self, cnstat_dict, ratestat_dict, intf_list, use_json, print_all, errors_only, fec_stats_only, rates_only, detail=False): - """ - Print the cnstat. 
- """ - - if intf_list and detail: - self.cnstat_intf_diff_print(cnstat_dict, {}, intf_list) - return None - - table = [] - header = None - - for key, data in cnstat_dict.items(): - if key == 'time': - continue - if intf_list and key not in intf_list: - continue - port_speed = self.get_port_speed(key) - rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) - if print_all: - header = header_all - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - elif errors_only: - header = header_errors_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - elif fec_stats_only: - header = header_fec_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['fec_corr']), - format_number_with_comma(data['fec_uncorr']), - format_number_with_comma(data['fec_symbol_err']))) - elif rates_only: - header = header_rates_only - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed))) - else: - header = header_std - table.append((key, self.get_port_state(key), - format_number_with_comma(data['rx_ok']), - format_brate(rates.rx_bps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(data['rx_err']), - format_number_with_comma(data['rx_drop']), - format_number_with_comma(data['rx_ovr']), - format_number_with_comma(data['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(data['tx_err']), - format_number_with_comma(data['tx_drop']), - format_number_with_comma(data['tx_ovr']))) - if table: - if use_json: - print(table_as_json(table, header)) - else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) - if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: - print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") - - def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list): - """ - Print the difference between two cnstat results for interface. - """ - - for key, cntr in cnstat_new_dict.items(): - if key == 'time': - continue - - if key in cnstat_old_dict: - old_cntr = cnstat_old_dict.get(key) - else: - old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() - - if intf_list and key not in intf_list: - continue - - print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], old_cntr['rx_64']))) - print("Packets Received 65-127 Octets................. 
{}".format(ns_diff(cntr['rx_65_127'], old_cntr['rx_65_127']))) - print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], old_cntr['rx_128_255']))) - print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], old_cntr['rx_256_511']))) - print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr['rx_512_1023'], old_cntr['rx_512_1023']))) - print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], old_cntr['rx_1024_1518']))) - print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], old_cntr['rx_1519_2047']))) - print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], old_cntr['rx_2048_4095']))) - print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr['rx_4096_9216'], old_cntr['rx_4096_9216']))) - print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], old_cntr['rx_9217_16383']))) - - print("") - print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], old_cntr['rx_all']))) - print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], old_cntr['rx_uca']))) - print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], old_cntr['rx_mca']))) - print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], old_cntr['rx_bca']))) - - print("") - print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], old_cntr['rx_jbr']))) - print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], old_cntr['rx_frag']))) - print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], old_cntr['rx_usize']))) - print("Overruns Received.............................. {}".format(ns_diff(cntr['rx_ovrrun'], old_cntr['rx_ovrrun']))) - - print("") - print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], old_cntr['tx_64']))) - print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], old_cntr['tx_65_127']))) - print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr['tx_128_255'], old_cntr['tx_128_255']))) - print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], old_cntr['tx_256_511']))) - print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], old_cntr['tx_512_1023']))) - print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], old_cntr['tx_1024_1518']))) - print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], old_cntr['tx_1519_2047']))) - print("Packets Transmitted 2048-4095 Octets........... {}".format(ns_diff(cntr['tx_2048_4095'], old_cntr['tx_2048_4095']))) - print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], old_cntr['tx_4096_9216']))) - print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr['tx_9217_16383'], old_cntr['tx_9217_16383']))) - - print("") - print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], old_cntr['tx_all']))) - print("Unicast Packets Transmitted.................... 
{}".format(ns_diff(cntr['tx_uca'], old_cntr['tx_uca']))) - print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], old_cntr['tx_mca']))) - print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], old_cntr['tx_bca']))) - - print("Time Since Counters Last Cleared............... " + str(cnstat_old_dict.get('time'))) - - - def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, - ratestat_dict, intf_list, use_json, - print_all, errors_only, fec_stats_only, - rates_only, detail=False): - """ - Print the difference between two cnstat results. - """ - - if intf_list and detail: - self.cnstat_intf_diff_print(cnstat_new_dict, cnstat_old_dict, intf_list) - return None - - table = [] - header = None - - for key, cntr in cnstat_new_dict.items(): - if key == 'time': - continue - old_cntr = None - if key in cnstat_old_dict: - old_cntr = cnstat_old_dict.get(key) - - rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(ratestat_fields))) - - if intf_list and key not in intf_list: - continue - port_speed = self.get_port_speed(key) - - if print_all: - header = header_all - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - elif errors_only: - header = header_errors_only - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - elif fec_stats_only: - header = header_fec_only - if old_cntr is not None: - table.append((key, self.get_port_state(key), - ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), - ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), - ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) - else: - table.append((key, self.get_port_state(key), - format_number_with_comma(cntr['fec_corr']), - 
format_number_with_comma(cntr['fec_uncorr']), - format_number_with_comma(cntr['fec_symbol_err']))) - - elif rates_only: - header = header_rates_only - if old_cntr is not None: - table.append((key, - self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed))) - else: - table.append((key, - self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_prate(rates.rx_pps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_prate(rates.tx_pps), - format_util(rates.tx_bps, port_speed))) - else: - header = header_std - if old_cntr is not None: - table.append((key, - self.get_port_state(key), - ns_diff(cntr['rx_ok'], old_cntr['rx_ok']), - format_brate(rates.rx_bps), - format_util(rates.rx_bps, port_speed), - ns_diff(cntr['rx_err'], old_cntr['rx_err']), - ns_diff(cntr['rx_drop'], old_cntr['rx_drop']), - ns_diff(cntr['rx_ovr'], old_cntr['rx_ovr']), - ns_diff(cntr['tx_ok'], old_cntr['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - ns_diff(cntr['tx_err'], old_cntr['tx_err']), - ns_diff(cntr['tx_drop'], old_cntr['tx_drop']), - ns_diff(cntr['tx_ovr'], old_cntr['tx_ovr']))) - else: - table.append((key, - self.get_port_state(key), - format_number_with_comma(cntr['rx_ok']), - format_brate(rates.rx_bps), - format_util(rates.rx_bps, port_speed), - format_number_with_comma(cntr['rx_err']), - format_number_with_comma(cntr['rx_drop']), - format_number_with_comma(cntr['rx_ovr']), - format_number_with_comma(cntr['tx_ok']), - format_brate(rates.tx_bps), - format_util(rates.tx_bps, port_speed), - format_number_with_comma(cntr['tx_err']), - format_number_with_comma(cntr['tx_drop']), - format_number_with_comma(cntr['tx_ovr']))) - if table: - if use_json: - print(table_as_json(table, header)) - else: - print(tabulate(table, header, tablefmt='simple', stralign='right')) - if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: - print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") +from utilities_common.portstat import Portstat def main(): parser = argparse.ArgumentParser(description='Display the ports state and counters', diff --git a/scripts/queuestat b/scripts/queuestat index 8f95554481..3774ede6d9 100755 --- a/scripts/queuestat +++ b/scripts/queuestat @@ -1,4 +1,4 @@ -#!/usr/bin/env python3 +#!/usr/bin/python3 ##################################################################### # @@ -7,7 +7,7 @@ ##################################################################### import json -import argparse +import click import datetime import os.path import sys @@ -38,8 +38,9 @@ from utilities_common import constants import utilities_common.multi_asic as multi_asic_util QueueStats = namedtuple("QueueStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes") +VoqStats = namedtuple("VoqStats", "queueindex, queuetype, totalpacket, totalbytes, droppacket, dropbytes, creditWDpkts") header = ['Port', 'TxQ', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] -voq_header = ['Port', 'Voq', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes'] +voq_header = ['Port', 'Voq', 'Counter/pkts', 'Counter/bytes', 'Drop/pkts', 'Drop/bytes', 
'Credit-WD-Del/pkts'] counter_bucket_dict = { 'SAI_QUEUE_STAT_PACKETS': 2, @@ -47,6 +48,9 @@ counter_bucket_dict = { 'SAI_QUEUE_STAT_DROPPED_PACKETS': 4, 'SAI_QUEUE_STAT_DROPPED_BYTES': 5, } +voq_counter_bucket_dict = { + 'SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS': 6 +} from utilities_common.cli import json_dump from utilities_common.netstat import ns_diff, STATUS_NA @@ -73,15 +77,24 @@ cnstat_dir = 'N/A' cnstat_fqn_file = 'N/A' -def build_json(port, cnstat): +def build_json(port, cnstat, voq=False): def ports_stats(k): p = {} - p[k[1]] = { - "totalpacket": k[2], - "totalbytes": k[3], - "droppacket": k[4], - "dropbytes": k[5] - } + if voq: + p[k[1]] = { + "totalpacket": k[2], + "totalbytes": k[3], + "droppacket": k[4], + "dropbytes": k[5], + "creditWDPkts": k[6] + } + else: + p[k[1]] = { + "totalpacket": k[2], + "totalbytes": k[3], + "droppacket": k[4], + "dropbytes": k[5] + } return p out = {} @@ -89,23 +102,40 @@ def build_json(port, cnstat): out.update(ports_stats(k)) return out +class QueuestatWrapper(object): + """A wrapper to execute queuestat cmd over the correct namespaces""" + def __init__(self, namespace, voq): + self.namespace = namespace + self.voq = voq -class Queuestat(object): - def __init__(self, namespace, voq=False): + # Initialize the multi-asic namespace + self.multi_asic = multi_asic_util.MultiAsic(constants.DISPLAY_ALL, namespace_option=namespace) self.db = None - self.multi_asic = multi_asic_util.MultiAsic(constants.DISPLAY_ALL, namespace) - if namespace is not None: - for ns in self.multi_asic.get_ns_list_based_on_options(): - self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + + @multi_asic_util.run_on_multi_asic + def run(self, save_fresh_stats, port_to_show_stats, json_opt, non_zero): + queuestat = Queuestat(self.multi_asic.current_namespace, self.db, self.voq) + if save_fresh_stats: + queuestat.save_fresh_stats() + return + + if port_to_show_stats != None: + queuestat.get_print_port_stat(port_to_show_stats, json_opt, non_zero) else: - self.db = SonicV2Connector(use_unix_socket_path=False) - self.db.connect(self.db.COUNTERS_DB) + queuestat.get_print_all_stat(json_opt, non_zero) + + +class Queuestat(object): + def __init__(self, namespace, db, voq=False): + self.db = db self.voq = voq + self.namespace = namespace + self.namespace_str = f" for {namespace}" if namespace else '' def get_queue_port(table_id): port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) if port_table_id is None: - print("Port is not available!", table_id) + print(f"Port is not available{self.namespace_str}!", table_id) sys.exit(1) return port_table_id @@ -117,7 +147,7 @@ class Queuestat(object): self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) if self.counter_port_name_map is None: - print("COUNTERS_PORT_NAME_MAP is empty!") + print(f"COUNTERS_PORT_NAME_MAP is empty{self.namespace_str}!") sys.exit(1) self.port_queues_map = {} @@ -135,7 +165,7 @@ class Queuestat(object): counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) if counter_queue_name_map is None: - print("COUNTERS_QUEUE_NAME_MAP is empty!") + print(f"COUNTERS_QUEUE_NAME_MAP is empty{self.namespace_str}!") sys.exit(1) for queue in counter_queue_name_map: @@ -153,7 +183,7 @@ class Queuestat(object): def get_queue_index(table_id): queue_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) if queue_index is None: - print("Queue index is not available!", table_id) + print(f"Queue index is not 
available{self.namespace_str}!", table_id) sys.exit(1) return queue_index @@ -161,7 +191,7 @@ class Queuestat(object): def get_queue_type(table_id): queue_type = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) if queue_type is None: - print("Queue Type is not available!", table_id) + print(f"Queue Type is not available{self.namespace_str}!", table_id) sys.exit(1) elif queue_type == SAI_QUEUE_TYPE_MULTICAST: return QUEUE_TYPE_MC @@ -172,21 +202,33 @@ class Queuestat(object): elif queue_type == SAI_QUEUE_TYPE_ALL: return QUEUE_TYPE_ALL else: - print("Queue Type is invalid:", table_id, queue_type) + print(f"Queue Type is invalid{self.namespace_str}:", table_id, queue_type) sys.exit(1) - fields = ["0","0","0","0","0","0"] + if self.voq: + fields = ["0","0","0","0","0","0","0"] + else: + fields = ["0","0","0","0","0","0"] fields[0] = get_queue_index(table_id) fields[1] = get_queue_type(table_id) - for counter_name, pos in counter_bucket_dict.items(): + counter_dict = {} + counter_dict.update(counter_bucket_dict) + if self.voq: + counter_dict.update(voq_counter_bucket_dict) + + for counter_name, pos in counter_dict.items(): full_table_id = COUNTER_TABLE_PREFIX + table_id counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, counter_name) if counter_data is None: fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: fields[pos] = str(int(counter_data)) - cntr = QueueStats._make(fields)._asdict() + + if self.voq: + cntr = VoqStats._make(fields)._asdict() + else: + cntr = QueueStats._make(fields)._asdict() return cntr # Build a dictionary of the stats @@ -211,18 +253,26 @@ class Queuestat(object): if json_opt: json_output[port][key] = data continue - if not non_zero or data['totalpacket'] != '0' or data['totalbytes'] != '0' or \ - data['droppacket'] != '0' or data['dropbytes'] != '0': - table.append((port, data['queuetype'] + str(data['queueindex']), - data['totalpacket'], data['totalbytes'], - data['droppacket'], data['dropbytes'])) + if self.voq: + if not non_zero or data['totalpacket'] != '0' or data['totalbytes'] != '0' or \ + data['droppacket'] != '0' or data['dropbytes'] != '0' or data['creditWDpkts'] != '0': + table.append((port, data['queuetype'] + str(data['queueindex']), + data['totalpacket'], data['totalbytes'], + data['droppacket'], data['dropbytes'], data['creditWDpkts'])) + else: + if not non_zero or data['totalpacket'] != '0' or data['totalbytes'] != '0' or \ + data['droppacket'] != '0' or data['dropbytes'] != '0': + table.append((port, data['queuetype'] + str(data['queueindex']), + data['totalpacket'], data['totalbytes'], + data['droppacket'], data['dropbytes'])) if json_opt: - json_output[port].update(build_json(port, table)) + json_output[port].update(build_json(port, table, self.voq)) return json_output else: hdr = voq_header if self.voq else header if table: + print(f"For namespace {self.namespace}:") print(tabulate(table, hdr, tablefmt='simple', stralign='right')) print() @@ -242,30 +292,47 @@ class Queuestat(object): old_cntr = None if key in cnstat_old_dict: old_cntr = cnstat_old_dict.get(key) - if old_cntr is not None: - if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ + if self.voq: + if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']) != '0' or \ + ns_diff(cntr['droppacket'], old_cntr['droppacket']) != '0' or \ + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']) != '0' or \ + ns_diff(cntr['creditWDpkts'], 
old_cntr['creditWDpkts']) != '0': + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), + ns_diff(cntr['droppacket'], old_cntr['droppacket']), + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']), + ns_diff(cntr['creditWDpkts'], old_cntr['creditWDpkts']))) + elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ + cntr['droppacket'] != '0' or cntr['dropbytes'] != '0' or cntr['creditWDpkts'] != '0': + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + cntr['totalpacket'], cntr['totalbytes'], + cntr['droppacket'], cntr['dropbytes'], cntr['creditWDpkts'])) + else: + if not non_zero or ns_diff(cntr['totalpacket'], old_cntr['totalpacket']) != '0' or \ ns_diff(cntr['totalbytes'], old_cntr['totalbytes']) != '0' or \ ns_diff(cntr['droppacket'], old_cntr['droppacket']) != '0' or \ ns_diff(cntr['dropbytes'], old_cntr['dropbytes']) != '0': - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), - ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), - ns_diff(cntr['droppacket'], old_cntr['droppacket']), - ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) - elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + ns_diff(cntr['totalpacket'], old_cntr['totalpacket']), + ns_diff(cntr['totalbytes'], old_cntr['totalbytes']), + ns_diff(cntr['droppacket'], old_cntr['droppacket']), + ns_diff(cntr['dropbytes'], old_cntr['dropbytes']))) + elif not non_zero or cntr['totalpacket'] != '0' or cntr['totalbytes'] != '0' or \ cntr['droppacket'] != '0' or cntr['dropbytes'] != '0': - table.append((port, cntr['queuetype'] + str(cntr['queueindex']), - cntr['totalpacket'], cntr['totalbytes'], - cntr['droppacket'], cntr['dropbytes'])) + table.append((port, cntr['queuetype'] + str(cntr['queueindex']), + cntr['totalpacket'], cntr['totalbytes'], + cntr['droppacket'], cntr['dropbytes'])) if json_opt: - json_output[port].update(build_json(port, table)) + json_output[port].update(build_json(port, table, self.voq)) return json_output else: hdr = voq_header if self.voq else header if table: - print(port + " Last cached time was " + str(cnstat_old_dict.get('time'))) + print(port + f" Last cached time{self.namespace_str} was " + str(cnstat_old_dict.get('time'))) print(tabulate(table, hdr, tablefmt='simple', stralign='right')) print() @@ -321,7 +388,7 @@ class Queuestat(object): json_output[port].update({"cached_time":cnstat_cached_dict.get('time')}) json_output.update(self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt, non_zero)) else: - print("Last cached time was " + str(cnstat_cached_dict.get('time'))) + print(f"Last cached time{self.namespace_str} was " + str(cnstat_cached_dict.get('time'))) self.cnstat_diff_print(port, cnstat_dict, cnstat_cached_dict, json_opt, non_zero) except IOError as e: print(e.errno, e) @@ -346,38 +413,33 @@ class Queuestat(object): else: print("Clear and update saved counters for " + port) -def main(): + +@click.command() +@click.option('-p', '--port', type=str, help='Show the queue counters for just one port', default=None) +@click.option('-c', '--clear', is_flag=True, default=False, help='Clear previous stats and save new ones') +@click.option('-d', '--delete', is_flag=True, default=False, help='Delete saved stats') +@click.option('-j', '--json_opt',
is_flag=True, default=False, help='Print in JSON format') +@click.option('-V', '--voq', is_flag=True, default=False, help='Display VOQ stats') +@click.option('-nz', '--non_zero', is_flag=True, default=False, help='Display non-zero queue counters') +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Display queue counters for a specific namespace name or skip for all', default=None) +@click.version_option(version='1.0') +def main(port, clear, delete, json_opt, voq, non_zero, namespace): + """ + Examples: + queuestat + queuestat -p Ethernet0 + queuestat -c + queuestat -d + queuestat -p Ethernet0 -n asic0 + """ + global cnstat_dir global cnstat_fqn_file - parser = argparse.ArgumentParser(description='Display the queue state and counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - queuestat - queuestat -p Ethernet0 - queuestat -c - queuestat -d -""") - - parser.add_argument('-p', '--port', type=str, help='Show the queue conters for just one port', default=None) - parser.add_argument('-c', '--clear', action='store_true', help='Clear previous stats and save new ones') - parser.add_argument('-d', '--delete', action='store_true', help='Delete saved stats') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - parser.add_argument('-j', '--json_opt', action='store_true', help='Print in JSON format') - parser.add_argument('-V', '--voq', action='store_true', help='display voq stats') - parser.add_argument('-n','--namespace', default=None, help='Display queue counters for specific namespace') - parser.add_argument('-nz','--non_zero', action='store_true', help='Display non-zero queue counters') - args = parser.parse_args() - - save_fresh_stats = args.clear - delete_stats = args.delete - voq = args.voq - json_opt = args.json_opt - namespace = args.namespace - non_zero = args.non_zero - - port_to_show_stats = args.port + save_fresh_stats = clear + delete_stats = delete + + port_to_show_stats = port cache = UserCache() @@ -387,16 +449,8 @@ Examples: if delete_stats: cache.remove() - queuestat = Queuestat( namespace, voq ) - - if save_fresh_stats: - queuestat.save_fresh_stats() - sys.exit(0) - - if port_to_show_stats!=None: - queuestat.get_print_port_stat(port_to_show_stats, json_opt, non_zero) - else: - queuestat.get_print_all_stat(json_opt, non_zero) + queuestat_wrapper = QueuestatWrapper(namespace, voq) + queuestat_wrapper.run(save_fresh_stats, port_to_show_stats, json_opt, non_zero) sys.exit(0) diff --git a/scripts/reboot b/scripts/reboot index b5b6a7a585..044334af3e 100755 --- a/scripts/reboot +++ b/scripts/reboot @@ -41,7 +41,6 @@ REBOOT_SCRIPT_NAME=$(basename $0) REBOOT_TYPE="${REBOOT_SCRIPT_NAME}" TAG_LATEST=no REBOOT_FLAGS="" -FORCE_REBOOT="no" function debug() { @@ -65,8 +64,8 @@ function stop_pmon_service() { CONTAINER_STOP_RC=0 debug "Stopping pmon docker" - docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? systemctl stop pmon || debug "Ignore stopping pmon error $?" + docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? if [[ CONTAINER_STOP_RC -ne 0 ]]; then debug "Failed killing container pmon RC $CONTAINER_STOP_RC ." fi @@ -179,7 +178,7 @@ function check_conflict_boot_in_fw_update() function parse_options() { - while getopts "h?v" opt; do + while getopts "h?vf" opt; do case ${opt} in h|\?
) show_help_and_exit @@ -192,12 +191,30 @@ function parse_options() ;; f ) REBOOT_FLAGS+=" -f" - FORCE_REBOOT="yes" ;; esac done } +function linecard_reboot_notify_supervisor() +{ + is_linecard=$(python3 -c 'from sonic_py_common import device_info; print("True") if device_info.is_chassis() == True and device_info.is_supervisor() == False else print("False")') + if [ $is_linecard == "True" ]; then + key=$(sonic-db-cli STATE_DB keys "CHASSIS_MODULE_TABLE|LINE-CARD*") + status=$? + if [ $status -eq 0 ]; then + module="${key#CHASSIS_MODULE_TABLE}" + if [ ! -z "$module" ]; then + sonic-db-cli CHASSIS_STATE_DB hset "CHASSIS_MODULE_REBOOT_INFO_TABLE${module}" "reboot" "expected" + status=$? + if [ $status -ne 0 ]; then + debug "Failed to notify Supervisor: Linecard reboot is expected" + fi + fi + fi + fi +} + parse_options $@ # Exit if not superuser @@ -216,6 +233,9 @@ reboot_pre_check # Tag remotely deployed images as local tag_images +# Linecard reboot notify supervisor +linecard_reboot_notify_supervisor + # Stop SONiC services gracefully. stop_sonic_services @@ -256,12 +276,9 @@ fi if [ -x ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} ]; then debug "Executing the pre-reboot script" ${DEVPATH}/${PLATFORM}/${PRE_REBOOT_HOOK} - EXIT_CODE=$? - if [[ ${EXIT_CODE} != ${EXIT_SUCCESS} ]]; then - if [[ "${FORCE_REBOOT}" != "yes" ]]; then - echo "Reboot is interrupted: use -f (force) to override" - exit ${EXIT_ERROR} - fi + EXIT_CODE="$?" + if [[ "${EXIT_CODE}" != "${EXIT_SUCCESS}" ]]; then + debug "WARNING: Failed to handle pre-reboot script: rc=${EXIT_CODE}" fi fi diff --git a/scripts/route_check.py b/scripts/route_check.py index ee417dc49c..56c845424c 100755 --- a/scripts/route_check.py +++ b/scripts/route_check.py @@ -46,6 +46,7 @@ import signal import traceback import subprocess +import concurrent.futures from ipaddress import ip_network from swsscommon import swsscommon @@ -338,10 +339,18 @@ def is_suppress_fib_pending_enabled(namespace): return state == 'enabled' -def get_frr_routes(namespace): +def fetch_routes(cmd): """ - Read routes from zebra through CLI command - :return frr routes dictionary + Fetch routes using the given command. + """ + output = subprocess.check_output(cmd, text=True) + return json.loads(output) + + +def get_frr_routes_parallel(namespace): + """ + Read routes from zebra through CLI command for IPv4 and IPv6 in parallel + :return combined IPv4 and IPv6 routes dictionary.
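+ Both address families are fetched concurrently, so one slow CLI query does not delay the other.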
""" if namespace == multi_asic.DEFAULT_NAMESPACE: v4_route_cmd = ['show', 'ip', 'route', 'json'] @@ -350,12 +359,18 @@ def get_frr_routes(namespace): v4_route_cmd = ['show', 'ip', 'route', '-n', namespace, 'json'] v6_route_cmd = ['show', 'ipv6', 'route', '-n', namespace, 'json'] - output = subprocess.check_output(v4_route_cmd, text=True) - routes = json.loads(output) - output = subprocess.check_output(v6_route_cmd, text=True) - routes.update(json.loads(output)) - print_message(syslog.LOG_DEBUG, "FRR Routes: namespace={}, routes={}".format(namespace, routes)) - return routes + with concurrent.futures.ThreadPoolExecutor() as executor: + future_v4 = executor.submit(fetch_routes, v4_route_cmd) + future_v6 = executor.submit(fetch_routes, v6_route_cmd) + + # Wait for both results to complete + v4_routes = future_v4.result() + v6_routes = future_v6.result() + + # Combine both IPv4 and IPv6 routes + v4_routes.update(v6_routes) + print_message(syslog.LOG_DEBUG, "FRR Routes: namespace={}, routes={}".format(namespace, v4_routes)) + return v4_routes def get_interfaces(namespace): @@ -556,7 +571,7 @@ def check_frr_pending_routes(namespace): retries = FRR_CHECK_RETRIES for i in range(retries): missed_rt = [] - frr_routes = get_frr_routes(namespace) + frr_routes = get_frr_routes_parallel(namespace) for _, entries in frr_routes.items(): for entry in entries: @@ -689,8 +704,9 @@ def _filter_out_neigh_route(routes, neighs): return rt_appl_miss, rt_asic_miss -def check_routes(namespace): +def check_routes_for_namespace(namespace): """ + Process a Single Namespace: The heart of this script which runs the checks. Read APPL-DB & ASIC-DB, the relevant tables for route checking. Checkout routes in ASIC-DB to match APPL-DB, discounting local & @@ -708,97 +724,113 @@ def check_routes(namespace): :return (0, None) on sucess, else (-1, results) where results holds the unjustifiable entries. 
""" - namespace_list = [] - if namespace is not multi_asic.DEFAULT_NAMESPACE and namespace in multi_asic.get_namespace_list(): - namespace_list.append(namespace) - else: - namespace_list = multi_asic.get_namespace_list() - print_message(syslog.LOG_INFO, "Checking routes for namespaces: ", namespace_list) results = {} - adds = {} - deletes = {} - for namespace in namespace_list: - intf_appl_miss = [] - rt_appl_miss = [] - rt_asic_miss = [] - rt_frr_miss = [] - adds[namespace] = [] - deletes[namespace] = [] + adds = [] + deletes = [] + intf_appl_miss = [] + rt_appl_miss = [] + rt_asic_miss = [] + rt_frr_miss = [] - selector, subs, rt_asic = get_asicdb_routes(namespace) + selector, subs, rt_asic = get_asicdb_routes(namespace) - rt_appl = get_appdb_routes(namespace) - intf_appl = get_interfaces(namespace) + rt_appl = get_appdb_routes(namespace) + intf_appl = get_interfaces(namespace) - # Diff APPL-DB routes & ASIC-DB routes - rt_appl_miss, rt_asic_miss = diff_sorted_lists(rt_appl, rt_asic) + # Diff APPL-DB routes & ASIC-DB routes + rt_appl_miss, rt_asic_miss = diff_sorted_lists(rt_appl, rt_asic) - # Check missed ASIC routes against APPL-DB INTF_TABLE - _, rt_asic_miss = diff_sorted_lists(intf_appl, rt_asic_miss) - rt_asic_miss = filter_out_default_routes(rt_asic_miss) - rt_asic_miss = filter_out_vnet_routes(namespace, rt_asic_miss) - rt_asic_miss = filter_out_standalone_tunnel_routes(namespace, rt_asic_miss) - rt_asic_miss = filter_out_soc_ip_routes(namespace, rt_asic_miss) + # Check missed ASIC routes against APPL-DB INTF_TABLE + _, rt_asic_miss = diff_sorted_lists(intf_appl, rt_asic_miss) + rt_asic_miss = filter_out_default_routes(rt_asic_miss) + rt_asic_miss = filter_out_vnet_routes(namespace, rt_asic_miss) + rt_asic_miss = filter_out_standalone_tunnel_routes(namespace, rt_asic_miss) + rt_asic_miss = filter_out_soc_ip_routes(namespace, rt_asic_miss) + # Check APPL-DB INTF_TABLE with ASIC table route entries + intf_appl_miss, _ = diff_sorted_lists(intf_appl, rt_asic) - # Check APPL-DB INTF_TABLE with ASIC table route entries - intf_appl_miss, _ = diff_sorted_lists(intf_appl, rt_asic) + if rt_appl_miss: + rt_appl_miss = filter_out_local_interfaces(namespace, rt_appl_miss) - if rt_appl_miss: - rt_appl_miss = filter_out_local_interfaces(namespace, rt_appl_miss) + if rt_appl_miss: + rt_appl_miss = filter_out_voq_neigh_routes(namespace, rt_appl_miss) - if rt_appl_miss: - rt_appl_miss = filter_out_voq_neigh_routes(namespace, rt_appl_miss) + # NOTE: On dualtor environment, ignore any route miss for the + # neighbors learned from the vlan subnet. + if rt_appl_miss or rt_asic_miss: + rt_appl_miss, rt_asic_miss = filter_out_vlan_neigh_route_miss(namespace, rt_appl_miss, rt_asic_miss) - # NOTE: On dualtor environment, ignore any route miss for the - # neighbors learned from the vlan subnet. 
- if rt_appl_miss or rt_asic_miss: - rt_appl_miss, rt_asic_miss = filter_out_vlan_neigh_route_miss(namespace, rt_appl_miss, rt_asic_miss) + if rt_appl_miss or rt_asic_miss: + # Look for subscribe updates for a second + adds, deletes = get_subscribe_updates(selector, subs) - if rt_appl_miss or rt_asic_miss: - # Look for subscribe updates for a second - adds[namespace], deletes[namespace] = get_subscribe_updates(selector, subs) + # Drop all those for which SET received + rt_appl_miss, _ = diff_sorted_lists(rt_appl_miss, adds) - # Drop all those for which SET received - rt_appl_miss, _ = diff_sorted_lists(rt_appl_miss, adds[namespace]) + # Drop all those for which DEL received + rt_asic_miss, _ = diff_sorted_lists(rt_asic_miss, deletes) - # Drop all those for which DEL received - rt_asic_miss, _ = diff_sorted_lists(rt_asic_miss, deletes[namespace]) + if rt_appl_miss: + results["missed_ROUTE_TABLE_routes"] = rt_appl_miss - if rt_appl_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_ROUTE_TABLE_routes"] = rt_appl_miss + if intf_appl_miss: + results["missed_INTF_TABLE_entries"] = intf_appl_miss - if intf_appl_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_INTF_TABLE_entries"] = intf_appl_miss + if rt_asic_miss: + results["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss - if rt_asic_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["Unaccounted_ROUTE_ENTRY_TABLE_entries"] = rt_asic_miss + rt_frr_miss = check_frr_pending_routes(namespace) - rt_frr_miss = check_frr_pending_routes(namespace) + if rt_frr_miss: + results["missed_FRR_routes"] = rt_frr_miss - if rt_frr_miss: - if namespace not in results: - results[namespace] = {} - results[namespace]["missed_FRR_routes"] = rt_frr_miss + if results: + if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: + print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} \ + but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) + if is_suppress_fib_pending_enabled(namespace): + mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) + + return results, adds, deletes - if results: - if rt_frr_miss and not rt_appl_miss and not rt_asic_miss: - print_message(syslog.LOG_ERR, "Some routes are not set offloaded in FRR{} but all routes in APPL_DB and ASIC_DB are in sync".format(namespace)) - if is_suppress_fib_pending_enabled(namespace): - mitigate_installed_not_offloaded_frr_routes(namespace, rt_frr_miss, rt_appl) + +def check_routes(namespace): + """ + Main function to parallelize route checks across all namespaces. 
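+ Each namespace is checked in its own worker thread via check_routes_for_namespace, and the per-namespace results are merged into a single report.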
+ """ + namespace_list = [] + if namespace is not multi_asic.DEFAULT_NAMESPACE and namespace in multi_asic.get_namespace_list(): + namespace_list.append(namespace) + else: + namespace_list = multi_asic.get_namespace_list() + print_message(syslog.LOG_INFO, "Checking routes for namespaces: ", namespace_list) + + results = {} + all_adds = {} + all_deletes = {} + + # Use ThreadPoolExecutor to parallelize the check for each namespace + with concurrent.futures.ThreadPoolExecutor() as executor: + futures = {executor.submit(check_routes_for_namespace, ns): ns for ns in namespace_list} + + for future in concurrent.futures.as_completed(futures): + ns = futures[future] + try: + result, adds, deletes = future.result() + if result: + results[ns] = result + all_adds[ns] = adds + all_deletes[ns] = deletes + except Exception as e: + print_message(syslog.LOG_ERR, "Error processing namespace {}: {}".format(ns, e)) if results: print_message(syslog.LOG_WARNING, "Failure results: {", json.dumps(results, indent=4), "}") print_message(syslog.LOG_WARNING, "Failed. Look at reported mismatches above") - print_message(syslog.LOG_WARNING, "add: ", json.dumps(adds, indent=4)) - print_message(syslog.LOG_WARNING, "del: ", json.dumps(deletes, indent=4)) + print_message(syslog.LOG_WARNING, "add: ", json.dumps(all_adds, indent=4)) + print_message(syslog.LOG_WARNING, "del: ", json.dumps(all_deletes, indent=4)) return -1, results else: print_message(syslog.LOG_INFO, "All good!") @@ -861,6 +893,5 @@ def main(): return ret, res - if __name__ == "__main__": sys.exit(main()[0]) diff --git a/scripts/soft-reboot b/scripts/soft-reboot index 957c6009eb..74d7051b1d 100755 --- a/scripts/soft-reboot +++ b/scripts/soft-reboot @@ -64,8 +64,8 @@ function stop_pmon_service() { CONTAINER_STOP_RC=0 debug "Stopping pmon docker" - docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? systemctl stop pmon || debug "Ignore stopping pmon error $?" + docker kill pmon &> /dev/null || CONTAINER_STOP_RC=$? if [[ CONTAINER_STOP_RC -ne 0 ]]; then debug "Failed killing container pmon RC $CONTAINER_STOP_RC ." fi @@ -93,7 +93,7 @@ function clear_lingering_reboot_config() if [[ -f ${WARM_DIR}/${REDIS_FILE} ]]; then mv -f ${WARM_DIR}/${REDIS_FILE} ${WARM_DIR}/${REDIS_FILE}.${TIMESTAMP} || /bin/true fi - /sbin/kexec -u || /bin/true + /sbin/kexec -u -a || /bin/true } SCRIPT=$0 @@ -147,9 +147,17 @@ function setup_reboot_variables() fi } +function invoke_kexec() { + /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" $@ +} + function load_kernel() { # Load kernel into the memory - /sbin/kexec -l "$KERNEL_IMAGE" --initrd="$INITRD" --append="$BOOT_OPTIONS" + invoke_kexec -a +} + +function load_kernel_secure() { + invoke_kexec -s } function reboot_pre_check() @@ -215,7 +223,14 @@ stop_sonic_services clear_lingering_reboot_config -load_kernel +# check if secure boot is enabled +CHECK_SECURE_UPGRADE_ENABLED=0 +SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") || CHECK_SECURE_UPGRADE_ENABLED=$? 
+if [[ CHECK_SECURE_UPGRADE_ENABLED -ne 0 ]]; then + load_kernel +else + load_kernel_secure +fi # Update the reboot cause file to reflect that user issued 'reboot' command # Upon next boot, the contents of this file will be used to determine the diff --git a/scripts/vnet_route_check.py b/scripts/vnet_route_check.py index d925427d40..c747bf7efb 100755 --- a/scripts/vnet_route_check.py +++ b/scripts/vnet_route_check.py @@ -74,7 +74,7 @@ def print_message(lvl, *args): def check_vnet_cfg(): ''' Returns True if VNET is configured in APP_DB or False if no VNET configuration. ''' - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', 0, True) vnet_db_keys = swsscommon.Table(db, 'VNET_TABLE').getKeys() @@ -85,7 +85,7 @@ def get_vnet_intfs(): ''' Returns dictionary of VNETs and related VNET interfaces. Format: { <vnet_name>: [ <vnet_intf_name> ] } ''' - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', 0, True) intfs_table = swsscommon.Table(db, 'INTF_TABLE') intfs_keys = swsscommon.Table(db, 'INTF_TABLE').getKeys() @@ -109,7 +109,7 @@ def get_all_rifs_oids(): ''' Returns dictionary of all router interfaces and their OIDs. Format: { <rif_name>: <rif_oid> } ''' - db = swsscommon.DBConnector('COUNTERS_DB', 0) + db = swsscommon.DBConnector('COUNTERS_DB', 0, True) rif_table = swsscommon.Table(db, 'COUNTERS_RIF_NAME_MAP') rif_name_oid_map = dict(rif_table.get('')[1]) @@ -140,7 +140,7 @@ def get_vrf_entries(): ''' Returns dictionary of VNET interfaces and corresponding VRF OIDs. Format: { <vnet_intf_name>: <vrf_oid> } ''' - db = swsscommon.DBConnector('ASIC_DB', 0) + db = swsscommon.DBConnector('ASIC_DB', 0, True) rif_table = swsscommon.Table(db, 'ASIC_STATE') vnet_rifs_oids = get_vnet_rifs_oids() @@ -162,7 +162,7 @@ def filter_out_vnet_ip2me_routes(vnet_routes): ''' Filters out IP2ME routes from the provided dictionary with VNET routes Format: { <vnet_name>: { 'routes': [ <pfx/pfx_len> ], 'vrf_oid': <vrf_oid> } } ''' - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', 0, True) all_rifs_db_keys = swsscommon.Table(db, 'INTF_TABLE').getKeys() vnet_intfs = get_vnet_intfs() @@ -198,7 +198,7 @@ def get_vnet_routes_from_app_db(): ''' Returns dictionary of VNET routes configured per each VNET in APP_DB. Format: { <vnet_name>: { 'routes': [ <pfx/pfx_len> ], 'vrf_oid': <vrf_oid> } } ''' - db = swsscommon.DBConnector('APPL_DB', 0) + db = swsscommon.DBConnector('APPL_DB', 0, True) vnet_intfs = get_vnet_intfs() vnet_vrfs = get_vrf_entries() @@ -245,7 +245,7 @@ def get_vnet_routes_from_asic_db(): ''' Returns dictionary of VNET routes configured per each VNET in ASIC_DB. Format: { <vnet_name>: { 'routes': [ <pfx/pfx_len> ], 'vrf_oid': <vrf_oid> } } ''' - db = swsscommon.DBConnector('ASIC_DB', 0) + db = swsscommon.DBConnector('ASIC_DB', 0, True) tbl = swsscommon.Table(db, 'ASIC_STATE') @@ -363,7 +363,7 @@ def main(): # Don't run VNET routes consistency logic if there is no VNET configuration if not check_vnet_cfg(): return rc - asic_db = swsscommon.DBConnector('ASIC_DB', 0) + asic_db = swsscommon.DBConnector('ASIC_DB', 0, True) virtual_router = swsscommon.Table(asic_db, 'ASIC_STATE:SAI_OBJECT_TYPE_VIRTUAL_ROUTER') if virtual_router.getKeys() != []: global default_vrf_oid diff --git a/scripts/watermarkstat b/scripts/watermarkstat index 99a46d5484..70ea853bc4 100755 --- a/scripts/watermarkstat +++ b/scripts/watermarkstat @@ -5,14 +5,15 @@ # watermarkstat is a tool for displaying watermarks.
# ##################################################################### - -import argparse +import click import json import os import sys from natsort import natsorted from tabulate import tabulate +from sonic_py_common import multi_asic +import utilities_common.multi_asic as multi_asic_util # mock the redis for unit test purposes # try: @@ -23,6 +24,10 @@ try: sys.path.insert(0, tests_path) from mock_tables import dbconnector + if os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] == "multi_asic": + import tests.mock_tables.mock_multi_asic + dbconnector.load_namespace_config() + if os.environ["WATERMARKSTAT_UNIT_TESTING"] == "1": input_path = os.path.join(tests_path, "wm_input") mock_db_path = os.path.join(input_path, "mock_db") @@ -66,18 +71,33 @@ COUNTERS_PG_INDEX_MAP = "COUNTERS_PG_INDEX_MAP" COUNTERS_BUFFER_POOL_NAME_MAP = "COUNTERS_BUFFER_POOL_NAME_MAP" -class Watermarkstat(object): +class WatermarkstatWrapper(object): + """A wrapper to execute Watermarkstat over the correct namespaces""" + def __init__(self, namespace): + self.namespace = namespace - def __init__(self): - self.counters_db = SonicV2Connector(use_unix_socket_path=False) - self.counters_db.connect(self.counters_db.COUNTERS_DB) + # Initialize the multi_asic object + self.multi_asic = multi_asic_util.MultiAsic(namespace_option=namespace) + self.db = None + + @multi_asic_util.run_on_multi_asic + def run(self, clear, persistent, wm_type): + watermarkstat = Watermarkstat(self.db, self.multi_asic.current_namespace) + if clear: + watermarkstat.send_clear_notification(("PERSISTENT" if persistent else "USER", wm_type.upper())) + else: + table_prefix = PERSISTENT_TABLE_PREFIX if persistent else USER_TABLE_PREFIX + watermarkstat.print_all_stat(table_prefix, wm_type) - # connect APP DB for clear notifications - self.app_db = SonicV2Connector(use_unix_socket_path=False) - self.app_db.connect(self.counters_db.APPL_DB) + +class Watermarkstat(object): + + def __init__(self, db, namespace): + self.namespace = namespace + self.db = db def get_queue_type(table_id): - queue_type = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) + queue_type = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_TYPE_MAP, table_id) if queue_type is None: print("Queue Type is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -92,7 +112,7 @@ class Watermarkstat(object): sys.exit(1) def get_queue_port(table_id): - port_table_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_PORT_MAP, table_id) if port_table_id is None: print("Port is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -100,7 +120,7 @@ class Watermarkstat(object): return port_table_id def get_pg_port(table_id): - port_table_id = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, table_id) + port_table_id = self.db.get(self.db.COUNTERS_DB, COUNTERS_PG_PORT_MAP, table_id) if port_table_id is None: print("Port is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -108,7 +128,7 @@ class Watermarkstat(object): return port_table_id # Get all ports - self.counter_port_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + self.counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) if self.counter_port_name_map is None: print("COUNTERS_PORT_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -127,7 
+147,7 @@ class Watermarkstat(object): self.port_name_map[self.counter_port_name_map[port]] = port # Get Queues for each port - counter_queue_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) + counter_queue_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_QUEUE_NAME_MAP) if counter_queue_name_map is None: print("COUNTERS_QUEUE_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -144,7 +164,7 @@ class Watermarkstat(object): self.port_all_queues_map[port][queue] = counter_queue_name_map[queue] # Get PGs for each port - counter_pg_name_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) + counter_pg_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PG_NAME_MAP) if counter_pg_name_map is None: print("COUNTERS_PG_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -154,7 +174,7 @@ class Watermarkstat(object): self.port_pg_map[port][pg] = counter_pg_name_map[pg] # Get all buffer pools - self.buffer_pool_name_to_oid_map = self.counters_db.get_all(self.counters_db.COUNTERS_DB, COUNTERS_BUFFER_POOL_NAME_MAP) + self.buffer_pool_name_to_oid_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_BUFFER_POOL_NAME_MAP) if self.buffer_pool_name_to_oid_map is None: print("COUNTERS_BUFFER_POOL_NAME_MAP is empty!", file=sys.stderr) sys.exit(1) @@ -194,7 +214,7 @@ class Watermarkstat(object): } def get_queue_index(self, table_id): - queue_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) + queue_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_QUEUE_INDEX_MAP, table_id) if queue_index is None: print("Queue index is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -202,7 +222,7 @@ class Watermarkstat(object): return queue_index def get_pg_index(self, table_id): - pg_index = self.counters_db.get(self.counters_db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, table_id) + pg_index = self.db.get(self.db.COUNTERS_DB, COUNTERS_PG_INDEX_MAP, table_id) if pg_index is None: print("Priority group index is not available in table '{}'".format(table_id), file=sys.stderr) sys.exit(1) @@ -256,7 +276,7 @@ class Watermarkstat(object): full_table_id = table_prefix + obj_id idx = int(idx_func(obj_id)) pos = self.header_idx_to_pos[idx] - counter_data = self.counters_db.get(self.counters_db.COUNTERS_DB, full_table_id, watermark) + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, watermark) if counter_data is None or counter_data == '': fields[pos] = STATUS_NA elif fields[pos] != STATUS_NA: @@ -274,7 +294,7 @@ class Watermarkstat(object): continue db_key = table_prefix + bp_oid - data = self.counters_db.get(self.counters_db.COUNTERS_DB, db_key, type["wm_name"]) + data = self.db.get(self.db.COUNTERS_DB, db_key, type["wm_name"]) if data is None: data = STATUS_NA table.append((buf_pool, data)) @@ -283,58 +303,52 @@ class Watermarkstat(object): # Get stat for each port for port in natsorted(self.counter_port_name_map): row_data = list() + data = self.get_counters(table_prefix, type["obj_map"][port], type["idx_func"], type["wm_name"]) row_data.append(port) row_data.extend(data) table.append(tuple(row_data)) - print(type["message"]) + namespace_str = f" (Namespace {self.namespace})" if multi_asic.is_multi_asic() else '' + print(type["message"] + namespace_str) print(tabulate(table, self.header_list, tablefmt='simple', stralign='right')) def send_clear_notification(self, data): msg = json.dumps(data, separators=(',', ':')) - self.app_db.publish('APPL_DB', 
'WATERMARK_CLEAR_REQUEST', msg) + self.db.publish('APPL_DB', 'WATERMARK_CLEAR_REQUEST', msg) return - -def main(): - - parser = argparse.ArgumentParser(description='Display the watermark counters', - formatter_class=argparse.RawTextHelpFormatter, - epilog=""" -Examples: - watermarkstat -t pg_headroom - watermarkstat -t pg_shared - watermarkstat -t q_shared_all - watermarkstat -p -t q_shared_all - watermarkstat -t q_shared_all -c - watermarkstat -t q_shared_uni -c - watermarkstat -t q_shared_multi -c - watermarkstat -p -t pg_shared - watermarkstat -p -t q_shared_multi -c - watermarkstat -t buffer_pool - watermarkstat -t buffer_pool -c - watermarkstat -p -t buffer_pool -c -""") - - parser.add_argument('-c', '--clear', action='store_true', help='Clear watermarks request') - parser.add_argument('-p', '--persistent', action='store_true', help='Do the operations on the persistent watermark') - parser.add_argument('-t', '--type', required=True, action='store', - choices=['pg_headroom', 'pg_shared', 'q_shared_uni', 'q_shared_multi', 'buffer_pool', 'headroom_pool', 'q_shared_all'], - help='The type of watermark') - parser.add_argument('-v', '--version', action='version', version='%(prog)s 1.0') - args = parser.parse_args() - watermarkstat = Watermarkstat() - - if args.clear: - watermarkstat.send_clear_notification(("PERSISTENT" if args.persistent else "USER", args.type.upper())) - sys.exit(0) - - table_prefix = PERSISTENT_TABLE_PREFIX if args.persistent else USER_TABLE_PREFIX - watermarkstat.print_all_stat(table_prefix, args.type) +@click.command() +@click.option('-c', '--clear', is_flag=True, help='Clear watermarks request') +@click.option('-p', '--persistent', is_flag=True, help='Do the operations on the persistent watermark') +@click.option('-t', '--type', 'wm_type', type=click.Choice(['pg_headroom', 'pg_shared', 'q_shared_uni', 'q_shared_multi', 'buffer_pool', 'headroom_pool', 'q_shared_all']), help='The type of watermark', required=True) +@click.option('-n', '--namespace', type=click.Choice(multi_asic.get_namespace_list()), help='Namespace name or skip for all', default=None) +@click.version_option(version='1.0') +def main(clear, persistent, wm_type, namespace): + """ + Display the watermark counters + + Examples: + watermarkstat -t pg_headroom + watermarkstat -t pg_shared + watermarkstat -t q_shared_all + watermarkstat -p -t q_shared_all + watermarkstat -t q_shared_all -c + watermarkstat -t q_shared_uni -c + watermarkstat -t q_shared_multi -c + watermarkstat -p -t pg_shared + watermarkstat -p -t q_shared_multi -c + watermarkstat -t buffer_pool + watermarkstat -t buffer_pool -c + watermarkstat -p -t buffer_pool -c + watermarkstat -t pg_headroom -n asic0 + watermarkstat -p -t buffer_pool -c -n asic1 + """ + + namespace_context = WatermarkstatWrapper(namespace) + namespace_context.run(clear, persistent, wm_type) sys.exit(0) - if __name__ == "__main__": main() diff --git a/setup.py b/setup.py index 196777d0e3..7101830664 100644 --- a/setup.py +++ b/setup.py @@ -88,7 +88,6 @@ 'utilities_common', 'watchdogutil', 'sonic_cli_gen', - 'wol', ], package_data={ 'generic_config_updater': ['gcu_services_validator.conf.json', 'gcu_field_operation_validators.conf.json'], @@ -117,14 +116,17 @@ 'scripts/buffershow', 'scripts/coredump-compress', 'scripts/configlet', + 'scripts/config_validator.py', 'scripts/db_migrator.py', 'scripts/decode-syseeprom', 'scripts/dropcheck', 'scripts/disk_check.py', + 'scripts/dpu-tty.py', 'scripts/dropconfig', 'scripts/dropstat', 'scripts/dualtor_neighbor_check.py',
'scripts/dump_nat_entries.py', + 'scripts/debug_voq_chassis_packet_drops.sh', 'scripts/ecnconfig', 'scripts/fabricstat', 'scripts/fanshow', @@ -223,7 +225,6 @@ 'undebug = undebug.main:cli', 'watchdogutil = watchdogutil.main:watchdogutil', 'sonic-cli-gen = sonic_cli_gen.main:cli', - 'wol = wol.main:wol', ] }, install_requires=[ @@ -249,14 +250,15 @@ 'pexpect>=4.8.0', 'semantic-version>=2.8.5', 'prettyprinter>=0.18.0', - 'pyroute2>=0.5.14, <0.6.1', - 'requests>=2.25.0', + 'pyroute2==0.7.12', + 'requests>=2.25.0, <=2.31.0', 'tabulate==0.9.0', 'toposort==1.6', 'www-authenticate==0.9.2', 'xmltodict==0.12.0', 'lazy-object-proxy', 'six==1.16.0', + 'scp==0.14.5', ] + sonic_dependencies, setup_requires= [ 'pytest-runner', diff --git a/sfputil/main.py b/sfputil/main.py index ad0b1b3775..80a5bcb3f2 100644 --- a/sfputil/main.py +++ b/sfputil/main.py @@ -18,7 +18,7 @@ import sonic_platform import sonic_platform_base.sonic_sfp.sfputilhelper from sonic_platform_base.sfp_base import SfpBase -from swsscommon.swsscommon import SonicV2Connector +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector from natsort import natsorted from sonic_py_common import device_info, logger, multi_asic from utilities_common.sfp_helper import covert_application_advertisement_to_output_string @@ -673,6 +673,20 @@ def eeprom(port, dump_dom, namespace): output += convert_sfp_info_to_output_string(xcvr_info) if dump_dom: + try: + api = platform_chassis.get_sfp(physical_port).get_xcvr_api() + except NotImplementedError: + output += "API is currently not implemented for this platform\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + if api is None: + output += "API is None while getting DOM info!\n" + click.echo(output) + sys.exit(ERROR_NOT_IMPLEMENTED) + else: + if api.is_flat_memory(): + output += "DOM values not supported for flat memory module\n" + continue try: xcvr_dom_info = platform_chassis.get_sfp(physical_port).get_transceiver_bulk_status() except NotImplementedError: @@ -1306,6 +1320,62 @@ def reset(port_name): i += 1 + +# 'power' subgroup +@cli.group() +def power(): + """Enable or disable power of SFP transceiver""" + pass + + +# Helper method for enabling/disabling transceiver power +def set_power(port_name, enable): + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("Power disable/enable is not available for RJ45 port {}.".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + presence = sfp.get_presence() + except NotImplementedError: + click.echo("sfp get_presence() NOT implemented!") + sys.exit(EXIT_FAIL) + + if not presence: + click.echo("{}: SFP EEPROM not detected\n".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + result = platform_chassis.get_sfp(physical_port).set_power(enable) + except (NotImplementedError, AttributeError): + click.echo("This functionality is currently not implemented for this platform") + sys.exit(ERROR_NOT_IMPLEMENTED) + + if result: + click.echo("OK") + else: + click.echo("Failed") + sys.exit(EXIT_FAIL) + + +# 'disable' subcommand +@power.command() +@click.argument('port_name', metavar='<port_name>') +def disable(port_name): + """Disable power of SFP transceiver""" + set_power(port_name, False) + + +# 'enable' subcommand +@power.command() +@click.argument('port_name', metavar='<port_name>') +def enable(port_name): + """Enable power of SFP transceiver""" + set_power(port_name, True) + + def update_firmware_info_to_state_db(port_name): physical_port =
logical_port_to_physical_port_index(port_name) @@ -1316,10 +1386,8 @@ def update_firmware_info_to_state_db(port_name): state_db.connect(state_db.STATE_DB) transceiver_firmware_info_dict = platform_chassis.get_sfp(physical_port).get_transceiver_info_firmware_versions() if transceiver_firmware_info_dict is not None: - active_firmware = transceiver_firmware_info_dict.get('active_firmware', 'N/A') - inactive_firmware = transceiver_firmware_info_dict.get('inactive_firmware', 'N/A') - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "active_firmware", active_firmware) - state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), "inactive_firmware", inactive_firmware) + for key, value in transceiver_firmware_info_dict.items(): + state_db.set(state_db.STATE_DB, 'TRANSCEIVER_FIRMWARE_INFO|{}'.format(port_name), key, value) # 'firmware' subgroup @cli.group() @@ -1523,7 +1591,9 @@ def download_firmware(port_name, filepath): 1 = Hitless Reset to Inactive Image (Default)\n \ 2 = Attempt non-hitless Reset to Running Image\n \ 3 = Attempt Hitless Reset to Running Image\n") -def run(port_name, mode): +@click.option('--delay', metavar='<delay>', type=click.IntRange(0, 10), default=5, + help="Delay time before updating firmware information to STATE_DB") +def run(port_name, mode, delay): """Run the firmware with default mode=0""" if is_port_type_rj45(port_name): @@ -1539,6 +1609,11 @@ def run(port_name, mode): click.echo('Failed to run firmware in mode={}! CDB status: {}'.format(mode, status)) sys.exit(EXIT_FAIL) + # The cable firmware can still be under initialization immediately after run_firmware + # We put a delay here to avoid potential error messages when accessing the cable EEPROM + if delay: + time.sleep(delay) + update_firmware_info_to_state_db(port_name) click.echo("Firmware run in mode={} success".format(mode)) @@ -1890,5 +1965,116 @@ def get_overall_offset_sff8472(api, page, offset, size, wire_addr): return page * PAGE_SIZE + offset + PAGE_SIZE_FOR_A0H +# 'debug' subgroup +@cli.group() +def debug(): + """Module debug and diagnostic control""" + pass + + +# 'loopback' subcommand +@debug.command() +@click.argument('port_name', required=True) +@click.argument('loopback_mode', required=True, + type=click.Choice(["host-side-input", "host-side-output", + "media-side-input", "media-side-output"])) +@click.argument('enable', required=True, type=click.Choice(["enable", "disable"])) +def loopback(port_name, loopback_mode, enable): + """Set module diagnostic loopback mode + """ + physical_port = logical_port_to_physical_port_index(port_name) + sfp = platform_chassis.get_sfp(physical_port) + + if is_port_type_rj45(port_name): + click.echo("{}: This functionality is not applicable for RJ45 port".format(port_name)) + sys.exit(EXIT_FAIL) + + if not is_sfp_present(port_name): + click.echo("{}: SFP EEPROM not detected".format(port_name)) + sys.exit(EXIT_FAIL) + + try: + api = sfp.get_xcvr_api() + except NotImplementedError: + click.echo("{}: This functionality is not implemented".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + + namespace = multi_asic.get_namespace_for_port(port_name) + config_db = ConfigDBConnector(use_unix_socket_path=True, namespace=namespace) + if config_db is not None: + config_db.connect() + try: + subport = int(config_db.get(config_db.CONFIG_DB, f'PORT|{port_name}', 'subport')) + except TypeError: + click.echo(f"{port_name}: subport is not present in CONFIG_DB") + sys.exit(EXIT_FAIL) + + # If subport is set to 0, assign a default value
of 1 to ensure valid subport configuration + if subport == 0: + subport = 1 + else: + click.echo(f"{port_name}: Failed to connect to CONFIG_DB") + sys.exit(EXIT_FAIL) + + state_db = SonicV2Connector(use_unix_socket_path=False, namespace=namespace) + if state_db is not None: + state_db.connect(state_db.STATE_DB) + try: + host_lane_count = int(state_db.get(state_db.STATE_DB, + f'TRANSCEIVER_INFO|{port_name}', + 'host_lane_count')) + except TypeError: + click.echo(f"{port_name}: host_lane_count is not present in STATE_DB") + sys.exit(EXIT_FAIL) + + try: + media_lane_count = int(state_db.get(state_db.STATE_DB, + f'TRANSCEIVER_INFO|{port_name}', + 'media_lane_count')) + except TypeError: + click.echo(f"{port_name}: media_lane_count is not present in STATE_DB") + sys.exit(EXIT_FAIL) + else: + click.echo(f"{port_name}: Failed to connect to STATE_DB") + sys.exit(EXIT_FAIL) + + if 'host-side' in loopback_mode: + lane_mask = get_subport_lane_mask(subport, host_lane_count) + elif 'media-side' in loopback_mode: + lane_mask = get_subport_lane_mask(subport, media_lane_count) + else: + lane_mask = 0 + + try: + status = api.set_loopback_mode(loopback_mode, + lane_mask=lane_mask, + enable=enable == 'enable') + except AttributeError: + click.echo("{}: Set loopback mode is not applicable for this module".format(port_name)) + sys.exit(ERROR_NOT_IMPLEMENTED) + except TypeError: + click.echo("{}: Set loopback mode failed. Parameter is not supported".format(port_name)) + sys.exit(EXIT_FAIL) + + if status: + click.echo("{}: {} {} loopback".format(port_name, enable, loopback_mode)) + else: + click.echo("{}: {} {} loopback failed".format(port_name, enable, loopback_mode)) + sys.exit(EXIT_FAIL) + + +def get_subport_lane_mask(subport, lane_count): + """Get the lane mask for the given subport and lane count + + Args: + subport (int): Subport number + lane_count (int): Lane count for the subport + + Returns: + int: Lane mask for the given subport and lane count + """ + return ((1 << lane_count) - 1) << ((subport - 1) * lane_count) + + if __name__ == '__main__': cli() diff --git a/show/bgp_cli.py b/show/bgp_cli.py new file mode 100644 index 0000000000..d475638092 --- /dev/null +++ b/show/bgp_cli.py @@ -0,0 +1,128 @@ +import click +import tabulate +import json +import utilities_common.cli as clicommon + +from utilities_common.bgp import ( + CFG_BGP_DEVICE_GLOBAL, + BGP_DEVICE_GLOBAL_KEY, + to_str, +) + + +# +# BGP helpers --------------------------------------------------------------------------------------------------------- +# + + +def format_attr_value(entry, attr): + """ Helper that formats attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attr (Dict): Attribute metadata. + + Returns: + str: formatted attribute value. 
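+        Leaf-list values are joined with newlines so each list member is rendered on its own row of the table output.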
+ """ + + if attr["is-leaf-list"]: + value = entry.get(attr["name"], []) + return "\n".join(value) if value else "N/A" + return entry.get(attr["name"], "N/A") + + +# +# BGP CLI ------------------------------------------------------------------------------------------------------------- +# + + +@click.group( + name="bgp", + cls=clicommon.AliasedGroup +) +def BGP(): + """ Show BGP configuration """ + + pass + + +# +# BGP device-global --------------------------------------------------------------------------------------------------- +# + + +@BGP.command( + name="device-global" +) +@click.option( + "-j", "--json", "json_format", + help="Display in JSON format", + is_flag=True, + default=False +) +@clicommon.pass_db +@click.pass_context +def DEVICE_GLOBAL(ctx, db, json_format): + """ Show BGP device global state """ + + header = [ + "TSA", + "W-ECMP", + ] + body = [] + + table = db.cfgdb.get_table(CFG_BGP_DEVICE_GLOBAL) + entry = table.get(BGP_DEVICE_GLOBAL_KEY, {}) + + if not entry: + click.echo("No configuration is present in CONFIG DB") + ctx.exit(0) + + if json_format: + json_dict = { + "tsa": to_str( + format_attr_value( + entry, + { + 'name': 'tsa_enabled', + 'is-leaf-list': False + } + ) + ), + "w-ecmp": to_str( + format_attr_value( + entry, + { + 'name': 'wcmp_enabled', + 'is-leaf-list': False + } + ) + ) + } + click.echo(json.dumps(json_dict, indent=4)) + ctx.exit(0) + + row = [ + to_str( + format_attr_value( + entry, + { + 'name': 'tsa_enabled', + 'is-leaf-list': False + } + ) + ), + to_str( + format_attr_value( + entry, + { + 'name': 'wcmp_enabled', + 'is-leaf-list': False + } + ) + ) + ] + body.append(row) + + click.echo(tabulate.tabulate(body, header)) diff --git a/show/bgp_common.py b/show/bgp_common.py index b51e9f1879..e9c0e12e8a 100644 --- a/show/bgp_common.py +++ b/show/bgp_common.py @@ -3,7 +3,7 @@ import json import utilities_common.multi_asic as multi_asic_util -from sonic_py_common import multi_asic +from sonic_py_common import device_info, multi_asic from utilities_common import constants ''' @@ -60,10 +60,12 @@ def get_nexthop_info_str(nxhp_info, filterByIp): else: str_2_return = " via {},".format(nxhp_info['ip']) if "interfaceName" in nxhp_info: + intfs = nxhp_info['interfaceName'] if filterByIp: - str_2_return += ", via {}".format(nxhp_info['interfaceName']) + str_2_return += ", via {}".format(intfs) else: - str_2_return += " {},".format(nxhp_info['interfaceName']) + str_2_return += " {},".format(intfs) + elif "directlyConnected" in nxhp_info: str_2_return = " is directly connected," if "interfaceName" in nxhp_info: @@ -80,10 +82,13 @@ def get_nexthop_info_str(nxhp_info, filterByIp): str_2_return += "(vrf {}, {},".format(nxhp_info['vrf'], nxhp_info['interfaceName']) if "active" not in nxhp_info: str_2_return += " inactive" + if "recursive" in nxhp_info: + if device_info.is_voq_chassis(): + str_2_return = " " + str_2_return + " recursive via iBGP" + else: + str_2_return += " (recursive)" if "onLink" in nxhp_info: str_2_return += " onlink" - if "recursive" in nxhp_info: - str_2_return += " (recursive)" if "source" in nxhp_info: str_2_return += ", src {}".format(nxhp_info['source']) if "labels" in nxhp_info: @@ -220,6 +225,12 @@ def merge_to_combined_route(combined_route, route, new_info_l): if nh['interfaceName'] == combined_route[route][j]['nexthops'][y]['interfaceName']: found = True break + if device_info.is_voq_chassis(): + if nh['ip'] == combined_route[route][j]['nexthops'][y]['ip']: + if 'interfaceName' not in combined_route[route][j]['nexthops'][y]: + 
combined_route[route][j]['nexthops'][y] = nh + found = True + break elif "active" not in nh and "active" not in combined_route[route][j]['nexthops'][y]: if nh['ip'] == combined_route[route][j]['nexthops'][y]['ip']: found = True @@ -253,7 +264,7 @@ def process_route_info(route_info, device, filter_back_end, print_ns_str, asic_c while len(new_info['nexthops']): nh = new_info['nexthops'].pop() if filter_back_end and back_end_intf_set != None and "interfaceName" in nh: - if nh['interfaceName'] in back_end_intf_set: + if nh['interfaceName'] in back_end_intf_set or nh['interfaceName'].startswith('Ethernet-IB'): del_cnt += 1 else: new_nhop_l.append(copy.deepcopy(nh)) @@ -327,6 +338,7 @@ def show_routes(args, namespace, display, verbose, ipver): if display not in ['frontend', 'all']: print("display option '{}' is not a valid option.".format(display)) return + device = multi_asic_util.MultiAsic(display, namespace) arg_strg = "" found_json = 0 @@ -376,7 +388,6 @@ def show_routes(args, namespace, display, verbose, ipver): # Need to add "ns" to form bgpX so it is sent to the correct bgpX docker to handle the request cmd = "show {} route {}".format(ipver, arg_strg) output = bgp_util.run_bgp_show_command(cmd, ns) - # in case no output or something went wrong with user specified cmd argument(s) error it out # error from FRR always start with character "%" if output == "": diff --git a/show/bgp_frr_v4.py b/show/bgp_frr_v4.py index 6343e8b7b2..ddcd688581 100644 --- a/show/bgp_frr_v4.py +++ b/show/bgp_frr_v4.py @@ -1,6 +1,8 @@ import click +import sys +import subprocess -from sonic_py_common import multi_asic +from sonic_py_common import multi_asic, device_info from show.main import ip import utilities_common.bgp_util as bgp_util import utilities_common.cli as clicommon @@ -17,7 +19,14 @@ @ip.group(cls=clicommon.AliasedGroup) def bgp(): """Show IPv4 BGP (Border Gateway Protocol) information""" - pass + if device_info.is_supervisor(): + subcommand = sys.argv[3] + if subcommand not in "network": + # any subcommand other than "show ip bgp network" is executed remotely on all linecards via rexec + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) # 'summary' subcommand ("show ip bgp summary") @@ -84,7 +93,7 @@ def neighbors(ipaddress, info_type, namespace): @bgp.command() @click.argument('ipaddress', metavar='[|]', - required=False) + required=True if device_info.is_supervisor() else False) @click.argument('info_type', metavar='[bestpath|json|longer-prefixes|multipath]', type=click.Choice( @@ -95,17 +104,26 @@ def neighbors(ipaddress, info_type, namespace): 'namespace', type=str, show_default=True, - required=True if multi_asic.is_multi_asic is True else False, + required=False, help='Namespace name or all', - default=multi_asic.DEFAULT_NAMESPACE, + default="all", callback=multi_asic_util.multi_asic_namespace_validation_callback) def network(ipaddress, info_type, namespace): """Show IP (IPv4) BGP network""" - if multi_asic.is_multi_asic() and namespace not in multi_asic.get_namespace_list(): - ctx = click.get_current_context() - ctx.fail('-n/--namespace option required.
provide namespace from list {}'\ - .format(multi_asic.get_namespace_list())) + if device_info.is_supervisor(): + # the command will be executed by rexec + click.echo("Since the current device is a chassis supervisor, " + + "this command will be executed remotely on all linecards") + proc = subprocess.run(["rexec", "all"] + ["-c", " ".join(sys.argv)]) + sys.exit(proc.returncode) + + namespace = namespace.strip() + if multi_asic.is_multi_asic(): + if namespace != "all" and namespace not in multi_asic.get_namespace_list(): + ctx = click.get_current_context() + ctx.fail('invalid namespace {}. provide namespace from list {}' + .format(namespace, multi_asic.get_namespace_list())) command = 'show ip bgp' if ipaddress is not None: @@ -125,5 +143,15 @@ def network(ipaddress, info_type, namespace): if info_type is not None: command += ' {}'.format(info_type) - output = bgp_util.run_bgp_show_command(command, namespace) - click.echo(output.rstrip('\n')) + if namespace == "all": + if multi_asic.is_multi_asic(): + for ns in multi_asic.get_namespace_list(): + click.echo("\n======== namespace {} ========".format(ns)) + output = bgp_util.run_bgp_show_command(command, ns) + click.echo(output.rstrip('\n')) + else: + output = bgp_util.run_bgp_show_command(command, "") + click.echo(output.rstrip('\n')) + else: + output = bgp_util.run_bgp_show_command(command, namespace) + click.echo(output.rstrip('\n')) diff --git a/show/dropcounters.py b/show/dropcounters.py index 30779b9364..9bb988fc5b 100644 --- a/show/dropcounters.py +++ b/show/dropcounters.py @@ -1,5 +1,6 @@ import click import utilities_common.cli as clicommon +import utilities_common.multi_asic as multi_asic_util # @@ -41,7 +42,8 @@ def capabilities(verbose): @click.option('-g', '--group', required=False) @click.option('-t', '--counter_type', required=False) @click.option('--verbose', is_flag=True, help="Enable verbose output") -def counts(group, counter_type, verbose): +@multi_asic_util.multi_asic_click_option_namespace +def counts(group, counter_type, verbose, namespace): """Show drop counts""" cmd = ['dropstat', '-c', 'show'] @@ -51,4 +53,7 @@ def counts(group, counter_type, verbose): if counter_type: cmd += ['-t', str(counter_type)] + if namespace: + cmd += ['-n', str(namespace)] + clicommon.run_command(cmd, display_cmd=verbose) diff --git a/show/fabric.py b/show/fabric.py index 785e1ab477..898c76114c 100644 --- a/show/fabric.py +++ b/show/fabric.py @@ -74,3 +74,13 @@ def queue(namespace): if namespace is not None: cmd += ['-n', str(namespace)] clicommon.run_command(cmd) + + +@counters.command() +@multi_asic_util.multi_asic_click_option_namespace +def rate(namespace): + """Show fabric counters rate""" + cmd = ['fabricstat', '-s'] + if namespace is not None: + cmd += ['-n', str(namespace)] + clicommon.run_command(cmd) diff --git a/show/interfaces/__init__.py b/show/interfaces/__init__.py index 9287eb5af7..f8889e6c32 100644 --- a/show/interfaces/__init__.py +++ b/show/interfaces/__init__.py @@ -18,6 +18,8 @@ HWSKU_JSON = 'hwsku.json' +REDIS_HOSTIP = "127.0.0.1" + # Read given JSON file def readJsonFile(fileName): try: @@ -646,6 +648,74 @@ def fec_stats(verbose, period, namespace, display): clicommon.run_command(cmd, display_cmd=verbose) + +def get_port_oid_mapping(): + ''' Returns dictionary of all ports interfaces and their OIDs. 
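+        Format: { <port_name>: <oid> }, as read from COUNTERS_PORT_NAME_MAP in COUNTERS_DB.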
''' + db = SonicV2Connector(host=REDIS_HOSTIP) + db.connect(db.COUNTERS_DB) + + port_oid_map = db.get_all(db.COUNTERS_DB, 'COUNTERS_PORT_NAME_MAP') + + db.close(db.COUNTERS_DB) + + return port_oid_map + + +def fetch_fec_histogram(port_oid_map, target_port): + ''' Fetch and display FEC histogram for the given port. ''' + asic_db = SonicV2Connector(host=REDIS_HOSTIP) + asic_db.connect(asic_db.ASIC_DB) + + config_db = ConfigDBConnector() + config_db.connect() + + counter_db = SonicV2Connector(host=REDIS_HOSTIP) + counter_db.connect(counter_db.COUNTERS_DB) + + if target_port not in port_oid_map: + click.echo('Port {} not found in COUNTERS_PORT_NAME_MAP'.format(target_port), err=True) + raise click.Abort() + + port_oid = port_oid_map[target_port] + asic_db_kvp = counter_db.get_all(counter_db.COUNTERS_DB, 'COUNTERS:{}'.format(port_oid)) + + if asic_db_kvp is not None: + + fec_errors = {f'BIN{i}': asic_db_kvp.get + (f'SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S{i}', '0') for i in range(16)} + + # Prepare the data for tabulation + table_data = [(bin_label, error_value) for bin_label, error_value in fec_errors.items()] + + # Define headers + headers = ["Symbol Errors Per Codeword", "Codewords"] + + # Print FEC histogram using tabulate + click.echo(tabulate(table_data, headers=headers)) + else: + click.echo('No kvp found in ASIC DB for port {}, exiting'.format(target_port), err=True) + raise click.Abort() + + asic_db.close(asic_db.ASIC_DB) + config_db.close(config_db.CONFIG_DB) + counter_db.close(counter_db.COUNTERS_DB) + + +# 'fec-histogram' subcommand ("show interfaces counters fec-histogram") +@counters.command('fec-histogram') +@multi_asic_util.multi_asic_click_options +@click.argument('interfacename', required=True) +def fec_histogram(interfacename, namespace, display): + """Show interface counters fec-histogram""" + port_oid_map = get_port_oid_mapping() + + # Try to convert interface name from alias + interfacename = try_convert_interfacename_from_alias(click.get_current_context(), interfacename) + + # Fetch and display the FEC histogram + fetch_fec_histogram(port_oid_map, interfacename) + + # 'rates' subcommand ("show interfaces counters rates") @counters.command() @click.option('-p', '--period') diff --git a/show/main.py b/show/main.py index a4357f30f8..a0de752277 100755 --- a/show/main.py +++ b/show/main.py @@ -66,11 +66,14 @@ from . import plugins from . import syslog from . import dns +from . import bgp_cli +from . import stp # Global Variables PLATFORM_JSON = 'platform.json' HWSKU_JSON = 'hwsku.json' PORT_STR = "Ethernet" +BMP_STATE_DB = 'BMP_STATE_DB' VLAN_SUB_INTERFACE_SEPARATOR = '.' 
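For reference, the per-bin counters tabulated by the new `fec-histogram` subcommand above are plain COUNTERS_DB fields keyed by the port's SAI OID, so they can also be inspected directly with the same connector the CLI uses. A minimal standalone sketch, assuming a device with a populated COUNTERS_DB (`Ethernet0` is only an example port name):
```
from swsscommon.swsscommon import SonicV2Connector

db = SonicV2Connector(host="127.0.0.1")
db.connect(db.COUNTERS_DB)

# COUNTERS_PORT_NAME_MAP maps interface names to SAI object IDs
oid = db.get_all(db.COUNTERS_DB, "COUNTERS_PORT_NAME_MAP")["Ethernet0"]

# BIN<i> holds the number of codewords seen with exactly i symbol errors
for i in range(16):
    stat = "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S{}".format(i)
    value = db.get(db.COUNTERS_DB, "COUNTERS:{}".format(oid), stat)
    print("BIN{}: {}".format(i, value or "0"))
```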
@@ -164,7 +167,7 @@ def get_config_json_by_namespace(namespace): iface_alias_converter = lazy_object_proxy.Proxy(lambda: clicommon.InterfaceAliasConverter()) # -# Display all storm-control data +# Display all storm-control data # def display_storm_all(): """ Show storm-control """ @@ -290,7 +293,6 @@ def cli(ctx): load_db_config() ctx.obj = Db() - # Add groups from other modules cli.add_command(acl.acl) cli.add_command(chassis_modules.chassis) @@ -317,6 +319,7 @@ def cli(ctx): cli.add_command(system_health.system_health) cli.add_command(warm_restart.warm_restart) cli.add_command(dns.dns) +cli.add_command(stp.spanning_tree) # syslog module cli.add_command(syslog.syslog) @@ -325,6 +328,8 @@ def cli(ctx): if is_gearbox_configured(): cli.add_command(gearbox.gearbox) +# bgp module +cli.add_command(bgp_cli.BGP) # # 'vrf' command ("show vrf") @@ -462,7 +467,7 @@ def is_mgmt_vrf_enabled(ctx): return False # -# 'storm-control' group +# 'storm-control' group # "show storm-control [interface ]" # @cli.group('storm-control', invoke_without_command=True) @@ -645,7 +650,8 @@ def counters(namespace, display, verbose): @pfc.command() @click.argument('interface', type=click.STRING, required=False) -def priority(interface): +@multi_asic_util.multi_asic_click_option_namespace +def priority(interface, namespace): """Show pfc priority""" cmd = ['pfc', 'show', 'priority'] if interface is not None and clicommon.get_interface_naming_mode() == "alias": @@ -653,12 +659,15 @@ def priority(interface): if interface is not None: cmd += [str(interface)] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd) @pfc.command() @click.argument('interface', type=click.STRING, required=False) -def asymmetric(interface): +@multi_asic_util.multi_asic_click_option_namespace +def asymmetric(interface, namespace): """Show asymmetric pfc""" cmd = ['pfc', 'show', 'asymmetric'] if interface is not None and clicommon.get_interface_naming_mode() == "alias": @@ -666,6 +675,8 @@ def asymmetric(interface): if interface is not None: cmd += [str(interface)] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd) @@ -774,23 +785,53 @@ def watermark(): # 'unicast' subcommand ("show queue watermarks unicast") @watermark.command('unicast') -def wm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_uni(namespace): """Show user WM for unicast queues""" command = ['watermarkstat', '-t', 'q_shared_uni'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'multicast' subcommand ("show queue watermarks multicast") @watermark.command('multicast') -def wm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_multi(namespace): """Show user WM for multicast queues""" command = ['watermarkstat', '-t', 'q_shared_multi'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'all' subcommand ("show queue watermarks all") @watermark.command('all') -def wm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_q_all(namespace): """Show user WM for all 
queues""" command = ['watermarkstat', '-t', 'q_shared_all'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # @@ -804,23 +845,53 @@ def persistent_watermark(): # 'unicast' subcommand ("show queue persistent-watermarks unicast") @persistent_watermark.command('unicast') -def pwm_q_uni(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_uni(namespace): """Show persistent WM for unicast queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_uni'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'multicast' subcommand ("show queue persistent-watermarks multicast") @persistent_watermark.command('multicast') -def pwm_q_multi(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_multi(namespace): """Show persistent WM for multicast queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_multi'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # 'all' subcommand ("show queue persistent-watermarks all") @persistent_watermark.command('all') -def pwm_q_all(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_q_all(namespace): """Show persistent WM for all queues""" command = ['watermarkstat', '-p', '-t', 'q_shared_all'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) # @@ -837,15 +908,35 @@ def watermark(): pass @watermark.command('headroom') -def wm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_pg_headroom(namespace): """Show user headroom WM for pg""" command = ['watermarkstat', '-t', 'pg_headroom'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @watermark.command('shared') -def wm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_pg_shared(namespace): """Show user shared WM for pg""" command = ['watermarkstat', '-t', 'pg_shared'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group() @@ -854,9 +945,12 @@ def drop(): pass @drop.command('counters') -def pg_drop_counters(): +@multi_asic_util.multi_asic_click_option_namespace +def pg_drop_counters(namespace): """Show dropped packets for priority-group""" command = ['pg-drop', '-c', 'show'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @priority_group.group(name='persistent-watermark') @@ -865,15 +959,36 @@ def persistent_watermark(): pass @persistent_watermark.command('headroom') -def pwm_pg_headroom(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_pg_headroom(namespace): """Show 
persistent headroom WM for pg""" command = ['watermarkstat', '-p', '-t', 'pg_headroom'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @persistent_watermark.command('shared') -def pwm_pg_shared(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_pg_shared(namespace): """Show persistent shared WM for pg""" command = ['watermarkstat', '-p', '-t', 'pg_shared'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -886,15 +1001,36 @@ def buffer_pool(): """Show details of the buffer pools""" @buffer_pool.command('watermark') -def wm_buffer_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_buffer_pool(namespace): """Show user WM for buffer pools""" - command = ['watermarkstat', '-t' ,'buffer_pool'] + command = ['watermarkstat', '-t', 'buffer_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @buffer_pool.command('persistent-watermark') -def pwm_buffer_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_buffer_pool(namespace): """Show persistent WM for buffer pools""" command = ['watermarkstat', '-p', '-t', 'buffer_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -907,15 +1043,36 @@ def headroom_pool(): """Show details of headroom pool""" @headroom_pool.command('watermark') -def wm_headroom_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def wm_headroom_pool(namespace): """Show user WM for headroom pool""" command = ['watermarkstat', '-t', 'headroom_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) + @headroom_pool.command('persistent-watermark') -def pwm_headroom_pool(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +def pwm_headroom_pool(namespace): """Show persistent WM for headroom pool""" command = ['watermarkstat', '-p', '-t', 'headroom_pool'] + if namespace is not None: + command += ['-n', str(namespace)] run_command(command) @@ -988,6 +1145,111 @@ def route_map(route_map_name, verbose): cmd[-1] += ' {}'.format(route_map_name) run_command(cmd, display_cmd=verbose) + +# +# 'vrrp' group ("show vrrp ...") +# +@cli.group(cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.pass_context +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp(ctx, verbose): + """Show vrrp commands""" + if ctx.invoked_subcommand is not None: + return + + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp'] + run_command(cmd, display_cmd=verbose) + + +# 'interface' command +@vrrp.command('interface') +@click.pass_context +@click.argument('interface_name', metavar='<interface_name>', required=True) +@click.argument('vrid', metavar='<vrid>', required=False) +@click.option('--verbose', 
is_flag=True, help="Enable verbose output") +def vrrp_interface(ctx, interface_name, vrid, verbose): + """show vrrp interface <interface_name> [<vrid>]""" + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp'] + if vrid is not None: + cmd[-1] += ' interface {} {}'.format(interface_name, vrid) + else: + cmd[-1] += ' interface {}'.format(interface_name) + run_command(cmd, display_cmd=verbose) + + +# 'vrid' command +@vrrp.command('vrid') +@click.pass_context +@click.argument('vrid', metavar='<vrid>', required=True) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp_vrid(ctx, vrid, verbose): + """show vrrp vrid <vrid>""" + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp {}'.format(vrid)] + run_command(cmd, display_cmd=verbose) + + +# 'summary' command +@vrrp.command('summary') +@click.pass_context +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp_summary(ctx, verbose): + """show vrrp summary""" + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp summary'] + run_command(cmd, display_cmd=verbose) + + +# +# 'vrrp6' group ("show vrrp6 ...") +# +@cli.group(cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.pass_context +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp6(ctx, verbose): + """Show vrrp6 commands""" + if ctx.invoked_subcommand is not None: + return + + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp6'] + run_command(cmd, display_cmd=verbose) + + +# 'interface' command +@vrrp6.command('interface') +@click.pass_context +@click.argument('interface_name', metavar='<interface_name>', required=True) +@click.argument('vrid', metavar='<vrid>', required=False) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp6_interface(ctx, interface_name, vrid, verbose): + """show vrrp6 interface <interface_name> [<vrid>]""" + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp6'] + if vrid is not None: + cmd[-1] += ' interface {} {}'.format(interface_name, vrid) + else: + cmd[-1] += ' interface {}'.format(interface_name) + run_command(cmd, display_cmd=verbose) + + +# 'vrid' command +@vrrp6.command('vrid') +@click.pass_context +@click.argument('vrid', metavar='<vrid>', required=True) +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp6_vrid(ctx, vrid, verbose): + """show vrrp6 vrid <vrid>""" + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp6 {}'.format(vrid)] + run_command(cmd, display_cmd=verbose) + + +# 'summary' command +@vrrp6.command('summary') +@click.pass_context +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def vrrp6_summary(ctx, verbose): + """show vrrp6 summary""" + cmd = ['sudo', constants.RVTYSH_COMMAND, '-c', 'show vrrp6 summary'] + run_command(cmd, display_cmd=verbose) + + # # 'ip' group ("show ip ...") # @@ -1184,7 +1446,11 @@ def protocol(verbose): ip.add_command(bgp) from .bgp_frr_v6 import bgp ipv6.add_command(bgp) - +elif device_info.is_supervisor(): + from .bgp_frr_v4 import bgp + ip.add_command(bgp) + from .bgp_frr_v6 import bgp + ipv6.add_command(bgp) # # 'link-local-mode' subcommand ("show ipv6 link-local-mode") # @@ -1438,11 +1704,11 @@ def all(verbose): for ns in ns_list: ns_config = get_config_json_by_namespace(ns) if bgp_util.is_bgp_feature_state_enabled(ns): - ns_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, ns) + ns_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, ns, exit_on_fail=False) output[ns] = ns_config click.echo(json.dumps(output, indent=4)) else: - host_config['bgpraw'] = 
bgp_util.run_bgp_show_command(bgpraw_cmd) + host_config['bgpraw'] = bgp_util.run_bgp_show_command(bgpraw_cmd, exit_on_fail=False) click.echo(json.dumps(output['localhost'], indent=4)) @@ -1529,7 +1795,7 @@ def ntp(verbose): """Show NTP running configuration""" ntp_servers = [] ntp_dict = {} - with open("/etc/ntp.conf") as ntp_file: + with open("/etc/ntpsec/ntp.conf") as ntp_file: data = ntp_file.readlines() for line in data: if line.startswith("server "): @@ -1728,6 +1994,16 @@ def syslog(verbose): click.echo(tabulate(body, header, tablefmt="simple", stralign="left", missingval="")) +# 'spanning-tree' subcommand ("show runningconfiguration spanning_tree") +@runningconfiguration.command() +@click.option('--verbose', is_flag=True, help="Enable verbose output") +def spanning_tree(verbose): + """Show spanning_tree running configuration""" + stp_list = ["STP", "STP_PORT", "STP_VLAN", "STP_VLAN_PORT"] + for key in stp_list: + cmd = ['sudo', 'sonic-cfggen', '-d', '--var-json', key] + run_command(cmd, display_cmd=verbose) + # # 'startupconfiguration' group ("show startupconfiguration ...") # @@ -1998,10 +2274,13 @@ def policer(policer_name, verbose): # 'ecn' command ("show ecn") # @cli.command('ecn') +@multi_asic_util.multi_asic_click_option_namespace @click.option('--verbose', is_flag=True, help="Enable verbose output") -def ecn(verbose): +def ecn(namespace, verbose): """Show ECN configuration""" cmd = ['ecnconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] run_command(cmd, display_cmd=verbose) @@ -2020,9 +2299,22 @@ def boot(): # 'mmu' command ("show mmu") # @cli.command('mmu') -def mmu(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +def mmu(namespace, verbose): """Show mmu configuration""" cmd = ['mmuconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if verbose: + cmd += ['-vv'] run_command(cmd) # @@ -2036,10 +2328,25 @@ def buffer(): # # 'configuration' command ("show buffer command") # + + @buffer.command() -def configuration(): +@click.option('--namespace', + '-n', + 'namespace', + default=None, + type=str, + show_default=True, + help='Namespace name or all', + callback=multi_asic_util.multi_asic_namespace_validation_callback) +@click.option('--verbose', '-vv', is_flag=True, help="Enable verbose output") +def configuration(namespace, verbose): """show buffer configuration""" cmd = ['mmuconfig', '-l'] + if namespace is not None: + cmd += ['-n', str(namespace)] + if verbose: + cmd += ['-vv'] run_command(cmd) # @@ -2082,6 +2389,138 @@ def ztp(status, verbose): run_command(cmd, display_cmd=verbose) +# +# 'bmp' group ("show bmp ...") +# +@cli.group(cls=clicommon.AliasedGroup) +def bmp(): + """Show details of the bmp dataset""" + pass + + +# 'bgp-neighbor-table' subcommand ("show bmp bgp-neighbor-table") +@bmp.command('bgp-neighbor-table') +@clicommon.pass_db +def bmp_neighbor_table(db): + """Show bmp bgp-neighbor-table information""" + bmp_headers = ["Neighbor_Address", "Peer_Address", "Peer_ASN", "Peer_RD", "Peer_Port", + "Local_Address", "Local_ASN", "Local_Port", "Advertised_Capabilities", "Received_Capabilities"] + + # BGP_NEIGHBOR_TABLE|10.0.1.2 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_NEIGHBOR_TABLE|*") + + click.echo("Total number of bmp neighbors: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + 
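+ # NOTE: the direct values[...] lookups below assume bmpd populates every
+ # field of each neighbor hash; a values.get(field, '') fallback would be
+ # needed if partially written entries are possible.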
bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + values["peer_addr"], # Neighbor_Address + values["peer_addr"], + values["peer_asn"], + values["peer_rd"], + values["peer_port"], + values["local_addr"], + values["local_asn"], + values["local_port"], + values["sent_cap"], + values["recv_cap"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 'bmp-rib-out-table' subcommand ("show bmp bgp-rib-out-table") +@bmp.command('bgp-rib-out-table') +@clicommon.pass_db +def bmp_rib_out_table(db): + """Show bmp bgp-rib-out-table information""" + bmp_headers = ["Neighbor_Address", "NLRI", "Origin", "AS_Path", "Origin_AS", "Next_Hop", "Local_Pref", + "Originator_ID", "Community_List", "Ext_Community_List"] + + # BGP_RIB_OUT_TABLE|192.181.168.0/25|10.0.0.59 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_RIB_OUT_TABLE|*") + delimiter = db.db.get_db_separator(BMP_STATE_DB) + + click.echo("Total number of bmp bgp-rib-out-table: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + key_values = key.split(delimiter) + if len(key_values) < 3: + continue + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + key_values[2], # Neighbor_Address + key_values[1], # NLRI + values["origin"], + values["as_path"], + values["origin_as"], + values["next_hop"], + values["local_pref"], + values["originator_id"], + values["community_list"], + values["ext_community_list"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 'bgp-rib-in-table' subcommand ("show bmp bgp-rib-in-table") +@bmp.command('bgp-rib-in-table') +@clicommon.pass_db +def bmp_rib_in_table(db): + """Show bmp bgp-rib-in-table information""" + bmp_headers = ["Neighbor_Address", "NLRI", "Origin", "AS_Path", "Origin_AS", "Next_Hop", "Local_Pref", + "Originator_ID", "Community_List", "Ext_Community_List"] + + # BGP_RIB_IN_TABLE|20c0:ef50::/64|10.0.0.57 + bmp_keys = db.db.keys(BMP_STATE_DB, "BGP_RIB_IN_TABLE|*") + delimiter = db.db.get_db_separator(BMP_STATE_DB) + + click.echo("Total number of bmp bgp-rib-in-table: {}".format(0 if bmp_keys is None else len(bmp_keys))) + + bmp_body = [] + if bmp_keys is not None: + for key in bmp_keys: + key_values = key.split(delimiter) + if len(key_values) < 3: + continue + values = db.db.get_all(BMP_STATE_DB, key) + bmp_body.append([ + key_values[2], # Neighbor_Address + key_values[1], # NLRI + values["origin"], + values["as_path"], + values["origin_as"], + values["next_hop"], + values["local_pref"], + values["originator_id"], + values["community_list"], + values["ext_community_list"] + ]) + + click.echo(tabulate(bmp_body, bmp_headers)) + + +# 'tables' subcommand ("show bmp tables") +@bmp.command('tables') +@clicommon.pass_db +def tables(db): + """Show bmp table status information""" + bmp_headers = ["Table_Name", "Enabled"] + bmp_body = [] + click.echo("BMP tables: ") + bmp_keys = db.cfgdb.get_table('BMP') + if bmp_keys['table']: + bmp_body.append(['bgp_neighbor_table', bmp_keys['table']['bgp_neighbor_table']]) + bmp_body.append(['bgp_rib_in_table', bmp_keys['table']['bgp_rib_in_table']]) + bmp_body.append(['bgp_rib_out_table', bmp_keys['table']['bgp_rib_out_table']]) + click.echo(tabulate(bmp_body, bmp_headers)) + + # # 'bfd' group ("show bfd ...") # @@ -2108,7 +2547,7 @@ def summary(db): key_values = key.split('|') values = db.db.get_all(db.db.STATE_DB, key) if "local_discriminator" not in values.keys(): - values["local_discriminator"] = "NA" + 
values["local_discriminator"] = "NA" bfd_body.append([key_values[3], key_values[2], key_values[1], values["state"], values["type"], values["local_addr"], values["tx_interval"], values["rx_interval"], values["multiplier"], values["multihop"], values["local_discriminator"]]) @@ -2139,7 +2578,7 @@ def peer(db, peer_ip): key_values = key.split(delimiter) values = db.db.get_all(db.db.STATE_DB, key) if "local_discriminator" not in values.keys(): - values["local_discriminator"] = "NA" + values["local_discriminator"] = "NA" bfd_body.append([key_values[3], key_values[2], key_values[1], values.get("state"), values.get("type"), values.get("local_addr"), values.get("tx_interval"), values.get("rx_interval"), values.get("multiplier"), values.get("multihop"), values.get("local_discriminator")]) @@ -2274,6 +2713,65 @@ def local_users_passwords_reset(db): click.echo(tabulate(messages, headers=hdrs, tablefmt='simple', missingval='')) +# 'serial_console' command group ("show serial_console ...") +# +@cli.group('serial_console', invoke_without_command=True) +@clicommon.pass_db +def serial_console(db): + """Show serial_console configuration""" + + serial_console_table = db.cfgdb.get_entry('SERIAL_CONSOLE', 'POLICIES') + + hdrs = ['inactivity-timeout', 'sysrq-capabilities'] + data = [] + + data.append(serial_console_table.get('inactivity_timeout', '900 ')) + data.append(serial_console_table.get('sysrq_capabilities', 'disabled ')) + + configuration = [data] + click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) + + +# +# 'ssh' command group ("show ssh ...") +# +@cli.group('ssh', invoke_without_command=True) +@clicommon.pass_db +def ssh(db): + """Show ssh configuration""" + + serial_console_table = db.cfgdb.get_entry('SSH_SERVER', 'POLICIES') + + hdrs = ['inactivity-timeout', 'max-sessions'] + data = [] + + data.append(serial_console_table.get('inactivity_timeout', '900 ')) + data.append(serial_console_table.get('max_session', '0 ')) + + configuration = [data] + click.echo(tabulate(configuration, headers=hdrs, tablefmt='simple', missingval='')) + + +# +# 'banner' command group ("show banner ...") +# +@cli.group('banner', invoke_without_command=True) +@clicommon.pass_db +def banner(db): + """Show banner messages""" + + banner_table = db.cfgdb.get_entry('BANNER_MESSAGE', 'global') + + hdrs = ['state', 'login', 'motd', 'logout'] + data = [] + + for key in hdrs: + data.append(banner_table.get(key, '').replace('\\n', '\n')) + + messages = [data] + click.echo(tabulate(messages, headers=hdrs, tablefmt='simple', missingval='')) + + # Load plugins and register them helper = util_base.UtilHelper() helper.load_and_register_plugins(plugins, cli) diff --git a/show/plugins/mlnx.py b/show/plugins/mlnx.py index 04d6a78b0a..09eacbc70a 100644 --- a/show/plugins/mlnx.py +++ b/show/plugins/mlnx.py @@ -132,20 +132,6 @@ def is_issu_status_enabled(): return issu_enabled - -@mlnx.command('sniffer') -def sniffer_status(): - """ Show sniffer status """ - components = ['sdk'] - env_variable_strings = [ENV_VARIABLE_SX_SNIFFER] - for index in range(len(components)): - enabled = sniffer_status_get(env_variable_strings[index]) - if enabled is True: - click.echo(components[index] + " sniffer is enabled") - else: - click.echo(components[index] + " sniffer is disabled") - - @mlnx.command('issu') def issu_status(): """ Show ISSU status """ diff --git a/show/plugins/pbh.py b/show/plugins/pbh.py index 407c596163..f47b43fbdc 100644 --- a/show/plugins/pbh.py +++ b/show/plugins/pbh.py @@ -395,7 +395,7 @@ def 
get_counter_value(pbh_counters, saved_pbh_counters, key, type): if not pbh_counters[key]: return '0' - if key in saved_pbh_counters: + if key in saved_pbh_counters and saved_pbh_counters[key]: new_value = int(pbh_counters[key][type]) - int(saved_pbh_counters[key][type]) if new_value >= 0: return str(new_value) diff --git a/show/plugins/sonic-system-ldap_yang.py b/show/plugins/sonic-system-ldap_yang.py new file mode 100644 index 0000000000..a91c8609db --- /dev/null +++ b/show/plugins/sonic-system-ldap_yang.py @@ -0,0 +1,145 @@ +""" +Auto-generated show CLI plugin. + + +""" + +import click +import tabulate +import natsort +import utilities_common.cli as clicommon + + +def format_attr_value(entry, attr): + """ Helper that formats attribute to be presented in the table output. + + Args: + entry (Dict[str, str]): CONFIG DB entry configuration. + attr (Dict): Attribute metadata. + + Returns: + str: fomatted attribute value. + """ + + if attr["is-leaf-list"]: + return "\n".join(entry.get(attr["name"], [])) + return entry.get(attr["name"], "N/A") + + +@click.group(name="ldap-server", + cls=clicommon.AliasedGroup, + invoke_without_command=True) +@clicommon.pass_db +def LDAP_SERVER(db): + """ [Callable command group] """ + + header = ["HOSTNAME", "PRIORITY"] + + body = [] + + table = db.cfgdb.get_table("LDAP_SERVER") + for key in natsort.natsorted(table): + entry = table[key] + if not isinstance(key, tuple): + key = (key,) + + row = [*key] + [ + format_attr_value( + entry, + {'name': 'priority', 'description': 'Server priority', + 'is-leaf-list': False, 'is-mandatory': False, 'group': ''}), + ] + + body.append(row) + + click.echo(tabulate.tabulate(body, header)) + + +@click.group(name="ldap", + cls=clicommon.AliasedGroup) +def LDAP(): + """ """ + + pass + + +@LDAP.command(name="global") +@clicommon.pass_db +def LDAP_global(db): + """ """ + + header = [ + "BIND DN", + "BIND PASSWORD", + "BIND TIMEOUT", + "VERSION", + "BASE DN", + "PORT", + "TIMEOUT", + ] + + body = [] + + table = db.cfgdb.get_table("LDAP") + entry = table.get("global", {}) + row = [ + format_attr_value( + entry, + {'name': 'bind_dn', 'description': 'LDAP global bind dn', 'is-leaf-list': False, + 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + { + 'name': 'bind_password', 'description': 'Shared secret used for encrypting the communication', + 'is-leaf-list': False, 'is-mandatory': False, 'group': '' + } + ), + format_attr_value( + entry, + {'name': 'bind_timeout', 'description': 'Ldap bind timeout', 'is-leaf-list': False, + 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'version', 'description': 'Ldap version', 'is-leaf-list': False, + 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'base_dn', 'description': 'Ldap user base dn', 'is-leaf-list': False, + 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'port', 'description': 'TCP port to communicate with LDAP server', + 'is-leaf-list': False, 'is-mandatory': False, 'group': ''} + ), + format_attr_value( + entry, + {'name': 'timeout', 'description': 'Ldap timeout duration in sec', 'is-leaf-list': False, + 'is-mandatory': False, 'group': ''} + ), + ] + + body.append(row) + click.echo(tabulate.tabulate(body, header)) + + +def register(cli): + """ Register new CLI nodes in root CLI. + + Args: + cli (click.core.Command): Root CLI node. + Raises: + Exception: when root CLI already has a command + we are trying to register. 
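+ Note: show/main.py triggers this via
+ util_base.UtilHelper().load_and_register_plugins(plugins, cli), which
+ imports the plugin modules and calls each module's register(cli).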
+ """ + cli_node = LDAP_SERVER + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(LDAP_SERVER) + cli_node = LDAP + if cli_node.name in cli.commands: + raise Exception(f"{cli_node.name} already exists in CLI") + cli.add_command(LDAP) diff --git a/show/stp.py b/show/stp.py new file mode 100644 index 0000000000..a64d9764f5 --- /dev/null +++ b/show/stp.py @@ -0,0 +1,403 @@ +import re +import click +# import subprocess +import utilities_common.cli as clicommon +from swsscommon.swsscommon import SonicV2Connector, ConfigDBConnector + + +############################################################################## +# 'spanning_tree' group ("show spanning_tree ...") +############################################################################### +# STP show commands:- +# show spanning_tree +# show spanning_tree vlan +# show spanning_tree vlan interface +# show spanning_tree bpdu_guard +# show spanning_tree statistics +# show spanning_tree statistics vlan +# +############################################################################### +g_stp_vlanid = 0 +# +# Utility API's +# + + +def is_stp_docker_running(): + return True +# running_docker = subprocess.check_output('docker ps', shell=True) +# if running_docker.find("docker-stp".encode()) == -1: +# return False +# else: +# return True + + +def connect_to_cfg_db(): + config_db = ConfigDBConnector() + config_db.connect() + return config_db + + +def connect_to_appl_db(): + appl_db = SonicV2Connector(host="127.0.0.1") + appl_db.connect(appl_db.APPL_DB) + return appl_db + + +# Redis DB only supports limiter pattern search wildcards. +# check https://redis.io/commands/KEYS before using this api +# Redis-db uses glob-style patterns not regex +def stp_get_key_from_pattern(db_connect, db, pattern): + keys = db_connect.keys(db, pattern) + if keys: + return keys[0] + else: + return None + + +# get_all doesnt accept regex patterns, it requires exact key +def stp_get_all_from_pattern(db_connect, db, pattern): + key = stp_get_key_from_pattern(db_connect, db, pattern) + if key: + entry = db_connect.get_all(db, key) + return entry + + +def stp_is_port_fast_enabled(ifname): + app_db_entry = stp_get_all_from_pattern( + g_stp_appl_db, g_stp_appl_db.APPL_DB, "*STP_PORT_TABLE:{}".format(ifname)) + if (not app_db_entry or not ('port_fast' in app_db_entry) or app_db_entry['port_fast'] == 'no'): + return False + return True + + +def stp_is_uplink_fast_enabled(ifname): + entry = g_stp_cfg_db.get_entry("STP_PORT", ifname) + if (entry and ('uplink_fast' in entry) and entry['uplink_fast'] == 'true'): + return True + return False + + +def stp_get_entry_from_vlan_tb(db, vlanid): + entry = stp_get_all_from_pattern(db, db.APPL_DB, "*STP_VLAN_TABLE:Vlan{}".format(vlanid)) + if not entry: + return entry + + if 'bridge_id' not in entry: + entry['bridge_id'] = 'NA' + if 'max_age' not in entry: + entry['max_age'] = '0' + if 'hello_time' not in entry: + entry['hello_time'] = '0' + if 'forward_delay' not in entry: + entry['forward_delay'] = '0' + if 'hold_time' not in entry: + entry['hold_time'] = '0' + if 'last_topology_change' not in entry: + entry['last_topology_change'] = '0' + if 'topology_change_count' not in entry: + entry['topology_change_count'] = '0' + if 'root_bridge_id' not in entry: + entry['root_bridge_id'] = 'NA' + if 'root_path_cost' not in entry: + entry['root_path_cost'] = '0' + if 'desig_bridge_id' not in entry: + entry['desig_bridge_id'] = 'NA' + if 'root_port' not in entry: + entry['root_port'] = 'NA' 
+ if 'root_max_age' not in entry: + entry['root_max_age'] = '0' + if 'root_hello_time' not in entry: + entry['root_hello_time'] = '0' + if 'root_forward_delay' not in entry: + entry['root_forward_delay'] = '0' + if 'stp_instance' not in entry: + entry['stp_instance'] = '65535' + + return entry + + +def stp_get_entry_from_vlan_intf_tb(db, vlanid, ifname): + entry = stp_get_all_from_pattern(db, db.APPL_DB, "*STP_VLAN_PORT_TABLE:Vlan{}:{}".format(vlanid, ifname)) + if not entry: + return entry + + if 'port_num' not in entry: + entry['port_num'] = 'NA' + if 'priority' not in entry: + entry['priority'] = '0' + if 'path_cost' not in entry: + entry['path_cost'] = '0' + if 'root_guard' not in entry: + entry['root_guard'] = 'NA' + if 'bpdu_guard' not in entry: + entry['bpdu_guard'] = 'NA' + if 'port_state' not in entry: + entry['port_state'] = 'NA' + if 'desig_cost' not in entry: + entry['desig_cost'] = '0' + if 'desig_root' not in entry: + entry['desig_root'] = 'NA' + if 'desig_bridge' not in entry: + entry['desig_bridge'] = 'NA' + + return entry + + +# +# This group houses Spanning_tree commands and subgroups +@click.group(cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.pass_context +def spanning_tree(ctx): + """Show spanning_tree commands""" + global g_stp_appl_db + global g_stp_cfg_db + + if not is_stp_docker_running(): + ctx.fail("STP docker is not running") + + g_stp_appl_db = connect_to_appl_db() + g_stp_cfg_db = connect_to_cfg_db() + + global_cfg = g_stp_cfg_db.get_entry("STP", "GLOBAL") + if not global_cfg: + click.echo("Spanning-tree is not configured") + return + + global g_stp_mode + if 'pvst' == global_cfg['mode']: + g_stp_mode = 'PVST' + + if ctx.invoked_subcommand is None: + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_TABLE:Vlan*") + if not keys: + return + vlan_list = [] + for key in keys: + result = re.search('.STP_VLAN_TABLE:Vlan(.*)', key) + vlanid = result.group(1) + vlan_list.append(int(vlanid)) + vlan_list.sort() + for vlanid in vlan_list: + ctx.invoke(show_stp_vlan, vlanid=vlanid) + + +@spanning_tree.group('vlan', cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.argument('vlanid', metavar='', required=True, type=int) +@click.pass_context +def show_stp_vlan(ctx, vlanid): + """Show spanning_tree vlan information""" + global g_stp_vlanid + g_stp_vlanid = vlanid + + vlan_tb_entry = stp_get_entry_from_vlan_tb(g_stp_appl_db, g_stp_vlanid) + if not vlan_tb_entry: + return + + global g_stp_mode + if g_stp_mode: + click.echo("Spanning-tree Mode: {}".format(g_stp_mode)) + # reset so we dont print again + g_stp_mode = '' + + click.echo("") + click.echo("VLAN {} - STP instance {}".format(g_stp_vlanid, vlan_tb_entry['stp_instance'])) + click.echo("--------------------------------------------------------------------") + click.echo("STP Bridge Parameters:") + + click.echo("{:17}{:7}{:7}{:7}{:6}{:13}{}".format( + "Bridge", "Bridge", "Bridge", "Bridge", "Hold", "LastTopology", "Topology")) + click.echo("{:17}{:7}{:7}{:7}{:6}{:13}{}".format( + "Identifier", "MaxAge", "Hello", "FwdDly", "Time", "Change", "Change")) + click.echo("{:17}{:7}{:7}{:7}{:6}{:13}{}".format("hex", "sec", "sec", "sec", "sec", "sec", "cnt")) + click.echo("{:17}{:7}{:7}{:7}{:6}{:13}{}".format( + vlan_tb_entry['bridge_id'], + vlan_tb_entry['max_age'], + vlan_tb_entry['hello_time'], + vlan_tb_entry['forward_delay'], + vlan_tb_entry['hold_time'], + vlan_tb_entry['last_topology_change'], + vlan_tb_entry['topology_change_count'])) + + click.echo("") + 
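+ # Second block: the root-bridge view for this VLAN (root path cost,
+ # designated bridge, root port, and the root bridge's timer values).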
click.echo("{:17}{:10}{:18}{:19}{:4}{:4}{}".format( + "RootBridge", "RootPath", "DesignatedBridge", "RootPort", "Max", "Hel", "Fwd")) + click.echo("{:17}{:10}{:18}{:19}{:4}{:4}{}".format("Identifier", "Cost", "Identifier", "", "Age", "lo", "Dly")) + click.echo("{:17}{:10}{:18}{:19}{:4}{:4}{}".format("hex", "", "hex", "", "sec", "sec", "sec")) + click.echo("{:17}{:10}{:18}{:19}{:4}{:4}{}".format( + vlan_tb_entry['root_bridge_id'], + vlan_tb_entry['root_path_cost'], + vlan_tb_entry['desig_bridge_id'], + vlan_tb_entry['root_port'], + vlan_tb_entry['root_max_age'], + vlan_tb_entry['root_hello_time'], + vlan_tb_entry['root_forward_delay'])) + + click.echo("") + click.echo("STP Port Parameters:") + click.echo("{:17}{:5}{:10}{:5}{:7}{:14}{:12}{:17}{}".format( + "Port", "Prio", "Path", "Port", "Uplink", "State", "Designated", "Designated", "Designated")) + click.echo("{:17}{:5}{:10}{:5}{:7}{:14}{:12}{:17}{}".format( + "Name", "rity", "Cost", "Fast", "Fast", "", "Cost", "Root", "Bridge")) + if ctx.invoked_subcommand is None: + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_PORT_TABLE:Vlan{}:*".format(vlanid)) + if not keys: + return + intf_list = [] + for key in keys: + result = re.search('.STP_VLAN_PORT_TABLE:Vlan{}:(.*)'.format(vlanid), key) + ifname = result.group(1) + intf_list.append(ifname) + eth_list = [ifname[len("Ethernet"):] for ifname in intf_list if ifname.startswith("Ethernet")] + po_list = [ifname[len("PortChannel"):] for ifname in intf_list if ifname.startswith("PortChannel")] + + eth_list.sort() + po_list.sort() + for port_num in eth_list: + ctx.invoke(show_stp_interface, ifname="Ethernet"+str(port_num)) + for port_num in po_list: + ctx.invoke(show_stp_interface, ifname="PortChannel"+port_num) + + +@show_stp_vlan.command('interface') +@click.argument('ifname', metavar='', required=True) +@click.pass_context +def show_stp_interface(ctx, ifname): + """Show spanning_tree vlan interface information""" + + vlan_intf_tb_entry = stp_get_entry_from_vlan_intf_tb(g_stp_appl_db, g_stp_vlanid, ifname) + if not vlan_intf_tb_entry: + return + + click.echo("{:17}{:5}{:10}{:5}{:7}{:14}{:12}{:17}{}".format( + ifname, + vlan_intf_tb_entry['priority'], + vlan_intf_tb_entry['path_cost'], + 'Y' if (stp_is_port_fast_enabled(ifname)) else 'N', + 'Y' if (stp_is_uplink_fast_enabled(ifname)) else 'N', + vlan_intf_tb_entry['port_state'], + vlan_intf_tb_entry['desig_cost'], + vlan_intf_tb_entry['desig_root'], + vlan_intf_tb_entry['desig_bridge'] + )) + + +@spanning_tree.command('bpdu_guard') +@click.pass_context +def show_stp_bpdu_guard(ctx): + """Show spanning_tree bpdu_guard""" + + print_header = 1 + ifname_all = g_stp_cfg_db.get_keys("STP_PORT") + for ifname in ifname_all: + cfg_entry = g_stp_cfg_db.get_entry("STP_PORT", ifname) + if cfg_entry['bpdu_guard'] == 'true' and cfg_entry['enabled'] == 'true': + if print_header: + click.echo("{:17}{:13}{}".format("PortNum", "Shutdown", "Port Shut")) + click.echo("{:17}{:13}{}".format("", "Configured", "due to BPDU guard")) + click.echo("-------------------------------------------") + print_header = 0 + + if cfg_entry['bpdu_guard_do_disable'] == 'true': + disabled = 'No' + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_PORT_TABLE:{}".format(ifname)) + # only 1 key per ifname is expected in BPDU_GUARD_TABLE. 
+ if keys: + appdb_entry = g_stp_appl_db.get_all(g_stp_appl_db.APPL_DB, keys[0]) + if appdb_entry and 'bpdu_guard_shutdown' in appdb_entry: + if appdb_entry['bpdu_guard_shutdown'] == 'yes': + disabled = 'Yes' + click.echo("{:17}{:13}{}".format(ifname, "Yes", disabled)) + else: + click.echo("{:17}{:13}{}".format(ifname, "No", "NA")) + + +@spanning_tree.command('root_guard') +@click.pass_context +def show_stp_root_guard(ctx): + """Show spanning_tree root_guard""" + + print_header = 1 + ifname_all = g_stp_cfg_db.get_keys("STP_PORT") + for ifname in ifname_all: + entry = g_stp_cfg_db.get_entry("STP_PORT", ifname) + if entry['root_guard'] == 'true' and entry['enabled'] == 'true': + if print_header: + global_entry = g_stp_cfg_db.get_entry("STP", "GLOBAL") + click.echo("Root guard timeout: {} secs".format(global_entry['rootguard_timeout'])) + click.echo("") + click.echo("{:17}{:7}{}".format("Port", "VLAN", "Current State")) + click.echo("-------------------------------------------") + print_header = 0 + + state = '' + vlanid = '' + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_PORT_TABLE:*:{}".format(ifname)) + if keys: + for key in keys: + entry = g_stp_appl_db.get_all(g_stp_appl_db.APPL_DB, key) + if entry and 'root_guard_timer' in entry: + if entry['root_guard_timer'] == '0': + state = 'Consistent state' + else: + state = 'Inconsistent state ({} seconds left on timer)'.format(entry['root_guard_timer']) + + vlanid = re.search(':Vlan(.*):', key) + if vlanid: + click.echo("{:17}{:7}{}".format(ifname, vlanid.group(1), state)) + else: + click.echo("{:17}{:7}{}".format(ifname, vlanid, state)) + + +@spanning_tree.group('statistics', cls=clicommon.AliasedGroup, invoke_without_command=True) +@click.pass_context +def show_stp_statistics(ctx): + """Show spanning_tree statistics""" + + if ctx.invoked_subcommand is None: + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_TABLE:Vlan*") + if not keys: + return + + vlan_list = [] + for key in keys: + result = re.search('.STP_VLAN_TABLE:Vlan(.*)', key) + vlanid = result.group(1) + vlan_list.append(int(vlanid)) + vlan_list.sort() + for vlanid in vlan_list: + ctx.invoke(show_stp_vlan_statistics, vlanid=vlanid) + + +@show_stp_statistics.command('vlan') +@click.argument('vlanid', metavar='', required=True, type=int) +@click.pass_context +def show_stp_vlan_statistics(ctx, vlanid): + """Show spanning_tree statistics vlan""" + + stp_inst_entry = stp_get_all_from_pattern( + g_stp_appl_db, g_stp_appl_db.APPL_DB, "*STP_VLAN_TABLE:Vlan{}".format(vlanid)) + if not stp_inst_entry: + return + + click.echo("VLAN {} - STP instance {}".format(vlanid, stp_inst_entry['stp_instance'])) + click.echo("--------------------------------------------------------------------") + click.echo("{:17}{:15}{:15}{:15}{}".format("PortNum", "BPDU Tx", "BPDU Rx", "TCN Tx", "TCN Rx")) + keys = g_stp_appl_db.keys(g_stp_appl_db.APPL_DB, "*STP_VLAN_PORT_TABLE:Vlan{}:*".format(vlanid)) + if keys: + for key in keys: + result = re.search('.STP_VLAN_PORT_TABLE:Vlan(.*):(.*)', key) + ifname = result.group(2) + entry = g_stp_appl_db.get_all(g_stp_appl_db.APPL_DB, key) + if entry: + if 'bpdu_sent' not in entry: + entry['bpdu_sent'] = '-' + if 'bpdu_received' not in entry: + entry['bpdu_received'] = '-' + if 'tc_sent' not in entry: + entry['tc_sent'] = '-' + if 'tc_received' not in entry: + entry['tc_received'] = '-' + + click.echo("{:17}{:15}{:15}{:15}{}".format( + ifname, entry['bpdu_sent'], entry['bpdu_received'], entry['tc_sent'], entry['tc_received'])) diff --git 
a/sonic-utilities-data/templates/timer.unit.j2 b/sonic-utilities-data/templates/timer.unit.j2 deleted file mode 100644 index 09989f2c51..0000000000 --- a/sonic-utilities-data/templates/timer.unit.j2 +++ /dev/null @@ -1,19 +0,0 @@ -# -# =============== Managed by SONiC Package Manager. DO NOT EDIT! =============== -# auto-generated from {{ source }} by sonic-package-manager -# -[Unit] -Description=Delays {{ manifest.service.name }} until SONiC has started -PartOf={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service - -[Timer] -OnUnitActiveSec=0 sec -OnBootSec=3min 30 sec -Unit={{ manifest.service.name }}{% if multi_instance %}@%i{% endif %}.service - -[Install] -WantedBy=timers.target sonic.target sonic-delayed.target -{%- for service in manifest.service["wanted-by"] %} -WantedBy={{ service }}{% if multi_instance and service in multi_instance_services %}@%i{% endif %}.service -{%- endfor %} - diff --git a/sonic_installer/bootloader/aboot.py b/sonic_installer/bootloader/aboot.py index ac327feb4c..d6492171ab 100644 --- a/sonic_installer/bootloader/aboot.py +++ b/sonic_installer/bootloader/aboot.py @@ -71,6 +71,8 @@ class AbootBootloader(Bootloader): def _boot_config_read(self, path=BOOT_CONFIG_PATH): config = collections.OrderedDict() + if not os.path.exists(path): + return config with open(path) as f: for line in f.readlines(): line = line.strip() @@ -112,7 +114,10 @@ def get_installed_images(self): def get_next_image(self): config = self._boot_config_read() - match = re.search(r"flash:/*(\S+)/", config['SWI']) + swi = config.get('SWI', '') + match = re.search(r"flash:/*(\S+)/", swi) + if not match: + return swi.split(':', 1)[-1] return match.group(1).replace(IMAGE_DIR_PREFIX, IMAGE_PREFIX, 1) def set_default_image(self, image): diff --git a/sonic_installer/bootloader/grub.py b/sonic_installer/bootloader/grub.py index d76ddcc0c7..029ebf34f1 100644 --- a/sonic_installer/bootloader/grub.py +++ b/sonic_installer/bootloader/grub.py @@ -164,7 +164,7 @@ def is_secure_upgrade_image_verification_supported(self): if ! 
[ -n "$(ls -A /sys/firmware/efi/efivars 2>/dev/null)" ]; then mount -t efivarfs none /sys/firmware/efi/efivars 2>/dev/null fi - SECURE_UPGRADE_ENABLED=$(bootctl status 2>/dev/null | grep -c "Secure Boot: enabled") + SECURE_UPGRADE_ENABLED=$(mokutil --sb-state 2>/dev/null | grep -c "enabled") else echo "efi not supported - exiting without verification" exit 1 diff --git a/sonic_installer/main.py b/sonic_installer/main.py index 341111f265..d85e3731aa 100644 --- a/sonic_installer/main.py +++ b/sonic_installer/main.py @@ -337,6 +337,8 @@ def migrate_sonic_packages(bootloader, binary_image_version): new_image_docker_mount = os.path.join(new_image_mount, "var", "lib", "docker") docker_default_config = os.path.join(new_image_mount, "etc", "default", "docker") docker_default_config_backup = os.path.join(new_image_mount, TMP_DIR, "docker_config_backup") + custom_manifests_path = os.path.join(PACKAGE_MANAGER_DIR, "manifests") + new_image_package_directory_path = os.path.join(new_image_mount, "var", "lib", "sonic-package-manager") if not os.path.isdir(new_image_docker_dir): # NOTE: This codepath can be reached if the installation process did not @@ -372,6 +374,8 @@ def migrate_sonic_packages(bootloader, binary_image_version): run_command_or_raise(["chroot", new_image_mount, DOCKER_CTL_SCRIPT, "start"]) docker_started = True run_command_or_raise(["cp", packages_path, os.path.join(new_image_mount, TMP_DIR, packages_file)]) + run_command_or_raise(["mkdir", "-p", custom_manifests_path]) + run_command_or_raise(["cp", "-arf", custom_manifests_path, new_image_package_directory_path]) run_command_or_raise(["touch", os.path.join(new_image_mount, "tmp", DOCKERD_SOCK)]) run_command_or_raise(["mount", "--bind", os.path.join(VAR_RUN_PATH, DOCKERD_SOCK), @@ -759,6 +763,7 @@ def cleanup(): "radv", "restapi", "sflow", + "stp", "snmp", "swss", "syncd", diff --git a/sonic_package_manager/main.py b/sonic_package_manager/main.py index 8a0aabb901..d305e3c911 100644 --- a/sonic_package_manager/main.py +++ b/sonic_package_manager/main.py @@ -15,6 +15,7 @@ from sonic_package_manager.errors import PackageManagerError from sonic_package_manager.logger import log from sonic_package_manager.manager import PackageManager +from sonic_package_manager.manifest import MANIFESTS_LOCATION BULLET_UC = '\u2022' @@ -157,6 +158,13 @@ def repository(ctx): pass +@cli.group() +@click.pass_context +def manifests(ctx): + """ Custom local Manifest management commands. 
""" + + pass + @cli.group() @click.pass_context def show(ctx): @@ -280,6 +288,73 @@ def changelog(ctx, exit_cli(f'Failed to print package changelog: {err}', fg='red') +@manifests.command('create') +@click.pass_context +@click.argument('name', type=click.Path()) +@click.option('--from-json', type=str, help='specify manifest json file') +@root_privileges_required +def create_manifest(ctx, name, from_json): + """Create a new custom local manifest file.""" + + manager: PackageManager = ctx.obj + try: + manager.create_package_manifest(name, from_json) + except Exception as e: + click.echo("Error: Manifest {} creation failed - {}".format(name, str(e))) + return + + +@manifests.command('update') +@click.pass_context +@click.argument('name', type=click.Path()) +@click.option('--from-json', type=str, required=True) +@root_privileges_required +def update_manifest(ctx, name, from_json): + """Update an existing custom local manifest file with new one.""" + + manager: PackageManager = ctx.obj + try: + manager.update_package_manifest(name, from_json) + except Exception as e: + click.echo(f"Error occurred while updating manifest '{name}': {e}") + return + + +@manifests.command('delete') +@click.pass_context +@click.argument('name', type=click.Path()) +@root_privileges_required +def delete_manifest(ctx, name): + """Delete a custom local manifest file.""" + manager: PackageManager = ctx.obj + try: + manager.delete_package_manifest(name) + except Exception as e: + click.echo("Error: Failed to delete manifest file '{}'. {}".format(name, e)) + + +@manifests.command('show') +@click.pass_context +@click.argument('name', type=click.Path()) +@root_privileges_required +def show_manifest(ctx, name): + """Show the contents of custom local manifest file.""" + manager: PackageManager = ctx.obj + try: + manager.show_package_manifest(name) + except FileNotFoundError: + click.echo("Manifest file '{}' not found.".format(name)) + + +@manifests.command('list') +@click.pass_context +@root_privileges_required +def list_manifests(ctx): + """List all custom local manifest files.""" + manager: PackageManager = ctx.obj + manager.list_package_manifest() + + @repository.command() @click.argument('name', type=str) @click.argument('repository', type=str) @@ -334,6 +409,14 @@ def remove(ctx, name): help='Allow package downgrade. By default an attempt to downgrade the package ' 'will result in a failure since downgrade might not be supported by the package, ' 'thus requires explicit request from the user.') +@click.option('--use-local-manifest', + is_flag=True, + default=None, + help='Use locally created custom manifest file. ', + hidden=True) +@click.option('--name', + type=str, + help='custom name for the package') @add_options(PACKAGE_SOURCE_OPTIONS) @add_options(PACKAGE_COMMON_OPERATION_OPTIONS) @add_options(PACKAGE_COMMON_INSTALL_OPTIONS) @@ -348,7 +431,9 @@ def install(ctx, enable, set_owner, skip_host_plugins, - allow_downgrade): + allow_downgrade, + use_local_manifest, + name): """ Install/Upgrade package using [PACKAGE_EXPR] in format "[=|@]". 
The repository to pull the package from is resolved by lookup in package database, @@ -378,16 +463,58 @@ def install(ctx, if allow_downgrade is not None: install_opts['allow_downgrade'] = allow_downgrade + if use_local_manifest: + if not name: + click.echo('name argument is required when using a local manifest') + return + original_file = os.path.join(MANIFESTS_LOCATION, name) + if not os.path.exists(original_file): + click.echo(f'Local Manifest file for {name} does not exist; cannot install') + return + try: manager.install(package_expr, from_repository, from_tarball, + use_local_manifest, + name, **install_opts) except Exception as err: exit_cli(f'Failed to install {package_source}: {err}', fg='red') except KeyboardInterrupt: exit_cli('Operation canceled by user', fg='red') +# At the end of sonic-package-manager install, a new manifest file is created under the package name. +# At the end of sonic-package-manager uninstall <name>, +# both the manifest file <name> and <name>.edit are deleted. +# At the end of sonic-package-manager update, +# manifests/<name>.edit is renamed to <name> on success; otherwise it is kept as is. +# So during sonic-package-manager update, +# the old package comes from <name> and the new package from <name>.edit, then the rename above applies. + + +@cli.command() +@add_options(PACKAGE_COMMON_OPERATION_OPTIONS) +@add_options(PACKAGE_COMMON_INSTALL_OPTIONS) +@click.argument('name') +@click.pass_context +@root_privileges_required +def update(ctx, name, force, yes, skip_host_plugins): + """ Update the package according to its updated local manifest file. """ + + manager: PackageManager = ctx.obj + + update_opts = { + 'force': force, + 'skip_host_plugins': skip_host_plugins, + 'update_only': True, + } + try: + manager.update(name, **update_opts) + except Exception as err: + exit_cli(f'Failed to update package {name}: {err}', fg='red') + except KeyboardInterrupt: + exit_cli('Operation canceled by user', fg='red') @cli.command() @add_options(PACKAGE_COMMON_OPERATION_OPTIONS) diff --git a/sonic_package_manager/manager.py b/sonic_package_manager/manager.py index e41bb00e8f..b6a3be50c3 100644 --- a/sonic_package_manager/manager.py +++ b/sonic_package_manager/manager.py @@ -65,7 +65,15 @@ version_to_tag, tag_to_version ) - +import click +import json +import requests +import getpass +import paramiko +import urllib.parse +from scp import SCPClient +from sonic_package_manager.manifest import Manifest, MANIFESTS_LOCATION, DEFAULT_MANIFEST_FILE +LOCAL_JSON = "/tmp/local_json" @contextlib.contextmanager def failure_ignore(ignore: bool): @@ -344,6 +352,8 @@ def install(self, expression: Optional[str] = None, repotag: Optional[str] = None, tarball: Optional[str] = None, + use_local_manifest: bool = False, + name: Optional[str] = None, **kwargs): """ Install/Upgrade SONiC Package from either an expression representing the package and its version, repository and tag or @@ -358,7 +368,7 @@ def install(self, PackageManagerError """ - source = self.get_package_source(expression, repotag, tarball) + source = self.get_package_source(expression, repotag, tarball, use_local_manifest=use_local_manifest, name=name) package = source.get_package() if self.is_installed(package.name): @@ -446,6 +456,37 @@ def install_from_source(self, self.database.update_package(package.entry) self.database.commit() + @under_lock + def update(self, + name: str, + **kwargs): + """ Update SONiC Package referenced by name. The update + can be forced if force argument is True. + + Args: + name: SONiC Package name. 
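+
+ Example (illustrative): edit <name>.edit under the manifests
+ location, then run "sonic-package-manager update <name>" to
+ re-apply the package with the edited manifest.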
+ Raises: + PackageManagerError + """ + if self.is_installed(name): + edit_name = name + '.edit' + edit_file = os.path.join(MANIFESTS_LOCATION, edit_name) + if os.path.exists(edit_file): + self.upgrade_from_source(None, name=name, **kwargs) + else: + click.echo("Package manifest {}.edit file does not exists to update".format(name)) + return + else: + click.echo("Package {} is not installed".format(name)) + return + + def remove_unused_docker_image(self, package): + image_id_used = any(entry.image_id == package.image_id for entry in self.database if entry.name != package.name) + if not image_id_used: + self.docker.rmi(package.image_id, force=True) + else: + log.info(f'Image with ID {package.image_id} is in use by other package(s). Skipping deletion') + @under_lock @opt_check def uninstall(self, name: str, @@ -493,7 +534,8 @@ def uninstall(self, name: str, self._get_installed_packages_except(package) ) self.docker.rm_by_ancestor(package.image_id, force=True) - self.docker.rmi(package.image_id, force=True) + # Delete image if it is not in use, otherwise skip deletion + self.remove_unused_docker_image(package) package.entry.image_id = None except Exception as err: raise PackageUninstallationError( @@ -504,6 +546,13 @@ def uninstall(self, name: str, package.entry.version = None self.database.update_package(package.entry) self.database.commit() + manifest_path = os.path.join(MANIFESTS_LOCATION, name) + edit_path = os.path.join(MANIFESTS_LOCATION, name + ".edit") + if os.path.exists(manifest_path): + os.remove(manifest_path) + if os.path.exists(edit_path): + os.remove(edit_path) + @under_lock @opt_check @@ -511,7 +560,9 @@ def upgrade_from_source(self, source: PackageSource, force=False, skip_host_plugins=False, - allow_downgrade=False): + allow_downgrade=False, + update_only: Optional[bool] = False, + name: Optional[str] = None): """ Upgrade SONiC Package to a version the package reference expression specifies. Can force the upgrade if force parameter is True. Force can allow a package downgrade. @@ -521,12 +572,17 @@ def upgrade_from_source(self, force: Force the upgrade. skip_host_plugins: Skip host OS plugins installation. allow_downgrade: Flag to allow package downgrade. + update_only: Perform package update with new manifest. + name: name of package. Raises: PackageManagerError """ - new_package = source.get_package() - name = new_package.name + if update_only: + new_package = self.get_installed_package(name, use_edit=True) + else: + new_package = source.get_package() + name = new_package.name with failure_ignore(force): if not self.is_installed(name): @@ -543,19 +599,20 @@ def upgrade_from_source(self, old_version = old_package.manifest['package']['version'] new_version = new_package.manifest['package']['version'] - with failure_ignore(force): - if old_version == new_version: - raise PackageUpgradeError(f'{new_version} is already installed') - - # TODO: Not all packages might support downgrade. - # We put a check here but we understand that for some packages - # the downgrade might be safe to do. There can be a variable in manifest - # describing package downgrade ability or downgrade-able versions. - if new_version < old_version and not allow_downgrade: - raise PackageUpgradeError( - f'Request to downgrade from {old_version} to {new_version}. 
     @under_lock
     @opt_check
     def uninstall(self, name: str,
@@ -493,7 +534,8 @@ def uninstall(self, name: str,
                 self._get_installed_packages_except(package)
             )
             self.docker.rm_by_ancestor(package.image_id, force=True)
-            self.docker.rmi(package.image_id, force=True)
+            # Delete the image only if it is not in use, otherwise skip deletion
+            self.remove_unused_docker_image(package)
             package.entry.image_id = None
         except Exception as err:
             raise PackageUninstallationError(
@@ -504,6 +546,13 @@ def uninstall(self, name: str,
         package.entry.version = None
         self.database.update_package(package.entry)
         self.database.commit()
+        manifest_path = os.path.join(MANIFESTS_LOCATION, name)
+        edit_path = os.path.join(MANIFESTS_LOCATION, name + ".edit")
+        if os.path.exists(manifest_path):
+            os.remove(manifest_path)
+        if os.path.exists(edit_path):
+            os.remove(edit_path)
+
 
     @under_lock
     @opt_check
@@ -511,7 +560,9 @@ def upgrade_from_source(self,
                             source: PackageSource,
                             force=False,
                             skip_host_plugins=False,
-                            allow_downgrade=False):
+                            allow_downgrade=False,
+                            update_only: Optional[bool] = False,
+                            name: Optional[str] = None):
         """ Upgrade SONiC Package to a version the package reference
         expression specifies. Can force the upgrade if force parameter
         is True. Force can allow a package downgrade.
@@ -521,12 +572,17 @@ def upgrade_from_source(self,
             force: Force the upgrade.
             skip_host_plugins: Skip host OS plugins installation.
             allow_downgrade: Flag to allow package downgrade.
+            update_only: Perform the package update using the new manifest only.
+            name: Name of the package.
 
         Raises:
             PackageManagerError
         """
 
-        new_package = source.get_package()
-        name = new_package.name
+        if update_only:
+            new_package = self.get_installed_package(name, use_edit=True)
+        else:
+            new_package = source.get_package()
+            name = new_package.name
 
         with failure_ignore(force):
             if not self.is_installed(name):
@@ -543,19 +599,20 @@ def upgrade_from_source(self,
         old_version = old_package.manifest['package']['version']
         new_version = new_package.manifest['package']['version']
 
-        with failure_ignore(force):
-            if old_version == new_version:
-                raise PackageUpgradeError(f'{new_version} is already installed')
-
-            # TODO: Not all packages might support downgrade.
-            # We put a check here but we understand that for some packages
-            # the downgrade might be safe to do. There can be a variable in manifest
-            # describing package downgrade ability or downgrade-able versions.
-            if new_version < old_version and not allow_downgrade:
-                raise PackageUpgradeError(
-                    f'Request to downgrade from {old_version} to {new_version}. '
-                    f'Downgrade might be not supported by the package'
-                )
+        if not update_only:
+            with failure_ignore(force):
+                if old_version == new_version:
+                    raise PackageUpgradeError(f'{new_version} is already installed')
+
+                # TODO: Not all packages might support downgrade.
+                # We put a check here but we understand that for some packages
+                # the downgrade might be safe to do. There can be a variable in manifest
+                # describing package downgrade ability or downgrade-able versions.
+                if new_version < old_version and not allow_downgrade:
+                    raise PackageUpgradeError(
+                        f'Request to downgrade from {old_version} to {new_version}. '
+                        f'Downgrade might not be supported by the package'
+                    )
 
         # remove currently installed package from the list
         installed_packages = self._get_installed_packages_and(new_package)
@@ -579,8 +636,9 @@ def upgrade_from_source(self,
                 self._uninstall_cli_plugins(old_package)
                 exits.callback(rollback(self._install_cli_plugins, old_package))
 
-                source.install(new_package)
-                exits.callback(rollback(source.uninstall, new_package))
+                if not update_only:
+                    source.install(new_package)
+                    exits.callback(rollback(source.uninstall, new_package))
 
                 feature_enabled = self.feature_registry.is_feature_enabled(old_feature)
 
@@ -620,7 +678,8 @@ def upgrade_from_source(self,
                 self._install_cli_plugins(new_package)
                 exits.callback(rollback(self._uninstall_cli_plugin, new_package))
 
-                self.docker.rmi(old_package.image_id, force=True)
+                if old_package.image_id != new_package.image_id:
+                    self.remove_unused_docker_image(old_package)
 
                 exits.pop_all()
         except Exception as err:
@@ -633,6 +692,10 @@ def upgrade_from_source(self,
         new_package_entry.version = new_version
         self.database.update_package(new_package_entry)
         self.database.commit()
+        if update_only:
+            manifest_path = os.path.join(MANIFESTS_LOCATION, name)
+            edit_path = os.path.join(MANIFESTS_LOCATION, name + ".edit")
+            os.rename(edit_path, manifest_path)
 
     @under_lock
     @opt_check
@@ -718,7 +781,7 @@ def migrate_package(old_package_entry,
                         file.write(chunk)
                         file.flush()
 
-                    self.install(tarball=file.name)
+                    self.install(tarball=file.name, name=name)
                 else:
                     log.info(f'installing {name} version {version}')
 
@@ -755,7 +818,9 @@ def migrate_package(old_package_entry,
                     new_package.version = old_package.version
                     migrate_package(old_package, new_package)
                 else:
-                    self.install(f'{new_package.name}={new_package_default_version}')
+                    repo_tag_formed = "{}:{}".format(new_package.repository, new_package.default_reference)
+                    self.install(None, repo_tag_formed, name=new_package.name)
             else:
                 # No default version and package is not installed.
                 # Migrate old package same version.
@@ -764,7 +829,7 @@ def migrate_package(old_package_entry,
 
         self.database.commit()
 
-    def get_installed_package(self, name: str) -> Package:
+    def get_installed_package(self, name: str, use_local_manifest: bool = False, use_edit: bool = False) -> Package:
         """ Get installed package by name.
Args: @@ -777,14 +842,19 @@ def get_installed_package(self, name: str) -> Package: source = LocalSource(package_entry, self.database, self.docker, - self.metadata_resolver) + self.metadata_resolver, + use_local_manifest=use_local_manifest, + name=name, + use_edit=use_edit) return source.get_package() def get_package_source(self, package_expression: Optional[str] = None, repository_reference: Optional[str] = None, tarboll_path: Optional[str] = None, - package_ref: Optional[PackageReference] = None): + package_ref: Optional[PackageReference] = None, + use_local_manifest: bool = False, + name: Optional[str] = None): """ Returns PackageSource object based on input source. Args: @@ -800,7 +870,7 @@ def get_package_source(self, if package_expression: ref = parse_reference_expression(package_expression) - return self.get_package_source(package_ref=ref) + return self.get_package_source(package_ref=ref, name=name) elif repository_reference: repo_ref = utils.DockerReference.parse(repository_reference) repository = repo_ref['name'] @@ -810,15 +880,19 @@ def get_package_source(self, reference, self.database, self.docker, - self.metadata_resolver) + self.metadata_resolver, + use_local_manifest, + name) elif tarboll_path: return TarballSource(tarboll_path, self.database, self.docker, - self.metadata_resolver) + self.metadata_resolver, + use_local_manifest, + name) elif package_ref: package_entry = self.database.get_package(package_ref.name) - + name = package_ref.name # Determine the reference if not specified. # If package is installed assume the installed # one is requested, otherwise look for default @@ -829,7 +903,9 @@ def get_package_source(self, return LocalSource(package_entry, self.database, self.docker, - self.metadata_resolver) + self.metadata_resolver, + use_local_manifest, + name) if package_entry.default_reference is not None: package_ref.reference = package_entry.default_reference else: @@ -840,7 +916,9 @@ def get_package_source(self, package_ref.reference, self.database, self.docker, - self.metadata_resolver) + self.metadata_resolver, + use_local_manifest, + name) else: raise ValueError('No package source provided') @@ -939,8 +1017,10 @@ def _get_installed_packages_except(self, package: Package) -> Dict[str, Package] def _stop_feature(self, package: Package): self._systemctl_action(package, 'stop') + self._systemctl_action(package, 'disable') def _start_feature(self, package: Package): + self._systemctl_action(package, 'enable') self._systemctl_action(package, 'start') def _systemctl_action(self, package: Package, action: str): @@ -1018,6 +1098,196 @@ def _uninstall_cli_plugin(self, package: Package, command: str): if os.path.exists(host_plugin_path): os.remove(host_plugin_path) + def download_file(self, url, local_path): + # Parse information from the URL + parsed_url = urllib.parse.urlparse(url) + protocol = parsed_url.scheme + username = parsed_url.username + password = parsed_url.password + hostname = parsed_url.hostname + remote_path = parsed_url.path + supported_protocols = ['http', 'https', 'scp', 'sftp'] + + # clear the temporary local file + if os.path.exists(local_path): + os.remove(local_path) + + if not protocol: + # check for local file + if os.path.exists(url): + os.rename(url, local_path) + return True + else: + click.echo("Local file not present") + return False + if protocol not in supported_protocols: + click.echo("Protocol not supported") + return False + + # If the protocol is HTTP and no username or password is provided, proceed with the download using requests + if 
(protocol == 'http' or protocol == 'https') and not username and not password:
+            try:
+                with requests.get(url, stream=True) as response:
+                    response.raise_for_status()
+                    with open(local_path, 'wb') as f:
+                        for chunk in response.iter_content(chunk_size=8192):
+                            if chunk:
+                                f.write(chunk)
+            except requests.exceptions.RequestException as e:
+                click.echo(f"Download error: {e}")
+                return False
+        else:
+            # If the password is not provided, prompt the user for it securely
+            if password is None:
+                password = getpass.getpass(prompt=f"Enter password for {username}@{hostname}: ")
+
+            if protocol == 'http' or protocol == 'https':
+                # Download over HTTP(S) with the provided credentials; no SSH session is needed
+                try:
+                    with requests.get(url, auth=(username, password), stream=True) as response:
+                        response.raise_for_status()  # Raise an exception if the request was not successful
+                        with open(local_path, 'wb') as f:
+                            for chunk in response.iter_content(chunk_size=8192):
+                                if chunk:
+                                    f.write(chunk)
+                except requests.exceptions.RequestException as e:
+                    click.echo(f"Download error: {e}")
+                    return False
+            else:
+                # Create an SSH client for scp/sftp downloads
+                client = paramiko.SSHClient()
+                # Automatically add the server's host key (this is insecure and should be handled differently in production)
+                client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
+
+                try:
+                    # Connect to the SSH server
+                    client.connect(hostname, username=username, password=password)
+
+                    if protocol == 'scp':
+                        # Create an SCP client
+                        scp = SCPClient(client.get_transport())
+                        # Download the file
+                        scp.get(remote_path, local_path)
+                    elif protocol == 'sftp':
+                        # Open an SFTP channel
+                        with client.open_sftp() as sftp:
+                            # Download the file
+                            sftp.get(remote_path, local_path)
+                finally:
+                    # Close the SSH connection
+                    client.close()
+
+    def create_package_manifest(self, name, from_json):
+        if name == "default_manifest":
+            click.echo("Creating the default manifest is not allowed")
+            return
+        if self.is_installed(name):
+            click.echo("Error: A package with the same name {} is already installed".format(name))
+            return
+        mfile_name = os.path.join(MANIFESTS_LOCATION, name)
+        if os.path.exists(mfile_name):
+            click.echo("Error: Manifest file '{}' already exists.".format(name))
+            return
+
+        if from_json:
+            ret = self.download_file(from_json, LOCAL_JSON)
+            if ret is False:
+                return
+            from_json = LOCAL_JSON
+        else:
+            from_json = DEFAULT_MANIFEST_FILE
+        data = {}
+        with open(from_json, 'r') as file:
+            data = json.load(file)
+        # Validate against the manifest schema
+        Manifest.marshal(data)
+
+        # Make sure the 'name' is overwritten into the dict
+        data['package']['name'] = name
+        data['service']['name'] = name
+
+        with open(mfile_name, 'w') as file:
+            json.dump(data, file, indent=4)
+        click.echo(f"Manifest '{name}' created successfully.")
+
+    def update_package_manifest(self, name, from_json):
+        if name == "default_manifest":
+            click.echo("Updating the default manifest is not allowed")
+            return
+
+        original_file = os.path.join(MANIFESTS_LOCATION, name)
+        if not os.path.exists(original_file):
+            click.echo(f'Local manifest file for {name} does not exist, cannot update')
+            return
+        # Download the JSON file from the remote/local path
+        ret = self.download_file(from_json, LOCAL_JSON)
+        if ret is False:
+            return
+        from_json = LOCAL_JSON
+
+        with open(from_json, 'r') as file:
+            data = json.load(file)
+
+        # Validate against the manifest schema
+        Manifest.marshal(data)
+
+        # Make sure the 'name' is overwritten into the dict
+        data['package']['name'] = name
+
data['service']['name'] = name + + if self.is_installed(name): + edit_name = name + '.edit' + edit_file = os.path.join(MANIFESTS_LOCATION, edit_name) + with open(edit_file, 'w') as edit_file: + json.dump(data, edit_file, indent=4) + click.echo(f"Manifest '{name}' updated successfully.") + else: + # If package is not installed, + # update the name file directly + with open(original_file, 'w') as orig_file: + json.dump(data, orig_file, indent=4) + click.echo(f"Manifest '{name}' updated successfully.") + + def delete_package_manifest(self, name): + if name == "default_manifest": + click.echo("Default Manifest deletion is not allowed") + return + # Check if the manifest file exists + mfile_name = "{}/{}".format(MANIFESTS_LOCATION, name) + if not os.path.exists(mfile_name): + click.echo("Error: Manifest file '{}' not found.".format(name)) + return + # Confirm deletion with user input + confirm = click.prompt("Are you sure you want to delete the manifest file '{}'? (y/n)".format(name), type=str) + if confirm.lower() == 'y': + os.remove(mfile_name) + click.echo("Manifest '{}' deleted successfully.".format(name)) + else: + click.echo("Deletion cancelled.") + return + + def show_package_manifest(self, name): + mfile_name = "{}/{}".format(MANIFESTS_LOCATION, name) + edit_file_name = "{}.edit".format(mfile_name) + if os.path.exists(edit_file_name): + mfile_name = edit_file_name + with open(mfile_name, 'r') as file: + data = json.load(file) + click.echo("Manifest file: {}".format(name)) + click.echo(json.dumps(data, indent=4)) + + def list_package_manifest(self): + # Get all files in the manifest location + manifest_files = os.listdir(MANIFESTS_LOCATION) + if not manifest_files: + click.echo("No custom local manifest files found.") + else: + click.echo("Custom Local Manifest files:") + for file in manifest_files: + click.echo("- {}".format(file)) + @staticmethod def get_manager() -> 'PackageManager': """ Creates and returns PackageManager instance. 
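For orientation, here is one plausible way to drive the manifest CRUD methods above from Python. `PackageManager.get_manager()` is the factory defined at the end of this file, while the package name and file path here are made up (and, like the CLI, these calls generally require root):

```python
from sonic_package_manager.manager import PackageManager

manager = PackageManager.get_manager()

# Seed a custom manifest from the default manifest template
manager.create_package_manifest("my-feature", from_json=None)

# Replace it from an edited JSON file (a local path or an http/https/scp/sftp URL)
manager.update_package_manifest("my-feature", "/tmp/my-feature.json")

# Inspect, then clean up
manager.show_package_manifest("my-feature")
manager.delete_package_manifest("my-feature")
```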
diff --git a/sonic_package_manager/manifest.py b/sonic_package_manager/manifest.py
index 865db7ef5c..bc156f102c 100644
--- a/sonic_package_manager/manifest.py
+++ b/sonic_package_manager/manifest.py
@@ -10,7 +10,12 @@
 )
 from sonic_package_manager.errors import ManifestError
 from sonic_package_manager.version import Version
+from sonic_package_manager.database import BASE_LIBRARY_PATH
+import os
+import json
 
+MANIFESTS_LOCATION = os.path.join(BASE_LIBRARY_PATH, "manifests")
+DEFAULT_MANIFEST_FILE = os.path.join(BASE_LIBRARY_PATH, "default_manifest")
 
 class ManifestSchema:
     """ ManifestSchema class describes and provides marshalling
@@ -249,3 +254,39 @@ def marshal(cls, input_dict: dict):
 
     def unmarshal(self) -> Dict:
         return self.SCHEMA.unmarshal(self)
+
+    @staticmethod
+    def get_manifest_from_local_file(name):
+
+        if '.edit' in name:
+            actual_name = name.split('.edit')[0]
+        else:
+            actual_name = name
+
+        manifest_path = os.path.join(MANIFESTS_LOCATION, name)
+        if os.path.exists(manifest_path):
+            with open(manifest_path, 'r') as file:
+                manifest_dict = json.load(file)
+                manifest_dict["package"]["name"] = actual_name
+                manifest_dict["service"]["name"] = actual_name
+        else:
+            with open(DEFAULT_MANIFEST_FILE, 'r') as file:
+                manifest_dict = json.load(file)
+                manifest_dict["package"]["name"] = actual_name
+                manifest_dict["service"]["name"] = actual_name
+            new_manifest_path = os.path.join(MANIFESTS_LOCATION, name)
+            with open(new_manifest_path, 'w') as file:
+                json.dump(manifest_dict, file, indent=4)
+
+        json_str = json.dumps(manifest_dict, indent=4)
+        desired_dict = {
+            'Tag': 'master',
+            'com': {
+                'azure': {
+                    'sonic': {
+                        'manifest': json_str
+                    }
+                }
+            }
+        }
+        return desired_dict
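`get_manifest_from_local_file()` returns the manifest wrapped in a nested dict because the metadata resolver normally expands the flat Docker label `com.azure.sonic.manifest` into exactly this tree (via `translate_plain_to_tree()` in `metadata.py` below) before reading it; wrapping a local manifest the same way lets both paths share `from_labels()`. A minimal sketch of the equivalence, with made-up manifest contents:

```python
import json

manifest = {"package": {"name": "demo"}, "service": {"name": "demo"}}

# What a Docker image carries: a flat label key with dots...
flat_labels = {"com.azure.sonic.manifest": json.dumps(manifest)}

# ...which the resolver expands into a tree. A local manifest is wrapped
# into the same shape directly:
labels_tree = {"com": {"azure": {"sonic": {"manifest": json.dumps(manifest)}}}}
```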
diff --git a/sonic_package_manager/metadata.py b/sonic_package_manager/metadata.py
index b44b658a74..6485a10782 100644
--- a/sonic_package_manager/metadata.py
+++ b/sonic_package_manager/metadata.py
@@ -4,15 +4,13 @@
 
 import json
 import tarfile
-from typing import Dict, List
-
+from typing import Dict, List, Optional
 from sonic_package_manager import utils
 from sonic_package_manager.errors import MetadataError
 from sonic_package_manager.logger import log
 from sonic_package_manager.manifest import Manifest
 from sonic_package_manager.version import Version
 
-
 def translate_plain_to_tree(plain: Dict[str, str], sep='.') -> Dict:
     """ Convert plain key/value dictionary into a tree by spliting the key with '.'
@@ -65,7 +63,8 @@ def __init__(self, docker, registry_resolver):
         self.docker = docker
         self.registry_resolver = registry_resolver
 
-    def from_local(self, image: str) -> Metadata:
+    def from_local(self, image: str, use_local_manifest: bool = False,
+                   name: Optional[str] = None, use_edit: bool = False) -> Metadata:
         """ Reads manifest from locally installed docker image.
 
         Args:
@@ -75,16 +74,31 @@
         Raises:
             MetadataError
         """
+        if name and (use_local_manifest or use_edit):
+            edit_file_name = name + '.edit'
+            if use_edit:
+                labels = Manifest.get_manifest_from_local_file(edit_file_name)
+                return self.from_labels(labels)
+            elif use_local_manifest:
+                labels = Manifest.get_manifest_from_local_file(name)
+                return self.from_labels(labels)
         labels = self.docker.labels(image)
-        if labels is None:
-            raise MetadataError('No manifest found in image labels')
+        if labels is None or len(labels) == 0 or 'com.azure.sonic.manifest' not in labels:
+            if name:
+                labels = Manifest.get_manifest_from_local_file(name)
+                if labels is None:
+                    raise MetadataError('No manifest found in image labels')
+            else:
+                raise MetadataError('No manifest found in image labels')
 
         return self.from_labels(labels)
 
     def from_registry(self,
                       repository: str,
-                      reference: str) -> Metadata:
+                      reference: str,
+                      use_local_manifest: bool = False,
+                      name: Optional[str] = None) -> Metadata:
         """ Reads manifest from remote registry.
 
         Args:
@@ -96,19 +110,25 @@
             MetadataError
         """
 
-        registry = self.registry_resolver.get_registry_for(repository)
+        if use_local_manifest:
+            labels = Manifest.get_manifest_from_local_file(name)
+            return self.from_labels(labels)
 
+        registry = self.registry_resolver.get_registry_for(repository)
         manifest = registry.manifest(repository, reference)
         digest = manifest['config']['digest']
 
         blob = registry.blobs(repository, digest)
-        labels = blob['config']['Labels']
+        labels = blob['config'].get('Labels')
+        if labels is None or len(labels) == 0 or 'com.azure.sonic.manifest' not in labels:
+            if name is None:
+                raise MetadataError('The custom name option is required because no manifest was found in the image labels')
+            labels = Manifest.get_manifest_from_local_file(name)
         if labels is None:
             raise MetadataError('No manifest found in image labels')
-
         return self.from_labels(labels)
 
-    def from_tarball(self, image_path: str) -> Metadata:
+    def from_tarball(self, image_path: str, use_local_manifest: bool = False, name: Optional[str] = None) -> Metadata:
         """ Reads manifest from image tarball.
 
         Args:
             image_path: Path to image tarball.
@@ -117,16 +137,23 @@
 
         Raises:
             MetadataError
 
         """
+        if use_local_manifest:
+            labels = Manifest.get_manifest_from_local_file(name)
+            return self.from_labels(labels)
 
         with tarfile.open(image_path) as image:
             manifest = json.loads(image.extractfile('manifest.json').read())
 
             blob = manifest[0]['Config']
             image_config = json.loads(image.extractfile(blob).read())
-        labels = image_config['config']['Labels']
-        if labels is None:
-            raise MetadataError('No manifest found in image labels')
-
+        labels = image_config['config'].get('Labels')
+        if labels is None or len(labels) == 0 or 'com.azure.sonic.manifest' not in labels:
+            if name is None:
+                raise MetadataError('The custom name option is required '
+                                    'because no manifest was found in the image labels')
+            labels = Manifest.get_manifest_from_local_file(name)
+            if labels is None:
+                raise MetadataError('No manifest found in image labels')
         return self.from_labels(labels)
 
     @classmethod
diff --git a/sonic_package_manager/service_creator/creator.py b/sonic_package_manager/service_creator/creator.py
index 15d3aedd76..c88e96a44a 100644
--- a/sonic_package_manager/service_creator/creator.py
+++ b/sonic_package_manager/service_creator/creator.py
@@ -2,6 +2,7 @@
 
 import contextlib
 import os
+import glob
 import sys
 import shutil
 import stat
@@ -30,9 +31,9 @@
 
 SERVICE_FILE_TEMPLATE = 'sonic.service.j2'
-TIMER_UNIT_TEMPLATE = 'timer.unit.j2'
 
 SYSTEMD_LOCATION = '/usr/lib/systemd/system'
+ETC_SYSTEMD_LOCATION = '/etc/systemd/system'
 
 GENERATED_SERVICES_CONF_FILE = '/etc/sonic/generated_services.conf'
@@ -92,18 +93,30 @@ def set_executable_bit(filepath):
     os.chmod(filepath, st.st_mode | stat.S_IEXEC)
 
 
-def remove_if_exists(path):
+def remove_file(path):
     """ Remove filepath if it exists """
 
-    if not os.path.exists(path):
-        return
+    try:
+        os.remove(path)
+        log.info(f'removed {path}')
+    except FileNotFoundError:
+        pass
+
+
+def remove_dir(path):
+    """ Remove directory if it exists """
+
+    try:
+        shutil.rmtree(path)
+        log.info(f'removed {path}')
+    except FileNotFoundError:
+        pass
 
-    os.remove(path)
-    log.info(f'removed {path}')
 
 def is_list_of_strings(command):
     return isinstance(command, list) and all(isinstance(item, str) for item in command)
 
+
 def run_command(command: List[str]):
     """ Run arbitrary bash command.
Args: @@ -197,12 +210,22 @@ def remove(self, """ name = package.manifest['service']['name'] - remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) - remove_if_exists(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) - remove_if_exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) - remove_if_exists(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) - remove_if_exists(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) - remove_if_exists(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}.service')) + remove_file(os.path.join(SYSTEMD_LOCATION, f'{name}@.service')) + remove_file(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DOCKER_CTL_SCRIPT_LOCATION, f'{name}.sh')) + remove_file(os.path.join(DEBUG_DUMP_SCRIPT_LOCATION, f'{name}')) + remove_file(os.path.join(ETC_SONIC_PATH, f'{name}_reconcile')) + + # remove symlinks and configuration directories created by featured + remove_file(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}.service')) + for unit_file in glob.glob(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}@*.service')): + remove_file(unit_file) + + remove_dir(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}.service.d')) + for unit_dir in glob.glob(os.path.join(ETC_SYSTEMD_LOCATION, f'{name}@*.service.d')): + remove_dir(unit_dir) + self.update_dependent_list_file(package, remove=True) self.update_generated_services_conf_file(package, remove=True) @@ -281,7 +304,7 @@ def generate_service_mgmt(self, package: Package): log.info(f'generated {script_path}') def generate_systemd_service(self, package: Package): - """ Generates systemd service(s) file and timer(s) (if needed) for package. + """ Generates systemd service(s) file for package. Args: package: Package object to generate service for. @@ -309,23 +332,6 @@ def generate_systemd_service(self, package: Package): render_template(template, output_file, template_vars) log.info(f'generated {output_file}') - if package.manifest['service']['delayed']: - template_vars = { - 'source': get_tmpl_path(TIMER_UNIT_TEMPLATE), - 'manifest': package.manifest.unmarshal(), - 'multi_instance': False, - } - output_file = os.path.join(SYSTEMD_LOCATION, f'{name}.timer') - template = os.path.join(TEMPLATES_PATH, TIMER_UNIT_TEMPLATE) - render_template(template, output_file, template_vars) - log.info(f'generated {output_file}') - - if package.manifest['service']['asic-service']: - output_file = os.path.join(SYSTEMD_LOCATION, f'{name}@.timer') - template_vars['multi_instance'] = True - render_template(template, output_file, template_vars) - log.info(f'generated {output_file}') - def update_generated_services_conf_file(self, package: Package, remove=False): """ Updates generated_services.conf file. diff --git a/sonic_package_manager/service_creator/feature.py b/sonic_package_manager/service_creator/feature.py index 43b6c309fe..32a155206c 100644 --- a/sonic_package_manager/service_creator/feature.py +++ b/sonic_package_manager/service_creator/feature.py @@ -105,8 +105,7 @@ def update(self, old_manifest: Manifest, new_manifest: Manifest): """ Migrate feature configuration. It can be that non-configurable - feature entries have to be updated. e.g: "delayed" for example if - the new feature introduces a service timer or name of the service has + feature entries have to be updated. e.g: name of the service has changed, but user configurable entries are not changed). 
Args: diff --git a/sonic_package_manager/source.py b/sonic_package_manager/source.py index 7a13dccbac..2a0f07b0f1 100644 --- a/sonic_package_manager/source.py +++ b/sonic_package_manager/source.py @@ -4,7 +4,7 @@ from sonic_package_manager.dockerapi import DockerApi, get_repository_from_image from sonic_package_manager.metadata import Metadata, MetadataResolver from sonic_package_manager.package import Package - +from typing import Optional class PackageSource(object): """ PackageSource abstracts the way manifest is read @@ -105,20 +105,24 @@ def __init__(self, tarball_path: str, database: PackageDatabase, docker: DockerApi, - metadata_resolver: MetadataResolver): + metadata_resolver: MetadataResolver, + use_local_manifest: bool = False, + name: Optional[str] = None): super().__init__(database, docker, metadata_resolver) self.tarball_path = tarball_path + self.use_local_manifest = use_local_manifest + self.name = name def get_metadata(self) -> Metadata: """ Returns manifest read from tarball. """ - - return self.metadata_resolver.from_tarball(self.tarball_path) + return self.metadata_resolver.from_tarball(self.tarball_path, + use_local_manifest=self.use_local_manifest, + name=self.name) def install_image(self, package: Package): """ Installs image from local tarball source. """ - return self.docker.load(self.tarball_path) @@ -131,18 +135,24 @@ def __init__(self, reference: str, database: PackageDatabase, docker: DockerApi, - metadata_resolver: MetadataResolver): + metadata_resolver: MetadataResolver, + use_local_manifest: bool = False, + name: Optional[str] = None): super().__init__(database, docker, metadata_resolver) self.repository = repository self.reference = reference + self.use_local_manifest = use_local_manifest + self.name = name def get_metadata(self) -> Metadata: """ Returns manifest read from registry. """ return self.metadata_resolver.from_registry(self.repository, - self.reference) + self.reference, + self.use_local_manifest, + self.name) def install_image(self, package: Package): """ Installs image from registry. """ @@ -161,11 +171,17 @@ def __init__(self, entry: PackageEntry, database: PackageDatabase, docker: DockerApi, - metadata_resolver: MetadataResolver): + metadata_resolver: MetadataResolver, + use_local_manifest: bool = False, + name: Optional[str] = None, + use_edit: bool = False): super().__init__(database, docker, metadata_resolver) self.entry = entry + self.use_local_manifest = use_local_manifest + self.name = name + self.use_edit = use_edit def get_metadata(self) -> Metadata: """ Returns manifest read from locally installed Docker. """ @@ -177,8 +193,7 @@ def get_metadata(self) -> Metadata: # won't have image_id in database. Using their # repository name as image. 
image = f'{self.entry.repository}:latest'
-
-        return self.metadata_resolver.from_local(image)
+        return self.metadata_resolver.from_local(image, self.use_local_manifest, self.name, self.use_edit)
 
     def get_package(self) -> Package:
         return Package(self.entry, self.get_metadata())
diff --git a/ssdutil/main.py b/ssdutil/main.py
index 62f43037e7..460c7f769a 100755
--- a/ssdutil/main.py
+++ b/ssdutil/main.py
@@ -6,21 +6,61 @@
 #
 
 try:
-    import argparse
     import os
     import sys
+    import argparse
+    import psutil
+    from blkinfo import BlkDiskInfo
     from sonic_py_common import device_info, logger
 except ImportError as e:
     raise ImportError("%s - required module not found" % str(e))
 
-DEFAULT_DEVICE="/dev/sda"
+DEFAULT_DEVICE = "/dev/sda"
 SYSLOG_IDENTIFIER = "ssdutil"
+DISK_TYPE_SSD = "sata"
 
 # Global logger instance
 log = logger.Logger(SYSLOG_IDENTIFIER)
 
 
+def get_default_disk():
+    """Determine the default disk device and its type from the /host mount"""
+    default_device = DEFAULT_DEVICE
+    host_mnt = '/host'
+    host_partition = None
+    partitions = psutil.disk_partitions()
+
+    if partitions is None:
+        return (default_device, None)
+
+    for parts in partitions:
+        if parts.mountpoint == host_mnt:
+            host_partition = parts
+            break
+
+    if host_partition is None:
+        # No /host mount was found; fall back to the default device
+        return (default_device, None)
+
+    disk_major = os.major(os.stat(host_partition.device).st_rdev)
+    filters = {
+        'maj:min': '{}:0'.format(disk_major)
+    }
+
+    myblkd = BlkDiskInfo()
+    my_filtered_disks = myblkd.get_disks(filters)
+
+    if not my_filtered_disks:
+        return (default_device, None)
+
+    json_output = my_filtered_disks[0]
+    blkdev = json_output['name']
+    disk_type = json_output['tran']
+    default_device = os.path.join("/dev/", blkdev)
+
+    # Disk type support for eMMC devices
+    disk_type = 'eMMC' if len(disk_type) == 0 and 'mmcblk' in host_partition.device else disk_type  # noqa: E501
+
+    return default_device, disk_type
+
+
 def import_ssd_api(diskdev):
     """
     Loads platform specific or generic ssd_util module from source
@@ -37,15 +77,16 @@ def import_ssd_api(diskdev):
         sys.path.append(os.path.abspath(platform_plugins_path))
         from ssd_util import SsdUtil
     except ImportError as e:
-        log.log_warning("Platform specific SsdUtil module not found. Falling down to the generic implementation")
+        log.log_warning("Platform specific SsdUtil module not found. Falling back to the generic implementation")  # noqa: E501
     try:
-        from sonic_platform_base.sonic_ssd.ssd_generic import SsdUtil
+        from sonic_platform_base.sonic_storage.ssd import SsdUtil
     except ImportError as e:
-        log.log_error("Failed to import default SsdUtil. Error: {}".format(str(e)), True)
+        log.log_error("Failed to import default SsdUtil. 
Error: {}".format(str(e)), True) # noqa: E501 raise e return SsdUtil(diskdev) + def is_number(s): try: float(s) @@ -53,6 +94,7 @@ def is_number(s): except ValueError: return False + # ==================== Entry point ==================== def ssdutil(): if os.geteuid() != 0: @@ -60,21 +102,24 @@ def ssdutil(): sys.exit(1) parser = argparse.ArgumentParser() - parser.add_argument("-d", "--device", help="Device name to show health info", default=DEFAULT_DEVICE) - parser.add_argument("-v", "--verbose", action="store_true", default=False, help="Show verbose output (some additional parameters)") - parser.add_argument("-e", "--vendor", action="store_true", default=False, help="Show vendor output (extended output if provided by platform vendor)") + (default_device, disk_type) = get_default_disk() + parser.add_argument("-d", "--device", help="Device name to show health info", default=default_device) # noqa: E501 + parser.add_argument("-v", "--verbose", action="store_true", default=False, help="Show verbose output (some additional parameters)") # noqa: E501 + parser.add_argument("-e", "--vendor", action="store_true", default=False, help="Show vendor output (extended output if provided by platform vendor)") # noqa: E501 args = parser.parse_args() + print("Disk Type : {0}".format(disk_type.upper())) ssd = import_ssd_api(args.device) print("Device Model : {}".format(ssd.get_model())) if args.verbose: print("Firmware : {}".format(ssd.get_firmware())) print("Serial : {}".format(ssd.get_serial())) - print("Health : {}{}".format(ssd.get_health(), "%" if is_number(ssd.get_health()) else "")) - print("Temperature : {}{}".format(ssd.get_temperature(), "C" if is_number(ssd.get_temperature()) else "")) + print("Health : {}{}".format(ssd.get_health(), "%" if is_number(ssd.get_health()) else "")) # noqa: E501 + print("Temperature : {}{}".format(ssd.get_temperature(), "C" if is_number(ssd.get_temperature()) else "")) # noqa: E501 if args.vendor: print(ssd.get_vendor_output()) + if __name__ == '__main__': ssdutil() diff --git a/tests/bgp_commands_input/bgp_network_test_vector.py b/tests/bgp_commands_input/bgp_network_test_vector.py index da93e8e8e8..f9edd66fa2 100644 --- a/tests/bgp_commands_input/bgp_network_test_vector.py +++ b/tests/bgp_commands_input/bgp_network_test_vector.py @@ -227,6 +227,9 @@ multi_asic_bgp_network_err = \ """Error: -n/--namespace option required. provide namespace from list ['asic0', 'asic1']""" +multi_asic_bgp_network_asic_unknown_err = \ + """Error: invalid namespace asic_unknown. provide namespace from list ['asic0', 'asic1']""" + bgp_v4_network_asic0 = \ """ BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 @@ -276,7 +279,7 @@ *=i10.0.0.42/31 10.1.0.2 0 100 0 ? *>i 10.1.0.0 0 100 0 ? *=i10.0.0.44/31 10.1.0.2 0 100 0 ? -*>i 10.1.0.0 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? """ bgp_v4_network_ip_address_asic0 = \ @@ -311,6 +314,111 @@ Last update: Thu Apr 22 02:13:30 2021 """ +bgp_v4_network_all_asic = \ + """ +======== namespace asic0 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? 
- incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? + +======== namespace asic1 ======== + +BGP table version is 11256, local router ID is 10.1.0.32, vrf id 0 +Default local pref 100, local AS 65100 +Status codes: s suppressed, d damped, h history, * valid, > best, = multipath, + i internal, r RIB-failure, S Stale, R Removed +Nexthop codes: @NNN nexthop's vrf id, < announce-nh-self +Origin codes: i - IGP, e - EGP, ? - incomplete + + Network Next Hop Metric LocPrf Weight Path +* i0.0.0.0/0 10.1.0.2 100 0 65200 6666 6667 i +* i 10.1.0.0 100 0 65200 6666 6667 i +*= 10.0.0.5 0 65200 6666 6667 i +*> 10.0.0.1 0 65200 6666 6667 i +* i8.0.0.0/32 10.1.0.2 0 100 0 i +* i 10.1.0.0 0 100 0 i +* 0.0.0.0 0 32768 ? +*> 0.0.0.0 0 32768 i +*=i8.0.0.1/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.2/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*=i8.0.0.3/32 10.1.0.2 0 100 0 i +*>i 10.1.0.0 0 100 0 i +*>i8.0.0.4/32 10.1.0.0 0 100 0 i +*>i8.0.0.5/32 10.1.0.2 0 100 0 i +* i10.0.0.0/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +* i10.0.0.4/31 10.1.0.2 0 100 0 ? +* i 10.1.0.0 0 100 0 ? +*> 0.0.0.0 0 32768 ? +*=i10.0.0.8/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.12/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.32/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.34/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.36/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.38/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.40/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.42/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? +*=i10.0.0.44/31 10.1.0.2 0 100 0 ? +*>i 10.1.0.0 0 100 0 ? 
+""" + bgp_v6_network_asic0 = \ """ BGP table version is 12849, local router ID is 10.1.0.32, vrf id 0 @@ -429,6 +537,9 @@ def mock_show_bgp_network_multi_asic(param): return bgp_v6_network_ip_address_asic0 elif param == 'bgp_v6_network_bestpath_asic0': return bgp_v6_network_ip_address_asic0_bestpath + elif param == "bgp_v4_network_all_asic": + # this is mocking the output of a single LC + return bgp_v4_network_asic0 else: return '' @@ -454,6 +565,11 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 1, 'rc_output': bgp_v4_network_longer_prefixes_error }, + 'bgp_v4_network_all_asic_on_single_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network + }, 'bgp_v6_network': { 'args': [], 'rc': 0, @@ -479,10 +595,10 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v6_network_longer_prefixes }, - 'bgp_v4_network_multi_asic': { + 'bgp_v4_network_default_multi_asic': { 'args': [], - 'rc': 2, - 'rc_err_msg': multi_asic_bgp_network_err + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic }, 'bgp_v4_network_asic0': { 'args': ['-nasic0'], @@ -499,6 +615,16 @@ def mock_show_bgp_network_multi_asic(param): 'rc': 0, 'rc_output': bgp_v4_network_bestpath_asic0 }, + 'bgp_v4_network_all_asic': { + 'args': ['-nall'], + 'rc': 0, + 'rc_output': bgp_v4_network_all_asic + }, + 'bgp_v4_network_asic_unknown': { + 'args': ['-nasic_unknown'], + 'rc': 2, + 'rc_err_msg': multi_asic_bgp_network_asic_unknown_err + }, 'bgp_v6_network_multi_asic': { 'args': [], 'rc': 2, diff --git a/tests/bgp_commands_test.py b/tests/bgp_commands_test.py index a60ba8c81f..11415e8727 100644 --- a/tests/bgp_commands_test.py +++ b/tests/bgp_commands_test.py @@ -336,7 +336,7 @@ 3.3.3.8 4 65100 12 10 0 0 0 00:00:15 4 str2-sonic-lc1-1-ASIC1 Total number of neighbors 6 -""" +""" # noqa: E501 class TestBgpCommandsSingleAsic(object): diff --git a/tests/bgp_input/assert_show_output.py b/tests/bgp_input/assert_show_output.py new file mode 100644 index 0000000000..3671c3ce5f --- /dev/null +++ b/tests/bgp_input/assert_show_output.py @@ -0,0 +1,55 @@ +""" +Module holding the correct values for show CLI command outputs for the bgp_test.py +""" + +show_device_global_empty = """\ +No configuration is present in CONFIG DB +""" + +show_device_global_all_disabled = """\ +TSA W-ECMP +-------- -------- +disabled disabled +""" +show_device_global_all_disabled_json = """\ +{ + "tsa": "disabled", + "w-ecmp": "disabled" +} +""" + +show_device_global_all_enabled = """\ +TSA W-ECMP +------- -------- +enabled enabled +""" +show_device_global_all_enabled_json = """\ +{ + "tsa": "enabled", + "w-ecmp": "enabled" +} +""" + +show_device_global_tsa_enabled = """\ +TSA W-ECMP +------- -------- +enabled disabled +""" +show_device_global_tsa_enabled_json = """\ +{ + "tsa": "enabled", + "w-ecmp": "disabled" +} +""" + +show_device_global_wcmp_enabled = """\ +TSA W-ECMP +-------- -------- +disabled enabled +""" +show_device_global_wcmp_enabled_json = """\ +{ + "tsa": "disabled", + "w-ecmp": "enabled" +} +""" diff --git a/tests/bgp_input/mock_config/all_disabled.json b/tests/bgp_input/mock_config/all_disabled.json new file mode 100644 index 0000000000..30a929c7b7 --- /dev/null +++ b/tests/bgp_input/mock_config/all_disabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "false", + "wcmp_enabled": "false" + } +} diff --git a/tests/bgp_input/mock_config/all_enabled.json b/tests/bgp_input/mock_config/all_enabled.json new file mode 100644 index 0000000000..eab39897bb --- /dev/null +++ 
b/tests/bgp_input/mock_config/all_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "true", + "wcmp_enabled": "true" + } +} diff --git a/tests/bgp_input/mock_config/empty.json b/tests/bgp_input/mock_config/empty.json new file mode 100644 index 0000000000..e77dd4d79e --- /dev/null +++ b/tests/bgp_input/mock_config/empty.json @@ -0,0 +1,5 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "NULL": "NULL" + } +} diff --git a/tests/bgp_input/mock_config/tsa_enabled.json b/tests/bgp_input/mock_config/tsa_enabled.json new file mode 100644 index 0000000000..9c72a5f79d --- /dev/null +++ b/tests/bgp_input/mock_config/tsa_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "true", + "wcmp_enabled": "false" + } +} diff --git a/tests/bgp_input/mock_config/wcmp_enabled.json b/tests/bgp_input/mock_config/wcmp_enabled.json new file mode 100644 index 0000000000..fddc76b618 --- /dev/null +++ b/tests/bgp_input/mock_config/wcmp_enabled.json @@ -0,0 +1,6 @@ +{ + "BGP_DEVICE_GLOBAL|STATE": { + "tsa_enabled": "false", + "wcmp_enabled": "true" + } +} diff --git a/tests/bgp_test.py b/tests/bgp_test.py new file mode 100644 index 0000000000..d64d0b9eea --- /dev/null +++ b/tests/bgp_test.py @@ -0,0 +1,130 @@ +import pytest +import os +import logging +import show.main as show +import config.main as config + +from click.testing import CliRunner +from utilities_common.db import Db +from .mock_tables import dbconnector +from .bgp_input import assert_show_output + + +test_path = os.path.dirname(os.path.abspath(__file__)) +input_path = os.path.join(test_path, "bgp_input") +mock_config_path = os.path.join(input_path, "mock_config") + +logger = logging.getLogger(__name__) + + +SUCCESS = 0 + + +class TestBgp: + @classmethod + def setup_class(cls): + logger.info("Setup class: {}".format(cls.__name__)) + os.environ['UTILITIES_UNIT_TESTING'] = "1" + + @classmethod + def teardown_class(cls): + logger.info("Teardown class: {}".format(cls.__name__)) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + dbconnector.dedicated_dbs.clear() + + # ---------- CONFIG BGP ---------- # + + @pytest.mark.parametrize( + "feature", [ + "tsa", + "w-ecmp" + ] + ) + @pytest.mark.parametrize( + "state", [ + "enabled", + "disabled" + ] + ) + def test_config_device_global(self, feature, state): + db = Db() + runner = CliRunner() + + result = runner.invoke( + config.config.commands["bgp"].commands["device-global"]. 
+ commands[feature].commands[state], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.exit_code == SUCCESS + + # ---------- SHOW BGP ---------- # + + @pytest.mark.parametrize( + "cfgdb,output", [ + pytest.param( + os.path.join(mock_config_path, "empty"), + { + "plain": assert_show_output.show_device_global_empty, + "json": assert_show_output.show_device_global_empty + }, + id="empty" + ), + pytest.param( + os.path.join(mock_config_path, "all_disabled"), + { + "plain": assert_show_output.show_device_global_all_disabled, + "json": assert_show_output.show_device_global_all_disabled_json + }, + id="all-disabled" + ), + pytest.param( + os.path.join(mock_config_path, "all_enabled"), + { + "plain": assert_show_output.show_device_global_all_enabled, + "json": assert_show_output.show_device_global_all_enabled_json + }, + id="all-enabled" + ), + pytest.param( + os.path.join(mock_config_path, "tsa_enabled"), + { + "plain": assert_show_output.show_device_global_tsa_enabled, + "json": assert_show_output.show_device_global_tsa_enabled_json + }, + id="tsa-enabled" + ), + pytest.param( + os.path.join(mock_config_path, "wcmp_enabled"), + { + "plain": assert_show_output.show_device_global_wcmp_enabled, + "json": assert_show_output.show_device_global_wcmp_enabled_json + }, + id="w-ecmp-enabled" + ) + ] + ) + @pytest.mark.parametrize( + "format", [ + "plain", + "json", + ] + ) + def test_show_device_global(self, cfgdb, output, format): + dbconnector.dedicated_dbs["CONFIG_DB"] = cfgdb + + db = Db() + runner = CliRunner() + + result = runner.invoke( + show.cli.commands["bgp"].commands["device-global"], + [] if format == "plain" else ["--json"], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + assert result.output == output[format] + assert result.exit_code == SUCCESS diff --git a/tests/bmp_input/bmp.json b/tests/bmp_input/bmp.json new file mode 100644 index 0000000000..6f3583f549 --- /dev/null +++ b/tests/bmp_input/bmp.json @@ -0,0 +1,9 @@ +{ + "BMP": { + "table": { + "bgp_neighbor_table": "false", + "bgp_rib_in_table": "false", + "bgp_rib_out_table": "false" + } + } +} diff --git a/tests/bmp_input/bmp_invalid.json b/tests/bmp_input/bmp_invalid.json new file mode 100644 index 0000000000..87a4f937da --- /dev/null +++ b/tests/bmp_input/bmp_invalid.json @@ -0,0 +1,6 @@ +{ + "BMP": { + "table": { + } + } +} diff --git a/tests/chassis_modules_test.py b/tests/chassis_modules_test.py old mode 100644 new mode 100755 index 940e30c04b..f59341a487 --- a/tests/chassis_modules_test.py +++ b/tests/chassis_modules_test.py @@ -7,6 +7,8 @@ import tests.mock_tables.dbconnector from utilities_common.db import Db from .utils import get_result_and_return_code +from unittest import mock +sys.modules['clicommon'] = mock.Mock() show_linecard0_shutdown_output="""\ LINE-CARD0 line-card 1 Empty down LC1000101 @@ -15,6 +17,15 @@ show_linecard0_startup_output="""\ LINE-CARD0 line-card 1 Empty up LC1000101 """ + +show_fabriccard0_shutdown_output = """\ +FABRIC-CARD0 fabric-card 17 Online down FC1000101 +""" + +show_fabriccard0_startup_output = """\ +FABRIC-CARD0 fabric-card 17 Online up FC1000101 +""" + header_lines = 2 warning_lines = 0 @@ -113,6 +124,17 @@ Linecard4|Asic2|PortChannel0001 2 22 Linecard4|Asic2|Ethernet29, Linecard4|Asic2|Ethernet30 """ + +def mock_run_command_side_effect(*args, **kwargs): + print("command: {}".format(*args)) + if isinstance(*args, list): + return '', 0 + else: + print("Expected type of command is list. 
Actual type is {}".format(*args)) + assert 0 + return '', 0 + + class TestChassisModules(object): @classmethod def setup_class(cls): @@ -186,6 +208,47 @@ def test_config_shutdown_module(self): #db.cfgdb.set_entry("CHASSIS_MODULE", "LINE-CARD0", { "admin_status" : "down" }) #db.get_data("CHASSIS_MODULE", "LINE-CARD0") + def test_config_shutdown_module_fabric(self): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + runner = CliRunner() + db = Db() + + chassisdb = db.db + chassisdb.connect("CHASSIS_STATE_DB") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_id_in_module", "0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_pci_address", "nokia-bdb:4:0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "name", "FABRIC-CARD0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_id_in_module", "1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_pci_address", "nokia-bdb:4:1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "name", "FABRIC-CARD0") + chassisdb.close("CHASSIS_STATE_DB") + + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["shutdown"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + header_lines = 2 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_shutdown_output.strip('\n') + + fvs = {'admin_status': 'down'} + db.cfgdb.set_entry('CHASSIS_MODULE', "FABRIC-CARD0", fvs) + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["shutdown"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 6 + def test_config_startup_module(self): runner = CliRunner() db = Db() @@ -202,6 +265,62 @@ def test_config_startup_module(self): result_out = " ".join((result_lines[header_lines]).split()) assert result_out.strip('\n') == show_linecard0_startup_output.strip('\n') + def test_config_startup_module_fabric(self): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + runner = CliRunner() + db = Db() + + chassisdb = db.db + chassisdb.connect("CHASSIS_STATE_DB") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_id_in_module", "0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "asic_pci_address", "nokia-bdb:4:0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic6", "name", "FABRIC-CARD0") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_id_in_module", "1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "asic_pci_address", "nokia-bdb:4:1") + chassisdb.set("CHASSIS_STATE_DB", "CHASSIS_FABRIC_ASIC_TABLE|asic7", "name", "FABRIC-CARD0") + chassisdb.close("CHASSIS_STATE_DB") + + # FC is down and doing startup + fvs = {'admin_status': 'down'} + db.cfgdb.set_entry('CHASSIS_MODULE', "FABRIC-CARD0", fvs) + + result = 
runner.invoke(config.config.commands["chassis"].commands["modules"].commands["startup"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_startup_output.strip('\n') + assert mock_run_command.call_count == 2 + + # FC is up and doing startup + fvs = {'admin_status': 'up'} + db.cfgdb.set_entry('CHASSIS_MODULE', "FABRIC-CARD0", fvs) + + result = runner.invoke(config.config.commands["chassis"].commands["modules"].commands["startup"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands["chassis"].commands["modules"].commands["status"], + ["FABRIC-CARD0"], obj=db) + print(result.exit_code) + print(result.output) + result_lines = result.output.strip('\n').split('\n') + assert result.exit_code == 0 + result_out = " ".join((result_lines[header_lines]).split()) + assert result_out.strip('\n') == show_fabriccard0_startup_output.strip('\n') + assert mock_run_command.call_count == 2 + def test_config_incorrect_module(self): runner = CliRunner() db = Db() diff --git a/tests/cli_sessions_test.py b/tests/cli_sessions_test.py new file mode 100644 index 0000000000..755b232708 --- /dev/null +++ b/tests/cli_sessions_test.py @@ -0,0 +1,32 @@ +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db + + +class TestCliSessionsCommands: + def test_config_command(self): + runner = CliRunner() + + db = Db() + + result = runner.invoke(config.config.commands['serial_console'].commands['sysrq-capabilities'], + ['enabled'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['serial_console'].commands['inactivity-timeout'], + ['180'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands['serial_console'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['ssh'].commands['inactivity-timeout'], ['190'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(config.config.commands['ssh'].commands['max-sessions'], ['60'], obj=db) + assert result.exit_code == 0 + + result = runner.invoke(show.cli.commands['ssh'], obj=db) + assert result.exit_code == 0 diff --git a/tests/config_dpb_test.py b/tests/config_dpb_test.py index 58a24dc958..0a3d99cbcd 100644 --- a/tests/config_dpb_test.py +++ b/tests/config_dpb_test.py @@ -350,7 +350,7 @@ def test_config_breakout_extra_table_warning(self, breakout_cfg_file, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Below Config can not be verified' in result.output assert 'UNKNOWN_TABLE' in result.output assert 'Do you wish to Continue?' 
in result.output @@ -396,7 +396,7 @@ def test_config_breakout_verbose(self, sonic_db): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v', '-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Dependencies Exist.' in result.output # verbose must be set while creating instance of ConfigMgmt class @@ -538,7 +538,7 @@ def config_dpb_port8_2x50G_1x100G(): commands["breakout"], ['{}'.format(interface), '{}'.format(newMode), '-v','-y'], obj=obj) print(result.exit_code, result.output) - assert result.exit_code == 0 + assert result.exit_code == 1 assert 'Dependencies Exist.' in result.output assert 'Printing dependencies' in result.output assert 'NO-NSW-PACL-V4' in result.output diff --git a/tests/config_mlnx_test.py b/tests/config_mlnx_test.py deleted file mode 100644 index 0cf2e117b4..0000000000 --- a/tests/config_mlnx_test.py +++ /dev/null @@ -1,47 +0,0 @@ -import sys -import click -import pytest -import config.plugins.mlnx as config -from unittest.mock import patch, Mock -from click.testing import CliRunner -from utilities_common.db import Db - - -@patch('config.plugins.mlnx.sniffer_env_variable_set', Mock(return_value=False)) -@patch('config.plugins.mlnx.sniffer_filename_generate', Mock(return_value="sdk_file_name")) -class TestConfigMlnx(object): - def setup(self): - print('SETUP') - - - @patch('config.plugins.mlnx.restart_swss', Mock(return_value=0)) - def test_config_sniffer_enable(self): - db = Db() - runner = CliRunner() - result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["enable"],["-y"]) - assert "SDK sniffer is Enabled, recording file is sdk_file_name." in result.output - - @patch('config.plugins.mlnx.restart_swss', Mock(return_value=0)) - def test_config_sniffer_disble(self): - db = Db() - runner = CliRunner() - result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["disable"],["-y"]) - assert "SDK sniffer is Disabled." in result.output - - @patch('config.plugins.mlnx.restart_swss', Mock(return_value=1)) - def test_config_sniffer_enable_fail(self): - db = Db() - runner = CliRunner() - result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["enable"],["-y"]) - assert "SDK sniffer is Enabled, recording file is sdk_file_name." not in result.output - - @patch('config.plugins.mlnx.restart_swss', Mock(return_value=1)) - def test_config_sniffer_disble_fail(self): - db = Db() - runner = CliRunner() - result = runner.invoke(config.mlnx.commands["sniffer"].commands["sdk"].commands["disable"],["-y"]) - assert "SDK sniffer is Disabled." 
not in result.output - - def teardown(self): - print('TEARDOWN') - diff --git a/tests/config_override_input/golden_input_yang_failure.json b/tests/config_override_input/golden_input_yang_failure.json deleted file mode 100644 index 4b533e1598..0000000000 --- a/tests/config_override_input/golden_input_yang_failure.json +++ /dev/null @@ -1,89 +0,0 @@ -{ - "running_config": { - "ACL_TABLE": { - "DATAACL": { - "policy_desc": "DATAACL", - "ports": [ - "Ethernet4" - ], - "stage": "ingress", - "type": "L3" - }, - "NTP_ACL": { - "policy_desc": "NTP_ACL", - "services": [ - "NTP" - ], - "stage": "ingress", - "type": "CTRLPLANE" - } - }, - "AUTO_TECHSUPPORT_FEATURE": { - "bgp": { - "rate_limit_interval": "600", - "state": "enabled" - }, - "database": { - "rate_limit_interval": "600", - "state": "enabled" - } - }, - "PORT": { - "Ethernet4": { - "admin_status": "up", - "alias": "fortyGigE0/4", - "description": "Servers0:eth0", - "index": "1", - "lanes": "29,30,31,32", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - }, - "Ethernet8": { - "admin_status": "up", - "alias": "fortyGigE0/8", - "description": "Servers1:eth0", - "index": "2", - "lanes": "33,34,35,36", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - } - } - }, - "golden_config": { - "ACL_TABLE": { - "EVERFLOWV6": { - "policy_desc": "EVERFLOWV6", - "ports": [ - "Ethernet0" - ], - "stage": "ingress", - "type": "MIRRORV6" - } - }, - "AUTO_TECHSUPPORT_FEATURE": { - "bgp": { - "state": "disabled" - }, - "database": { - "state": "disabled" - } - }, - "PORT": { - "Ethernet12": { - "admin_status": "up", - "alias": "fortyGigE0/12", - "description": "Servers2:eth0", - "index": "3", - "lanes": "37,38,39,40", - "mtu": "9100", - "pfc_asym": "off", - "speed": "40000", - "tpid": "0x8100" - } - } - } -} diff --git a/tests/config_override_input/partial_config_override.json b/tests/config_override_input/partial_config_override.json index 2021ea282b..f28a8ed7ae 100644 --- a/tests/config_override_input/partial_config_override.json +++ b/tests/config_override_input/partial_config_override.json @@ -71,6 +71,30 @@ "stage": "ingress", "type": "CTRLPLANE" } + }, + "PORT": { + "Ethernet4": { + "admin_status": "up", + "alias": "fortyGigE0/4", + "description": "Servers0:eth0", + "index": "1", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + }, + "Ethernet8": { + "admin_status": "up", + "alias": "fortyGigE0/8", + "description": "Servers1:eth0", + "index": "2", + "lanes": "33,34,35,36", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000", + "tpid": "0x8100" + } } }, "expected_config": { diff --git a/tests/config_override_test.py b/tests/config_override_test.py index a46be5ef60..5137585832 100644 --- a/tests/config_override_test.py +++ b/tests/config_override_test.py @@ -20,7 +20,6 @@ EMPTY_TABLE_REMOVAL = os.path.join(DATA_DIR, "empty_table_removal.json") AAA_YANG_HARD_CHECK = os.path.join(DATA_DIR, "aaa_yang_hard_check.json") RUNNING_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "running_config_yang_failure.json") -GOLDEN_INPUT_YANG_FAILURE = os.path.join(DATA_DIR, "golden_input_yang_failure.json") FINAL_CONFIG_YANG_FAILURE = os.path.join(DATA_DIR, "final_config_yang_failure.json") MULTI_ASIC_MACSEC_OV = os.path.join(DATA_DIR, "multi_asic_macsec_ov.json") MULTI_ASIC_FEATURE_RM = os.path.join(DATA_DIR, "multi_asic_feature_rm.json") @@ -179,7 +178,7 @@ def read_json_file_side_effect(filename): ['golden_config_db.json'], obj=db) assert result.exit_code != 0 
- assert "Authentication with 'tacacs+' is not allowed when passkey not exits." in result.output + assert "Authentication with 'tacacs+' is not allowed when passkey not exists." in result.output def check_override_config_table(self, db, config, running_config, golden_config, expected_config): @@ -233,17 +232,6 @@ def is_yang_config_validation_enabled_side_effect(filename): self.check_yang_verification_failure( db, config, read_data['running_config'], read_data['golden_config'], "running config") - def test_golden_input_yang_failure(self): - def is_yang_config_validation_enabled_side_effect(filename): - return True - db = Db() - with open(GOLDEN_INPUT_YANG_FAILURE, "r") as f: - read_data = json.load(f) - with mock.patch('config.main.device_info.is_yang_config_validation_enabled', - mock.MagicMock(side_effect=is_yang_config_validation_enabled_side_effect)): - self.check_yang_verification_failure( - db, config, read_data['running_config'], read_data['golden_config'], "config_input") - def test_final_config_yang_failure(self): def is_yang_config_validation_enabled_side_effect(filename): return True diff --git a/tests/config_save_output/all_config_db.json b/tests/config_save_output/all_config_db.json new file mode 100644 index 0000000000..17c3e7fc6c --- /dev/null +++ b/tests/config_save_output/all_config_db.json @@ -0,0 +1,5 @@ +{ + "localhost": {}, + "asic0": {}, + "asic1": {} +} \ No newline at end of file diff --git a/tests/config_snmp_test.py b/tests/config_snmp_test.py index 76f5675690..25c54d36ec 100644 --- a/tests/config_snmp_test.py +++ b/tests/config_snmp_test.py @@ -877,6 +877,34 @@ def test_config_snmp_community_add_new_community_with_invalid_type_yang_validati assert result.exit_code != 0 assert 'SNMP community configuration failed' in result.output + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1%eth0', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_linklocal(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["fe80::1%eth0"], obj=obj) + assert ('fe80::1%eth0', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "fe80::1%eth0||") == {} + + @patch('netifaces.interfaces', mock.Mock(return_value=['eth0'])) + @patch('netifaces.ifaddresses', mock.Mock(return_value={2: + [{'addr': '10.1.0.32', 'netmask': '255.255.255.0', + 'broadcast': '10.1.0.255'}], + 10: [{'addr': 'fe80::1', 'netmask': 'ffff:ffff:ffff:ffff::/64'}]})) + @patch('os.system', mock.Mock(return_value=0)) + def test_config_snmpagentaddress_add_ipv4(self): + db = Db() + obj = {'db': db.cfgdb} + runner = CliRunner() + runner.invoke(config.config.commands["snmpagentaddress"].commands["add"], ["10.1.0.32"], obj=obj) + assert ('10.1.0.32', '', '') in db.cfgdb.get_keys('SNMP_AGENT_ADDRESS_CONFIG') + assert db.cfgdb.get_entry("SNMP_AGENT_ADDRESS_CONFIG", "10.1.0.32||") == {} + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/config_test.py b/tests/config_test.py index 1054a52a33..1809b5545d 100644 --- a/tests/config_test.py +++ b/tests/config_test.py @@ -1,3 +1,5 @@ +import copy +import datetime import pytest import filecmp import importlib @@ -16,6 +18,7 @@ from click.testing import 
CliRunner from sonic_py_common import device_info, multi_asic +from utilities_common import flock from utilities_common.db import Db from utilities_common.general import load_module_from_source from mock import call, patch, mock_open, MagicMock @@ -37,12 +40,32 @@ # Config Reload input Path mock_db_path = os.path.join(test_path, "config_reload_input") +mock_bmp_db_path = os.path.join(test_path, "bmp_input") + + # Load minigraph input Path load_minigraph_input_path = os.path.join(test_path, "load_minigraph_input") load_minigraph_platform_path = os.path.join(load_minigraph_input_path, "platform") load_minigraph_platform_false_path = os.path.join(load_minigraph_input_path, "platform_false") load_minigraph_command_output="""\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db +Running command: config qos reload --no-dynamic-buffer --no-delay +Running command: pfcwd start_default +Restarting SONiC target ... +Reloading Monit configuration ... +Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. +Released lock on {0} +""" + +load_minigraph_lock_failure_output = """\ +Failed to acquire lock on {0} +""" + +load_minigraph_command_bypass_lock_output = """\ +Bypass lock on {} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db Running command: config qos reload --no-dynamic-buffer --no-delay @@ -53,6 +76,7 @@ """ load_minigraph_platform_plugin_command_output="""\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -H -m --write-to-db Running command: config qos reload --no-dynamic-buffer --no-delay @@ -61,6 +85,7 @@ Restarting SONiC target ... Reloading Monit configuration ... Please note setting loaded from minigraph will be lost after system reboot. To preserve setting, run `config save`. +Released lock on {0} """ load_mgmt_config_command_ipv4_only_output="""\ @@ -135,6 +160,20 @@ """ RELOAD_CONFIG_DB_OUTPUT = """\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db +Restarting SONiC target ... +Reloading Monit configuration ... +Released lock on {0} +""" + +RELOAD_CONFIG_DB_LOCK_FAILURE_OUTPUT = """\ +Failed to acquire lock on {0} +""" + +RELOAD_CONFIG_DB_BYPASS_LOCK_OUTPUT = """\ +Bypass lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... @@ -142,31 +181,114 @@ """ RELOAD_YANG_CFG_OUTPUT = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -Y /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ RELOAD_MASIC_CONFIG_DB_OUTPUT = """\ +Acquired lock on {0} Stopping SONiC target ... Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic0 --write-to-db Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json -n asic1 --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} """ reload_config_with_sys_info_command_output="""\ +Acquired lock on {0} Running command: /usr/local/bin/sonic-cfggen -H -k Seastone-DX010-25-50 --write-to-db""" reload_config_with_disabled_service_output="""\ +Acquired lock on {0} Stopping SONiC target ... 
Running command: /usr/local/bin/sonic-cfggen -j /tmp/config.json --write-to-db Restarting SONiC target ... Reloading Monit configuration ... +Released lock on {0} +""" + +reload_config_masic_onefile_output = """\ +Acquired lock on {0} +Stopping SONiC target ... +Restarting SONiC target ... +Reloading Monit configuration ... +Released lock on {0} +""" + +reload_config_masic_onefile_gen_sysinfo_output = """\ +Acquired lock on {0} +Stopping SONiC target ... +Running command: /usr/local/bin/sonic-cfggen -H -k Mellanox-SN3800-D112C8 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic0 --write-to-db +Running command: /usr/local/bin/sonic-cfggen -H -k multi_asic -n asic1 --write-to-db +Restarting SONiC target ... +Reloading Monit configuration ... +Released lock on {0} +""" + +save_config_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /etc/sonic/config_db.json +""" + +save_config_filename_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /tmp/config_db.json +""" + +save_config_masic_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > /etc/sonic/config_db.json +Running command: /usr/local/bin/sonic-cfggen -n asic0 -d --print-data > /etc/sonic/config_db0.json +Running command: /usr/local/bin/sonic-cfggen -n asic1 -d --print-data > /etc/sonic/config_db1.json +""" + +save_config_filename_masic_output = """\ +Running command: /usr/local/bin/sonic-cfggen -d --print-data > config_db.json +Running command: /usr/local/bin/sonic-cfggen -n asic0 -d --print-data > config_db0.json +Running command: /usr/local/bin/sonic-cfggen -n asic1 -d --print-data > config_db1.json +""" + +save_config_onefile_masic_output = """\ +Integrate each ASIC's config into a single JSON file /tmp/all_config_db.json. 
""" +config_temp = { + "scope": { + "ACL_TABLE": { + "MY_ACL_TABLE": { + "policy_desc": "My ACL", + "ports": ["Ethernet1", "Ethernet2"], + "stage": "ingress", + "type": "L3" + } + }, + "PORT": { + "Ethernet1": { + "alias": "fortyGigE0/0", + "description": "fortyGigE0/0", + "index": "0", + "lanes": "29,30,31,32", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + }, + "Ethernet2": { + "alias": "fortyGigE0/100", + "description": "fortyGigE0/100", + "index": "25", + "lanes": "125,126,127,128", + "mtu": "9100", + "pfc_asym": "off", + "speed": "40000" + } + } + } + } + def mock_run_command_side_effect(*args, **kwargs): command = args[0] if isinstance(command, str): @@ -186,6 +308,10 @@ def mock_run_command_side_effect(*args, **kwargs): return 'enabled', 0 elif command == 'cat /var/run/dhclient.eth0.pid': return '101', 0 + elif command == 'sudo systemctl show --no-pager interfaces-config -p ExecMainExitTimestamp --value': + return f'{datetime.datetime.now()}', 0 + elif command == 'sudo systemctl show --no-pager networking -p ExecMainExitTimestamp --value': + return f'{datetime.datetime.now()}', 0 else: return '', 0 @@ -299,6 +425,191 @@ def test_plattform_fw_update(self, mock_check_call): assert result.exit_code == 0 mock_check_call.assert_called_with(["fwutil", "update", 'update', 'module', 'Module1', 'component', 'BIOS', 'fw']) + +class TestConfigSave(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + import config.main + importlib.reload(config.main) + + def test_config_save(self, get_cmd_module, setup_single_broadcom_asic): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + (config, show) = get_cmd_module + + runner = CliRunner() + + result = runner.invoke(config.config.commands["save"], ["-y"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_output + + def test_config_save_filename(self, get_cmd_module, setup_single_broadcom_asic): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + (config, show) = get_cmd_module + + runner = CliRunner() + + output_file = os.path.join(os.sep, "tmp", "config_db.json") + result = runner.invoke(config.config.commands["save"], ["-y", output_file]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_filename_output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + + +class TestConfigSaveMasic(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + import config.main + importlib.reload(config.main) + # change to multi asic config + from .mock_tables 
import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def test_config_save_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + runner = CliRunner() + + result = runner.invoke(config.config.commands["save"], ["-y"]) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_masic_output + + def test_config_save_filename_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)),\ + mock.patch('config.main.open', + mock.MagicMock()): + + runner = CliRunner() + + result = runner.invoke( + config.config.commands["save"], + ["-y", "config_db.json,config_db0.json,config_db1.json"] + ) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_filename_masic_output + + def test_config_save_filename_wrong_cnt_masic(self): + def read_json_file_side_effect(filename): + return {} + + with mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): + + runner = CliRunner() + + result = runner.invoke( + config.config.commands["save"], + ["-y", "config_db.json,config_db0.json"] + ) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + + assert "Input 3 config file(s) separated by comma for multiple files" in result.output + + def test_config_save_onefile_masic(self): + def get_config_side_effect(): + return {} + + with mock.patch('swsscommon.swsscommon.ConfigDBConnector.get_config', + mock.MagicMock(side_effect=get_config_side_effect)): + runner = CliRunner() + + output_file = os.path.join(os.sep, "tmp", "all_config_db.json") + print("Saving output in {}".format(output_file)) + try: + os.remove(output_file) + except OSError: + pass + result = runner.invoke( + config.config.commands["save"], + ["-y", output_file] + ) + + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == save_config_onefile_masic_output + + cwd = os.path.dirname(os.path.realpath(__file__)) + expected_result = os.path.join( + cwd, "config_save_output", "all_config_db.json" + ) + assert filecmp.cmp(output_file, expected_result, shallow=False) + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + + class TestConfigReload(object): dummy_cfg_file = os.path.join(os.sep, "tmp", "config.json") @@ -338,7 +649,8 @@ def test_config_reload(self, get_cmd_module, 
setup_single_broadcom_asic): assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')][:2]) == \ + reload_config_with_sys_info_command_output.format(config.SYSTEM_RELOAD_LOCK) def test_config_reload_stdin(self, get_cmd_module, setup_single_broadcom_asic): def mock_json_load(f): @@ -378,7 +690,8 @@ def mock_json_load(f): assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')][:1]) == reload_config_with_sys_info_command_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')][:2]) == \ + reload_config_with_sys_info_command_output.format(config.SYSTEM_RELOAD_LOCK) @classmethod def teardown_class(cls): @@ -392,63 +705,362 @@ def teardown_class(cls): dbconnector.load_namespace_config() -class TestLoadMinigraph(object): +class TestBMPConfig(object): @classmethod def setup_class(cls): + print("SETUP") os.environ['UTILITIES_UNIT_TESTING'] = "1" + yield + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" + + @pytest.mark.parametrize("table_name", [ + "bgp-neighbor-table", + "bgp-rib-in-table", + "bgp-rib-out-table" + ]) + @pytest.mark.parametrize("enabled", ["true", "false"]) + @pytest.mark.parametrize("filename", ["bmp_invalid.json", "bmp.json"]) + def test_enable_disable_table( + self, + get_cmd_module, + setup_single_broadcom_asic, + table_name, + enabled, + filename): + (config, show) = get_cmd_module + jsonfile_config = os.path.join(mock_bmp_db_path, filename) + config.DEFAULT_CONFIG_DB_FILE = jsonfile_config + runner = CliRunner() + db = Db() + + # Enable table + result = runner.invoke(config.config.commands["bmp"].commands["enable"], + [table_name], obj=db) + assert result.exit_code == 0 + + # Disable table + result = runner.invoke(config.config.commands["bmp"].commands["disable"], + [table_name], obj=db) + assert result.exit_code == 0 + + # Enable table again + result = runner.invoke(config.config.commands["bmp"].commands["enable"], + [table_name], obj=db) + assert result.exit_code == 0 + + +class TestConfigReloadMasic(object): + @classmethod + def setup_class(cls): print("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" import config.main importlib.reload(config.main) + # change to multi asic config + from .mock_tables import dbconnector + from .mock_tables import mock_multi_asic + importlib.reload(mock_multi_asic) + dbconnector.load_namespace_config() + + def test_config_reload_onefile_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "mac": "1d:34:db:16:a6:00", + "platform": "x86_64-mlnx_msn3800-r0", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": "asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "mac": "02:42:f0:7f:01:05", + "platform": "multi_asic", + "region": "None", + 
"sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "mac": "02:42:f0:7f:01:06", + "platform": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): - @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) - def test_load_minigraph(self, get_cmd_module, setup_single_broadcom_asic): - with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: - (config, show) = get_cmd_module runner = CliRunner() - result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + print(result.exit_code) print(result.output) traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_command_output - # Verify "systemctl reset-failed" is called for services under sonic.target - mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) - assert mock_run_command.call_count == 8 + assert "\n".join([li.rstrip() for li in result.output.split('\n')]) == \ + reload_config_masic_onefile_output.format(config.SYSTEM_RELOAD_LOCK) + + def test_config_reload_onefile_gen_sysinfo_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": { + "DEVICE_METADATA": { + "localhost": { + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "1", + "docker_routing_config_mode": "separated", + "hostname": "sonic-switch", + "hwsku": "Mellanox-SN3800-D112C8", + "peer_switch": "sonic-switch", + "type": "ToRRouter", + "suppress-fib-pending": "enabled" + } + } + }, + "asic0": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "01.00.0", + "asic_name": "asic0", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "FrontEnd", + "type": "LeafRouter" + } + } + }, + "asic1": { + "DEVICE_METADATA": { + "localhost": { + "asic_id": "08:00.0", + "asic_name": "asic1", + "bgp_asn": "65100", + "cloudtype": "None", + "default_bgp_status": "down", + "default_pfcwd_status": "enable", + "deployment_id": "None", + "docker_routing_config_mode": "separated", + "hostname": "sonic", + "hwsku": "multi_asic", + "region": "None", + "sub_role": "BackEnd", + "type": "LeafRouter" + } + } + } + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): - @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', 
mock.MagicMock(return_value=(load_minigraph_platform_path, None))) - def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broadcom_asic): - with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: - (config, show) = get_cmd_module runner = CliRunner() - result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + print(result.exit_code) print(result.output) traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == load_minigraph_platform_plugin_command_output - # Verify "systemctl reset-failed" is called for services under sonic.target - mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) - assert mock_run_command.call_count == 8 + assert "\n".join( + [li.rstrip() for li in result.output.split('\n')] + ) == reload_config_masic_onefile_gen_sysinfo_output.format(config.SYSTEM_RELOAD_LOCK) + + def test_config_reload_onefile_bad_format_masic(self): + def read_json_file_side_effect(filename): + return { + "localhost": {}, + "asic0": {} + } + + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)),\ + mock.patch('config.main.read_json_file', + mock.MagicMock(side_effect=read_json_file_side_effect)): - @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_false_path, None))) - def test_load_minigraph_platform_plugin_fail(self, get_cmd_module, setup_single_broadcom_asic): - print(load_minigraph_platform_false_path) - with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: - (config, show) = get_cmd_module runner = CliRunner() - result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + + result = runner.invoke(config.config.commands["reload"], ["-y", "-f", "all_config_db.json"]) + print(result.exit_code) print(result.output) traceback.print_tb(result.exc_info[2]) - assert result.exit_code != 0 - assert "Platform plugin failed" in result.output - @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) - def test_load_minigraph_with_port_config_bad_format(self, get_cmd_module, setup_single_broadcom_asic): - with mock.patch( - "utilities_common.cli.run_command", - mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: - (config, show) = get_cmd_module + assert result.exit_code != 0 + assert "Input file all_config_db.json must contain all asics config" in result.output + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + # change back to single asic config + from .mock_tables import dbconnector + from .mock_tables import mock_single_asic + importlib.reload(mock_single_asic) + dbconnector.load_namespace_config() + + +class TestLoadMinigraph(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + import config.main + importlib.reload(config.main) + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph(self, 
get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, show) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + (load_minigraph_command_output.format(config.SYSTEM_RELOAD_LOCK)) + # Verify "systemctl reset-failed" is called for services under sonic.target + mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) + assert mock_run_command.call_count == 12 + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_lock_failure(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, _) = get_cmd_module + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert result.output == \ + (load_minigraph_lock_failure_output.format(config.SYSTEM_RELOAD_LOCK)) + assert mock_run_command.call_count == 0 + finally: + flock.release_flock(fd) + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_bypass_lock(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, _) = get_cmd_module + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y", "-b"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert result.output == \ + load_minigraph_command_bypass_lock_output.format(config.SYSTEM_RELOAD_LOCK) + assert mock_run_command.call_count == 12 + finally: + flock.release_flock(fd) + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_path, None))) + def test_load_minigraph_platform_plugin(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, show) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + (load_minigraph_platform_plugin_command_output.format(config.SYSTEM_RELOAD_LOCK)) + # Verify "systemctl reset-failed" is called for services under sonic.target + mock_run_command.assert_any_call(['systemctl', 'reset-failed', 'swss']) + assert mock_run_command.call_count == 12 + + 
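+    # utilities_common.flock is exercised above through acquire_flock(fd, 0)
+    # and release_flock(fd). A minimal sketch of what such helpers could look
+    # like, assuming they wrap the standard fcntl module (an assumption for
+    # illustration, not the verified implementation):
+    #
+    #     import fcntl
+    #
+    #     def acquire_flock(fd, timeout=0):
+    #         # Non-blocking exclusive lock when timeout is 0; True on success.
+    #         try:
+    #             fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
+    #             return True
+    #         except OSError:
+    #             return False
+    #
+    #     def release_flock(fd):
+    #         fcntl.flock(fd, fcntl.LOCK_UN)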
@mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=(load_minigraph_platform_false_path, None))) + def test_load_minigraph_platform_plugin_fail(self, get_cmd_module, setup_single_broadcom_asic): + print(load_minigraph_platform_false_path) + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, show) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["-y"]) + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "Platform plugin failed" in result.output + + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_with_port_config_bad_format(self, get_cmd_module, setup_single_broadcom_asic): + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: + (config, show) = get_cmd_module # Not in an array port_config = {"PORT": {"Ethernet0": {"admin_status": "up"}}} @@ -519,8 +1131,13 @@ def is_file_side_effect(filename): def test_load_minigraph_with_specified_golden_config_path(self, get_cmd_module): def is_file_side_effect(filename): return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return {} + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command, \ - mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "--golden_config_path", "golden_config.json", "-y"]) @@ -531,14 +1148,47 @@ def is_file_side_effect(filename): def test_load_minigraph_with_default_golden_config_path(self, get_cmd_module): def is_file_side_effect(filename): return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return {} + with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command, \ - mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module runner = CliRunner() result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) assert result.exit_code == 0 assert "config override-config-table /etc/sonic/golden_config_db.json" in result.output + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', + mock.MagicMock(return_value=("dummy_path", None))) + def test_load_minigraph_hard_dependency_check(self, get_cmd_module): + def is_file_side_effect(filename): + return True if 'golden_config' in filename else False + + def read_json_file_side_effect(filename): + return { + "AAA": { + "authentication": { + "login": "tacacs+" + } + }, + "TACPLUS": { + "global": { + } + } + } + + with 
mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)), \ + mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): + (config, _) = get_cmd_module + runner = CliRunner() + result = runner.invoke(config.config.commands["load_minigraph"], ["--override_config", "-y"]) + assert result.exit_code != 0 + assert "Authentication with 'tacacs+' is not allowed when passkey not exists." in result.output + @mock.patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', mock.MagicMock(return_value=("dummy_path", None))) def test_load_minigraph_with_traffic_shift_away(self, get_cmd_module): with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: @@ -556,7 +1206,12 @@ def test_load_minigraph_with_traffic_shift_away_with_golden_config(self, get_cmd with mock.patch("utilities_common.cli.run_command", mock.MagicMock(side_effect=mock_run_command_side_effect)) as mock_run_command: def is_file_side_effect(filename): return True if 'golden_config' in filename else False - with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)): + + def read_json_file_side_effect(filename): + return {} + + with mock.patch('os.path.isfile', mock.MagicMock(side_effect=is_file_side_effect)), \ + mock.patch('config.main.read_json_file', mock.MagicMock(side_effect=read_json_file_side_effect)): (config, show) = get_cmd_module db = Db() golden_config = {} @@ -659,7 +1314,59 @@ def test_reload_config(self, get_cmd_module, setup_single_broadcom_asic): traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_CONFIG_DB_OUTPUT + == RELOAD_CONFIG_DB_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + + def test_reload_config_lock_failure(self, get_cmd_module, setup_single_broadcom_asic): + self.add_sysinfo_to_cfg_file() + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, show) = get_cmd_module + runner = CliRunner() + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_LOCK_FAILURE_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + finally: + flock.release_flock(fd) + + def test_reload_config_bypass_lock(self, get_cmd_module, setup_single_broadcom_asic): + self.add_sysinfo_to_cfg_file() + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, show) = get_cmd_module + runner = CliRunner() + + fd = open(config.SYSTEM_RELOAD_LOCK, 'r') + assert flock.acquire_flock(fd, 0) + + try: + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f', '-b']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code == 0 + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) \ + == RELOAD_CONFIG_DB_BYPASS_LOCK_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + finally: + 
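+            # Drop the flock taken at the top of the test so later tests can
+            # acquire the system reload lock again.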
flock.release_flock(fd) def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broadcom_asic): self.add_sysinfo_to_cfg_file() @@ -679,7 +1386,8 @@ def test_config_reload_disabled_service(self, get_cmd_module, setup_single_broad assert result.exit_code == 0 - assert "\n".join([l.rstrip() for l in result.output.split('\n')]) == reload_config_with_disabled_service_output + assert "\n".join([line.rstrip() for line in result.output.split('\n')]) == \ + reload_config_with_disabled_service_output.format(config.SYSTEM_RELOAD_LOCK) def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): self.add_sysinfo_to_cfg_file() @@ -703,7 +1411,7 @@ def test_reload_config_masic(self, get_cmd_module, setup_multi_broadcom_masic): traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_MASIC_CONFIG_DB_OUTPUT + == RELOAD_MASIC_CONFIG_DB_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) def test_reload_yang_config(self, get_cmd_module, setup_single_broadcom_asic): @@ -722,7 +1430,35 @@ def test_reload_yang_config(self, get_cmd_module, traceback.print_tb(result.exc_info[2]) assert result.exit_code == 0 assert "\n".join([l.rstrip() for l in result.output.split('\n')]) \ - == RELOAD_YANG_CFG_OUTPUT + == RELOAD_YANG_CFG_OUTPUT.format(config.SYSTEM_RELOAD_LOCK) + + def test_reload_config_fails_yang_validation(self, get_cmd_module, setup_single_broadcom_asic): + with open(self.dummy_cfg_file, 'w') as f: + device_metadata = { + "DEVICE_METADATA": { + "localhost": { + "invalid_hwsku": "some_hwsku" + } + } + } + f.write(json.dumps(device_metadata)) + + with mock.patch( + "utilities_common.cli.run_command", + mock.MagicMock(side_effect=mock_run_command_side_effect) + ): + (config, _) = get_cmd_module + runner = CliRunner() + + result = runner.invoke( + config.config.commands["reload"], + [self.dummy_cfg_file, '-y', '-f']) + + print(result.exit_code) + print(result.output) + traceback.print_tb(result.exc_info[2]) + assert result.exit_code != 0 + assert "fails YANG validation! 
Error" in result.output @classmethod def teardown_class(cls): @@ -1023,6 +1759,7 @@ def setUp(self): self.any_checkpoints_list = ["checkpoint1", "checkpoint2", "checkpoint3"] self.any_checkpoints_list_as_text = json.dumps(self.any_checkpoints_list, indent=4) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__no_params__get_required_params_error_msg(self): # Arrange unexpected_exit_code = 0 @@ -1035,6 +1772,7 @@ def test_apply_patch__no_params__get_required_params_error_msg(self): self.assertNotEqual(unexpected_exit_code, result.exit_code) self.assertTrue(expected_output in result.output) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__help__gets_help_msg(self): # Arrange expected_exit_code = 0 @@ -1047,6 +1785,7 @@ def test_apply_patch__help__gets_help_msg(self): self.assertEqual(expected_exit_code, result.exit_code) self.assertTrue(expected_output in result.output) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__only_required_params__default_values_used_for_optional_params(self): # Arrange expected_exit_code = 0 @@ -1065,6 +1804,7 @@ def test_apply_patch__only_required_params__default_values_used_for_optional_par mock_generic_updater.apply_patch.assert_called_once() mock_generic_updater.apply_patch.assert_has_calls([expected_call_with_default_values]) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__all_optional_params_non_default__non_default_values_used(self): # Arrange expected_exit_code = 0 @@ -1094,6 +1834,7 @@ def test_apply_patch__all_optional_params_non_default__non_default_values_used(s mock_generic_updater.apply_patch.assert_called_once() mock_generic_updater.apply_patch.assert_has_calls([expected_call_with_non_default_values]) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch__exception_thrown__error_displayed_error_code_returned(self): # Arrange unexpected_exit_code = 0 @@ -1129,6 +1870,7 @@ def test_apply_patch__optional_parameters_passed_correctly(self): ["--ignore-path", "/ANY_TABLE"], mock.call(self.any_patch, ConfigFormat.CONFIGDB, False, False, False, ("/ANY_TABLE",))) + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def validate_apply_patch_optional_parameter(self, param_args, expected_call): # Arrange expected_exit_code = 0 @@ -2651,6 +3393,7 @@ def setUp(self): self.runner = CliRunner() self.patch_file_path = 'path/to/patch.json' + self.replace_file_path = 'path/to/replace.json' self.patch_content = [ { "op": "add", @@ -2679,6 +3422,16 @@ def setUp(self): } ] + test_config = copy.deepcopy(config_temp) + data = test_config.pop("scope") + self.all_config = {} + self.all_config["localhost"] = data + self.all_config["asic0"] = data + self.all_config["asic0"]["bgpraw"] = "" + self.all_config["asic1"] = data + self.all_config["asic1"]["bgpraw"] = "" + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch_multiasic(self): # Mock open to simulate file reading with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: @@ -2698,7 +3451,201 @@ def test_apply_patch_multiasic(self): # Verify mocked_open was called as expected mocked_open.assert_called_with(self.patch_file_path, 'r') + @patch('config.main.validate_patch', mock.Mock(return_value=True)) def test_apply_patch_dryrun_multiasic(self): + # Mock open to simulate file reading + with patch('builtins.open', 
mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_dryrun_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + 
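+                # With --parallel, apply-patch is expected to fan per-namespace
+                # updates out to worker threads and block on them exactly once.
+                # A rough sketch of that dispatch (an illustration under that
+                # assumption, not the verified config.main implementation;
+                # per_namespace_args is a hypothetical placeholder):
+                #
+                #     with concurrent.futures.ThreadPoolExecutor() as executor:
+                #         futures = [executor.submit(apply_patch_wrapper, args)
+                #                    for args in per_namespace_args]
+                #         concurrent.futures.wait(futures)
+                #
+                # The patched concurrent.futures.wait is how this test observes
+                # that single blocking call.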
mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_called_once() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.apply_patch_wrapper') + def test_apply_patch_check_apply_call_parallel_multiasic(self, mock_apply_patch): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + self.assertEqual(mock_apply_patch.call_count, + multi_asic.get_num_asics() + 1, + "apply_patch_wrapper function should be called number of ASICs plus host times") + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + @patch('config.main.concurrent.futures.wait', autospec=True) + def test_apply_patch_check_running_in_not_parallel_multiasic(self, MockThreadPoolWait): + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + 
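+                # Without --parallel the namespaces are expected to be patched
+                # sequentially, so concurrent.futures.wait must never fire;
+                # MockThreadPoolWait.assert_not_called() below checks exactly that.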
mock_generic_updater.return_value.apply_patch = MagicMock() + + # Mock ConfigDBConnector to ensure it's not called during dry-run + with patch('config.main.ConfigDBConnector') as mock_config_db_connector: + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path, + "--format", ConfigFormat.SONICYANG.name, + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], + catch_exceptions=False) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Patch applied successfully.", result.output) + + # Assertions to check if ThreadPoolExecutor was used correctly + MockThreadPoolWait.assert_not_called() + + # Verify mocked_open was called as expected + mocked_open.assert_called_with(self.patch_file_path, 'r') + + # Ensure ConfigDBConnector was never instantiated or called + mock_config_db_connector.assert_not_called() + + @patch('config.main.validate_patch', mock.Mock(return_value=True)) + def test_apply_patch_parallel_with_error_multiasic(self): # Mock open to simulate file reading with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: # Mock GenericUpdater to avoid actual patch application @@ -2713,12 +3660,13 @@ def test_apply_patch_dryrun_multiasic(self): result = self.runner.invoke(config.config.commands["apply-patch"], [self.patch_file_path, "--format", ConfigFormat.SONICYANG.name, - "--dry-run", - "--ignore-non-yang-tables", - "--ignore-path", "/ANY_TABLE", - "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", - "--ignore-path", "", - "--verbose"], + "--dry-run", + "--parallel", + "--ignore-non-yang-tables", + "--ignore-path", "/ANY_TABLE", + "--ignore-path", "/ANY_OTHER_TABLE/ANY_FIELD", + "--ignore-path", "", + "--verbose"], catch_exceptions=False) print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) @@ -2732,6 +3680,269 @@ def test_apply_patch_dryrun_multiasic(self): # Ensure ConfigDBConnector was never instantiated or called mock_config_db_connector.assert_not_called() + @patch('config.main.subprocess.Popen') + @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True)) + def test_apply_patch_validate_patch_multiasic(self, mock_subprocess_popen): + mock_instance = MagicMock() + mock_instance.communicate.return_value = (json.dumps(self.all_config), 0) + mock_subprocess_popen.return_value = mock_instance + + # Mock open to simulate file reading + with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open: + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.apply_patch = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["apply-patch"], + [self.patch_file_path], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed.") + self.assertIn("Patch applied successfully.", result.output) + + # 
Verify mocked_open was called as expected
+                mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+    @patch('config.main.subprocess.Popen')
+    @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+    def test_apply_patch_validate_patch_with_badpath_multiasic(self, mock_subprocess_popen):
+        mock_instance = MagicMock()
+        mock_instance.communicate.return_value = (json.dumps(self.all_config), 0)
+        mock_subprocess_popen.return_value = mock_instance
+
+        bad_patch = copy.deepcopy(self.patch_content)
+        bad_patch.append({
+            "value": {
+                "policy_desc": "New ACL Table",
+                "ports": ["Ethernet3", "Ethernet4"],
+                "stage": "ingress",
+                "type": "L3"
+            }
+        })
+
+        # Mock open to simulate file reading
+        with patch('builtins.open', mock_open(read_data=json.dumps(bad_patch)), create=True) as mocked_open:
+            # Mock GenericUpdater to avoid actual patch application
+            with patch('config.main.GenericUpdater') as mock_generic_updater:
+                mock_generic_updater.return_value.apply_patch = MagicMock()
+
+                print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+                # Invocation of the command with the CliRunner
+                result = self.runner.invoke(config.config.commands["apply-patch"],
+                                            [self.patch_file_path],
+                                            catch_exceptions=True)
+
+                print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+                # Assertions and verifications
+                self.assertNotEqual(result.exit_code, 0, "Command should fail.")
+                self.assertIn("Failed to apply patch", result.output)
+
+                # Verify mocked_open was called as expected
+                mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+    @patch('config.main.subprocess.Popen')
+    @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+    def test_apply_patch_parallel_badpath_multiasic(self, mock_subprocess_popen):
+        mock_instance = MagicMock()
+        mock_instance.communicate.return_value = (json.dumps(self.all_config), 0)
+        mock_subprocess_popen.return_value = mock_instance
+
+        bad_patch = copy.deepcopy(self.patch_content)
+        bad_patch.append({
+            "value": {
+                "policy_desc": "New ACL Table",
+                "ports": ["Ethernet3", "Ethernet4"],
+                "stage": "ingress",
+                "type": "L3"
+            }
+        })
+
+        # Mock open to simulate file reading
+        with patch('builtins.open', mock_open(read_data=json.dumps(bad_patch)), create=True) as mocked_open:
+            # Mock GenericUpdater to avoid actual patch application
+            with patch('config.main.GenericUpdater') as mock_generic_updater:
+                mock_generic_updater.return_value.apply_patch = MagicMock()
+
+                print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+                # Invocation of the command with the CliRunner
+                result = self.runner.invoke(config.config.commands["apply-patch"],
+                                            [self.patch_file_path,
+                                             "--parallel"],
+                                            catch_exceptions=True)
+
+                print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+                # Assertions and verifications
+                self.assertNotEqual(result.exit_code, 0, "Command should fail.")
+                self.assertIn("Failed to apply patch", result.output)
+
+                # Verify mocked_open was called as expected
+                mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+    @patch('config.main.subprocess.Popen')
+    @patch('config.main.SonicYangCfgDbGenerator.validate_config_db_json', mock.Mock(return_value=True))
+    def test_apply_patch_validate_patch_with_wrong_fetch_config(self, mock_subprocess_popen):
+        mock_instance = MagicMock()
+        mock_instance.communicate.return_value = (json.dumps(self.all_config), 2)
+        mock_subprocess_popen.return_value = mock_instance
+
+        # Mock open to simulate file reading
+        with patch('builtins.open', mock_open(read_data=json.dumps(self.patch_content)), create=True) as mocked_open:
+            # Mock GenericUpdater to avoid actual patch application
+            with patch('config.main.GenericUpdater') as mock_generic_updater:
+                mock_generic_updater.return_value.apply_patch = MagicMock()
+
+                print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+                # Invocation of the command with the CliRunner
+                result = self.runner.invoke(config.config.commands["apply-patch"],
+                                            [self.patch_file_path],
+                                            catch_exceptions=True)
+
+                print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+                # Assertions and verifications
+                self.assertNotEqual(result.exit_code, 0, "Command should fail.")
+                self.assertIn("Failed to apply patch", result.output)
+
+                # Verify mocked_open was called as expected
+                mocked_open.assert_called_with(self.patch_file_path, 'r')
+
+    @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock())
+    def test_replace_multiasic(self):
+        # Mock open to simulate file reading
+        mock_replace_content = copy.deepcopy(self.all_config)
+        with patch('builtins.open', mock_open(read_data=json.dumps(mock_replace_content)), create=True) as mocked_open:
+            # Mock GenericUpdater to avoid actual patch application
+            with patch('config.main.GenericUpdater') as mock_generic_updater:
+                mock_generic_updater.return_value.replace_all = MagicMock()
+
+                print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+                # Invocation of the command with the CliRunner
+                result = self.runner.invoke(config.config.commands["replace"],
+                                            [self.replace_file_path],
+                                            catch_exceptions=True)
+
+                print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+                # Assertions and verifications
+                self.assertEqual(result.exit_code, 0, "Command should succeed")
+                self.assertIn("Config replaced successfully.", result.output)
+
+                # Verify mocked_open was called as expected
+                mocked_open.assert_called_with(self.replace_file_path, 'r')
+
+    @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock())
+    def test_replace_multiasic_missing_scope(self):
+        # Mock open to simulate file reading
+        mock_replace_content = copy.deepcopy(self.all_config)
+        mock_replace_content.pop("asic0")
+        with patch('builtins.open', mock_open(read_data=json.dumps(mock_replace_content)), create=True):
+            print("Multi ASIC: {}".format(multi_asic.is_multi_asic()))
+            # Invocation of the command with the CliRunner
+            result = self.runner.invoke(config.config.commands["replace"],
+                                        [self.replace_file_path],
+                                        catch_exceptions=True)
+
+            print("Exit Code: {}, output: {}".format(result.exit_code, result.output))
+            # Assertions and verifications
+            self.assertNotEqual(result.exit_code, 0, "Command should fail")
+            self.assertIn("Failed to replace config", result.output)
+
+    @patch('generic_config_updater.generic_updater.subprocess.Popen')
+    @patch('generic_config_updater.generic_updater.Util.ensure_checkpoints_dir_exists', mock.Mock(return_value=True))
+    @patch('generic_config_updater.generic_updater.Util.save_json_file', MagicMock())
+    def test_checkpoint_multiasic(self, mock_subprocess_popen):
+        allconfigs = copy.deepcopy(self.all_config)
+
+        # Create mock instances for each subprocess call
+        mock_instance_localhost = MagicMock()
+        mock_instance_localhost.communicate.return_value = (json.dumps(allconfigs["localhost"]), 0)
+        mock_instance_localhost.returncode = 0
+
+        mock_instance_asic0 = MagicMock()
+        mock_instance_asic0.communicate.return_value = (json.dumps(allconfigs["asic0"]), 0)
+ mock_instance_asic0.returncode = 0 + + mock_instance_asic1 = MagicMock() + mock_instance_asic1.communicate.return_value = (json.dumps(allconfigs["asic1"]), 0) + mock_instance_asic1.returncode = 0 + + # Setup side effect to return different mock instances based on input arguments + def side_effect(*args, **kwargs): + if "asic" not in args[0]: + return mock_instance_localhost + elif "asic0" in args[0]: + return mock_instance_asic0 + elif "asic1" in args[0]: + return mock_instance_asic1 + else: + return MagicMock() # Default case + + mock_subprocess_popen.side_effect = side_effect + + checkpointname = "checkpointname" + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["checkpoint"], + [checkpointname], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Checkpoint created successfully.", result.output) + + @patch('generic_config_updater.generic_updater.Util.check_checkpoint_exists', mock.Mock(return_value=True)) + @patch('generic_config_updater.generic_updater.ConfigReplacer.replace', MagicMock()) + @patch('generic_config_updater.generic_updater.Util.get_checkpoint_content') + def test_rollback_multiasic(self, mock_get_checkpoint_content): + mock_get_checkpoint_content.return_value = copy.deepcopy(self.all_config) + checkpointname = "checkpointname" + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["rollback"], + [checkpointname], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Config rolled back successfully.", result.output) + + @patch('generic_config_updater.generic_updater.Util.checkpoints_dir_exist', mock.Mock(return_value=True)) + @patch('generic_config_updater.generic_updater.Util.get_checkpoint_names', + mock.Mock(return_value=["checkpointname"])) + def test_list_checkpoint_multiasic(self): + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["list-checkpoints"], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("checkpointname", result.output) + + @patch('generic_config_updater.generic_updater.Util.delete_checkpoint', MagicMock()) + @patch('generic_config_updater.generic_updater.Util.check_checkpoint_exists', mock.Mock(return_value=True)) + def test_delete_checkpoint_multiasic(self): + checkpointname = "checkpointname" + # Mock GenericUpdater to avoid actual patch application + with patch('config.main.GenericUpdater') as mock_generic_updater: + mock_generic_updater.return_value.delete_checkpoint = MagicMock() + + print("Multi ASIC: {}".format(multi_asic.is_multi_asic())) + # Invocation of the command with the CliRunner + result = self.runner.invoke(config.config.commands["delete-checkpoint"], + [checkpointname], + catch_exceptions=True) + + print("Exit Code: {}, output: {}".format(result.exit_code, result.output)) + # Assertions and verifications + 
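# delete_checkpoint itself is mocked above, so success here verifies only the CLI wiring +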
self.assertEqual(result.exit_code, 0, "Command should succeed") + self.assertIn("Checkpoint deleted successfully.", result.output) + @classmethod def teardown_class(cls): print("TEARDOWN") @@ -2741,4 +3952,64 @@ def teardown_class(cls): from .mock_tables import dbconnector from .mock_tables import mock_single_asic importlib.reload(mock_single_asic) - dbconnector.load_database_config() \ No newline at end of file + dbconnector.load_database_config() + + +class TestConfigBanner(object): + @classmethod + def setup_class(cls): + print('SETUP') + import config.main + importlib.reload(config.main) + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_banner_state(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['banner'].commands['state'], + ['enabled'], obj=obj) + + assert result.exit_code == 0 + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_banner_login(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['banner'].commands['login'], + ['Login message'], obj=obj) + + assert result.exit_code == 0 + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_banner_logout(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['banner'].commands['logout'], + ['Logout message'], obj=obj) + + assert result.exit_code == 0 + + @patch('utilities_common.cli.run_command', + mock.MagicMock(side_effect=mock_run_command_side_effect)) + def test_banner_motd(self): + runner = CliRunner() + obj = {'db': Db().cfgdb} + + result = runner.invoke( + config.config.commands['banner'].commands['motd'], + ['Motd message'], obj=obj) + + assert result.exit_code == 0 + + @classmethod + def teardown_class(cls): + print('TEARDOWN') diff --git a/tests/conftest.py b/tests/conftest.py index c3bb69af71..3874668a67 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -194,7 +194,11 @@ def setup_single_bgp_instance(request): elif request.param == 'ipv6_route': bgp_mocked_json = 'ipv6_route.json' elif request.param == 'ip_special_route': - bgp_mocked_json = 'ip_special_route.json' + bgp_mocked_json = 'ip_special_route.json' + elif request.param == 'ip_route_lc': + bgp_mocked_json = 'ip_route_lc.json' + elif request.param == 'ip_route_remote_lc': + bgp_mocked_json = 'ip_route_remote_lc.json' else: bgp_mocked_json = os.path.join( test_path, 'mock_tables', 'dummy.json') @@ -240,7 +244,8 @@ def mock_run_bgp_route_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constan _old_run_bgp_command = bgp_util.run_bgp_command if any([request.param == 'ip_route', request.param == 'ip_specific_route', request.param == 'ip_special_route', - request.param == 'ipv6_route', request.param == 'ipv6_specific_route']): + request.param == 'ipv6_route', request.param == 'ipv6_specific_route', + request.param == 'ip_route_lc', request.param == 'ip_route_remote_lc']): bgp_util.run_bgp_command = mock.MagicMock( return_value=mock_run_bgp_route_command("", "")) elif request.param.startswith('ipv6_route_err'): @@ -303,6 +308,12 @@ def setup_multi_asic_bgp_instance(request): request.param.startswith('bgp_v4_neighbor') or \ request.param.startswith('bgp_v6_neighbor'): m_asic_json_file = request.param + elif request.param == 'ip_route_lc': + m_asic_json_file = 'ip_route_lc.json' + elif request.param == 
'ip_route_remote_lc': + m_asic_json_file = 'ip_route_remote_lc.json' + elif request.param == 'ip_route_lc_2': + m_asic_json_file = 'ip_route_lc_2.json' else: m_asic_json_file = os.path.join( test_path, 'mock_tables', 'dummy.json') @@ -317,7 +328,7 @@ def mock_run_bgp_command_for_static(vtysh_cmd, bgp_namespace="", vtysh_shell_cmd else: return "" - def mock_run_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RVTYSH_COMMAND): + def mock_run_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RVTYSH_COMMAND, exit_on_fail=True): if m_asic_json_file.startswith('bgp_v4_network') or \ m_asic_json_file.startswith('bgp_v6_network'): return mock_show_bgp_network_multi_asic(m_asic_json_file) @@ -335,7 +346,8 @@ def mock_run_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.RVT else: return "" - def mock_run_show_sum_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND): + def mock_run_show_sum_bgp_command( + vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND, exit_on_fail=True): if vtysh_cmd == "show ip bgp summary json": m_asic_json_file = 'no_bgp_neigh.json' else: @@ -350,7 +362,8 @@ def mock_run_show_sum_bgp_command(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=cons else: return "" - def mock_run_show_summ_bgp_command_no_ext_neigh_on_all_asic(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND): + def mock_run_show_summ_bgp_command_no_ext_neigh_on_all_asic( + vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND, exit_on_fail=True): if vtysh_cmd == "show ip bgp summary json": m_asic_json_file = 'no_ext_bgp_neigh.json' else: @@ -365,7 +378,8 @@ def mock_run_show_summ_bgp_command_no_ext_neigh_on_all_asic(vtysh_cmd, bgp_names else: return "" - def mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1(vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND): + def mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1( + vtysh_cmd, bgp_namespace, vtysh_shell_cmd=constants.VTYSH_COMMAND, exit_on_fail=True): if vtysh_cmd == "show ip bgp summary json": if bgp_namespace == "asic1": m_asic_json_file = 'no_ext_bgp_neigh.json' @@ -383,6 +397,13 @@ def mock_run_show_summ_bgp_command_no_ext_neigh_on_asic1(vtysh_cmd, bgp_namespac else: return "" + def mock_multi_asic_list(): + return ["asic0", "asic1"] + + # mock multi-asic list + if request.param == "bgp_v4_network_all_asic": + multi_asic.get_namespace_list = mock_multi_asic_list + _old_run_bgp_command = bgp_util.run_bgp_command if request.param == 'ip_route_for_int_ip': bgp_util.run_bgp_command = mock_run_bgp_command_for_static diff --git a/wol/__init__.py b/tests/console_mock/dev/ttyACM1 similarity index 100% rename from wol/__init__.py rename to tests/console_mock/dev/ttyACM1 diff --git a/tests/console_mock/dev/ttyUSB0 b/tests/console_mock/dev/ttyUSB0 new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/console_test.py b/tests/console_test.py index 528f5f4ba8..4a52a3c52e 100644 --- a/tests/console_test.py +++ b/tests/console_test.py @@ -14,10 +14,15 @@ from click.testing import CliRunner from utilities_common.db import Db -from consutil.lib import * +from consutil.lib import ConsolePortProvider, ConsolePortInfo, ConsoleSession, SysInfoProvider, DbUtils, \ + InvalidConfigurationError, LineBusyError, LineNotFoundError, ConnectionFailedError from sonic_py_common import device_info from jsonpatch import JsonPatchConflict +SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +CONSOLE_MOCK_DIR = SCRIPT_DIR + "/console_mock" + + class 
TestConfigConsoleCommands(object): @classmethod def setup_class(cls): @@ -543,17 +548,15 @@ def test_sys_info_provider_init_device_prefix_plugin(self): with mock.patch("builtins.open", mock.mock_open(read_data="C0-")): SysInfoProvider.init_device_prefix() assert SysInfoProvider.DEVICE_PREFIX == "/dev/C0-" - SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" - @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=("/dev/ttyUSB0\n/dev/ttyACM1", ""))) def test_sys_info_provider_list_console_ttys(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = CONSOLE_MOCK_DIR + "/dev/ttyUSB" ttys = SysInfoProvider.list_console_ttys() print(SysInfoProvider.DEVICE_PREFIX) assert len(ttys) == 1 - @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=("", "ls: cannot access '/dev/ttyUSB*': No such file or directory"))) def test_sys_info_provider_list_console_ttys_device_not_exists(self): + SysInfoProvider.DEVICE_PREFIX = CONSOLE_MOCK_DIR + "/dev_not_exist/ttyUSB" ttys = SysInfoProvider.list_console_ttys() assert len(ttys) == 0 @@ -563,7 +566,7 @@ def test_sys_info_provider_list_console_ttys_device_not_exists(self): """ @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=all_active_processes_output)) def test_sys_info_provider_list_active_console_processes(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" procs = SysInfoProvider.list_active_console_processes() assert len(procs) == 1 assert "0" in procs @@ -572,7 +575,7 @@ def test_sys_info_provider_list_active_console_processes(self): active_process_output = "13751 Wed Mar 6 08:31:35 2019 /usr/bin/sudo picocom -b 9600 -f n /dev/ttyUSB1" @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=active_process_output)) def test_sys_info_provider_get_active_console_process_info_exists(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" proc = SysInfoProvider.get_active_console_process_info("13751") assert proc is not None assert proc == ("1", "13751", "Wed Mar 6 08:31:35 2019") @@ -580,7 +583,7 @@ def test_sys_info_provider_get_active_console_process_info_exists(self): active_process_empty_output = "" @mock.patch('consutil.lib.SysInfoProvider.run_command', mock.MagicMock(return_value=active_process_empty_output)) def test_sys_info_provider_get_active_console_process_info_nonexists(self): - SysInfoProvider.DEVICE_PREFIX == "/dev/ttyUSB" + SysInfoProvider.DEVICE_PREFIX = "/dev/ttyUSB" proc = SysInfoProvider.get_active_console_process_info("2") assert proc is None diff --git a/tests/counterpoll_test.py b/tests/counterpoll_test.py index 4a4da07ee9..6c165498c5 100644 --- a/tests/counterpoll_test.py +++ b/tests/counterpoll_test.py @@ -2,6 +2,7 @@ import json import os import pytest +import mock import sys from click.testing import CliRunner from shutil import copyfile @@ -31,6 +32,21 @@ FLOW_CNT_ROUTE_STAT 10000 enable """ +expected_counterpoll_show_dpu = """Type Interval (in ms) Status +-------------------- ------------------ -------- +QUEUE_STAT 10000 enable +PORT_STAT 1000 enable +PORT_BUFFER_DROP 60000 enable +QUEUE_WATERMARK_STAT default (60000) enable +PG_WATERMARK_STAT default (60000) enable +PG_DROP_STAT 10000 enable +ACL 5000 enable +TUNNEL_STAT 3000 enable +FLOW_CNT_TRAP_STAT 10000 enable +FLOW_CNT_ROUTE_STAT 10000 enable +ENI_STAT 1000 enable +""" + class TestCounterpoll(object): @classmethod def 
setup_class(cls): @@ -44,6 +60,13 @@ def test_show(self): print(result.output) assert result.output == expected_counterpoll_show + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_show_dpu(self, mock_get_platform_info): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + result = runner.invoke(counterpoll.cli.commands["show"], []) + assert result.output == expected_counterpoll_show_dpu + def test_port_buffer_drop_interval(self): runner = CliRunner() result = runner.invoke(counterpoll.cli.commands["port-buffer-drop"].commands["interval"], ["30000"]) @@ -221,6 +244,38 @@ def test_update_route_counter_interval(self): assert result.exit_code == 2 assert expected in result.output + @pytest.mark.parametrize("status", ["disable", "enable"]) + def test_update_eni_status(self, status): + runner = CliRunner() + result = runner.invoke(counterpoll.cli, ["eni", status]) + assert result.exit_code == 1 + assert result.output == "ENI counters are not supported on non DPU platforms\n" + + @pytest.mark.parametrize("status", ["disable", "enable"]) + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_update_eni_status_dpu(self, mock_get_platform_info, status): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + db = Db() + + result = runner.invoke(counterpoll.cli.commands["eni"].commands[status], [], obj=db.cfgdb) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert status == table["ENI"]["FLEX_COUNTER_STATUS"] + + @mock.patch('counterpoll.main.device_info.get_platform_info') + def test_update_eni_interval(self, mock_get_platform_info): + mock_get_platform_info.return_value = {'switch_type': 'dpu'} + runner = CliRunner() + db = Db() + test_interval = "2000" + + result = runner.invoke(counterpoll.cli.commands["eni"].commands["interval"], [test_interval], obj=db.cfgdb) + assert result.exit_code == 0 + + table = db.cfgdb.get_table('FLEX_COUNTER_TABLE') + assert test_interval == table["ENI"]["POLL_INTERVAL"] @classmethod def teardown_class(cls): diff --git a/tests/db_migrator_input/config_db/cross_branch_upgrade_to_4_0_3_expected.json b/tests/db_migrator_input/config_db/cross_branch_upgrade_to_4_0_3_expected.json index 1ebfbc6afe..a64d38bc51 100644 --- a/tests/db_migrator_input/config_db/cross_branch_upgrade_to_4_0_3_expected.json +++ b/tests/db_migrator_input/config_db/cross_branch_upgrade_to_4_0_3_expected.json @@ -3,17 +3,17 @@ "VERSION": "version_4_0_3" }, "FLEX_COUNTER_TABLE|ACL": { - "FLEX_COUNTER_STATUS": "true", + "FLEX_COUNTER_STATUS": "enable", "FLEX_COUNTER_DELAY_STATUS": "true", "POLL_INTERVAL": "10000" }, "FLEX_COUNTER_TABLE|QUEUE": { - "FLEX_COUNTER_STATUS": "true", + "FLEX_COUNTER_STATUS": "enable", "FLEX_COUNTER_DELAY_STATUS": "true", "POLL_INTERVAL": "10000" }, "FLEX_COUNTER_TABLE|PG_WATERMARK": { - "FLEX_COUNTER_STATUS": "false", + "FLEX_COUNTER_STATUS": "disable", "FLEX_COUNTER_DELAY_STATUS": "true" } } diff --git a/tests/db_migrator_input/config_db/cross_branch_upgrade_to_4_0_3_input.json b/tests/db_migrator_input/config_db/cross_branch_upgrade_to_4_0_3_input.json index 07ce763683..e2d8d04588 100644 --- a/tests/db_migrator_input/config_db/cross_branch_upgrade_to_4_0_3_input.json +++ b/tests/db_migrator_input/config_db/cross_branch_upgrade_to_4_0_3_input.json @@ -3,16 +3,16 @@ "VERSION": "version_1_0_1" }, "FLEX_COUNTER_TABLE|ACL": { - "FLEX_COUNTER_STATUS": "true", + "FLEX_COUNTER_STATUS": "enable", "FLEX_COUNTER_DELAY_STATUS": "true", 
"POLL_INTERVAL": "10000" }, "FLEX_COUNTER_TABLE|QUEUE": { - "FLEX_COUNTER_STATUS": "true", + "FLEX_COUNTER_STATUS": "enable", "FLEX_COUNTER_DELAY_STATUS": "false", "POLL_INTERVAL": "10000" }, "FLEX_COUNTER_TABLE|PG_WATERMARK": { - "FLEX_COUNTER_STATUS": "false" + "FLEX_COUNTER_STATUS": "disable" } } diff --git a/tests/db_migrator_input/config_db/portchannel-expected.json b/tests/db_migrator_input/config_db/portchannel-expected.json index 2644e5f4e9..f380c75363 100644 --- a/tests/db_migrator_input/config_db/portchannel-expected.json +++ b/tests/db_migrator_input/config_db/portchannel-expected.json @@ -1,28 +1,24 @@ { "PORTCHANNEL|PortChannel0": { "admin_status": "up", - "members@": "Ethernet0,Ethernet4", "min_links": "2", "mtu": "9100", "lacp_key": "auto" }, "PORTCHANNEL|PortChannel1": { "admin_status": "up", - "members@": "Ethernet8,Ethernet12", "min_links": "2", "mtu": "9100", "lacp_key": "auto" }, "PORTCHANNEL|PortChannel0123": { "admin_status": "up", - "members@": "Ethernet16", "min_links": "1", "mtu": "9100", "lacp_key": "auto" }, "PORTCHANNEL|PortChannel0011": { "admin_status": "up", - "members@": "Ethernet20,Ethernet24", "min_links": "2", "mtu": "9100", "lacp_key": "auto" diff --git a/tests/db_migrator_input/config_db/portchannel-input.json b/tests/db_migrator_input/config_db/portchannel-input.json index 753a88601d..43a9fabdb5 100644 --- a/tests/db_migrator_input/config_db/portchannel-input.json +++ b/tests/db_migrator_input/config_db/portchannel-input.json @@ -1,25 +1,21 @@ { "PORTCHANNEL|PortChannel0": { "admin_status": "up", - "members@": "Ethernet0,Ethernet4", "min_links": "2", "mtu": "9100" }, "PORTCHANNEL|PortChannel1": { "admin_status": "up", - "members@": "Ethernet8,Ethernet12", "min_links": "2", "mtu": "9100" }, "PORTCHANNEL|PortChannel0123": { "admin_status": "up", - "members@": "Ethernet16", "min_links": "1", "mtu": "9100" }, "PORTCHANNEL|PortChannel0011": { "admin_status": "up", - "members@": "Ethernet20,Ethernet24", "min_links": "2", "mtu": "9100" }, diff --git a/tests/db_migrator_input/config_db/qos_map_table_expected.json b/tests/db_migrator_input/config_db/qos_map_table_expected.json index 47381ec550..f84c1a900b 100644 --- a/tests/db_migrator_input/config_db/qos_map_table_expected.json +++ b/tests/db_migrator_input/config_db/qos_map_table_expected.json @@ -29,6 +29,14 @@ "pfc_to_queue_map": "AZURE", "tc_to_pg_map": "AZURE", "tc_to_queue_map": "AZURE" - } + }, + "TC_TO_QUEUE_MAP|AZURE": {"0": "0"}, + "TC_TO_PRIORITY_GROUP_MAP|AZURE": {"0": "0"}, + "MAP_PFC_PRIORITY_TO_QUEUE|AZURE": {"0": "0"}, + "DSCP_TO_TC_MAP|AZURE": {"0": "0"}, + "PORT|Ethernet0": {"lanes": "0", "speed": "1000"}, + "PORT|Ethernet92": {"lanes": "92", "speed": "1000"}, + "PORT|Ethernet96": {"lanes": "96", "speed": "1000"}, + "PORT|Ethernet100": {"lanes": "100", "speed": "1000"} } diff --git a/tests/db_migrator_input/config_db/qos_map_table_input.json b/tests/db_migrator_input/config_db/qos_map_table_input.json index c62e293daf..3c288b9534 100644 --- a/tests/db_migrator_input/config_db/qos_map_table_input.json +++ b/tests/db_migrator_input/config_db/qos_map_table_input.json @@ -27,5 +27,13 @@ "pfc_to_queue_map": "AZURE", "tc_to_pg_map": "AZURE", "tc_to_queue_map": "AZURE" - } + }, + "TC_TO_QUEUE_MAP|AZURE": {"0": "0"}, + "TC_TO_PRIORITY_GROUP_MAP|AZURE": {"0": "0"}, + "MAP_PFC_PRIORITY_TO_QUEUE|AZURE": {"0": "0"}, + "DSCP_TO_TC_MAP|AZURE": {"0": "0"}, + "PORT|Ethernet0": {"lanes": "0", "speed": "1000"}, + "PORT|Ethernet92": {"lanes": "92", "speed": "1000"}, + "PORT|Ethernet96": {"lanes": "96", "speed": 
"1000"}, + "PORT|Ethernet100": {"lanes": "100", "speed": "1000"} } diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json index 5181daa057..b969575c78 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-expected.json @@ -12,7 +12,7 @@ "profile": "NULL" }, "BUFFER_PG|Ethernet8|3-4": { - "profile": "customized_lossless_profile" + "profile": "customized_ingress_lossless_profile" }, "BUFFER_PG|Ethernet12|0": { "profile": "ingress_lossy_profile" @@ -103,6 +103,11 @@ "BUFFER_PORT_INGRESS_PROFILE_LIST|Ethernet24": { "profile_list": "ingress_lossless_profile,ingress_lossy_profile" }, + "BUFFER_PROFILE|customized_egress_lossless_profile": { + "dynamic_th": "7", + "pool": "egress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|egress_lossless_profile": { "dynamic_th": "7", "pool": "egress_lossless_pool", @@ -118,6 +123,11 @@ "pool": "ingress_lossless_pool", "size": "0" }, + "BUFFER_PROFILE|customized_ingress_lossless_profile": { + "dynamic_th": "7", + "pool": "ingress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|ingress_lossy_profile": { "dynamic_th": "3", "pool": "ingress_lossy_pool", diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json index d8deef194f..d3337ccadb 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-double-pools-input.json @@ -3,7 +3,7 @@ "profile": "NULL" }, "BUFFER_PG|Ethernet8|3-4": { - "profile": "customized_lossless_profile" + "profile": "customized_ingress_lossless_profile" }, "BUFFER_PG|Ethernet12|0": { "profile": "ingress_lossy_profile" @@ -55,6 +55,11 @@ "BUFFER_PORT_INGRESS_PROFILE_LIST|Ethernet24": { "profile_list": "ingress_lossless_profile,ingress_lossy_profile" }, + "BUFFER_PROFILE|customized_egress_lossless_profile": { + "dynamic_th": "7", + "pool": "egress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|egress_lossless_profile": { "dynamic_th": "7", "pool": "egress_lossless_pool", @@ -65,6 +70,11 @@ "pool": "egress_lossy_pool", "size": "9216" }, + "BUFFER_PROFILE|customized_ingress_lossless_profile": { + "dynamic_th": "7", + "pool": "ingress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|ingress_lossless_profile": { "dynamic_th": "7", "pool": "ingress_lossless_pool", diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json index 278a40bc0a..3572be8b69 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-expected.json @@ -12,7 +12,7 @@ "profile": "NULL" }, "BUFFER_PG|Ethernet8|3-4": { - "profile": "customized_lossless_profile" + "profile": "customized_ingress_lossless_profile" }, "BUFFER_PG|Ethernet12|0": { "profile": "ingress_lossy_profile" @@ -99,6 +99,11 @@ "BUFFER_PORT_INGRESS_PROFILE_LIST|Ethernet24": { "profile_list": "ingress_lossless_profile" }, + "BUFFER_PROFILE|customized_egress_lossless_profile": { + "dynamic_th": "7", + "pool": "egress_lossless_pool", + "size": "0" + }, 
"BUFFER_PROFILE|egress_lossless_profile": { "dynamic_th": "7", "pool": "egress_lossless_pool", @@ -109,6 +114,11 @@ "pool": "egress_lossy_pool", "size": "9216" }, + "BUFFER_PROFILE|customized_ingress_lossless_profile": { + "dynamic_th": "7", + "pool": "ingress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|ingress_lossless_profile": { "dynamic_th": "7", "pool": "ingress_lossless_pool", diff --git a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json index b3bda32f23..60f4455cad 100644 --- a/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json +++ b/tests/db_migrator_input/config_db/reclaiming-buffer-dynamic-single-pool-input.json @@ -3,7 +3,7 @@ "profile": "NULL" }, "BUFFER_PG|Ethernet8|3-4": { - "profile": "customized_lossless_profile" + "profile": "customized_ingress_lossless_profile" }, "BUFFER_PG|Ethernet12|0": { "profile": "ingress_lossy_profile" @@ -51,6 +51,11 @@ "BUFFER_PORT_INGRESS_PROFILE_LIST|Ethernet24": { "profile_list": "ingress_lossless_profile" }, + "BUFFER_PROFILE|customized_egress_lossless_profile": { + "dynamic_th": "7", + "pool": "egress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|egress_lossless_profile": { "dynamic_th": "7", "pool": "egress_lossless_pool", @@ -61,6 +66,11 @@ "pool": "egress_lossy_pool", "size": "9216" }, + "BUFFER_PROFILE|customized_ingress_lossless_profile": { + "dynamic_th": "7", + "pool": "ingress_lossless_pool", + "size": "0" + }, "BUFFER_PROFILE|ingress_lossless_profile": { "dynamic_th": "7", "pool": "ingress_lossless_pool", diff --git a/tests/db_migrator_test.py b/tests/db_migrator_test.py index e21539766a..cdf4251bd7 100644 --- a/tests/db_migrator_test.py +++ b/tests/db_migrator_test.py @@ -74,24 +74,27 @@ class TestVersionComparison(object): def setup_class(cls): cls.version_comp_list = [ # Old format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_1_0_2', 'result' : False }, - { 'v1' : 'version_1_0_2', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_2_0_1', 'result' : False }, - { 'v1' : 'version_2_0_1', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_1_0_2', 'result': False}, + {'v1': 'version_1_0_2', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_2_0_1', 'result': False}, + {'v1': 'version_2_0_1', 'v2': 'version_1_0_1', 'result': True}, # New format v.s old format - { 'v1' : 'version_1_0_1', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_1_0_1', 'result' : True }, - { 'v1' : 'version_1_0_1', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 'version_master_01', 'v2' : 'version_1_0_1', 'result' : True }, + {'v1': 'version_1_0_1', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_1_0_1', 'result': True}, + {'v1': 'version_1_0_1', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_1_0_1', 'result': True}, # New format v.s new format - { 'v1' : 'version_202311_01', 'v2' : 'version_202311_02', 'result' : False }, - { 'v1' : 'version_202311_02', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_202305_01', 'v2' : 'version_202311_01', 'result' : False }, - { 'v1' : 'version_202311_01', 'v2' : 'version_202305_01', 'result' : True }, - { 'v1' : 'version_202311_01', 'v2' : 'version_master_01', 'result' : False }, - { 'v1' : 
'version_master_01', 'v2' : 'version_202311_01', 'result' : True }, - { 'v1' : 'version_master_01', 'v2' : 'version_master_02', 'result' : False }, - { 'v1' : 'version_master_02', 'v2' : 'version_master_01', 'result' : True }, + {'v1': 'version_202311_01', 'v2': 'version_202311_02', 'result': False}, + {'v1': 'version_202311_02', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_202305_01', 'v2': 'version_202311_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_202305_01', 'result': True}, + {'v1': 'version_202405_01', 'v2': 'version_202411_01', 'result': False}, + {'v1': 'version_202411_01', 'v2': 'version_202405_01', 'result': True}, + {'v1': 'version_202411_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_202311_01', 'v2': 'version_master_01', 'result': False}, + {'v1': 'version_master_01', 'v2': 'version_202311_01', 'result': True}, + {'v1': 'version_master_01', 'v2': 'version_master_02', 'result': False}, + {'v1': 'version_master_02', 'v2': 'version_master_01', 'result': True}, ] def test_version_comparison(self): @@ -383,7 +386,7 @@ def test_dns_nameserver_migrator(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'dns-nameserver-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_keys = dbmgtr.configDB.keys(dbmgtr.configDB.CONFIG_DB, 'DNS_NAMESERVER*') expected_keys = expected_db.cfgdb.keys(expected_db.cfgdb.CONFIG_DB, 'DNS_NAMESERVER*') @@ -895,7 +898,7 @@ def test_init(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initialize', mock.MagicMock()) def test_init_no_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace=None, operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace=None, operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -903,7 +906,7 @@ def test_init_no_namespace(self, mock_args): @mock.patch('swsscommon.swsscommon.SonicDBConfig.isGlobalInit', mock.MagicMock(return_value=False)) @mock.patch('swsscommon.swsscommon.SonicDBConfig.initializeGlobalConfig', mock.MagicMock()) def test_init_namespace(self, mock_args): - mock_args.return_value=argparse.Namespace(namespace="asic0", operation='version_202405_01', socket=None) + mock_args.return_value = argparse.Namespace(namespace="asic0", operation='version_202411_01', socket=None) import db_migrator db_migrator.main() @@ -940,7 +943,7 @@ def test_dns_nameserver_migrator_minigraph(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-minigraph-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") @@ -956,7 +959,7 @@ def test_dns_nameserver_migrator_configdb(self): dbmgtr.migrate() dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'config_db', 'gnmi-configdb-expected') expected_db = Db() - advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202405_01') + 
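# keep the expected DB stamped with the latest migrator version +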
advance_version_for_expected_database(dbmgtr.configDB, expected_db.cfgdb, 'version_202411_01') resulting_table = dbmgtr.configDB.get_table("GNMI") expected_table = expected_db.cfgdb.get_table("GNMI") diff --git a/tests/disk_check_test.py b/tests/disk_check_test.py index 82b8b16ff6..ac541b05b9 100644 --- a/tests/disk_check_test.py +++ b/tests/disk_check_test.py @@ -1,7 +1,6 @@ import sys import syslog from unittest.mock import patch -import pytest import subprocess sys.path.append("scripts") @@ -178,3 +177,7 @@ def test_readonly(self, mock_proc, mock_log): assert max_log_lvl == syslog.LOG_ERR + @classmethod + def teardown_class(cls): + subprocess.run("rm -rf /tmp/tmp*", shell=True) # cleanup the temporary dirs + print("TEARDOWN") diff --git a/tests/drops_group_test.py b/tests/drops_group_test.py index d374275a48..93f99e3f1b 100644 --- a/tests/drops_group_test.py +++ b/tests/drops_group_test.py @@ -3,6 +3,7 @@ import shutil from click.testing import CliRunner +from utilities_common.cli import UserCache test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -20,13 +21,13 @@ SWITCH_EGRESS_DROPS 2 PORT_INGRESS_DROPS - IP_HEADER_ERROR - NO_L3_HEADER + IP_HEADER_ERROR + NO_L3_HEADER SWITCH_EGRESS_DROPS - ACL_ANY - L2_ANY - L3_ANY + ACL_ANY + L2_ANY + L3_ANY """ expected_counter_configuration = """\ @@ -56,6 +57,21 @@ sonic_drops_test 1000 0 """ +expected_counts_voq = """\ + SWITCH-ID PKT_INTEGRITY_ERR +---------------- ------------------- +sonic_drops_test 500 + + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +--------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet0 D 10 100 0 0 80 20 +Ethernet4 N/A 0 1000 0 0 800 100 +Ethernet8 N/A 100 10 0 0 10 0 + + DEVICE SWITCH_DROPS lowercase_counter +---------------- -------------- ------------------- +sonic_drops_test 1000 0 +""" expected_counts_with_group = """ DEVICE SWITCH_DROPS ---------------- -------------- @@ -82,14 +98,17 @@ sonic_drops_test 0 0 """ -dropstat_path = "/tmp/dropstat-27" + +def remove_tmp_dropstat_file(): + # remove the temporary dropstat cache + cache = UserCache("dropstat") + cache.remove_all() class TestDropCounters(object): @classmethod def setup_class(cls): print("SETUP") - if os.path.exists(dropstat_path): - shutil.rmtree(dropstat_path) + remove_tmp_dropstat_file() os.environ["PATH"] += os.pathsep + scripts_path os.environ["UTILITIES_UNIT_TESTING"] = "1" @@ -117,6 +136,14 @@ def test_show_counts(self): print(result.output) assert result.output == expected_counts + def test_show_counts_voq(self): + runner = CliRunner() + os.environ["VOQ_DROP_COUNTER_TESTING"] = "1" + result = runner.invoke(show.cli.commands["dropcounters"].commands["counts"], []) + os.environ["VOQ_DROP_COUNTER_TESTING"] = "0" + print(result.output) + assert result.output == expected_counts_voq + def test_show_counts_with_group(self): runner = CliRunner() result = runner.invoke(show.cli.commands["dropcounters"].commands["counts"], ["-g", "PACKET_DROPS"]) diff --git a/tests/ecn_input/ecn_test_vectors.py b/tests/ecn_input/ecn_test_vectors.py index c53bf48a24..fe47f0b7a3 100644 --- a/tests/ecn_input/ecn_test_vectors.py +++ b/tests/ecn_input/ecn_test_vectors.py @@ -18,205 +18,356 @@ """ +ecn_show_config_output_specific_namespace = """\ +Profile: AZURE_LOSSLESS +----------------------- ------- +red_max_threshold 2097152 +ecn ecn_all +green_min_threshold 1048576 +red_min_threshold 1048576 +yellow_min_threshold 1048576 +green_max_threshold 2097152 +green_drop_probability 5
+yellow_max_threshold 2097152 +yellow_drop_probability 5 +red_drop_probability 5 +----------------------- ------- + +""" + +ecn_show_config_output_multi = """\ +Profile: AZURE_LOSSLESS +----------------------- ------- +red_max_threshold 2097152 +ecn ecn_all +green_min_threshold 1048576 +red_min_threshold 1048576 +yellow_min_threshold 1048576 +green_max_threshold 2097152 +green_drop_probability 5 +yellow_max_threshold 2097152 +yellow_drop_probability 5 +red_drop_probability 5 +----------------------- ------- + +Profile: AZURE_LOSSY +----------------------- ----- +red_max_threshold 32760 +red_min_threshold 4095 +yellow_max_threshold 32760 +yellow_min_threshold 4095 +green_max_threshold 32760 +green_min_threshold 4095 +yellow_drop_probability 2 +----------------------- ----- + +""" + testData = { - 'ecn_show_config' : {'cmd' : ['show'], - 'args' : [], - 'rc' : 0, - 'rc_output': ecn_show_config_output + 'ecn_show_config': {'cmd': ['show'], + 'args': [], + 'rc': 0, + 'rc_output': ecn_show_config_output }, - 'ecn_show_config_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-l', '-vv'], - 'rc' : 0, - 'rc_output': ecn_show_config_output + 'Total profiles: 1\n' + 'ecn_show_config_verbose': {'cmd': ['q_cmd'], + 'args': ['-l', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output + 'Total profiles: 1\n' }, - 'ecn_cfg_gmin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_min_threshold,1048600'] + 'ecn_cfg_gmin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_min_threshold,1048600'] }, - 'ecn_cfg_gmin_verbose' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600', '-vv'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_min_threshold,1048600'], - 'rc_output' : 'Running command: ecnconfig -p AZURE_LOSSLESS -gmin 1048600 -vv\nSetting green_min_threshold value to 1048600\n' + 'ecn_cfg_gmin_verbose': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmin', '1048600', '-vv'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_min_threshold,1048600'], + 'rc_output': ('Running command: ecnconfig -p AZURE_LOSSLESS -gmin 1048600 -vv\n' + 'Setting green_min_threshold value to 1048600\n') }, - 'ecn_cfg_gmax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_max_threshold,2097153'] + 'ecn_cfg_gmax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_max_threshold,2097153'] }, - 'ecn_cfg_ymin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_min_threshold,1048600'] + 'ecn_cfg_ymin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_min_threshold,1048600'] }, - 'ecn_cfg_ymax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_max_threshold,2097153'] + 'ecn_cfg_ymax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_max_threshold,2097153'] }, - 'ecn_cfg_rmin' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmin', '1048600'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_min_threshold,1048600'] 
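+ # cmp_args entries begin with a namespace token; an empty first token means the default namespace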
+ 'ecn_cfg_rmin': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmin', '1048600'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_min_threshold,1048600'] }, - 'ecn_cfg_rmax' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_max_threshold,2097153'] + 'ecn_cfg_rmax': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_max_threshold,2097153'] }, - 'ecn_cfg_rdrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rdrop', '10'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,red_drop_probability,10'] + 'ecn_cfg_rdrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rdrop', '10'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,red_drop_probability,10'] }, - 'ecn_cfg_ydrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ydrop', '11'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,yellow_drop_probability,11'] + 'ecn_cfg_ydrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ydrop', '11'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,yellow_drop_probability,11'] }, - 'ecn_cfg_gdrop' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12'] + 'ecn_cfg_gdrop': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12'] }, - 'ecn_cfg_gdrop_verbose' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12'], - 'rc_output' : 'Running command: ecnconfig -p AZURE_LOSSLESS -gdrop 12 -vv\nSetting green_drop_probability value to 12%\n' + 'ecn_cfg_gdrop_verbose': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12'], + 'rc_output': ('Running command: ecnconfig -p AZURE_LOSSLESS -gdrop 12 -vv\n' + 'Setting green_drop_probability value to 12%\n') }, - 'ecn_cfg_multi_set' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-gmax', '2097153'], - 'rc' : 0, - 'cmp_args' : ['AZURE_LOSSLESS,green_drop_probability,12', - 'AZURE_LOSSLESS,green_max_threshold,2097153' - ] + 'ecn_cfg_multi_set': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gdrop', '12', '-gmax', '2097153'], + 'rc': 0, + 'cmp_args': [',AZURE_LOSSLESS,green_drop_probability,12', + ',AZURE_LOSSLESS,green_max_threshold,2097153'] }, - 'ecn_cfg_gmin_gmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-gmax', '2097153', '-gmin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid gmin (2097154) and gmax (2097153). gmin should be smaller than gmax' + 'ecn_cfg_gmin_gmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-gmax', + '2097153', '-gmin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid gmin (2097154) and gmax (2097153).' + ' gmin should be smaller than gmax') }, - 'ecn_cfg_ymin_ymax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-ymax', '2097153', '-ymin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid ymin (2097154) and ymax (2097153). 
ymin should be smaller than ymax' + 'ecn_cfg_ymin_ymax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-ymax', + '2097153', '-ymin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid ymin (2097154) and ymax (2097153).' + ' ymin should be smaller than ymax') }, - 'ecn_cfg_rmin_rmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '2097153', '-rmin', '2097154'], - 'rc' : 1, - 'rc_msg' : 'Invalid rmin (2097154) and rmax (2097153). rmin should be smaller than rmax' + 'ecn_cfg_rmin_rmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', + '2097153', '-rmin', '2097154'], + 'rc': 1, + 'rc_msg': ('Invalid rmin (2097154) and rmax (2097153).' + ' rmin should be smaller than rmax') }, - 'ecn_cfg_rmax_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rmax', '-2097153'], - 'rc' : 1, - 'rc_msg' : 'Invalid rmax (-2097153). rmax should be an non-negative integer' - }, - 'ecn_cfg_rdrop_invalid' : {'cmd' : ['config'], - 'args' : ['-profile', 'AZURE_LOSSLESS', '-rdrop', '105'], - 'rc' : 1, - 'rc_msg' : 'Invalid value for "-rdrop": 105 is not in the valid range of 0 to 100' + 'ecn_cfg_rmax_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rmax', '-2097153'], + 'rc': 1, + 'rc_msg': 'Invalid rmax (-2097153). rmax should be an non-negative integer' }, - 'ecn_q_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 3: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_rdrop_invalid': {'cmd': ['config'], + 'args': ['-profile', 'AZURE_LOSSLESS', '-rdrop', '105'], + 'rc': 1, + 'rc_msg': 'Invalid value for "-rdrop": 105 is not in the valid range of 0 to 100' + }, + 'ecn_q_get': {'cmd': ['q_cmd'], + 'args': ['-q', '3'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 3: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_q_get_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', '-vv'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\n{0} queue 3: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR' + 'ecn_q_get_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3', '-vv'], + 'rc': 0, + 'rc_msg': 'ECN status:\n{0} queue 3: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR' }, - 'ecn_lossy_q_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '2'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 2: off\n', - 'cmp_args' : [None], - 'cmp_q_args' : ['2'] + 'ecn_lossy_q_get': {'cmd': ['q_cmd'], + 'args': ['-q', '2'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 2: off\n', + 'cmp_args': [',None,None'], + 'cmp_q_args': ['2'] }, - 'ecn_q_all_get_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', '-vv'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\n{0} queue 3: on\n{0} queue 4: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR' + 'ecn_q_all_get_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', '-vv'], + 'rc': 0, + 'rc_msg': 'ECN status:\n{0} queue 3: on\n{0} queue 4: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR' }, - 'ecn_q_all_get' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4'], - 'rc' : 0, - 'rc_msg' : 'ECN status:\nqueue 3: on\nqueue 4: on\n', - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_q_all_get': 
{'cmd': ['q_cmd'], + 'args': ['-q', '3,4'], + 'rc': 0, + 'rc_msg': 'ECN status:\nqueue 3: on\nqueue 4: on\n', + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_all_off' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'off'], - 'rc' : 0, - 'cmp_args' : [None], - 'cmp_q_args' : ['3', '4'] - }, - 'ecn_cfg_q_all_off_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'off', '-vv'], - 'rc' : 0, - 'cmp_args' : [None], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Disable ECN on {0} queue 3\nDisable ECN on {0} queue 4' + 'ecn_cfg_q_all_off': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'off'], + 'rc': 0, + 'cmp_args': [',None,None'], + 'cmp_q_args': ['3', '4'] + }, + 'ecn_cfg_q_all_off_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'off', '-vv'], + 'rc': 0, + 'cmp_args': [',None,None'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Disable ECN on {0} queue 3\nDisable ECN on {0} queue 4' }, - 'ecn_cfg_q_off' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', 'off'], - 'rc' : 0, - 'cmp_args' : [None, 'wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3'], - 'other_q' : ['4'] + 'ecn_cfg_q_off': {'cmd': ['q_cmd'], + 'args': ['-q', '3', 'off'], + 'rc': 0, + 'cmp_args': [',None,None', ',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3'], + 'other_q': ['4'] }, - 'ecn_cfg_q_off_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3', 'off', '-vv'], - 'rc' : 0, - 'cmp_args' : [None, 'wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3'], - 'other_q' : ['4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Disable ECN on {0} queue 3' + 'ecn_cfg_q_off_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3', 'off', '-vv'], + 'rc': 0, + 'cmp_args': [',None,None', ',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3'], + 'other_q': ['4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Disable ECN on {0} queue 3' }, - 'ecn_cfg_q_all_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_q_all_on': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_all_on_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '3,4', 'on', '-vv'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Enable ECN on {0} queue 3\nEnable ECN on {0} queue 4' + 'ecn_cfg_q_all_on_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '3,4', 'on', '-vv'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 'DEVICE_NEIGHBOR', + 'rc_msg': 'Enable ECN on {0} queue 3\nEnable ECN on {0} queue 4' }, - 'ecn_cfg_q_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '4', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'] + 'ecn_cfg_q_on': {'cmd': ['q_cmd'], + 'args': ['-q', '4', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'] }, - 'ecn_cfg_q_on_verbose' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '4', 'on', '-vv'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['3', '4'], - 'db_table' : 'DEVICE_NEIGHBOR', - 'rc_msg' : 'Enable ECN on {0} queue 4' + 'ecn_cfg_q_on_verbose': {'cmd': ['q_cmd'], + 'args': ['-q', '4', 'on', '-vv'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['3', '4'], + 'db_table': 
'DEVICE_NEIGHBOR', + 'rc_msg': 'Enable ECN on {0} queue 4' }, - 'ecn_cfg_lossy_q_on' : {'cmd' : ['q_cmd'], - 'args' : ['-q', '0,1,2,5,6,7', 'on'], - 'rc' : 0, - 'cmp_args' : ['wred_profile,AZURE_LOSSLESS'], - 'cmp_q_args' : ['0', '1', '2', '5', '6', '7'] - } + 'ecn_cfg_lossy_q_on': {'cmd': ['q_cmd'], + 'args': ['-q', '0,1,2,5,6,7', 'on'], + 'rc': 0, + 'cmp_args': [',wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['0', '1', '2', '5', '6', '7'] + }, + 'ecn_show_config_masic': {'cmd': ['show_masic'], + 'args': ['-l'], + 'rc': 0, + 'rc_output': ecn_show_config_output_multi, + }, + 'test_ecn_show_config_verbose_masic': {'cmd': ['show_masic'], + 'args': ['-l', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output_multi + 'Total profiles: 2\n', + }, + 'test_ecn_show_config_namespace': {'cmd': ['show_masic'], + 'args': ['-l', '-n', 'asic0'], + 'rc': 0, + 'rc_output': ecn_show_config_output_specific_namespace, + }, + 'test_ecn_show_config_namespace_verbose': {'cmd': ['show_masic'], + 'args': ['-l', '-n', 'asic0', '-vv'], + 'rc': 0, + 'rc_output': ecn_show_config_output_specific_namespace + + 'Total profiles: 1\n', + }, + 'ecn_cfg_threshold_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSY', '-gmax', '35000', '-n', 'asic1'], + 'rc': 0, + 'cmp_args': ['asic1,AZURE_LOSSY,green_max_threshold,35000'] + }, + 'ecn_cfg_probability_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSY', '-ydrop', '3', '-n', 'asic1'], + 'rc': 0, + 'cmp_args': ['asic1,AZURE_LOSSY,yellow_drop_probability,3'] + }, + 'ecn_cfg_gdrop_verbose_all_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSLESS', '-gdrop', '12', '-vv'], + 'rc': 0, + 'cmp_args': ['asic0-asic1,AZURE_LOSSLESS,green_drop_probability,12'], + 'rc_output': ('Setting green_drop_probability value to 12% ' + 'for namespace asic0\n' + 'Setting green_drop_probability value to 12% ' + 'for namespace asic1\n') + }, + 'ecn_cfg_multi_set_verbose_all_masic': {'cmd': ['config_masic'], + 'args': ['-p', 'AZURE_LOSSLESS', '-gdrop', + '14', '-gmax', '2097153', '-vv'], + 'rc': 0, + 'cmp_args': [('asic0-asic1,AZURE_LOSSLESS,' + 'green_drop_probability,14'), + ('asic0-asic1,AZURE_LOSSLESS,' + 'green_max_threshold,2097153')], + 'rc_output': ('Setting green_max_threshold value to 2097153 ' + 'for namespace asic0\n' + 'Setting green_max_threshold value to 2097153 ' + 'for namespace asic1\n' + 'Setting green_drop_probability value to 14% ' + 'for namespace asic0\n' + 'Setting green_drop_probability value to 14% ' + 'for namespace asic1\n') + }, + 'ecn_q_get_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', '-n', 'asic0'], + 'rc': 0, + 'rc_msg': 'ECN status for namespace asic0:\nqueue 1: on\n', + 'cmp_args': ['asic0,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['1'] + }, + 'ecn_q_get_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', '-vv', '-n', 'asic0'], + 'rc': 0, + 'rc_msg': 'ECN status for namespace asic0:\nEthernet4 queue 1: on\n', + 'cmp_args': ['asic0,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['1'], + 'db_table': 'DEVICE_NEIGHBOR' + }, + 'ecn_q_get_all_ns_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0'], + 'rc': 0, + 'rc_msg': ('ECN status for namespace asic0:\nqueue 0: off\n' + 'ECN status for namespace asic1:\nqueue 0: on\n') + }, + 'ecn_q_get_all_ns_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '0', '-vv'], + 'rc': 0, + 'rc_msg': ('ECN status for namespace asic0:\nEthernet4 queue 0: off\n' + 'ECN status for namespace asic1:\nEthernet0 queue 0: on\n') + }, + 'ecn_cfg_q_all_ns_off_masic': {'cmd': ['q_cmd'], + 'args': ['-q', 
'0,1', 'off'], + 'rc': 0, + 'cmp_args': ['asic0-asic1,None,None'], + 'cmp_q_args': ['0', '1'] + }, + 'ecn_cfg_q_one_ns_off_verbose_masic': {'cmd': ['q_cmd'], + 'args': ['-q', '1', 'on', '-n', 'asic1', '-vv'], + 'rc': 0, + 'rc_msg': 'Enable ECN on Ethernet0 queue 1\n', + 'cmp_args': ['asic1,wred_profile,AZURE_LOSSLESS', + 'asic1,wred_profile,AZURE_LOSSLESS'], + 'cmp_q_args': ['0'], + 'other_q': ['1'] + } } diff --git a/tests/ecn_test.py b/tests/ecn_test.py index 13474b12e8..5d2ac36011 100644 --- a/tests/ecn_test.py +++ b/tests/ecn_test.py @@ -6,11 +6,15 @@ from click.testing import CliRunner import config.main as config -from .ecn_input.ecn_test_vectors import * +from .ecn_input.ecn_test_vectors import testData from .utils import get_result_and_return_code from utilities_common.db import Db import show.main as show +# Constants +ARGS_DELIMITER = ',' +NAMESPACE_DELIMITER = '-' + test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") @@ -18,13 +22,107 @@ sys.path.insert(0, modules_path) -class TestEcnConfig(object): +class TestEcnConfigBase(object): @classmethod def setup_class(cls): + print("SETUP") os.environ["PATH"] += os.pathsep + scripts_path os.environ['UTILITIES_UNIT_TESTING'] = "2" - print("SETUP") + def process_cmp_args(self, cmp_args): + """ + cmp_args is a single delimiter-separated string. Tokens equal to 'None' + are treated as None objects, and the first token is always a + collection of namespaces. + """ + + args = cmp_args.split(ARGS_DELIMITER) + args = [None if arg == "None" else arg for arg in args] + args[0] = args[0].split(NAMESPACE_DELIMITER) + return args + + def verify_profile(self, queue_db_entry, profile, value): + if profile is not None: + assert queue_db_entry[profile] == value + else: + assert profile not in queue_db_entry,\ + "Profile needs to be fully removed from table to propagate NULL OID to SAI" + + def executor(self, input): + runner = CliRunner() + + if 'db_table' in input: + db = Db() + data_list = list(db.cfgdb.get_table(input['db_table'])) + input['rc_msg'] = input['rc_msg'].format(",".join(data_list)) + + if 'show' in input['cmd']: + exec_cmd = show.cli.commands["ecn"] + result = runner.invoke(exec_cmd, input['args']) + exit_code = result.exit_code + output = result.output + elif 'q_cmd' in input['cmd'] or 'show_masic' in input['cmd'] or 'config_masic' in input['cmd']: + exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) + else: + exec_cmd = config.config.commands["ecn"] + result = runner.invoke(exec_cmd, input['args']) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + if input['rc'] == 0: + assert exit_code == 0 + else: + assert exit_code != 0 + + if 'cmp_args' in input: + fd = open('/tmp/ecnconfig', 'r') + cmp_data = json.load(fd) + + # Verify queue assignments + if 'cmp_q_args' in input: + namespaces, profile, value = self.process_cmp_args(input['cmp_args'][0]) + for namespace in namespaces: + for key in cmp_data[namespace]: + queue_idx = ast.literal_eval(key)[-1] + if queue_idx in input['cmp_q_args']: + self.verify_profile(cmp_data[namespace][key], profile, value) + + # other_q helps verify two different queue assignments + if 'other_q' in input: + namespaces1, profile1, value1 = self.process_cmp_args(input['cmp_args'][-1]) + for namespace1 in namespaces1: + for key in cmp_data[namespace1]: + queue_idx = ast.literal_eval(key)[-1] + if 'other_q' in input and queue_idx in input['other_q']: +
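# queues listed in other_q are checked against the last cmp_args entry +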
self.verify_profile(cmp_data[namespace1][key], profile1, value1) + # Verify non-queue related assignments + else: + for args in input['cmp_args']: + namespaces, profile, name, value = self.process_cmp_args(args) + for namespace in namespaces: + assert(cmp_data[namespace][profile][name] == value) + fd.close() + + if 'rc_msg' in input: + assert input['rc_msg'] in output + + if 'rc_output' in input: + assert output == input['rc_output'] + + @classmethod + def teardown_class(cls): + os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + + if os.path.isfile('/tmp/ecnconfig'): + os.remove('/tmp/ecnconfig') + print("TEARDOWN") + + +class TestEcnConfig(TestEcnConfigBase): def test_ecn_show_config(self): self.executor(testData['ecn_show_config']) @@ -123,77 +221,3 @@ def test_ecn_queue_set_all_on_verbose(self): def test_ecn_queue_set_lossy_q_on(self): self.executor(testData['ecn_cfg_lossy_q_on']) - - def process_cmp_args(self, cmp_args): - if cmp_args is None: - return (None, None) - return cmp_args.split(',') - - def verify_profile(self, queue_db_entry, profile, value): - if profile != None: - assert queue_db_entry[profile] == value - else: - assert profile not in queue_db_entry,\ - "Profile needs to be fully removed from table to propagate NULL OID to SAI" - - def executor(self, input): - runner = CliRunner() - - if 'db_table' in input: - db = Db() - data_list = list(db.cfgdb.get_table(input['db_table'])) - input['rc_msg'] = input['rc_msg'].format(",".join(data_list)) - - if 'show' in input['cmd']: - exec_cmd = show.cli.commands["ecn"] - result = runner.invoke(exec_cmd, input['args']) - exit_code = result.exit_code - output = result.output - elif 'q_cmd' in input['cmd'] : - exit_code, output = get_result_and_return_code(["ecnconfig"] + input['args']) - else: - exec_cmd = config.config.commands["ecn"] - result = runner.invoke(exec_cmd, input['args']) - exit_code = result.exit_code - output = result.output - - print(exit_code) - print(output) - - if input['rc'] == 0: - assert exit_code == 0 - else: - assert exit_code != 0 - - if 'cmp_args' in input: - fd = open('/tmp/ecnconfig', 'r') - cmp_data = json.load(fd) - if 'cmp_q_args' in input: - profile, value = self.process_cmp_args(input['cmp_args'][0]) - if 'other_q' in input: - profile1, value1 = self.process_cmp_args(input['cmp_args'][-1]) - for key in cmp_data: - queue_idx = ast.literal_eval(key)[-1] - if queue_idx in input['cmp_q_args']: - self.verify_profile(cmp_data[key], profile, value) - if 'other_q' in input and queue_idx in input['other_q']: - self.verify_profile(cmp_data[key], profile1, value1) - else: - for args in input['cmp_args']: - profile, name, value = args.split(',') - assert(cmp_data[profile][name] == value) - fd.close() - - if 'rc_msg' in input: - assert input['rc_msg'] in output - - if 'rc_output' in input: - assert output == input['rc_output'] - - @classmethod - def teardown_class(cls): - os.environ['PATH'] = os.pathsep.join(os.environ['PATH'].split(os.pathsep)[:-1]) - os.environ['UTILITIES_UNIT_TESTING'] = "0" - if os.path.isfile('/tmp/ecnconfig'): - os.remove('/tmp/ecnconfig') - print("TEARDOWN") diff --git a/tests/fabricstat_test.py b/tests/fabricstat_test.py index cc4c049806..a8a334cb92 100644 --- a/tests/fabricstat_test.py +++ b/tests/fabricstat_test.py @@ -200,6 +200,45 @@ 7 0 0 0 """ +multi_asic_fabric_rate = """\ + + ASIC Link ID Rx Data Mbps Tx Data Mbps +------ --------- -------------- -------------- + asic0 0 0 19.8 + asic0 1 0 19.8 + asic0 2 0 
39.8
+ asic0          3               0            39.8
+ asic0          4               0            39.8
+ asic0          5               0            39.8
+ asic0          6               0            39.3
+ asic0          7               0            39.3
+
+  ASIC    Link ID    Rx Data Mbps    Tx Data Mbps
+------  ---------  --------------  --------------
+ asic1          0               0               0
+ asic1          1               0               0
+ asic1          2               0               0
+ asic1          3               0               0
+ asic1          4               0               0
+ asic1          5               0               0
+ asic1          6               0               0
+ asic1          7               0               0
+"""
+
+multi_asic_fabric_rate_asic0 = """\
+
+  ASIC    Link ID    Rx Data Mbps    Tx Data Mbps
+------  ---------  --------------  --------------
+ asic0          0               0            19.8
+ asic0          1               0            19.8
+ asic0          2               0            39.8
+ asic0          3               0            39.8
+ asic0          4               0            39.8
+ asic0          5               0            39.8
+ asic0          6               0            39.3
+ asic0          7               0            39.3
+"""
+
 class TestFabricStat(object):
     @classmethod
     def setup_class(cls):
@@ -348,6 +387,20 @@ def test_multi_show_fabric_isolation_asic(self):
         assert return_code == 0
         assert result == multi_asic_fabric_isolation_asic0
 
+    def test_multi_show_fabric_rate(self):
+        return_code, result = get_result_and_return_code(['fabricstat', '-s'])
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert return_code == 0
+        assert result == multi_asic_fabric_rate
+
+    def test_multi_show_fabric_rate_asic(self):
+        return_code, result = get_result_and_return_code(['fabricstat', '-s', '-n', 'asic0'])
+        print("return_code: {}".format(return_code))
+        print("result = {}".format(result))
+        assert return_code == 0
+        assert result == multi_asic_fabric_rate_asic0
+
     @classmethod
     def teardown_class(cls):
         print("TEARDOWN")
diff --git a/tests/flock_test.py b/tests/flock_test.py
new file mode 100644
index 0000000000..7d9039dd2d
--- /dev/null
+++ b/tests/flock_test.py
@@ -0,0 +1,187 @@
+import pytest
+import tempfile
+import threading
+import time
+
+from unittest import mock
+from utilities_common import flock
+
+
+f0_exit = threading.Event()
+f1_exit = threading.Event()
+f2_exit = threading.Event()
+
+
+def dummy_f0():
+    while not f0_exit.is_set():
+        time.sleep(1)
+
+
+def dummy_f1(bypass_lock=False):
+    while not f1_exit.is_set():
+        time.sleep(1)
+
+
+def dummy_f2(bypass_lock=True):
+    while not f2_exit.is_set():
+        time.sleep(1)
+
+
+class TestFLock:
+    def setup(self):
+        print("SETUP")
+        f0_exit.clear()
+        f1_exit.clear()
+        f2_exit.clear()
+
+    def test_flock_acquire_lock_non_blocking(self):
+        """Test flock non-blocking acquire lock."""
+        with tempfile.NamedTemporaryFile() as fd0:
+            fd1 = open(fd0.name, "r")
+
+            assert flock.acquire_flock(fd0.fileno(), 0)
+            assert not flock.acquire_flock(fd1.fileno(), 0)
+
+            flock.release_flock(fd0.fileno())
+
+            assert flock.acquire_flock(fd1.fileno(), 0)
+            flock.release_flock(fd1.fileno())
+
+    def test_flock_acquire_lock_blocking(self):
+        """Test flock blocking acquire."""
+        with tempfile.NamedTemporaryFile() as fd0:
+            fd1 = open(fd0.name, "r")
+            res = []
+
+            assert flock.acquire_flock(fd0.fileno(), 0)
+            thrd = threading.Thread(target=lambda: res.append(flock.acquire_flock(fd1.fileno(), -1)))
+            thrd.start()
+
+            time.sleep(5)
+            assert thrd.is_alive()
+
+            flock.release_flock(fd0.fileno())
+            thrd.join()
+            assert len(res) == 1 and res[0]
+
+            fd2 = open(fd0.name, "r")
+            assert not flock.acquire_flock(fd2.fileno(), 0)
+
+            flock.release_flock(fd1.fileno())
+            assert flock.acquire_flock(fd2.fileno(), 0)
+            flock.release_flock(fd2.fileno())
+
+    def test_flock_acquire_lock_timeout(self):
+        """Test flock timeout acquire."""
+        with tempfile.NamedTemporaryFile() as fd0:
+            def acquire_helper():
+                nonlocal elapsed
+                start = time.time()
+                res.append(flock.acquire_flock(fd1.fileno(), 5))
+                end = time.time()
+                elapsed = end - start
+
+            fd1 = open(fd0.name, "r")
+            elapsed = 0
+            res = []
+
+            assert flock.acquire_flock(fd0.fileno(), 0)
+            thrd = threading.Thread(target=acquire_helper)
+            thrd.start()
+
+            thrd.join()
+            assert ((len(res) == 1) and (not res[0]))
+            assert elapsed >= 5
+
+            flock.release_flock(fd0.fileno())
+
+    @mock.patch("click.echo")
+    def test_try_lock(self, mock_echo):
+        """Test try_lock decorator."""
+        with tempfile.NamedTemporaryFile() as fd0:
+            def get_file_content(fd):
+                fd.seek(0)
+                return fd.read()
+
+            f0_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f0)
+            f1_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f1)
+
+            thrd = threading.Thread(target=f0_with_try_lock)
+            thrd.start()
+            time.sleep(2)
+
+            try:
+                assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}")]
+                assert b"dummy_f0" in get_file_content(fd0)
+
+                with pytest.raises(SystemExit):
+                    f1_with_try_lock()
+                assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}"),
+                                                    mock.call(f"Failed to acquire lock on {fd0.name}")]
+            finally:
+                f0_exit.set()
+                thrd.join()
+
+            assert b"dummy_f0" not in get_file_content(fd0)
+
+            thrd = threading.Thread(target=f1_with_try_lock)
+            thrd.start()
+            time.sleep(2)
+
+            try:
+                assert mock_echo.call_args_list == [mock.call(f"Acquired lock on {fd0.name}"),
+                                                    mock.call(f"Failed to acquire lock on {fd0.name}"),
+                                                    mock.call(f"Released lock on {fd0.name}"),
+                                                    mock.call(f"Acquired lock on {fd0.name}")]
+                assert b"dummy_f1" in get_file_content(fd0)
+            finally:
+                f1_exit.set()
+                thrd.join()
+
+            assert b"dummy_f1" not in get_file_content(fd0)
+
+    @mock.patch("click.echo")
+    def test_try_lock_with_bypass(self, mock_echo):
+        with tempfile.NamedTemporaryFile() as fd0:
+            def get_file_content(fd):
+                fd.seek(0)
+                return fd.read()
+
+            f1_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f1)
+
+            thrd = threading.Thread(target=f1_with_try_lock, args=(True,))
+            thrd.start()
+            time.sleep(2)
+
+            try:
+                assert mock_echo.call_args_list == [mock.call(f"Bypass lock on {fd0.name}")]
+                assert b"dummy_f1" not in get_file_content(fd0)
+            finally:
+                f1_exit.set()
+                thrd.join()
+
+    @mock.patch("click.echo")
+    def test_try_lock_with_bypass_default(self, mock_echo):
+        with tempfile.NamedTemporaryFile() as fd0:
+            def get_file_content(fd):
+                fd.seek(0)
+                return fd.read()
+
+            f2_with_try_lock = flock.try_lock(fd0.name, timeout=0)(dummy_f2)
+
+            thrd = threading.Thread(target=f2_with_try_lock)
+            thrd.start()
+            time.sleep(2)
+
+            try:
+                assert mock_echo.call_args_list == [mock.call(f"Bypass lock on {fd0.name}")]
+                assert b"dummy_f2" not in get_file_content(fd0)
+            finally:
+                f2_exit.set()
+                thrd.join()
+
+    def teardown(self):
+        print("TEARDOWN")
+        f0_exit.clear()
+        f1_exit.clear()
+        f2_exit.clear()
diff --git a/tests/generic_config_updater/change_applier_test.py b/tests/generic_config_updater/change_applier_test.py
index 4c9b33c3a4..6a8926f013 100644
--- a/tests/generic_config_updater/change_applier_test.py
+++ b/tests/generic_config_updater/change_applier_test.py
@@ -72,28 +72,25 @@ def debug_print(msg):
     print(msg)
-
-# Mimics os.system call for sonic-cfggen -d --print-data > filename
+# Mimics subprocess.Popen for `sonic-cfggen -d --print-data` output
 def subprocess_Popen_cfggen(cmd, *args, **kwargs):
     global running_config
 
-    # Extract file name from kwargs if 'stdout' is a file object
-    stdout = kwargs.get('stdout')
-    if hasattr(stdout, 'name'):
-        fname = stdout.name
+    stdout = kwargs.get('stdout', None)
+
+    if stdout is None:
+        output = json.dumps(running_config, indent=4)
+    elif isinstance(stdout, int) and stdout == -1:
+        output =
json.dumps(running_config, indent=4) else: - raise ValueError("stdout is not a file") + raise ValueError("stdout must be set to subprocess.PIPE or omitted for capturing output") - # Write the running configuration to the file specified in stdout - with open(fname, "w") as s: - json.dump(running_config, s, indent=4) - class MockPopen: def __init__(self): - self.returncode = 0 # Simulate successful command execution + self.returncode = 0 def communicate(self): - return "", "" # Simulate empty stdout and stderr + return output.encode(), "".encode() return MockPopen() @@ -225,7 +222,7 @@ def vlan_validate(old_cfg, new_cfg, keys): class TestChangeApplier(unittest.TestCase): - @patch("generic_config_updater.change_applier.subprocess.Popen") + @patch("generic_config_updater.gu_common.subprocess.Popen") @patch("generic_config_updater.change_applier.get_config_db") @patch("generic_config_updater.change_applier.set_config") def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): @@ -242,10 +239,11 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): running_config = copy.deepcopy(read_data["running_data"]) json_changes = copy.deepcopy(read_data["json_changes"]) + generic_config_updater.change_applier.ChangeApplier.updater_conf = None generic_config_updater.change_applier.UPDATER_CONF_FILE = CONF_FILE generic_config_updater.change_applier.set_verbose(True) generic_config_updater.services_validator.set_verbose(True) - + applier = generic_config_updater.change_applier.ChangeApplier() debug_print("invoked applier") @@ -254,7 +252,7 @@ def test_change_apply(self, mock_set, mock_db, mock_subprocess_Popen): # Take copy for comparison start_running_config = copy.deepcopy(running_config) - + debug_print("main: json_change_index={}".format(json_change_index)) applier.apply(mock_obj()) @@ -297,4 +295,3 @@ def test_apply__calls_apply_change_to_config_db(self): # Assert applier.config_wrapper.apply_change_to_config_db.assert_has_calls([call(change)]) - diff --git a/tests/generic_config_updater/gcu_feature_patch_application_test.py b/tests/generic_config_updater/gcu_feature_patch_application_test.py index db625e8cd1..27d9ebf216 100644 --- a/tests/generic_config_updater/gcu_feature_patch_application_test.py +++ b/tests/generic_config_updater/gcu_feature_patch_application_test.py @@ -6,13 +6,15 @@ from mock import patch import generic_config_updater.change_applier +import generic_config_updater.gu_common import generic_config_updater.patch_sorter as ps import generic_config_updater.generic_updater as gu from .gutest_helpers import Files from generic_config_updater.gu_common import ConfigWrapper, PatchWrapper running_config = {} - + + def set_entry(config_db, tbl, key, data): global running_config if data != None: @@ -26,9 +28,11 @@ def set_entry(config_db, tbl, key, data): if not running_config[tbl]: running_config.pop(tbl) -def get_running_config(): + +def get_running_config(scope="localhost"): return running_config + class TestFeaturePatchApplication(unittest.TestCase): def setUp(self): self.config_wrapper = ConfigWrapper() @@ -87,13 +91,13 @@ def create_patch_applier(self, config): config_wrapper = self.config_wrapper config_wrapper.get_config_db_as_json = MagicMock(side_effect=get_running_config) change_applier = generic_config_updater.change_applier.ChangeApplier() - change_applier._get_running_config = MagicMock(side_effect=get_running_config) patch_wrapper = PatchWrapper(config_wrapper) return gu.PatchApplier(config_wrapper=config_wrapper, patch_wrapper=patch_wrapper, 
changeapplier=change_applier) + @patch('generic_config_updater.change_applier.get_config_db_as_json', side_effect=get_running_config) @patch("generic_config_updater.change_applier.get_config_db") @patch("generic_config_updater.change_applier.set_config") - def run_single_success_case_applier(self, data, mock_set, mock_db): + def run_single_success_case_applier(self, data, mock_set, mock_db, mock_get_config_db_as_json): current_config = data["current_config"] expected_config = data["expected_config"] patch = jsonpatch.JsonPatch(data["patch"]) @@ -121,7 +125,8 @@ def run_single_success_case_applier(self, data, mock_set, mock_db): self.assertEqual(simulated_config, expected_config) @patch("generic_config_updater.change_applier.get_config_db") - def run_single_failure_case_applier(self, data, mock_db): + @patch('generic_config_updater.change_applier.get_config_db_as_json', side_effect=get_running_config) + def run_single_failure_case_applier(self, data, mock_db, mock_get_config_db_as_json): current_config = data["current_config"] patch = jsonpatch.JsonPatch(data["patch"]) expected_error_substrings = data["expected_error_substrings"] diff --git a/tests/generic_config_updater/generic_updater_test.py b/tests/generic_config_updater/generic_updater_test.py index 96c25e3552..8480dc23b0 100644 --- a/tests/generic_config_updater/generic_updater_test.py +++ b/tests/generic_config_updater/generic_updater_test.py @@ -2,7 +2,7 @@ import os import shutil import unittest -from unittest.mock import MagicMock, Mock, call +from unittest.mock import MagicMock, Mock, call, patch from .gutest_helpers import create_side_effect_dict, Files import generic_config_updater.generic_updater as gu @@ -124,6 +124,8 @@ def __create_config_replacer(self, changes=None, verified_same_config=True): return gu.ConfigReplacer(patch_applier, config_wrapper, patch_wrapper) + +@patch('generic_config_updater.generic_updater.get_config_json', MagicMock(return_value={})) class TestFileSystemConfigRollbacker(unittest.TestCase): def setUp(self): self.checkpoints_dir = os.path.join(os.getcwd(),"checkpoints") diff --git a/tests/generic_config_updater/gu_common_test.py b/tests/generic_config_updater/gu_common_test.py index a2a776c0bb..4a16a5ca4f 100644 --- a/tests/generic_config_updater/gu_common_test.py +++ b/tests/generic_config_updater/gu_common_test.py @@ -76,6 +76,28 @@ def test_ctor__default_values_set(self): self.assertEqual("/usr/local/yang-models", gu_common.YANG_DIR) + @patch('generic_config_updater.gu_common.subprocess.Popen') + def test_get_config_db_as_text(self, mock_popen): + config_wrapper = gu_common.ConfigWrapper() + mock_proc = MagicMock() + mock_proc.communicate = MagicMock( + return_value=("[]", None)) + mock_proc.returncode = 0 + mock_popen.return_value = mock_proc + actual = config_wrapper._get_config_db_as_text() + expected = "[]" + self.assertEqual(actual, expected) + + config_wrapper = gu_common.ConfigWrapper(scope="asic0") + mock_proc = MagicMock() + mock_proc.communicate = MagicMock( + return_value=("[]", None)) + mock_proc.returncode = 0 + mock_popen.return_value = mock_proc + actual = config_wrapper._get_config_db_as_text() + expected = "[]" + self.assertEqual(actual, expected) + def test_get_sonic_yang_as_json__returns_sonic_yang_as_json(self): # Arrange config_wrapper = self.config_wrapper_mock diff --git a/tests/generic_config_updater/multiasic_change_applier_test.py b/tests/generic_config_updater/multiasic_change_applier_test.py index e8b277618f..743969737d 100644 --- 
a/tests/generic_config_updater/multiasic_change_applier_test.py +++ b/tests/generic_config_updater/multiasic_change_applier_test.py @@ -1,33 +1,168 @@ +import jsonpointer import unittest from importlib import reload from unittest.mock import patch, MagicMock from generic_config_updater.generic_updater import extract_scope +from generic_config_updater.generic_updater import GenericConfigUpdaterError import generic_config_updater.change_applier import generic_config_updater.services_validator import generic_config_updater.gu_common +def mock_get_running_config_side_effect(scope): + print(f"mocked_value_for_{scope}") + return { + "tables": { + "ACL_TABLE": { + "services_to_validate": ["aclservice"], + "validate_commands": ["acl_loader show table"] + }, + "PORT": { + "services_to_validate": ["portservice"], + "validate_commands": ["show interfaces status"] + } + }, + "services": { + "aclservice": { + "validate_commands": ["acl_loader show table"] + }, + "portservice": { + "validate_commands": ["show interfaces status"] + } + } + } + + class TestMultiAsicChangeApplier(unittest.TestCase): - def test_extract_scope(self): + @patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_multiasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = True test_paths_expectedresults = { - "/asic0/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic01/PORTCHANNEL/PortChannel102/admin_status": (True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": (True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status"), - "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled"), - "/sometable/data": (True, "", "/sometable/data"), - "": (False, "", ""), - "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": (False, "", ""), + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic1/PORTCHANNEL_INTERFACE/PortChannel106|10.0.0.6/31": ( + True, "asic1", "/PORTCHANNEL_INTERFACE/PortChannel106|10.0.0.6/31" + ), + "/asic1/PORTCHANNEL_INTERFACE/PortChannel106|10.0.0.6~131": ( + True, "asic1", "/PORTCHANNEL_INTERFACE/PortChannel106|10.0.0.6~131" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + False, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), + "/asic77": ( + False, "", "" + ), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + 
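+            # A note on the expectations above: on a multi-ASIC device only a
+            # leading, lowercase "asic<N>" or "localhost" token counts as a
+            # valid scope, roughly (hypothetical sketch of the accepted shape):
+            #   re.match(r"^/(asic\d+|localhost)(/.+)$", path)
+            # so capitalized variants, misspellings like "asci1", and a bare
+            # "/asic77" with no remainder are all expected to be rejected.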
"/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + } + + for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): + try: + scope, remainder = extract_scope(test_path) + assert(scope == expectedscope) + assert(remainder == expectedremainder) + except AssertionError: + assert(not result) + except GenericConfigUpdaterError: + assert(not result) + except jsonpointer.JsonPointerException: + assert(not result) + + @patch('sonic_py_common.multi_asic.is_multi_asic') + def test_extract_scope_singleasic(self, mock_is_multi_asic): + mock_is_multi_asic.return_value = False + test_paths_expectedresults = { + "/asic0/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic01/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic01", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/asic0123456789/PORTCHANNEL/PortChannel102/admin_status": ( + True, "asic0123456789", "/PORTCHANNEL/PortChannel102/admin_status" + ), + "/localhost/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "localhost", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/asic1/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + True, "asic1", "/BGP_DEVICE_GLOBAL/STATE/tsa_enabled" + ), + "/sometable/data": ( + True, "", "/sometable/data" + ), + "": ( + False, "", "" + ), + "localhostabc/BGP_DEVICE_GLOBAL/STATE/tsa_enabled": ( + False, "", "" + ), "/asic77": (False, "", ""), - "/Asic0/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/Localhost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asci1/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asicx/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), - "/asic-12/PORTCHANNEL/PortChannel102/admin_status": (False, "", ""), + "/Asic0/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/ASIC1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/Localhost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/LocalHost/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asci1/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asicx/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), + "/asic-12/PORTCHANNEL/PortChannel102/admin_status": ( + False, "", "" + ), } for test_path, (result, expectedscope, expectedremainder) in test_paths_expectedresults.items(): @@ -35,39 +170,24 @@ def test_extract_scope(self): scope, remainder = extract_scope(test_path) assert(scope == expectedscope) assert(remainder == expectedremainder) - except Exception as e: - assert(result == False) + except AssertionError: + assert(not result) + except GenericConfigUpdaterError: + assert(not result) + except jsonpointer.JsonPointerException: + assert(not result) - @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + 
@patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) - def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + def test_apply_change_default_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector mock_db = MagicMock() mock_ConfigDBConnector.return_value = mock_db # Setup mock for json.load to return some running configuration - mock_get_running_config.return_value = { - "tables": { - "ACL_TABLE": { - "services_to_validate": ["aclservice"], - "validate_commands": ["acl_loader show table"] - }, - "PORT": { - "services_to_validate": ["portservice"], - "validate_commands": ["show interfaces status"] - } - }, - "services": { - "aclservice": { - "validate_commands": ["acl_loader show table"] - }, - "portservice": { - "validate_commands": ["show interfaces status"] - } - } - } + mock_get_running_config.side_effect = mock_get_running_config_side_effect - # Instantiate ChangeApplier with the default namespace + # Instantiate ChangeApplier with the default scope applier = generic_config_updater.change_applier.ChangeApplier() # Prepare a change object or data that applier.apply would use @@ -79,37 +199,16 @@ def test_apply_change_default_namespace(self, mock_ConfigDBConnector, mock_get_r # Assert ConfigDBConnector called with the correct namespace mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="") - @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) - def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_running_config): + def test_apply_change_given_scope(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector mock_db = MagicMock() mock_ConfigDBConnector.return_value = mock_db + mock_get_running_config.side_effect = mock_get_running_config_side_effect - # Setup mock for json.load to return some running configuration - mock_get_running_config.return_value = { - "tables": { - "ACL_TABLE": { - "services_to_validate": ["aclservice"], - "validate_commands": ["acl_loader show table"] - }, - "PORT": { - "services_to_validate": ["portservice"], - "validate_commands": ["show interfaces status"] - } - }, - "services": { - "aclservice": { - "validate_commands": ["acl_loader show table"] - }, - "portservice": { - "validate_commands": ["show interfaces status"] - } - } - } - - # Instantiate ChangeApplier with the default namespace - applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + # Instantiate ChangeApplier with the default scope + applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0") # Prepare a change object or data that applier.apply would use change = MagicMock() @@ -117,10 +216,10 @@ def test_apply_change_given_namespace(self, mock_ConfigDBConnector, mock_get_run # Call the apply method with the change object applier.apply(change) - # Assert ConfigDBConnector called with the correct namespace + # Assert ConfigDBConnector called with the correct scope mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0") - @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + 
@patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ -129,9 +228,9 @@ def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_con # Setup mock for json.load to return some running configuration mock_get_running_config.side_effect = Exception("Failed to get running config") - # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment - namespace = "asic0" - applier = generic_config_updater.change_applier.ChangeApplier(namespace=namespace) + # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment + scope = "asic0" + applier = generic_config_updater.change_applier.ChangeApplier(scope=scope) # Prepare a change object or data that applier.apply would use change = MagicMock() @@ -142,7 +241,7 @@ def test_apply_change_failure(self, mock_ConfigDBConnector, mock_get_running_con self.assertTrue('Failed to get running config' in str(context.exception)) - @patch('generic_config_updater.change_applier.ChangeApplier._get_running_config', autospec=True) + @patch('generic_config_updater.change_applier.get_config_db_as_json', autospec=True) @patch('generic_config_updater.change_applier.ConfigDBConnector', autospec=True) def test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, mock_get_running_config): # Setup mock for ConfigDBConnector @@ -150,17 +249,20 @@ def test_apply_patch_with_empty_tables_failure(self, mock_ConfigDBConnector, moc mock_ConfigDBConnector.return_value = mock_db # Setup mock for json.load to simulate configuration where crucial tables are unexpectedly empty - mock_get_running_config.return_value = { - "tables": { - # Simulate empty tables or missing crucial configuration - }, - "services": { - # Normally, services would be listed here + def mock_get_empty_running_config_side_effect(): + return { + "tables": { + # Simulate empty tables or missing crucial configuration + }, + "services": { + # Normally, services would be listed here + } } - } - # Instantiate ChangeApplier with a specific namespace to simulate applying changes in a multi-asic environment - applier = generic_config_updater.change_applier.ChangeApplier(namespace="asic0") + mock_get_running_config.side_effect = mock_get_empty_running_config_side_effect + + # Instantiate ChangeApplier with a specific scope to simulate applying changes in a multi-asic environment + applier = generic_config_updater.change_applier.ChangeApplier(scope="asic0") # Prepare a change object or data that applier.apply would use, simulating a patch that requires non-empty tables change = MagicMock() diff --git a/tests/generic_config_updater/multiasic_generic_updater_test.py b/tests/generic_config_updater/multiasic_generic_updater_test.py index 4a55eb98be..5acdd391f0 100644 --- a/tests/generic_config_updater/multiasic_generic_updater_test.py +++ b/tests/generic_config_updater/multiasic_generic_updater_test.py @@ -19,7 +19,7 @@ class TestMultiAsicPatchApplier(unittest.TestCase): @patch('generic_config_updater.gu_common.PatchWrapper.simulate_patch') @patch('generic_config_updater.generic_updater.ChangeApplier') def test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_patch, mock_get_config, mock_get_empty_tables): - namespace = "asic0" + scope = "asic0" 
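+        # The scope set here is expected to flow through PatchApplier ->
+        # ChangeApplier -> ConfigDBConnector(namespace=scope); the
+        # assert_called_once_with checks at the end of this test (and of the
+        # apply_change tests above) pin that chain down, e.g.:
+        #   mock_ConfigDBConnector.assert_called_once_with(use_unix_socket_path=True, namespace="asic0")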
 patch_data = jsonpatch.JsonPatch([
     {
         "op": "add",
@@ -158,10 +158,10 @@ def test_apply_patch_specific_namespace(self, mock_ChangeApplier, mock_simulate_
         }
     }
 
-        patch_applier = generic_config_updater.generic_updater.PatchApplier(namespace=namespace)
+        patch_applier = generic_config_updater.generic_updater.PatchApplier(scope=scope)
 
         # Apply the patch and verify
         patch_applier.apply(patch_data)
 
         # Assertions to ensure the namespace is correctly used in underlying calls
-        mock_ChangeApplier.assert_called_once_with(namespace=namespace)
+        mock_ChangeApplier.assert_called_once_with(scope=scope)
diff --git a/tests/installer_bootloader_aboot_test.py b/tests/installer_bootloader_aboot_test.py
index fbe580a638..be09223b5f 100644
--- a/tests/installer_bootloader_aboot_test.py
+++ b/tests/installer_bootloader_aboot_test.py
@@ -8,6 +8,7 @@
 # Constants
 image_dir = f'{aboot.IMAGE_DIR_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde'
+image_chainloader = f'{image_dir}/.sonic-boot.swi'
 exp_image = f'{aboot.IMAGE_PREFIX}expeliarmus-{aboot.IMAGE_DIR_PREFIX}abcde'
 image_dirs = [image_dir]
 
@@ -45,15 +46,27 @@ def test_get_installed_images():
     assert bootloader.get_installed_images() == [exp_image]
 
-@patch("sonic_installer.bootloader.aboot.re.search")
-def test_get_next_image(re_search_patch):
+def test_get_next_image():
     bootloader = aboot.AbootBootloader()
-    bootloader._boot_config_read = Mock(return_value={'SWI': None})
+
+    # Test missing boot-config
+    bootloader._boot_config_read()
+
+    # Test missing SWI value
+    bootloader._boot_config_read = Mock(return_value={})
+    assert bootloader.get_next_image() == ''
 
     # Test conversion of image dir to image name
-    re_search_patch().group = Mock(return_value=image_dir)
+    swi = f'flash:{image_chainloader}'
+    bootloader._boot_config_read = Mock(return_value={'SWI': swi})
     assert bootloader.get_next_image() == exp_image
 
+    # Test some other image
+    next_image = 'EOS.swi'
+    bootloader._boot_config_read = Mock(return_value={'SWI': f'flash:{next_image}'})
+    assert bootloader.get_next_image() == next_image
+
+
 def test_install_image():
     image_path = 'sonic'
     env = os.environ.copy()
diff --git a/tests/ip_config_test.py b/tests/ip_config_test.py
index ffa2931093..9338d341e4 100644
--- a/tests/ip_config_test.py
+++ b/tests/ip_config_test.py
@@ -130,6 +130,35 @@ def test_add_del_interface_valid_ipv4(self):
         assert mock_run_command.call_count == 1
         assert ('Eth36.10', '32.11.10.1/24') not in db.cfgdb.get_table('VLAN_SUB_INTERFACE')
 
+        # config int ip add vlan1000 10.11.20.1/24 as secondary
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Vlan1000", "10.11.20.1/24", "--secondary"], obj=obj)
+        assert result.exit_code == 0
+        assert ('Vlan1000', '10.11.20.1/24') in db.cfgdb.get_table('VLAN_INTERFACE')
+        assert db.cfgdb.get_table('VLAN_INTERFACE')[('Vlan1000', '10.11.20.1/24')]['secondary'] == "true"
+
+        # config int ip add vlan2000 10.21.20.1/24 as secondary
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Vlan2000", "10.21.20.1/24", "-s"], obj=obj)
+        assert result.exit_code == 0
+        assert ('Vlan2000', '10.21.20.1/24') in db.cfgdb.get_table('VLAN_INTERFACE')
+        assert db.cfgdb.get_table('VLAN_INTERFACE')[('Vlan2000', '10.21.20.1/24')]['secondary'] == "true"
+
+        # config int ip add vlan4000 10.16.20.1/24 as primary and make sure secondary is not present in table
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Vlan4000", "10.16.20.1/24"], obj=obj)
+        assert
result.exit_code == 0 + assert ('Vlan4000', '10.16.20.1/24') in db.cfgdb.get_table('VLAN_INTERFACE') + assert 'secondary' not in db.cfgdb.get_table('VLAN_INTERFACE')[('Vlan4000', '10.16.20.1/24')] + + # create vlan 500 + result = runner.invoke(config.config.commands["vlan"].commands["add"], ["500"], obj=db) + # config int ip add vlan500 10.21.20.1/24 as secondary - should fail as vlan500 is not added in table + ERR_MSG = "Error: Primary for the interface Vlan500 is not set, so skipping adding the interface" + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Vlan500", "10.21.20.1/24", "--secondary"], obj=obj) + assert result.exit_code != 0 + assert ERR_MSG in result.output def test_add_interface_invalid_ipv4(self): db = Db() diff --git a/tests/ip_show_routes_multi_asic_test.py b/tests/ip_show_routes_multi_asic_test.py index bfce5e539d..08bea36910 100644 --- a/tests/ip_show_routes_multi_asic_test.py +++ b/tests/ip_show_routes_multi_asic_test.py @@ -1,10 +1,11 @@ import os from importlib import reload - import pytest + from . import show_ip_route_common from click.testing import CliRunner + test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") diff --git a/tests/ip_show_routes_voq_chassis_test.py b/tests/ip_show_routes_voq_chassis_test.py new file mode 100644 index 0000000000..de7f7ade8f --- /dev/null +++ b/tests/ip_show_routes_voq_chassis_test.py @@ -0,0 +1,112 @@ +import os +from importlib import reload +import pytest +from unittest import mock + +import show.main as show +from . import show_ip_route_common +import utilities_common.multi_asic as multi_asic_util +from click.testing import CliRunner + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") + + +class TestMultiAsicVoqLcShowIpRouteDisplayAllCommands(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + from .mock_tables import mock_multi_asic + reload(mock_multi_asic) + from .mock_tables import dbconnector + dbconnector.load_namespace_config() + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_lc( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["-dfrontend"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_remote_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_remote_lc( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["-dfrontend"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_REMOTE_LC + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + 
['ip_route_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_lc_def_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC_DEFAULT_ROUTE + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_remote_lc'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + def test_voq_chassis_remote_lc_default_route( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_REMOTE_LC_DEFAULT_ROUTE + + @pytest.mark.parametrize('setup_multi_asic_bgp_instance', + ['ip_route_lc_2'], indirect=['setup_multi_asic_bgp_instance']) + @mock.patch("sonic_py_common.device_info.is_voq_chassis", mock.MagicMock(return_value=True)) + @mock.patch.object(multi_asic_util.MultiAsic, "get_ns_list_based_on_options", + mock.MagicMock(return_value=["asic0", "asic1"])) + def test_voq_chassis_lc_def_route_2( + self, + setup_ip_route_commands, + setup_multi_asic_bgp_instance): + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["ip"].commands["route"], ["0.0.0.0/0"]) + print("{}".format(result.output)) + assert result.exit_code == 0 + assert result.output == show_ip_route_common.SHOW_IP_ROUTE_LC_DEFAULT_ROUTE_2 + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + from .mock_tables import mock_single_asic + reload(mock_single_asic) diff --git a/tests/ldap_input/assert_show_output.py b/tests/ldap_input/assert_show_output.py new file mode 100644 index 0000000000..c3ecaf472f --- /dev/null +++ b/tests/ldap_input/assert_show_output.py @@ -0,0 +1,20 @@ +""" +Module holding the correct values for show CLI command outputs for the ldap_test.py +""" + +show_ldap_global = """\ +BIND DN BIND PASSWORD BIND TIMEOUT VERSION BASE DN PORT TIMEOUT +---------------------------- --------------- -------------- --------- ----------------- ------ --------- +cn=ldapadm,dc=test1,dc=test2 password 3 3 dc=test1,dc=test2 389 2 +""" + +show_ldap_server = """\ +HOSTNAME PRIORITY +---------- ---------- +10.0.0.1 1 +""" + +show_ldap_server_deleted = """\ +HOSTNAME PRIORITY +---------- ---------- +""" diff --git a/tests/ldap_input/default_config_db.json b/tests/ldap_input/default_config_db.json new file mode 100644 index 0000000000..95aed20118 --- /dev/null +++ b/tests/ldap_input/default_config_db.json @@ -0,0 +1,11 @@ +{ + "LDAP|GLOBAL": { + "bind_dn": "cn=ldapadm,dc=test1,dc=test2", + "base_dn": "dc=test1,dc=test2", + "bind_password": "password", + "timeout": "2", + "bind_timeout": "3", + "version" : 3, + "port" : 389 + } +} diff --git a/tests/ldap_input/server_config_db.json b/tests/ldap_input/server_config_db.json new file mode 100644 index 0000000000..2fdea84748 --- /dev/null +++ b/tests/ldap_input/server_config_db.json @@ -0,0 
+1,5 @@ +{ + "LDAP_SERVER|10.0.0.1": { + "priority": 1 + } +} diff --git a/tests/ldap_test.py b/tests/ldap_test.py new file mode 100644 index 0000000000..3ac824b446 --- /dev/null +++ b/tests/ldap_test.py @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +import os +import logging +import show.main as show +import config.main as config + +from .ldap_input import assert_show_output +from utilities_common.db import Db +from click.testing import CliRunner +from .mock_tables import dbconnector + +logger = logging.getLogger(__name__) +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, "ldap_input") + +SUCCESS = 0 +ERROR = 1 +INVALID_VALUE = 'INVALID' +EXP_GOOD_FLOW = 1 +EXP_BAD_FLOW = 0 + + +class TestLdap: + @classmethod + def setup_class(cls): + logger.info("SETUP") + os.environ['UTILITIES_UNIT_TESTING'] = "2" + + @classmethod + def teardown_class(cls): + logger.info("TEARDOWN") + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + dbconnector.dedicated_dbs['CONFIG_DB'] = None + + def verify_ldap_global_output(self, db, runner, output, expected=EXP_GOOD_FLOW): + result = runner.invoke(show.cli.commands["ldap"].commands["global"], [], obj=db) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + logger.info("\n" + result.output) + logger.info(result.exit_code) + + if expected: # good flow expected (default) + assert result.exit_code == SUCCESS + assert result.output == output + else: # bad flow expected + assert result.exit_code == ERROR + + def verify_ldap_server_output(self, db, runner, output, expected=EXP_GOOD_FLOW): + result = runner.invoke(show.cli.commands["ldap-server"], [], obj=db) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + logger.info("\n" + result.output) + logger.info(result.exit_code) + + if expected: # good flow expected (default) + assert result.exit_code == SUCCESS + assert result.output == output + else: # bad flow expected + assert result.exit_code == ERROR + + def ldap_global_set_policy(self, runner, db, attr, value, expected=EXP_GOOD_FLOW): + result = runner.invoke( + config.config.commands["ldap"].commands["global"].commands[attr], + [value], obj=db + ) + if expected: # good flow expected (default) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + else: # bad flow expected + assert result.exit_code == ERROR + + def ldap_server_set_policy(self, runner, db, value, expected=EXP_GOOD_FLOW): + result = runner.invoke( + config.config.commands["ldap-server"].commands["add"], + value, obj=db + ) + + if expected: # good flow expected (default) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + else: # bad flow expected + assert result.exit_code == ERROR + + def ldap_server_del_policy(self, runner, db, value, expected=EXP_GOOD_FLOW): + result = runner.invoke( + config.config.commands["ldap-server"].commands["delete"], + value, obj=db + ) + if expected: # good flow expected (default) + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + else: # bad flow expected + assert result.exit_code == ERROR + + # LDAP + + def test_ldap_global_feature_enabled(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'default_config_db.json') + db = Db() + runner = CliRunner() + + self.ldap_global_set_policy(runner, db, "base-dn", "dc=test1,dc=test2") + 
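+        # Each ldap_global_set_policy call below drives the CLI as an operator
+        # would, e.g.:
+        #   config ldap global bind-dn "cn=ldapadm,dc=test1,dc=test2"
+        # the attr argument selects the Click subcommand under
+        # config.config.commands["ldap"].commands["global"], and value is
+        # passed as that subcommand's single argument.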
self.ldap_global_set_policy(runner, db, "bind-dn", "cn=ldapadm,dc=test1,dc=test2") + self.ldap_global_set_policy(runner, db, "bind-password", "password") + self.ldap_global_set_policy(runner, db, "bind-timeout", "3") + self.ldap_global_set_policy(runner, db, "port", "389") + self.ldap_global_set_policy(runner, db, "timeout", "2") + self.ldap_global_set_policy(runner, db, "version", "3") + + self.verify_ldap_global_output(db, runner, assert_show_output.show_ldap_global) + + def test_ldap_server(self): + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'server_config_db.json') + db = Db() + runner = CliRunner() + + self.ldap_server_set_policy(runner, db, ["10.0.0.1", "--priority", "1"]) + self.verify_ldap_server_output(db, runner, assert_show_output.show_ldap_server) + + self.ldap_server_del_policy(runner, db, ["10.0.0.1"]) + self.verify_ldap_server_output(db, runner, assert_show_output.show_ldap_server_deleted) diff --git a/tests/lldp_test.py b/tests/lldp_test.py index 89177338e0..1d6e55152c 100644 --- a/tests/lldp_test.py +++ b/tests/lldp_test.py @@ -2,6 +2,7 @@ from click.testing import CliRunner from utilities_common.general import load_module_from_source +from importlib import reload test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -83,6 +84,22 @@ def test_get_info(self): output = lldp.get_summary_output(lldp_detail_info=True) assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n') + def test_get_info_multi_asic(self): + from .mock_tables import mock_multi_asic + from .mock_tables import dbconnector + reload(mock_multi_asic) + dbconnector.load_namespace_config() + lldp = lldpshow.Lldpshow() + from .mock_tables import mock_single_asic + reload(mock_single_asic) + dbconnector.load_namespace_config() + lldp.lldp_instance = [''] + lldp.lldpraw = expected_lldpctl_xml_output + lldp.get_info(lldp_detail_info=True, lldp_port='Ethernet0') + lldp.parse_info(lldp_detail_info=True) + output = lldp.get_summary_output(lldp_detail_info=True) + assert output.strip('\n') == expected_lldpctl_xml_output[0].strip('\n') + @classmethod def teardown_class(cls): print("TEARDOWN") diff --git a/tests/mmuconfig_input/mmuconfig_test_vectors.py b/tests/mmuconfig_input/mmuconfig_test_vectors.py index c20a964516..1d72ed6725 100644 --- a/tests/mmuconfig_input/mmuconfig_test_vectors.py +++ b/tests/mmuconfig_input/mmuconfig_test_vectors.py @@ -83,30 +83,267 @@ """ +show_mmu_config_asic0 = """\ +Pool for namespace asic0: ingress_lossy_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic0: ingress_lossless_pool_hbm +---- --------- +mode static +size 139458240 +type ingress +---- --------- + +Profile for namespace asic0: ingress_lossy_profile +---------- ------------------ +dynamic_th 3 +pool ingress_lossy_pool +size 0 +---------- ------------------ + +Profile for namespace asic0: ingress_lossless_profile_hbm +--------- ------------------------- +static_th 12121212 +pool ingress_lossless_pool_hbm +size 0 +--------- ------------------------- + +""" + +show_mmu_config_asic1_verbose = """\ +Pool for namespace asic1: ingress_lossless_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic1: egress_lossless_pool +---- -------- +mode dynamic +size 13945824 +type egress +---- -------- + +Pool for namespace asic1: egress_lossy_pool +---- ------- +mode dynamic +type egress +---- ------- + +Total pools: 3 + + +Profile for namespace asic1: alpha_profile +------------- 
--------------------- +dynamic_th 0 +pool ingress_lossless_pool +headroom_type dynamic +------------- --------------------- + +Profile for namespace asic1: headroom_profile +---------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +xon 18432 +xoff 32768 +size 51200 +---------- --------------------- + +Profile for namespace asic1: egress_lossless_profile +---------- -------------------- +dynamic_th 0 +pool egress_lossless_pool +size 0 +---------- -------------------- + +Profile for namespace asic1: egress_lossy_profile +---------- ----------------- +dynamic_th 0 +pool egress_lossy_pool +size 0 +---------- ----------------- + +Total profiles: 4 +""" + +show_mmu_config_all_masic = """\ +Pool for namespace asic0: ingress_lossy_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic0: ingress_lossless_pool_hbm +---- --------- +mode static +size 139458240 +type ingress +---- --------- + +Profile for namespace asic0: ingress_lossy_profile +---------- ------------------ +dynamic_th 3 +pool ingress_lossy_pool +size 0 +---------- ------------------ + +Profile for namespace asic0: ingress_lossless_profile_hbm +--------- ------------------------- +static_th 12121212 +pool ingress_lossless_pool_hbm +size 0 +--------- ------------------------- + +Pool for namespace asic1: ingress_lossless_pool +---- ------- +mode dynamic +type ingress +---- ------- + +Pool for namespace asic1: egress_lossless_pool +---- -------- +mode dynamic +size 13945824 +type egress +---- -------- + +Pool for namespace asic1: egress_lossy_pool +---- ------- +mode dynamic +type egress +---- ------- + +Profile for namespace asic1: alpha_profile +------------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +headroom_type dynamic +------------- --------------------- + +Profile for namespace asic1: headroom_profile +---------- --------------------- +dynamic_th 0 +pool ingress_lossless_pool +xon 18432 +xoff 32768 +size 51200 +---------- --------------------- + +Profile for namespace asic1: egress_lossless_profile +---------- -------------------- +dynamic_th 0 +pool egress_lossless_pool +size 0 +---------- -------------------- + +Profile for namespace asic1: egress_lossy_profile +---------- ----------------- +dynamic_th 0 +pool egress_lossy_pool +size 0 +---------- ----------------- + +""" + testData = { 'mmuconfig_list' : {'cmd' : ['show'], 'args' : [], 'rc' : 0, 'rc_output': show_mmu_config }, - 'mmu_cfg_static_th' : {'cmd' : ['config'], - 'args' : ['-p', 'ingress_lossless_profile_hbm', '-s', '12121213'], - 'rc' : 0, - 'db_table' : 'BUFFER_PROFILE', - 'cmp_args' : ['ingress_lossless_profile_hbm,static_th,12121213'], - 'rc_msg' : '' - }, + 'mmu_cfg_static_th': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', '-s', '12121213'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': [',ingress_lossless_profile_hbm,static_th,12121213'], + 'rc_msg': '' + }, 'mmu_cfg_alpha' : {'cmd' : ['config'], 'args' : ['-p', 'alpha_profile', '-a', '2'], 'rc' : 0, 'db_table' : 'BUFFER_PROFILE', - 'cmp_args' : ['alpha_profile,dynamic_th,2'], + 'cmp_args': [',alpha_profile,dynamic_th,2'], 'rc_msg' : '' }, - 'mmu_cfg_alpha_invalid' : {'cmd' : ['config'], - 'args' : ['-p', 'alpha_profile', '-a', '12'], - 'rc' : 2, - 'rc_msg' : 'Usage: mmu [OPTIONS]\nTry "mmu --help" for help.\n\nError: Invalid value for "-a": 12 is not in the valid range of -8 to 8.\n' - } - + 'mmu_cfg_alpha_invalid': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '12'], + 'rc': 2, + 'rc_msg': 
('Usage: mmu [OPTIONS]\nTry "mmu --help" for help.\n' + '\nError: Invalid value for "-a": 12 is not in the ' + 'valid range of -8 to 8.\n') + }, + 'mmu_cfg_list_one_masic': {'cmd': ['show'], + 'args': ['-n', 'asic0'], + 'rc': 0, + 'rc_output': show_mmu_config_asic0 + }, + 'mmu_cfg_list_one_verbose_masic': {'cmd': ['show'], + 'args': ['-n', 'asic1', '-vv'], + 'rc': 0, + 'rc_output': show_mmu_config_asic1_verbose + }, + 'mmu_cfg_list_all_masic': {'cmd': ['show'], + 'args': [], + 'rc': 0, + 'rc_output': show_mmu_config_all_masic + }, + 'mmu_cfg_alpha_one_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '2', '-n', 'asic0'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,alpha_profile,dynamic_th,2'], + 'rc_msg': '' + }, + 'mmu_cfg_alpha_all_verbose_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '2', '-vv'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,alpha_profile,dynamic_th,2', + 'asic1,alpha_profile,dynamic_th,2'], + 'rc_msg': ('Setting alpha_profile dynamic_th value ' + 'to 2 for namespace asic0\n' + 'Setting alpha_profile dynamic_th value ' + 'to 2 for namespace asic1\n') + }, + 'mmu_cfg_static_th_one_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', + '-s', '12121215', '-n', 'asic0'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': ['asic0,ingress_lossless_profile_hbm,static_th,12121215'], + 'rc_msg': '' + }, + 'mmu_cfg_static_th_all_verbose_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', + '-s', '12121214', '-vv'], + 'rc': 0, + 'db_table': 'BUFFER_PROFILE', + 'cmp_args': [('asic0,ingress_lossless_profile_hbm,' + 'static_th,12121214'), + ('asic1,ingress_lossless_profile_hbm,' + 'static_th,12121214')], + 'rc_msg': ('Setting ingress_lossless_profile_hbm static_th ' + 'value to 12121214 for namespace asic0\n' + 'Setting ingress_lossless_profile_hbm static_th ' + 'value to 12121214 for namespace asic1\n') + }, + 'mmu_cfg_alpha_invalid_masic': {'cmd': ['config'], + 'args': ['-p', 'alpha_profile', '-a', '12'], + 'rc': 2, + 'rc_msg': ('Usage: mmu [OPTIONS]\n' + 'Try "mmu --help" for help.\n\n' + 'Error: Invalid value for "-a": 12 ' + 'is not in the valid range of -8 to 8.\n') + }, + 'mmu_cfg_static_th_invalid_masic': {'cmd': ['config'], + 'args': ['-p', 'ingress_lossless_profile_hbm', '-s', '-1'], + 'rc': 2, + 'rc_msg': ('Usage: mmu [OPTIONS]\n' + 'Try "mmu --help" for help.\n\n' + 'Error: Invalid value for "-s": ' + '-1 is smaller than the minimum valid value 0.\n') + } } diff --git a/tests/mmuconfig_test.py b/tests/mmuconfig_test.py index 7218270e36..03a849eed5 100644 --- a/tests/mmuconfig_test.py +++ b/tests/mmuconfig_test.py @@ -7,7 +7,7 @@ import config.main as config import show.main as show from utilities_common.db import Db -from .mmuconfig_input.mmuconfig_test_vectors import * +from .mmuconfig_input.mmuconfig_test_vectors import testData test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -16,24 +16,12 @@ sys.path.insert(0, modules_path) -class Testmmuconfig(object): +class TestMmuConfigBase(object): @classmethod def setup_class(cls): + print('SETUP') os.environ["PATH"] += os.pathsep + scripts_path os.environ['UTILITIES_UNIT_TESTING'] = "2" - print("SETUP") - - def test_mmu_show_config(self): - self.executor(testData['mmuconfig_list']) - - def test_mmu_alpha_config(self): - self.executor(testData['mmu_cfg_alpha']) - - def test_mmu_alpha_invalid_config(self): - 
self.executor(testData['mmu_cfg_alpha_invalid']) - - def test_mmu_staticth_config(self): - self.executor(testData['mmu_cfg_static_th']) def executor(self, input): runner = CliRunner() @@ -48,6 +36,7 @@ def executor(self, input): result = runner.invoke(exec_cmd, input['args']) exit_code = result.exit_code output = result.output + elif 'config' in input['cmd']: exec_cmd = config.config.commands["mmu"] result = runner.invoke(exec_cmd, input['args'], catch_exceptions=False) @@ -66,8 +55,8 @@ def executor(self, input): fd = open('/tmp/mmuconfig', 'r') cmp_data = json.load(fd) for args in input['cmp_args']: - profile, name, value = args.split(',') - assert(cmp_data[profile][name] == value) + namespace, profile, name, value = args.split(',') + assert(cmp_data[namespace][profile][name] == value) fd.close() if 'rc_msg' in input: @@ -76,7 +65,6 @@ def executor(self, input): if 'rc_output' in input: assert output == input['rc_output'] - @classmethod def teardown_class(cls): os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) @@ -84,3 +72,17 @@ def teardown_class(cls): if os.path.isfile('/tmp/mmuconfig'): os.remove('/tmp/mmuconfig') print("TEARDOWN") + + +class TestMmuConfig(TestMmuConfigBase): + def test_mmu_show_config(self): + self.executor(testData['mmuconfig_list']) + + def test_mmu_alpha_config(self): + self.executor(testData['mmu_cfg_alpha']) + + def test_mmu_alpha_invalid_config(self): + self.executor(testData['mmu_cfg_alpha_invalid']) + + def test_mmu_staticth_config(self): + self.executor(testData['mmu_cfg_static_th']) diff --git a/tests/mock_tables/appl_db.json b/tests/mock_tables/appl_db.json index e967caa758..d755f46428 100644 --- a/tests/mock_tables/appl_db.json +++ b/tests/mock_tables/appl_db.json @@ -305,6 +305,40 @@ "type": "dynamic", "vni": "200" }, + "_STP_VLAN_TABLE:Vlan500": { + "bridge_id": "8064b86a97e24e9c", + "max_age": "20", + "hello_time": "2", + "forward_delay": "15", + "hold_time": "1", + "root_bridge_id": "0064b86a97e24e9c", + "root_path_cost": "600", + "desig_bridge_id": "806480a235f281ec", + "root_port": "Root", + "root_max_age": "20", + "root_hello_time": "2", + "root_forward_delay": "15", + "stp_instance": "0", + "topology_change_count": "1", + "last_topology_change": "0" + }, + "_STP_VLAN_PORT_TABLE:Vlan500:Ethernet4": { + "port_num": "4", + "priority": "128", + "path_cost": "200", + "port_state": "FORWARDING", + "desig_cost": "400", + "desig_root": "0064b86a97e24e9c", + "desig_bridge": "806480a235f281ec", + "desig_port": "4", + "bpdu_sent": "10", + "bpdu_received": "15", + "config_bpdu_sent": "10", + "config_bpdu_received": "2", + "tc_sent": "15", + "tc_received": "5", + "root_guard_timer": "0" + }, "MUX_CABLE_TABLE:Ethernet32": { "state": "active" }, diff --git a/tests/mock_tables/asic0/config_db.json b/tests/mock_tables/asic0/config_db.json index 8b867bdc96..593170630f 100644 --- a/tests/mock_tables/asic0/config_db.json +++ b/tests/mock_tables/asic0/config_db.json @@ -303,5 +303,47 @@ "SYSLOG_CONFIG_FEATURE|database": { "rate_limit_interval": "222", "rate_limit_burst": "22222" + }, + "WRED_PROFILE|AZURE_LOSSLESS": { + "red_max_threshold": "2097152", + "ecn": "ecn_all", + "green_min_threshold": "1048576", + "red_min_threshold": "1048576", + "yellow_min_threshold": "1048576", + "green_max_threshold": "2097152", + "green_drop_probability": "5", + "yellow_max_threshold": "2097152", + "yellow_drop_probability": "5", + "red_drop_probability": "5" + }, + "DEVICE_NEIGHBOR|Ethernet4": { + "name": "Serverss0", + "port": "eth0" + }, + 
"QUEUE|Ethernet4|0": { + "scheduler": "[SCHEDULAR|scheduler.0]" + }, + "QUEUE|Ethernet4|1": { + "scheduler": "[SCHEDULAR|scheduler.0]", + "wred_profile": "AZURE_LOSSLESS" + }, + "BUFFER_POOL|ingress_lossy_pool": { + "mode": "dynamic", + "type": "ingress" + }, + "BUFFER_POOL|ingress_lossless_pool_hbm": { + "mode": "static", + "size": "139458240", + "type": "ingress" + }, + "BUFFER_PROFILE|ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossy_pool", + "size": "0" + }, + "BUFFER_PROFILE|ingress_lossless_profile_hbm": { + "static_th": "12121212", + "pool": "ingress_lossless_pool_hbm", + "size": "0" } } diff --git a/tests/mock_tables/asic0/counters_db.json b/tests/mock_tables/asic0/counters_db.json index 53e3b558a2..610662a019 100644 --- a/tests/mock_tables/asic0/counters_db.json +++ b/tests/mock_tables/asic0/counters_db.json @@ -2202,14 +2202,14 @@ "oid:0x1000000004005": "SAI_QUEUE_TYPE_UNICAST", "oid:0x1000000004006": "SAI_QUEUE_TYPE_UNICAST", "oid:0x1000000004007": "SAI_QUEUE_TYPE_UNICAST", - "oid:0x1000000004008": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004009": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004010": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004011": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004012": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004013": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004014": "SAI_QUEUE_TYPE_MULTICAST", - "oid:0x1000000004015": "SAI_QUEUE_TYPE_MULTICAST" + "oid:0x1000000004008": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004009": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004010": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004011": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004012": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004013": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004014": "SAI_QUEUE_TYPE_ALL", + "oid:0x1000000004015": "SAI_QUEUE_TYPE_ALL" }, "COUNTERS_FABRIC_PORT_NAME_MAP" : { "PORT0": "oid:0x1000000000143", @@ -2489,5 +2489,302 @@ "COUNTERS:oid:0x1600000000034d":{ "SAI_COUNTER_STAT_PACKETS": 200, "SAI_COUNTER_STAT_BYTES": 4000 + }, + "COUNTERS_BUFFER_POOL_NAME_MAP": { + "ingress_lossless_pool": "oid:0x18000000000c10" + }, + "COUNTERS_PG_NAME_MAP": { + "Enternet0:0": "oid:100000000b0f0", + "Enternet0:1": "oid:100000000b0f1", + "Enternet0:2": "oid:100000000b0f2", + "Enternet0:3": "oid:100000000b0f3", + "Enternet0:4": "oid:100000000b0f4", + "Enternet0:5": "oid:100000000b0f5", + "Enternet0:6": "oid:100000000b0f6", + "Enternet0:7": "oid:100000000b0f7", + "Enternet0:8": "oid:100000000b0f8", + "Enternet0:9": "oid:100000000b0f9", + "Enternet0:10": "oid:100000000b0fa", + "Enternet0:11": "oid:100000000b0fb", + "Enternet0:12": "oid:100000000b0fc", + "Enternet0:13": "oid:100000000b0fd", + "Enternet0:14": "oid:100000000b0fe", + "Enternet0:15": "oid:100000000b0ff", + "Enternet4:0": "oid:0x100000000b1f0", + "Enternet4:1": "oid:0x100000000b1f1", + "Enternet4:2": "oid:0x100000000b1f2", + "Enternet4:3": "oid:0x100000000b1f3", + "Enternet4:4": "oid:0x100000000b1f4", + "Enternet4:5": "oid:0x100000000b1f5", + "Enternet4:6": "oid:0x100000000b1f6", + "Enternet4:7": "oid:0x100000000b1f7", + "Enternet4:8": "oid:0x100000000b1f8", + "Enternet4:9": "oid:0x100000000b1f9", + "Enternet4:10": "oid:0x100000000b1fa", + "Enternet4:11": "oid:0x100000000b1fb", + "Enternet4:12": "oid:0x100000000b1fc", + "Enternet4:13": "oid:0x100000000b1fd", + "Enternet4:14": "oid:0x100000000b1fe", + "Enternet4:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000002", + "oid:100000000b0f1": "oid:0x1000000000002", + "oid:100000000b0f2": 
"oid:0x1000000000002", + "oid:100000000b0f3": "oid:0x1000000000002", + "oid:100000000b0f4": "oid:0x1000000000002", + "oid:100000000b0f5": "oid:0x1000000000002", + "oid:100000000b0f6": "oid:0x1000000000002", + "oid:100000000b0f7": "oid:0x1000000000002", + "oid:100000000b0f8": "oid:0x1000000000002", + "oid:100000000b0f9": "oid:0x1000000000002", + "oid:100000000b0fa": "oid:0x1000000000002", + "oid:100000000b0fb": "oid:0x1000000000002", + "oid:100000000b0fc": "oid:0x1000000000002", + "oid:100000000b0fd": "oid:0x1000000000002", + "oid:100000000b0fe": "oid:0x1000000000002", + "oid:100000000b0ff": "oid:0x1000000000002", + "oid:0x100000000b1f0": "oid:0x1000000000004", + "oid:0x100000000b1f1": "oid:0x1000000000004", + "oid:0x100000000b1f2": "oid:0x1000000000004", + "oid:0x100000000b1f3": "oid:0x1000000000004", + "oid:0x100000000b1f4": "oid:0x1000000000004", + "oid:0x100000000b1f5": "oid:0x1000000000004", + "oid:0x100000000b1f6": "oid:0x1000000000004", + "oid:0x100000000b1f7": "oid:0x1000000000004", + "oid:0x100000000b1f8": "oid:0x1000000000004", + "oid:0x100000000b1f9": "oid:0x1000000000004", + "oid:0x100000000b1fa": "oid:0x1000000000004", + "oid:0x100000000b1fb": "oid:0x1000000000004", + "oid:0x100000000b1fc": "oid:0x1000000000004", + "oid:0x100000000b1fd": "oid:0x1000000000004", + "oid:0x100000000b1fe": "oid:0x1000000000004", + "oid:0x100000000b1ff" : "oid:0x1000000000004" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + "oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + "oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, + "USER_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 100, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 100 + }, + "USER_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 101, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 101 + }, + "USER_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 102, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 102 + }, + "USER_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 103, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 103 + }, + "USER_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 104, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 104 + }, + "USER_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 105, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 105 + }, + "USER_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 106, + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 106 + }, + "USER_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 107, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 107 + }, + "USER_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 108, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 108 + }, + "USER_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 109, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 109 + }, + "USER_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 110, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 110 + }, + "USER_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 111, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 111 + }, + "USER_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 112, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 112 + }, + "USER_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 113, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 113 + }, + "USER_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 114, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 114 + }, + "USER_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 115, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 115 + }, + "USER_WATERMARKS:oid:0x100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 200, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 200 + }, + "USER_WATERMARKS:oid:0x100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 201, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 201 + }, + "USER_WATERMARKS:oid:0x100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 202, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 202 + }, + "USER_WATERMARKS:oid:0x100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 203, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 203 + }, + "USER_WATERMARKS:oid:0x100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 204, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 204 + }, + "USER_WATERMARKS:oid:0x100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 205, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 205 + }, + "USER_WATERMARKS:oid:0x100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 206, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 206 + }, + "USER_WATERMARKS:oid:0x100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 207, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 207 + }, + "USER_WATERMARKS:oid:0x100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 208, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 208 + }, + "USER_WATERMARKS:oid:0x100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 209, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 209 + }, + "USER_WATERMARKS:oid:0x100000000b1fa": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 210, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 210 + }, + "USER_WATERMARKS:oid:0x100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 211, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 211 + }, + "USER_WATERMARKS:oid:0x100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 212, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 212 + }, + "USER_WATERMARKS:oid:0x100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 213, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 213 + }, + "USER_WATERMARKS:oid:0x100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 214, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 214 + }, + "USER_WATERMARKS:oid:0x100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 215, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 215 + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fd": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" } } diff --git a/tests/mock_tables/asic0/database_config.json b/tests/mock_tables/asic0/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic0/database_config.json +++ b/tests/mock_tables/asic0/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/asic0/ip_route_lc.json b/tests/mock_tables/asic0/ip_route_lc.json new file mode 100644 index 0000000000..19cfd5e5f0 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_lc.json @@ -0,0 +1,66 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 8, + "internalNextHopNum": 4, + "internalNextHopActiveNum": 4, + "nexthopGroupId": 566, + "installedNexthopGroupId": 566, + "uptime": "04w0d11h", + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 2, + "interfaceName": "PortChannel1", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 4, + "interfaceName": "PortChannel5", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 5, + "interfaceName": "PortChannel9", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 3, + "interfaceName": "PortChannel13", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/ip_route_lc_2.json b/tests/mock_tables/asic0/ip_route_lc_2.json new file mode 100644 index 0000000000..8cadf1db22 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_lc_2.json @@ -0,0 +1,56 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthopGroupId": 2122, + "installedNexthopGroupId": 2122, + "uptime": "01:01:51", + "nexthops": [ + { + "flags": 3, + "fib": true, + "ip": "10.0.0.1", + "afi": "ipv4", + "interfaceIndex": 29, + "interfaceName": "PortChannel102", + "active": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "10.0.0.7", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": 
true, + "ip": "10.0.0.7", + "afi": "ipv4", + "interfaceIndex": 52, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/ip_route_remote_lc.json b/tests/mock_tables/asic0/ip_route_remote_lc.json new file mode 100644 index 0000000000..0e8f4a56c7 --- /dev/null +++ b/tests/mock_tables/asic0/ip_route_remote_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB0", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic0/state_db.json b/tests/mock_tables/asic0/state_db.json index 4f3f13c0ae..5ae87ea975 100644 --- a/tests/mock_tables/asic0/state_db.json +++ b/tests/mock_tables/asic0/state_db.json @@ -256,7 +256,12 @@ "FABRIC_PORT_TABLE|PORT0" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "79" + "REMOTE_PORT": "79", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "19.8", + "OLD_TX_DATA": "18490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT1" : { "STATUS": "down" @@ -264,7 +269,12 @@ "FABRIC_PORT_TABLE|PORT2" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "94" + "REMOTE_PORT": "94", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.8", + "OLD_TX_DATA": "24490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT3" : { "STATUS": "down" @@ -272,7 +282,12 @@ "FABRIC_PORT_TABLE|PORT4" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "85" + "REMOTE_PORT": "85", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.8", + "OLD_TX_DATA": "24490000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT5" : { "STATUS": "down" @@ -280,12 +295,22 @@ "FABRIC_PORT_TABLE|PORT6" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "84" + "REMOTE_PORT": "84", + 
"OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.3", + "OLD_TX_DATA": "24170000000", + "LAST_TIME": "1676672799" }, "FABRIC_PORT_TABLE|PORT7" : { "STATUS": "up", "REMOTE_MOD": "0", - "REMOTE_PORT": "93" + "REMOTE_PORT": "93", + "OLD_RX_RATE_AVG": "0", + "OLD_RX_DATA": "0", + "OLD_TX_RATE_AVG": "39.3", + "OLD_TX_DATA": "24190000000", + "LAST_TIME": "1676672799" }, "CHASSIS_MIDPLANE_TABLE|LINE-CARD0": { "ip_address": "127.0.0.1", diff --git a/tests/mock_tables/asic1/asic_db.json b/tests/mock_tables/asic1/asic_db.json new file mode 100644 index 0000000000..1a769b82b5 --- /dev/null +++ b/tests/mock_tables/asic1/asic_db.json @@ -0,0 +1,6 @@ +{ + "ASIC_STATE:SAI_OBJECT_TYPE_SWITCH:oid:0x21000000000000": { + "SAI_SWITCH_ATTR_INIT_SWITCH": "true", + "SAI_SWITCH_ATTR_SRC_MAC_ADDRESS": "DE:AD:BE:EF:CA:FE" + } +} diff --git a/tests/mock_tables/asic1/config_db.json b/tests/mock_tables/asic1/config_db.json index 56823ae113..5c1d9f344c 100644 --- a/tests/mock_tables/asic1/config_db.json +++ b/tests/mock_tables/asic1/config_db.json @@ -242,5 +242,60 @@ "SYSLOG_CONFIG_FEATURE|database": { "rate_limit_interval": "555", "rate_limit_burst": "55555" + }, + "WRED_PROFILE|AZURE_LOSSY": { + "red_max_threshold":"32760", + "red_min_threshold":"4095", + "yellow_max_threshold":"32760", + "yellow_min_threshold":"4095", + "green_max_threshold": "32760", + "green_min_threshold": "4095", + "yellow_drop_probability": "2" + }, + "DEVICE_NEIGHBOR|Ethernet0": { + "name": "Servers", + "port": "eth0" + }, + "QUEUE|Ethernet0|0": { + "scheduler": "[SCHEDULAR|scheduler.0]", + "wred_profile": "AZURE_LOSSLESS" + }, + "QUEUE|Ethernet0|1": { + "scheduler": "[SCHEDULAR|scheduler.0]" + }, + "BUFFER_POOL|ingress_lossless_pool": { + "mode": "dynamic", + "type": "ingress" + }, + "BUFFER_PROFILE|alpha_profile": { + "dynamic_th": "0", + "pool": "ingress_lossless_pool", + "headroom_type": "dynamic" + }, + "BUFFER_PROFILE|headroom_profile": { + "dynamic_th": "0", + "pool": "ingress_lossless_pool", + "xon": "18432", + "xoff": "32768", + "size": "51200" + }, + "BUFFER_POOL|egress_lossless_pool": { + "mode": "dynamic", + "size": "13945824", + "type": "egress" + }, + "BUFFER_PROFILE|egress_lossless_profile": { + "dynamic_th": "0", + "pool": "egress_lossless_pool", + "size": "0" + }, + "BUFFER_POOL|egress_lossy_pool": { + "mode": "dynamic", + "type": "egress" + }, + "BUFFER_PROFILE|egress_lossy_profile": { + "dynamic_th": "0", + "pool": "egress_lossy_pool", + "size": "0" } } diff --git a/tests/mock_tables/asic1/counters_db.json b/tests/mock_tables/asic1/counters_db.json index c364d8599e..1455f069c0 100644 --- a/tests/mock_tables/asic1/counters_db.json +++ b/tests/mock_tables/asic1/counters_db.json @@ -207,6 +207,213 @@ "Ethernet-BP256": "oid:0x1000000000b06", "Ethernet-BP260": "oid:0x1000000000b08" }, + "COUNTERS_PG_NAME_MAP": { + "Ethernet-BP256:0": "oid:100000000b0f0", + "Ethernet-BP256:1": "oid:100000000b0f1", + "Ethernet-BP256:2": "oid:100000000b0f2", + "Ethernet-BP256:3": "oid:100000000b0f3", + "Ethernet-BP256:4": "oid:100000000b0f4", + "Ethernet-BP256:5": "oid:100000000b0f5", + "Ethernet-BP256:6": "oid:100000000b0f6", + "Ethernet-BP256:7": "oid:100000000b0f7", + "Ethernet-BP256:8": "oid:100000000b0f8", + "Ethernet-BP256:9": "oid:100000000b0f9", + "Ethernet-BP256:10": "oid:100000000b0fa", + "Ethernet-BP256:11": "oid:100000000b0fb", + "Ethernet-BP256:12": "oid:100000000b0fc", + "Ethernet-BP256:13": "oid:100000000b0fd", + "Ethernet-BP256:14": "oid:100000000b0fe", + "Ethernet-BP256:15": "oid:100000000b0ff", + 
"Ethernet-BP260:0": "oid:0x100000000b1f0", + "Ethernet-BP260:1": "oid:0x100000000b1f1", + "Ethernet-BP260:2": "oid:0x100000000b1f2", + "Ethernet-BP260:3": "oid:0x100000000b1f3", + "Ethernet-BP260:4": "oid:0x100000000b1f4", + "Ethernet-BP260:5": "oid:0x100000000b1f5", + "Ethernet-BP260:6": "oid:0x100000000b1f6", + "Ethernet-BP260:7": "oid:0x100000000b1f7", + "Ethernet-BP260:8": "oid:0x100000000b1f8", + "Ethernet-BP260:9": "oid:0x100000000b1f9", + "Ethernet-BP260:10": "oid:0x100000000b1fa", + "Ethernet-BP260:11": "oid:0x100000000b1fb", + "Ethernet-BP260:12": "oid:0x100000000b1fc", + "Ethernet-BP260:13": "oid:0x100000000b1fd", + "Ethernet-BP260:14": "oid:0x100000000b1fe", + "Ethernet-BP260:15": "oid:0x100000000b1ff" + }, + "COUNTERS_PG_PORT_MAP": { + "oid:100000000b0f0": "oid:0x1000000000b06", + "oid:100000000b0f1": "oid:0x1000000000b06", + "oid:100000000b0f2": "oid:0x1000000000b06", + "oid:100000000b0f3": "oid:0x1000000000b06", + "oid:100000000b0f4": "oid:0x1000000000b06", + "oid:100000000b0f5": "oid:0x1000000000b06", + "oid:100000000b0f6": "oid:0x1000000000b06", + "oid:100000000b0f7": "oid:0x1000000000b06", + "oid:100000000b0f8": "oid:0x1000000000b06", + "oid:100000000b0f9": "oid:0x1000000000b06", + "oid:100000000b0fa": "oid:0x1000000000b06", + "oid:100000000b0fb": "oid:0x1000000000b06", + "oid:100000000b0fc": "oid:0x1000000000b06", + "oid:100000000b0fd": "oid:0x1000000000b06", + "oid:100000000b0fe": "oid:0x1000000000b06", + "oid:100000000b0ff": "oid:0x1000000000b06", + "oid:0x100000000b1f0": "oid:0x1000000000b08", + "oid:0x100000000b1f1": "oid:0x1000000000b08", + "oid:0x100000000b1f2": "oid:0x1000000000b08", + "oid:0x100000000b1f3": "oid:0x1000000000b08", + "oid:0x100000000b1f4": "oid:0x1000000000b08", + "oid:0x100000000b1f5": "oid:0x1000000000b08", + "oid:0x100000000b1f6": "oid:0x1000000000b08", + "oid:0x100000000b1f7": "oid:0x1000000000b08", + "oid:0x100000000b1f8": "oid:0x1000000000b08", + "oid:0x100000000b1f9": "oid:0x1000000000b08", + "oid:0x100000000b1fa": "oid:0x1000000000b08", + "oid:0x100000000b1fb": "oid:0x1000000000b08", + "oid:0x100000000b1fc": "oid:0x1000000000b08", + "oid:0x100000000b1fd": "oid:0x1000000000b08", + "oid:0x100000000b1fe": "oid:0x1000000000b08", + "oid:0x100000000b1ff" : "oid:0x1000000000b08" + }, + "COUNTERS_PG_INDEX_MAP": { + "oid:100000000b0f0": "0", + "oid:100000000b0f1": "1", + "oid:100000000b0f2": "2", + "oid:100000000b0f3": "3", + "oid:100000000b0f4": "4", + "oid:100000000b0f5": "5", + "oid:100000000b0f6": "6", + "oid:100000000b0f7": "7", + "oid:100000000b0f8": "8", + "oid:100000000b0f9": "9", + "oid:100000000b0fa": "10", + "oid:100000000b0fb": "11", + "oid:100000000b0fc": "12", + "oid:100000000b0fd": "13", + "oid:100000000b0fe": "14", + "oid:100000000b0ff": "15", + "oid:0x100000000b1f0": "0", + "oid:0x100000000b1f1": "1", + "oid:0x100000000b1f2": "2", + "oid:0x100000000b1f3": "3", + "oid:0x100000000b1f4": "4", + "oid:0x100000000b1f5": "5", + "oid:0x100000000b1f6": "6", + "oid:0x100000000b1f7": "7", + "oid:0x100000000b1f8": "8", + "oid:0x100000000b1f9": "9", + "oid:0x100000000b1fa": "10", + "oid:0x100000000b1fb": "11", + "oid:0x100000000b1fc": "12", + "oid:0x100000000b1fd": "13", + "oid:0x100000000b1fe": "14", + "oid:0x100000000b1ff" : "15" + }, + "COUNTERS_BUFFER_POOL_NAME_MAP": { + "ingress_lossless_pool": "oid:0x18000000000c10" + }, + "COUNTERS_QUEUE_PORT_MAP": { + "oid:0x100000000b100": "oid:0x1000000000b06", + "oid:0x100000000b101": "oid:0x1000000000b06", + "oid:0x100000000b102": "oid:0x1000000000b06", + "oid:0x100000000b103": "oid:0x1000000000b06", 
+ "oid:0x100000000b104": "oid:0x1000000000b06", + "oid:0x100000000b105": "oid:0x1000000000b06", + "oid:0x100000000b106": "oid:0x1000000000b06", + "oid:0x100000000b107": "oid:0x1000000000b06", + "oid:0x100000000b108": "oid:0x1000000000b06", + "oid:0x100000000b109": "oid:0x1000000000b06", + "oid:0x100000000b110": "oid:0x1000000000b06", + "oid:0x100000000b111": "oid:0x1000000000b06", + "oid:0x100000000b112": "oid:0x1000000000b06", + "oid:0x100000000b113": "oid:0x1000000000b06", + "oid:0x100000000b114": "oid:0x1000000000b06", + "oid:0x100000000b115": "oid:0x1000000000b06", + "oid:0x100000000b200": "oid:0x1000000000b08", + "oid:0x100000000b201": "oid:0x1000000000b08", + "oid:0x100000000b202": "oid:0x1000000000b08", + "oid:0x100000000b203": "oid:0x1000000000b08", + "oid:0x100000000b204": "oid:0x1000000000b08", + "oid:0x100000000b205": "oid:0x1000000000b08", + "oid:0x100000000b206": "oid:0x1000000000b08", + "oid:0x100000000b207": "oid:0x1000000000b08", + "oid:0x100000000b208": "oid:0x1000000000b08", + "oid:0x100000000b209": "oid:0x1000000000b08", + "oid:0x100000000b210": "oid:0x1000000000b08", + "oid:0x100000000b211": "oid:0x1000000000b08", + "oid:0x100000000b212": "oid:0x1000000000b08", + "oid:0x100000000b213": "oid:0x1000000000b08", + "oid:0x100000000b214": "oid:0x1000000000b08", + "oid:0x100000000b215": "oid:0x1000000000b08" + }, + "COUNTERS_QUEUE_TYPE_MAP": { + "oid:0x100000000b100": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b101": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b102": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b103": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b104": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b105": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b106": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b107": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b108": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b109": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b110": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b111": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b112": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b113": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b114": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b115": "SAI_QUEUE_TYPE_MULTICAST", + "oid:0x100000000b200": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b201": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b202": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b203": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b204": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b205": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b206": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b207": "SAI_QUEUE_TYPE_UNICAST", + "oid:0x100000000b208": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b209": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b210": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b211": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b212": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b213": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b214": "SAI_QUEUE_TYPE_ALL", + "oid:0x100000000b215": "SAI_QUEUE_TYPE_ALL" + }, + "COUNTERS_QUEUE_INDEX_MAP": { + "oid:0x100000000b100": "0", + "oid:0x100000000b101": "1", + "oid:0x100000000b102": "2", + "oid:0x100000000b103": "3", + "oid:0x100000000b104": "4", + "oid:0x100000000b105": "5", + "oid:0x100000000b106": "6", + "oid:0x100000000b107": "7", + "oid:0x100000000b108": "8", + "oid:0x100000000b109": "9", + "oid:0x100000000b110": "10", + "oid:0x100000000b111": "11", + "oid:0x100000000b112": "12", + "oid:0x100000000b113": "13", + "oid:0x100000000b114": "14", + "oid:0x100000000b115": "15", + "oid:0x100000000b200": "0", + 
"oid:0x100000000b201": "1", + "oid:0x100000000b202": "2", + "oid:0x100000000b203": "3", + "oid:0x100000000b204": "4", + "oid:0x100000000b205": "5", + "oid:0x100000000b206": "6", + "oid:0x100000000b207": "7", + "oid:0x100000000b208": "8", + "oid:0x100000000b209": "9", + "oid:0x100000000b210": "10", + "oid:0x100000000b211": "11", + "oid:0x100000000b212": "12", + "oid:0x100000000b213": "13", + "oid:0x100000000b214": "14", + "oid:0x100000000b215": "15" + }, "COUNTERS_LAG_NAME_MAP": { "PortChannel0001": "oid:0x60000000005a1", "PortChannel0002": "oid:0x60000000005a2", @@ -1160,5 +1367,313 @@ "COUNTERS:oid:0x1600000000034f":{ "SAI_COUNTER_STAT_PACKETS": 1000, "SAI_COUNTER_STAT_BYTES": 2000 + }, + "USER_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 100, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 100 + }, + "USER_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 101, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 101 + }, + "USER_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 102, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 102 + }, + "USER_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 103, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 103 + }, + "USER_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 104, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 104 + }, + "USER_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 105, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 105 + }, + "USER_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 106, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 106 + }, + "USER_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 107, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 107 + }, + "USER_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 108, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 108 + }, + "USER_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 109, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 109 + }, + "USER_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 110, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 110 + }, + "USER_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 111, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 111 + }, + "USER_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 112, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 112 + }, + "USER_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 113, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 113 + }, + "USER_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 114, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 114 + }, + "USER_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 115, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 115 + }, + 
"USER_WATERMARKS:oid:0x100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 200, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 200 + }, + "USER_WATERMARKS:oid:0x100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 201, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 201 + }, + "USER_WATERMARKS:oid:0x100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 202, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 202 + }, + "USER_WATERMARKS:oid:0x100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 203, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 203 + }, + "USER_WATERMARKS:oid:0x100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 204, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 204 + }, + "USER_WATERMARKS:oid:0x100000000b1f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 205, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 205 + }, + "USER_WATERMARKS:oid:0x100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 206, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 206 + }, + "USER_WATERMARKS:oid:0x100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 207, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 207 + }, + "USER_WATERMARKS:oid:0x100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 208, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 208 + }, + "USER_WATERMARKS:oid:0x100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 209, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 209 + }, + "USER_WATERMARKS:oid:0x100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 210, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 210 + }, + "USER_WATERMARKS:oid:0x100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 211, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 211 + }, + "USER_WATERMARKS:oid:0x100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 212, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 212 + }, + "USER_WATERMARKS:oid:0x100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 213, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 213 + }, + "USER_WATERMARKS:oid:0x100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 214, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 214 + }, + "USER_WATERMARKS:oid:0x100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": 215, + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": 215 + }, + "USER_WATERMARKS:oid:0x18000000000c10": { + "SAI_BUFFER_POOL_STAT_WATERMARK_BYTES": "3000", + "SAI_BUFFER_POOL_STAT_XOFF_ROOM_WATERMARK_BYTES": "432640" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f5": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b0ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f0": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "200", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "200" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f1": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "201", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "201" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f2": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "202", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "202" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f3": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "203", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "203" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f4": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "204", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "204" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f5": { + 
"SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "205", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "205" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f6": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "206", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "206" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f7": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "207", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "207" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f8": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "500", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "500" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1f9": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "501", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "501" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fa": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "502", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "502" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fb": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "503", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "503" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fc": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "504", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "504" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fd": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "505", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "505" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1fe": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "506", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "506" + }, + "PERSISTENT_WATERMARKS:oid:100000000b1ff": { + "SAI_INGRESS_PRIORITY_GROUP_STAT_SHARED_WATERMARK_BYTES": "507", + "SAI_INGRESS_PRIORITY_GROUP_STAT_XOFF_ROOM_WATERMARK_BYTES": "507" + }, + "USER_WATERMARKS:oid:0x100000000b100": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2057328" + }, + "USER_WATERMARKS:oid:0x100000000b101": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + "USER_WATERMARKS:oid:0x100000000b102": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b103": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b104": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b105": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b106": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b107": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "28" + }, + "USER_WATERMARKS:oid:0x100000000b108": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" + }, + "USER_WATERMARKS:oid:0x100000000b109": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "0" + }, + "USER_WATERMARKS:oid:0x100000000b110": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "5" + }, + "USER_WATERMARKS:oid:0x100000000b111": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2057328" + }, + "USER_WATERMARKS:oid:0x100000000b112": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "208" + }, + "USER_WATERMARKS:oid:0x100000000b113": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "20" + }, + "USER_WATERMARKS:oid:0x100000000b114": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "228" + }, + "USER_WATERMARKS:oid:0x100000000b115": { + "SAI_QUEUE_STAT_SHARED_WATERMARK_BYTES": "2" } 
} diff --git a/tests/mock_tables/asic1/database_config.json b/tests/mock_tables/asic1/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic1/database_config.json +++ b/tests/mock_tables/asic1/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/asic1/ip_route_lc.json b/tests/mock_tables/asic1/ip_route_lc.json new file mode 100644 index 0000000000..e6c0063f90 --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/ip_route_lc_2.json b/tests/mock_tables/asic1/ip_route_lc_2.json new file mode 100644 index 0000000000..f7dff5d51b --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_lc_2.json @@ -0,0 +1,56 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 9, + "internalNextHopNum": 3, + "internalNextHopActiveNum": 3, + "nexthopGroupId": 2173, + "installedNexthopGroupId": 2173, + "uptime": "01:01:57", + "nexthops": [ + { + "flags": 5, + "ip": "10.0.0.1", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "10.0.0.1", + "afi": "ipv4", + 
"interfaceIndex": 52, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "ip": "10.0.0.7", + "afi": "ipv4", + "interfaceIndex": 29, + "interfaceName": "PortChannel106", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic1/ip_route_remote_lc.json b/tests/mock_tables/asic1/ip_route_remote_lc.json new file mode 100644 index 0000000000..e6c0063f90 --- /dev/null +++ b/tests/mock_tables/asic1/ip_route_remote_lc.json @@ -0,0 +1,106 @@ +{ + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 200, + "metric": 0, + "installed": true, + "table": 254, + "internalStatus": 16, + "internalFlags": 13, + "internalNextHopNum": 8, + "internalNextHopActiveNum": 8, + "nexthopGroupId": 465, + "installedNexthopGroupId": 465, + "uptime": "04w0d12h", + "nexthops": [ + { + "flags": 5, + "ip": "20.1.0.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.0.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.8.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.8.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.16.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.16.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + }, + { + "flags": 5, + "ip": "20.1.24.128", + "afi": "ipv4", + "active": true, + "recursive": true, + "weight": 1 + }, + { + "flags": 11, + "fib": true, + "ip": "20.1.24.128", + "afi": "ipv4", + "interfaceIndex": 26, + "interfaceName": "Ethernet-IB1", + "resolver": true, + "active": true, + "onLink": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/mock_tables/asic2/database_config.json b/tests/mock_tables/asic2/database_config.json index d3028b0b45..5fca7834f6 100644 --- a/tests/mock_tables/asic2/database_config.json +++ b/tests/mock_tables/asic2/database_config.json @@ -4,6 +4,11 @@ "hostname" : "127.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -51,6 +56,11 @@ "id" : 7, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/chassis_state_db.json b/tests/mock_tables/chassis_state_db.json index 5178c49ca0..365cbf80cd 100644 --- a/tests/mock_tables/chassis_state_db.json +++ b/tests/mock_tables/chassis_state_db.json @@ -4,6 +4,65 @@ }, "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + 
"LINECARD_PORT_STAT_MARK_TABLE|sonic-lc3": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet11/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 } - } \ No newline at end of file diff --git a/tests/mock_tables/config_db.json b/tests/mock_tables/config_db.json index af37538447..3deca74255 100644 --- a/tests/mock_tables/config_db.json +++ b/tests/mock_tables/config_db.json @@ -848,6 +848,8 @@ "FEATURE|lldp": { "state": "enabled", "auto_restart": "enabled", + "has_global_scope": "False", + "has_per_asic_scope": "True", "high_mem_alert": "disabled", "set_owner": "kube" }, @@ -1783,6 +1785,10 @@ "POLL_INTERVAL": "10000", "FLEX_COUNTER_STATUS": "enable" }, + "FLEX_COUNTER_TABLE|ENI": { + "POLL_INTERVAL": "1000", + "FLEX_COUNTER_STATUS": "enable" + }, "PFC_WD|Ethernet0": { "action": "drop", "detection_time": "600", @@ -2792,5 +2798,28 @@ "dhcpv6_servers": [ "fc02:2000::1" ] - } + }, + "STP|GLOBAL": { + "forward_delay": "15", + "hello_time": "2", + "max_age": "20", + "mode": "pvst", + "priority": "32768", + "rootguard_timeout": "30" + }, + "STP_PORT|Ethernet4": { + "bpdu_guard": "true", + "bpdu_guard_do_disable": "false", + "enabled": "true", + "portfast": "true", + "root_guard": "true", + "uplink_fast": "false" + }, + "STP_VLAN|Vlan500": { + "enabled": "true", + "forward_delay": "15", + "hello_time": "2", + "max_age": "20", + "priority": "32768" + } } diff --git a/tests/mock_tables/counters_db.json b/tests/mock_tables/counters_db.json index d62c34cb3c..9e553c2901 100644 --- a/tests/mock_tables/counters_db.json +++ b/tests/mock_tables/counters_db.json @@ -402,145 +402,169 @@ "SAI_QUEUE_STAT_BYTES": "0", "SAI_QUEUE_STAT_DROPPED_BYTES": "0", "SAI_QUEUE_STAT_DROPPED_PACKETS": "0", - "SAI_QUEUE_STAT_PACKETS": "0" + "SAI_QUEUE_STAT_PACKETS": "0", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "0" }, "COUNTERS:oid:0x15000000000658": { "SAI_QUEUE_STAT_BYTES": "43", "SAI_QUEUE_STAT_DROPPED_BYTES": "1", "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", - "SAI_QUEUE_STAT_PACKETS": "60" + "SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "1" }, "COUNTERS:oid:0x15000000000659": { "SAI_QUEUE_STAT_BYTES": "7", "SAI_QUEUE_STAT_DROPPED_BYTES": "21", "SAI_QUEUE_STAT_DROPPED_PACKETS": "39", - "SAI_QUEUE_STAT_PACKETS": "82" + "SAI_QUEUE_STAT_PACKETS": "82", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000065a": { "SAI_QUEUE_STAT_BYTES": "59", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", - "SAI_QUEUE_STAT_PACKETS": "11" + "SAI_QUEUE_STAT_PACKETS": "11", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "12" }, "COUNTERS:oid:0x1500000000065b": { "SAI_QUEUE_STAT_BYTES": "62", "SAI_QUEUE_STAT_DROPPED_BYTES": "40", "SAI_QUEUE_STAT_DROPPED_PACKETS": "35", - 
"SAI_QUEUE_STAT_PACKETS": "36" + "SAI_QUEUE_STAT_PACKETS": "36", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "11" }, "COUNTERS:oid:0x1500000000065c": { "SAI_QUEUE_STAT_BYTES": "91", "SAI_QUEUE_STAT_DROPPED_BYTES": "88", "SAI_QUEUE_STAT_DROPPED_PACKETS": "2", - "SAI_QUEUE_STAT_PACKETS": "49" + "SAI_QUEUE_STAT_PACKETS": "49", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "15" }, "COUNTERS:oid:0x1500000000065d": { "SAI_QUEUE_STAT_BYTES": "17", "SAI_QUEUE_STAT_DROPPED_BYTES": "74", "SAI_QUEUE_STAT_DROPPED_PACKETS": "94", - "SAI_QUEUE_STAT_PACKETS": "33" + "SAI_QUEUE_STAT_PACKETS": "33", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "17" }, "COUNTERS:oid:0x1500000000065e": { "SAI_QUEUE_STAT_BYTES": "71", "SAI_QUEUE_STAT_DROPPED_BYTES": "33", "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", - "SAI_QUEUE_STAT_PACKETS": "40" + "SAI_QUEUE_STAT_PACKETS": "40", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "73" }, "COUNTERS:oid:0x15000000000667": { "SAI_QUEUE_STAT_BYTES": "8", "SAI_QUEUE_STAT_DROPPED_BYTES": "78", "SAI_QUEUE_STAT_DROPPED_PACKETS": "93", - "SAI_QUEUE_STAT_PACKETS": "54" + "SAI_QUEUE_STAT_PACKETS": "54", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "29" }, "COUNTERS:oid:0x15000000000668": { "SAI_QUEUE_STAT_BYTES": "96", "SAI_QUEUE_STAT_DROPPED_BYTES": "9", "SAI_QUEUE_STAT_DROPPED_PACKETS": "74", - "SAI_QUEUE_STAT_PACKETS": "83" + "SAI_QUEUE_STAT_PACKETS": "83", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "42" }, "COUNTERS:oid:0x15000000000669": { "SAI_QUEUE_STAT_BYTES": "60", "SAI_QUEUE_STAT_DROPPED_BYTES": "31", "SAI_QUEUE_STAT_DROPPED_PACKETS": "61", - "SAI_QUEUE_STAT_PACKETS": "15" + "SAI_QUEUE_STAT_PACKETS": "15", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "54" }, "COUNTERS:oid:0x1500000000066a": { "SAI_QUEUE_STAT_BYTES": "52", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", - "SAI_QUEUE_STAT_PACKETS": "45" + "SAI_QUEUE_STAT_PACKETS": "45", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000066b": { "SAI_QUEUE_STAT_BYTES": "88", "SAI_QUEUE_STAT_DROPPED_BYTES": "52", "SAI_QUEUE_STAT_DROPPED_PACKETS": "89", - "SAI_QUEUE_STAT_PACKETS": "55" + "SAI_QUEUE_STAT_PACKETS": "55", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "28" }, "COUNTERS:oid:0x1500000000066c": { "SAI_QUEUE_STAT_BYTES": "70", "SAI_QUEUE_STAT_DROPPED_BYTES": "79", "SAI_QUEUE_STAT_DROPPED_PACKETS": "95", - "SAI_QUEUE_STAT_PACKETS": "14" + "SAI_QUEUE_STAT_PACKETS": "14", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "13" }, "COUNTERS:oid:0x1500000000066d": { "SAI_QUEUE_STAT_BYTES": "60", "SAI_QUEUE_STAT_DROPPED_BYTES": "81", "SAI_QUEUE_STAT_DROPPED_PACKETS": "66", - "SAI_QUEUE_STAT_PACKETS": "68" + "SAI_QUEUE_STAT_PACKETS": "68", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "22" }, "COUNTERS:oid:0x1500000000066e": { "SAI_QUEUE_STAT_BYTES": "4", "SAI_QUEUE_STAT_DROPPED_BYTES": "76", "SAI_QUEUE_STAT_DROPPED_PACKETS": "48", - "SAI_QUEUE_STAT_PACKETS": "63" + "SAI_QUEUE_STAT_PACKETS": "63", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "53" }, "COUNTERS:oid:0x15000000000677": { "SAI_QUEUE_STAT_BYTES": "73", "SAI_QUEUE_STAT_DROPPED_BYTES": "74", "SAI_QUEUE_STAT_DROPPED_PACKETS": "77", - "SAI_QUEUE_STAT_PACKETS": "41" + "SAI_QUEUE_STAT_PACKETS": "41", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "67" }, "COUNTERS:oid:0x15000000000678": { "SAI_QUEUE_STAT_BYTES": "21", "SAI_QUEUE_STAT_DROPPED_BYTES": "54", "SAI_QUEUE_STAT_DROPPED_PACKETS": "56", - "SAI_QUEUE_STAT_PACKETS": "60" + "SAI_QUEUE_STAT_PACKETS": "60", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "79" }, 
"COUNTERS:oid:0x15000000000679": { "SAI_QUEUE_STAT_BYTES": "31", "SAI_QUEUE_STAT_DROPPED_BYTES": "39", "SAI_QUEUE_STAT_DROPPED_PACKETS": "12", - "SAI_QUEUE_STAT_PACKETS": "57" + "SAI_QUEUE_STAT_PACKETS": "57", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "35" }, "COUNTERS:oid:0x1500000000067a": { "SAI_QUEUE_STAT_BYTES": "96", "SAI_QUEUE_STAT_DROPPED_BYTES": "98", "SAI_QUEUE_STAT_DROPPED_PACKETS": "70", - "SAI_QUEUE_STAT_PACKETS": "41" + "SAI_QUEUE_STAT_PACKETS": "41", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "47" }, "COUNTERS:oid:0x1500000000067b": { "SAI_QUEUE_STAT_BYTES": "49", "SAI_QUEUE_STAT_DROPPED_BYTES": "36", "SAI_QUEUE_STAT_DROPPED_PACKETS": "63", - "SAI_QUEUE_STAT_PACKETS": "18" + "SAI_QUEUE_STAT_PACKETS": "18", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "62" }, "COUNTERS:oid:0x1500000000067c": { "SAI_QUEUE_STAT_BYTES": "90", "SAI_QUEUE_STAT_DROPPED_BYTES": "15", "SAI_QUEUE_STAT_DROPPED_PACKETS": "3", - "SAI_QUEUE_STAT_PACKETS": "99" + "SAI_QUEUE_STAT_PACKETS": "99", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "19" }, "COUNTERS:oid:0x1500000000067d": { "SAI_QUEUE_STAT_BYTES": "84", "SAI_QUEUE_STAT_DROPPED_BYTES": "94", "SAI_QUEUE_STAT_DROPPED_PACKETS": "82", - "SAI_QUEUE_STAT_PACKETS": "8" + "SAI_QUEUE_STAT_PACKETS": "8", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "33" }, "COUNTERS:oid:0x1500000000067e": { "SAI_QUEUE_STAT_BYTES": "15", "SAI_QUEUE_STAT_DROPPED_BYTES": "92", "SAI_QUEUE_STAT_DROPPED_PACKETS": "75", - "SAI_QUEUE_STAT_PACKETS": "83" + "SAI_QUEUE_STAT_PACKETS": "83", + "SAI_QUEUE_STAT_CREDIT_WD_DELETED_PACKETS": "3" }, "COUNTERS:oid:0x60000000005a3": { "SAI_ROUTER_INTERFACE_STAT_IN_ERROR_OCTETS": "0", @@ -858,7 +882,23 @@ "SAI_PORT_STAT_ETHER_STATS_JABBERS": "0", "SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES": "130402", "SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES": "3", - "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "4" + "SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS": "4", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S0": "1000000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S1": "900000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S2": "800000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S3": "700000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S4": "600000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S5": "500000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S6": "400000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S7": "300000", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S8": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S9": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S10": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S11": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S12": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S13": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S14": "0", + "SAI_PORT_STAT_IF_IN_FEC_CODEWORD_ERRORS_S15": "0" }, "COUNTERS:oid:0x1000000000013": { "SAI_PORT_STAT_IF_IN_UCAST_PKTS": "4", @@ -982,7 +1022,8 @@ }, "COUNTERS:oid:0x21000000000000": { "SAI_SWITCH_STAT_OUT_DROP_REASON_RANGE_BASE": "1000", - "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS": "0" + "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS": "0", + "SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP": "500" }, "COUNTERS:oid:0x1a00000000034f": { @@ -1772,7 +1813,8 @@ }, "COUNTERS_DEBUG_NAME_SWITCH_STAT_MAP": { "DEBUG_1": "SAI_SWITCH_STAT_OUT_DROP_REASON_RANGE_BASE", - "lowercase_counter": "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS" + "lowercase_counter": "SAI_SWITCH_STAT_OUT_CONFIGURED_DROP_REASONS_1_DROPPED_PKTS", + 
"SWITCH_STD_DROP_COUNTER-SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP": "SAI_SWITCH_STAT_PACKET_INTEGRITY_DROP" }, "COUNTERS:oid:0x1500000000035a": { "PFC_WD_ACTION": "drop", diff --git a/tests/mock_tables/database_config.json b/tests/mock_tables/database_config.json index f55c0734c2..9d6125fc74 100644 --- a/tests/mock_tables/database_config.json +++ b/tests/mock_tables/database_config.json @@ -4,6 +4,11 @@ "hostname" : "227.0.0.1", "port" : 6379, "unix_socket_path" : "/var/run/redis/redis.sock" + }, + "redis_bmp": { + "hostname" : "127.0.0.1", + "port" : 6400, + "unix_socket_path" : "/var/run/redis/redis_bmp.sock" } }, "DATABASES" : { @@ -61,6 +66,11 @@ "id" : 13, "separator": "|", "instance" : "redis" + }, + "BMP_STATE_DB" : { + "id" : 20, + "separator": "|", + "instance" : "redis_bmp" } }, "VERSION" : "1.1" diff --git a/tests/mock_tables/dbconnector.py b/tests/mock_tables/dbconnector.py index 4ccb392368..379c4e75cd 100644 --- a/tests/mock_tables/dbconnector.py +++ b/tests/mock_tables/dbconnector.py @@ -68,6 +68,32 @@ def config_set(self, *args): class MockPubSub: + class MessageList: + """A custom subscriptable class to hold messages in a list-like format""" + def __init__(self, channel): + self._data = [] + self._channel = channel + + def __getitem__(self, index): + return self._data[index] + + def __setitem__(self, index, value): + self._data[index] = value + + def append(self, msg): + print(f"Message published to {self._channel}: ", msg) + self._data.append(msg) + + def __init__(self, namespace): + # Initialize channels required for testing + self.messages = self.MessageList('WATERMARK_CLEAR_REQUEST') + self.channels = {'WATERMARK_CLEAR_REQUEST': self.messages} + self.namespace = namespace + + def __getitem__(self, key): + print("Channel:", key, "accessed in namespace:", self.namespace) + return self.channels[key] + def get_message(self): return None @@ -99,7 +125,7 @@ def __init__(self, *args, **kwargs): db_name = kwargs.pop('db_name') self.decode_responses = kwargs.pop('decode_responses', False) == True fname = db_name.lower() + ".json" - self.pubsub = MockPubSub() + self.pubsub = MockPubSub(namespace) if namespace is not None and namespace is not multi_asic.DEFAULT_NAMESPACE: fname = os.path.join(INPUT_DIR, namespace, fname) diff --git a/tests/mock_tables/state_db.json b/tests/mock_tables/state_db.json index 49ffaeedd8..bad7882cb6 100644 --- a/tests/mock_tables/state_db.json +++ b/tests/mock_tables/state_db.json @@ -1681,5 +1681,8 @@ }, "PORT_CAPACITY_TABLE|PORT_CAPACITY_DATA" : { "capacity": "80000" + }, + "STP_TABLE|GLOBAL": { + "max_stp_inst": "510" } } diff --git a/tests/mocked_libs/__init__.py b/tests/mocked_libs/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/mocked_libs/blkinfo.py b/tests/mocked_libs/blkinfo.py new file mode 100644 index 0000000000..6d5d809837 --- /dev/null +++ b/tests/mocked_libs/blkinfo.py @@ -0,0 +1,90 @@ +mock_json_op = \ + [ + { + "name": "sdx", + "kname": "sdx", + "fstype": "", + "label": "", + "mountpoint": "", + "size": "3965714432", + "maj:min": "8:0", + "rm": "0", + "model": "SMART EUSB", + "vendor": "SMART EUSB", + "serial": "SPG200807J1", + "hctl": "2:0:0:0", + "tran": "usb", + "rota": "1", + "type": "disk", + "ro": "0", + "owner": "", + "group": "", + "mode": "brw-rw----", + "children": [ + { + "name": "sdx1", + "kname": "sdx1", + "fstype": "ext4", + "label": "", + "mountpoint": "/host", + "size": "3964665856", + "maj:min": "8:1", + "rm": "0", + "model": " ", + "vendor": " ", + "serial": "", + "hctl": "", + "tran": "", 
+ "rota": "1", + "type": "part", + "ro": "0", + "owner": "", + "group": "", + "mode": "brw-rw----", + "children": [], + "parents": ["sdx"], + "statistics": { + "major": "8", + "minor": "1", + "kname": "sdx1", + "reads_completed": "22104", + "reads_merged": "5299", + "sectors_read": "1091502", + "time_spent_reading_ms": "51711", + "writes_completed": "11283", + "writes_merged": "13401", + "sectors_written": "443784", + "time_spent_ writing": "133398", + "ios_in_progress": "0", + "time_spent_doing_ios_ms": "112040", + "weighted_time_ios_ms": "112040", + }, + } + ], + "parents": [], + "statistics": { + "major": "8", + "minor": "0", + "kname": "sdx", + "reads_completed": "22151", + "reads_merged": "5299", + "sectors_read": "1093606", + "time_spent_reading_ms": "52005", + "writes_completed": "11283", + "writes_merged": "13401", + "sectors_written": "443784", + "time_spent_ writing": "133398", + "ios_in_progress": "0", + "time_spent_doing_ios_ms": "112220", + "weighted_time_ios_ms": "112220", + }, + } + ] + + +class BlkDiskInfo: + def __init__(self): + return + + def get_disks(self, filters): + return mock_json_op diff --git a/tests/mocked_libs/psutil.py b/tests/mocked_libs/psutil.py new file mode 100644 index 0000000000..f43f024d1c --- /dev/null +++ b/tests/mocked_libs/psutil.py @@ -0,0 +1,6 @@ +from collections import namedtuple + + +def disk_partitions(): + sdiskpart = namedtuple('sdiskpart', ['mountpoint', 'device']) + return [sdiskpart(mountpoint="/host", device="/dev/sdx1")] diff --git a/tests/multi_asic_dropstat_test.py b/tests/multi_asic_dropstat_test.py new file mode 100644 index 0000000000..8b9dd72826 --- /dev/null +++ b/tests/multi_asic_dropstat_test.py @@ -0,0 +1,122 @@ +import os +import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +dropstat_masic_result_asic0 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 10 100 0 0 80 20 + Ethernet4 U 0 1000 0 0 800 100 +Ethernet-BP0 U 0 1000 0 0 800 100 +Ethernet-BP4 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_asic1 = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 10 100 0 0 80 20 +Ethernet-BP260 U 0 1000 0 0 800 100 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 1000 +""" + +dropstat_masic_result_clear_all = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +------------ ------- -------- ---------- -------- ---------- --------- --------- + Ethernet0 U 0 0 0 0 0 0 + Ethernet4 U 0 0 0 0 0 0 +Ethernet-BP0 U 0 0 0 0 0 0 +Ethernet-BP4 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +-------------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet-BP256 U 0 0 0 0 0 0 +Ethernet-BP260 U 0 0 0 0 0 0 + + DEVICE DEBUG_1 +---------------- --------- +sonic_drops_test 0 +""" + + +class TestMultiAsicDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + 
os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_dropcount_masic_asic0(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic0' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 and return_code == 0 + + def test_show_dropcount_masic_all_and_clear(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_asic0 + dropstat_masic_result_asic1 + assert return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 'Cleared drop counters\n' and return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_masic_result_clear_all and return_code == 0 + + def test_show_dropcount_masic_invalid_ns(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 2 + assert "invalid choice: asic5" in result + + def test_show_dropcount_version(self): + return_code, result = get_result_and_return_code([ + 'dropstat', '--version' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ.pop("UTILITIES_UNIT_TESTING") + os.environ.pop("UTILITIES_UNIT_TESTING_TOPOLOGY") + print("TEARDOWN") diff --git a/tests/multi_asic_ecnconfig_test.py b/tests/multi_asic_ecnconfig_test.py new file mode 100644 index 0000000000..034a517ace --- /dev/null +++ b/tests/multi_asic_ecnconfig_test.py @@ -0,0 +1,64 @@ +import os +import sys +from .ecn_test import TestEcnConfigBase +from .ecn_input.ecn_test_vectors import testData + +root_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, root_path) +sys.path.insert(0, modules_path) + + +class TestEcnConfigMultiAsic(TestEcnConfigBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + def test_ecn_show_config_all_masic(self): + self.executor(testData['ecn_show_config_masic']) + + def test_ecn_show_config_all_verbose_masic(self): + self.executor(testData['test_ecn_show_config_verbose_masic']) + + def test_ecn_show_config_one_masic(self): + self.executor(testData['test_ecn_show_config_namespace']) + + def test_ecn_show_config_one_verbose_masic(self): + self.executor(testData['test_ecn_show_config_namespace_verbose']) + + def test_ecn_config_change_other_threshold_masic(self): + self.executor(testData['ecn_cfg_threshold_masic']) + + def 
test_ecn_config_change_other_prob_masic(self): + self.executor(testData['ecn_cfg_probability_masic']) + + def test_ecn_config_change_gdrop_verbose_all_masic(self): + self.executor(testData['ecn_cfg_gdrop_verbose_all_masic']) + + def test_ecn_config_multi_set_verbose_all_masic(self): + self.executor(testData['ecn_cfg_multi_set_verbose_all_masic']) + + def test_ecn_queue_get_masic(self): + self.executor(testData['ecn_q_get_masic']) + + def test_ecn_queue_get_verbose_masic(self): + self.executor(testData['ecn_q_get_verbose_masic']) + + def test_ecn_queue_get_all_masic(self): + self.executor(testData['ecn_q_get_all_ns_masic']) + + def test_ecn_queue_get_all_verbose_masic(self): + self.executor(testData['ecn_q_get_all_ns_verbose_masic']) + + def test_ecn_q_set_off_all_masic(self): + self.executor(testData['ecn_cfg_q_all_ns_off_masic']) + + def test_ecn_q_set_off_one_masic(self): + self.executor(testData['ecn_cfg_q_one_ns_off_verbose_masic']) + + @classmethod + def teardown_class(cls): + super().teardown_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" diff --git a/tests/multi_asic_mmuconfig_test.py b/tests/multi_asic_mmuconfig_test.py new file mode 100644 index 0000000000..1590d3f38f --- /dev/null +++ b/tests/multi_asic_mmuconfig_test.py @@ -0,0 +1,49 @@ +import os +import sys +from .mmuconfig_test import TestMmuConfigBase +from .mmuconfig_input.mmuconfig_test_vectors import testData + +root_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(root_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, root_path) +sys.path.insert(0, modules_path) + + +class TestMmuConfigMultiAsic(TestMmuConfigBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + def test_mmu_show_config_one_masic(self): + self.executor(testData['mmu_cfg_list_one_masic']) + + def test_mmu_show_config_one_verbose_masic(self): + self.executor(testData['mmu_cfg_list_one_verbose_masic']) + + def test_mmu_show_config_all_masic(self): + self.executor(testData['mmu_cfg_list_all_masic']) + + def test_mmu_alpha_config_one_masic(self): + self.executor(testData['mmu_cfg_alpha_one_masic']) + + def test_mmu_alpha_config_all_verbose_masic(self): + self.executor(testData['mmu_cfg_alpha_all_verbose_masic']) + + def test_mmu_staticth_config_one_masic(self): + self.executor(testData['mmu_cfg_static_th_one_masic']) + + def test_mmu_staticth_config_all_verbose_masic(self): + self.executor(testData['mmu_cfg_static_th_all_verbose_masic']) + + def test_mmu_alpha_config_invalid_masic(self): + self.executor(testData['mmu_cfg_alpha_invalid_masic']) + + def test_mmu_staticth_config_invalid_masic(self): + self.executor(testData['mmu_cfg_static_th_invalid_masic']) + + @classmethod + def teardown_class(cls): + super().teardown_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" diff --git a/tests/multi_asic_pfc_test.py b/tests/multi_asic_pfc_test.py new file mode 100644 index 0000000000..52bfcf4982 --- /dev/null +++ b/tests/multi_asic_pfc_test.py @@ -0,0 +1,133 @@ +import os +import sys +import json +import importlib +import pfc.main as pfc +from .pfc_test import TestPfcBase +from click.testing import CliRunner +from .pfc_input.pfc_test_vectors import testData + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "pfc") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class 
TestPfcMultiAsic(TestPfcBase): + @classmethod + def setup_class(cls): + super().setup_class() + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + + # Multi-asic utils rely on the database that is loaded + # We reload the multi_asic database and update the multi-asic utils + # Pfc uses click cmds that use multi_asic utils, hence we reload pfc too + + import mock_tables.mock_multi_asic + importlib.reload(mock_tables.mock_multi_asic) + mock_tables.dbconnector.load_namespace_config() + + import utilities_common + importlib.reload(utilities_common.multi_asic) + importlib.reload(pfc) + + def executor(self, input): + runner = CliRunner() + result = runner.invoke(pfc.cli, input['cmd']) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + assert exit_code == input['rc'] + + # For config commands we dump modified value in a tmp JSON file for testing + if 'cmp_args' in input: + fd = open('/tmp/pfc_testdata.json', 'r') + cmp_data = json.load(fd) + + # Verify assignments + for args in input['cmp_args']: + namespace, table, key, field, expected_val = args + assert(cmp_data[namespace][table][key][field] == expected_val) + fd.close() + + if 'rc_msg' in input: + assert input['rc_msg'] in output + + if 'rc_output' in input: + assert output == input['rc_output'] + + def test_pfc_show_asymmetric_all_asic0_masic(self): + self.executor(testData['pfc_show_asymmetric_all_asic0_masic']) + + def test_pfc_show_asymmetric_all_asic1_masic(self): + self.executor(testData['pfc_show_asymmetric_all_asic1_masic']) + + def test_pfc_show_asymmetric_all_masic(self): + self.executor(testData['pfc_show_asymmetric_all_masic']) + + def test_pfc_show_asymmetric_intf_one_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_one_masic']) + + def test_pfc_show_asymmetric_intf_all_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_all_masic']) + + def test_pfc_show_asymmetric_intf_fake_one_masic(self): + self.executor(testData['pfc_show_asymmetric_intf_fake_one_masic']) + + def test_pfc_show_priority_all_asic0_masic(self): + self.executor(testData['pfc_show_priority_all_asic0_masic']) + + def test_pfc_show_priority_all_asic1_masic(self): + self.executor(testData['pfc_show_priority_all_asic1_masic']) + + def test_pfc_show_priority_all_masic(self): + self.executor(testData['pfc_show_priority_all_masic']) + + def test_pfc_show_priority_intf_one_masic(self): + self.executor(testData['pfc_show_priority_intf_one_masic']) + + def test_pfc_show_priority_intf_all_masic(self): + self.executor(testData['pfc_show_priority_intf_all_masic']) + + def test_pfc_show_priority_intf_fake_one_masic(self): + self.executor(testData['pfc_show_priority_intf_fake_one_masic']) + + def test_pfc_show_priority_intf_fake_all_masic(self): + self.executor(testData['pfc_show_priority_intf_fake_all_masic']) + + def test_pfc_config_asymmetric_one_masic(self): + self.executor(testData['pfc_config_asymmetric_one_masic']) + + def test_pfc_config_asymmetric_invalid_one_masic(self): + self.executor(testData['pfc_config_asymmetric_invalid_one_masic']) + + def test_pfc_config_asymmetric_all_masic(self): + self.executor(testData['pfc_config_asymmetric_all_masic']) + + def test_pfc_config_asymmetric_invalid_all_masic(self): + self.executor(testData['pfc_config_asymmetric_invalid_all_masic']) + + def test_pfc_config_priority_one_masic(self): + self.executor(testData['pfc_config_priority_one_masic']) + + def test_pfc_config_priority_invalid_one_masic(self): + 
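+        # The invalid "onn" token should trigger click's usage error (rc 2),
+        # per the pfc_config_priority_invalid_one_masic vector.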
self.executor(testData['pfc_config_priority_invalid_one_masic']) + + def test_pfc_config_priority_all_masic(self): + self.executor(testData['pfc_config_priority_all_masic']) + + def test_pfc_config_priority_invalid_all_masic(self): + self.executor(testData['pfc_config_priority_invalid_all_masic']) + + @classmethod + def teardown_class(cls): + # Reset the database to mock single-asic state + import mock_tables.mock_single_asic + mock_tables.dbconnector.load_database_config() + + super().teardown_class() + os.environ.pop("UTILITIES_UNIT_TESTING_TOPOLOGY") diff --git a/tests/multi_asic_pgdropstat_test.py b/tests/multi_asic_pgdropstat_test.py new file mode 100644 index 0000000000..2a5e97cfdb --- /dev/null +++ b/tests/multi_asic_pgdropstat_test.py @@ -0,0 +1,95 @@ +import os +import sys +from utilities_common.cli import UserCache +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +pg_drop_masic_one_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A +""" + +pg_drop_masic_all_result = """\ +Ingress PG dropped packets: + Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 PG8 PG9 PG10 PG11 PG12 PG13\ + PG14 PG15 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- ----- ----- ------ ------ ------ ------\ + ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A + Ethernet-BP0 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 + Ethernet-BP4 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP256 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +Ethernet-BP260 0 0 0 0 0 0 0 0 0 0 0 0 0 0\ + 0 0 +""" + + +class TestMultiAsicPgDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def test_show_pg_drop_masic_all(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_all_result + + def test_show_pg_drop_masic(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic1' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == pg_drop_masic_one_result + + def test_show_pg_drop_masic_not_exist(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'show', '-n', 'asic5' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 1 + assert result == "Input value for '--namespace' / '-n'. 
Choose from one of (asic0, asic1)" + + def test_clear_pg_drop(self): + return_code, result = get_result_and_return_code([ + 'pg-drop', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == "Cleared PG drop counter\n" + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + UserCache('pg-drop').remove_all() + print("TEARDOWN") diff --git a/tests/multi_asic_queue_counter_test.py b/tests/multi_asic_queue_counter_test.py index fe8b057b5d..af57fa75e5 100644 --- a/tests/multi_asic_queue_counter_test.py +++ b/tests/multi_asic_queue_counter_test.py @@ -22,6 +22,7 @@ show_queue_counters = """\ +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet0 UC0 68 30 56 74 @@ -41,6 +42,7 @@ Ethernet0 MC14 82 44 42 60 Ethernet0 MC15 83 45 41 59 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 84 46 40 58 @@ -60,6 +62,7 @@ Ethernet4 MC14 98 60 26 44 Ethernet4 MC15 99 61 25 43 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP0 UC0 100 62 24 42 @@ -79,6 +82,7 @@ Ethernet-BP0 MC14 114 76 10 28 Ethernet-BP0 MC15 115 77 9 27 +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP4 UC0 116 78 8 26 @@ -89,19 +93,142 @@ Ethernet-BP4 UC5 121 83 3 21 Ethernet-BP4 UC6 122 84 2 20 Ethernet-BP4 UC7 123 85 1 19 -Ethernet-BP4 MC8 124 86 0 18 -Ethernet-BP4 MC9 125 87 1 17 -Ethernet-BP4 MC10 126 88 2 16 -Ethernet-BP4 MC11 127 89 3 15 -Ethernet-BP4 MC12 128 90 4 14 -Ethernet-BP4 MC13 129 91 5 13 -Ethernet-BP4 MC14 130 92 6 12 -Ethernet-BP4 MC15 131 93 7 11 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 """ +show_queue_counters_all_asics = """\ +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet0 UC0 68 30 56 74 +Ethernet0 UC1 69 31 55 73 +Ethernet0 UC2 70 32 54 72 +Ethernet0 UC3 71 33 53 71 +Ethernet0 UC4 72 34 52 70 +Ethernet0 UC5 73 35 51 69 +Ethernet0 UC6 74 36 50 68 +Ethernet0 UC7 75 37 49 67 +Ethernet0 MC8 76 38 48 66 +Ethernet0 MC9 77 39 47 65 +Ethernet0 MC10 78 40 46 64 +Ethernet0 MC11 79 41 45 63 +Ethernet0 MC12 80 42 44 62 +Ethernet0 MC13 81 43 43 61 +Ethernet0 MC14 82 44 42 60 +Ethernet0 MC15 83 45 41 59 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +--------- ----- -------------- --------------- ----------- ------------ +Ethernet4 UC0 84 46 40 58 +Ethernet4 UC1 85 47 39 57 +Ethernet4 UC2 86 48 38 56 +Ethernet4 UC3 87 49 37 55 +Ethernet4 UC4 88 50 36 54 +Ethernet4 UC5 89 51 35 53 +Ethernet4 UC6 90 52 34 52 +Ethernet4 UC7 91 53 33 51 +Ethernet4 MC8 92 54 32 50 +Ethernet4 MC9 93 55 31 49 +Ethernet4 MC10 94 56 30 48 +Ethernet4 MC11 95 57 29 47 +Ethernet4 MC12 96 58 28 46 
+Ethernet4 MC13 97 59 27 45 +Ethernet4 MC14 98 60 26 44 +Ethernet4 MC15 99 61 25 43 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +------------ ----- -------------- --------------- ----------- ------------ +Ethernet-BP0 UC0 100 62 24 42 +Ethernet-BP0 UC1 101 63 23 41 +Ethernet-BP0 UC2 102 64 22 40 +Ethernet-BP0 UC3 103 65 21 39 +Ethernet-BP0 UC4 104 66 20 38 +Ethernet-BP0 UC5 105 67 19 37 +Ethernet-BP0 UC6 106 68 18 36 +Ethernet-BP0 UC7 107 69 17 35 +Ethernet-BP0 MC8 108 70 16 34 +Ethernet-BP0 MC9 109 71 15 33 +Ethernet-BP0 MC10 110 72 14 32 +Ethernet-BP0 MC11 111 73 13 31 +Ethernet-BP0 MC12 112 74 12 30 +Ethernet-BP0 MC13 113 75 11 29 +Ethernet-BP0 MC14 114 76 10 28 +Ethernet-BP0 MC15 115 77 9 27 + +For namespace asic0: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +------------ ----- -------------- --------------- ----------- ------------ +Ethernet-BP4 UC0 116 78 8 26 +Ethernet-BP4 UC1 117 79 7 25 +Ethernet-BP4 UC2 118 80 6 24 +Ethernet-BP4 UC3 119 81 5 23 +Ethernet-BP4 UC4 120 82 4 22 +Ethernet-BP4 UC5 121 83 3 21 +Ethernet-BP4 UC6 122 84 2 20 +Ethernet-BP4 UC7 123 85 1 19 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 + +For namespace asic1: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +-------------- ----- -------------- --------------- ----------- ------------ +Ethernet-BP256 UC0 N/A N/A N/A N/A +Ethernet-BP256 UC1 N/A N/A N/A N/A +Ethernet-BP256 UC2 N/A N/A N/A N/A +Ethernet-BP256 UC3 N/A N/A N/A N/A +Ethernet-BP256 UC4 N/A N/A N/A N/A +Ethernet-BP256 UC5 N/A N/A N/A N/A +Ethernet-BP256 UC6 N/A N/A N/A N/A +Ethernet-BP256 UC7 N/A N/A N/A N/A +Ethernet-BP256 MC8 N/A N/A N/A N/A +Ethernet-BP256 MC9 N/A N/A N/A N/A +Ethernet-BP256 MC10 N/A N/A N/A N/A +Ethernet-BP256 MC11 N/A N/A N/A N/A +Ethernet-BP256 MC12 N/A N/A N/A N/A +Ethernet-BP256 MC13 N/A N/A N/A N/A +Ethernet-BP256 MC14 N/A N/A N/A N/A +Ethernet-BP256 MC15 N/A N/A N/A N/A + +For namespace asic1: + Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes +-------------- ----- -------------- --------------- ----------- ------------ +Ethernet-BP260 UC0 N/A N/A N/A N/A +Ethernet-BP260 UC1 N/A N/A N/A N/A +Ethernet-BP260 UC2 N/A N/A N/A N/A +Ethernet-BP260 UC3 N/A N/A N/A N/A +Ethernet-BP260 UC4 N/A N/A N/A N/A +Ethernet-BP260 UC5 N/A N/A N/A N/A +Ethernet-BP260 UC6 N/A N/A N/A N/A +Ethernet-BP260 UC7 N/A N/A N/A N/A +Ethernet-BP260 ALL8 N/A N/A N/A N/A +Ethernet-BP260 ALL9 N/A N/A N/A N/A +Ethernet-BP260 ALL10 N/A N/A N/A N/A +Ethernet-BP260 ALL11 N/A N/A N/A N/A +Ethernet-BP260 ALL12 N/A N/A N/A N/A +Ethernet-BP260 ALL13 N/A N/A N/A N/A +Ethernet-BP260 ALL14 N/A N/A N/A N/A +Ethernet-BP260 ALL15 N/A N/A N/A N/A + +""" show_queue_counters_port = """\ +For namespace asic0: Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes ------------ ----- -------------- --------------- ----------- ------------ Ethernet-BP4 UC0 116 78 8 26 @@ -112,14 +239,14 @@ Ethernet-BP4 UC5 121 83 3 21 Ethernet-BP4 UC6 122 84 2 20 Ethernet-BP4 UC7 123 85 1 19 -Ethernet-BP4 MC8 124 86 0 18 -Ethernet-BP4 MC9 125 87 1 17 -Ethernet-BP4 MC10 126 88 2 16 -Ethernet-BP4 MC11 127 89 3 15 -Ethernet-BP4 MC12 128 90 4 14 -Ethernet-BP4 MC13 129 91 5 13 -Ethernet-BP4 MC14 130 92 6 12 -Ethernet-BP4 MC15 131 93 7 11 +Ethernet-BP4 ALL8 124 86 0 18 +Ethernet-BP4 ALL9 125 87 1 17 +Ethernet-BP4 ALL10 126 
88 2 16 +Ethernet-BP4 ALL11 127 89 3 15 +Ethernet-BP4 ALL12 128 90 4 14 +Ethernet-BP4 ALL13 129 91 5 13 +Ethernet-BP4 ALL14 130 92 6 12 +Ethernet-BP4 ALL15 131 93 7 11 """ @@ -143,6 +270,12 @@ def test_queue_counters_port(self): print(result) assert result == show_queue_counters_port + def test_queue_counters_all_masic(self): + return_code, result = get_result_and_return_code(['queuestat']) + assert return_code == 0 + print(result) + assert result == show_queue_counters_all_asics + @classmethod def teardown_class(cls): os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) diff --git a/tests/multi_asic_watermarkstat_test.py b/tests/multi_asic_watermarkstat_test.py new file mode 100644 index 0000000000..b3bc011011 --- /dev/null +++ b/tests/multi_asic_watermarkstat_test.py @@ -0,0 +1,145 @@ +import os +import sys +from .wm_input.wm_test_vectors import testData +from .utils import get_result_and_return_code +from click.testing import CliRunner +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestWatermarkstatMultiAsic(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ['UTILITIES_UNIT_TESTING'] = "2" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "multi_asic" + print("SETUP") + + def executor(self, testcase): + runner = CliRunner() + for input in testcase: + if 'clear' in input['cmd']: + exec_cmd = input['cmd'][1:] + print(exec_cmd) + exit_code, output = get_result_and_return_code(exec_cmd) + else: + if len(input['cmd']) == 3: + exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]].commands[input['cmd'][2]] + else: + exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]] + args = [] if 'args' not in input else input['args'] + result = runner.invoke(exec_cmd, args) + exit_code = result.exit_code + output = result.output + + print(exit_code) + print(output) + + expected_code = 0 if 'rc' not in input else input['rc'] + assert exit_code == expected_code + assert output == input['rc_output'] + + def test_show_pg_shared_one_masic(self): + self.executor(testData['show_pg_wm_shared_one_masic']) + + def test_show_pg_shared_all_masic(self): + self.executor(testData['show_pg_wm_shared_all_masic']) + + def test_show_pg_headroom_wm_one_masic(self): + self.executor(testData['show_pg_wm_hdrm_one_masic']) + + def test_show_pg_headroom_wm_all_masic(self): + self.executor(testData['show_pg_wm_hdrm_all_masic']) + + def test_show_pg_shared_pwm_one_masic(self): + self.executor(testData['show_pg_pwm_shared_one_masic']) + + def test_show_pg_shared_pwm_all_masic(self): + self.executor(testData['show_pg_pwm_shared_all_masic']) + + def test_show_pg_headroom_pwm_one_masic(self): + self.executor(testData['show_pg_pwm_hdrm_one_masic']) + + def test_show_pg_headroom_pwm_all_masic(self): + self.executor(testData['show_pg_pwm_hdrm_all_masic']) + + def test_show_queue_unicast_wm_one_masic(self): + self.executor(testData['show_q_wm_unicast_one_masic']) + + def test_show_queue_unicast_wm_all_masic(self): + self.executor(testData['show_q_wm_unicast_all_masic']) + + def test_show_queue_unicast_pwm_one_masic(self): + self.executor(testData['show_q_pwm_unicast_one_masic']) + + def test_show_queue_unicast_pwm_all_masic(self): + self.executor(testData['show_q_pwm_unicast_all_masic']) + + def 
test_show_queue_multicast_wm_one_masic(self): + self.executor(testData['show_q_wm_multicast_one_masic']) + + def test_show_queue_multicast_wm_all_masic(self): + self.executor(testData['show_q_wm_multicast_all_masic']) + + def test_show_queue_multicast_pwm_one_masic(self): + self.executor(testData['show_q_pwm_multicast_one_masic']) + + def test_show_queue_multicast_pwm_all_masic(self): + self.executor(testData['show_q_pwm_multicast_all_masic']) + + def test_show_queue_all_wm_one_masic(self): + self.executor(testData['show_q_wm_all_one_masic']) + + def test_show_queue_all_wm_all_masic(self): + self.executor(testData['show_q_wm_all_all_masic']) + + def test_show_queue_all_pwm_one_masic(self): + self.executor(testData['show_q_pwm_all_one_masic']) + + def test_show_queue_all_pwm_all_masic(self): + self.executor(testData['show_q_pwm_all_all_masic']) + + def test_show_buffer_pool_wm_one_masic(self): + self.executor(testData['show_buffer_pool_wm_one_masic']) + + def test_show_buffer_pool_wm_all_masic(self): + self.executor(testData['show_buffer_pool_wm_all_masic']) + + def test_show_buffer_pool_pwm_one_masic(self): + self.executor(testData['show_buffer_pool_pwm_one_masic']) + + def test_show_buffer_pool_pwm_all_masic(self): + self.executor(testData['show_buffer_pool_pwm_all_masic']) + + def test_show_headroom_pool_wm_one_masic(self): + self.executor(testData['show_hdrm_pool_wm_one_masic']) + + def test_show_headroom_pool_wm_all_masic(self): + self.executor(testData['show_hdrm_pool_wm_all_masic']) + + def test_show_headroom_pool_pwm_one_masic(self): + self.executor(testData['show_hdrm_pool_pwm_one_masic']) + + def test_show_headroom_pool_pwm_all_masic(self): + self.executor(testData['show_hdrm_pool_pwm_all_masic']) + + def test_show_invalid_namespace_masic(self): + self.executor(testData['show_invalid_namespace_masic']) + + def test_clear_headroom_one_masic(self): + self.executor(testData['clear_hdrm_pool_wm_one_masic']) + + def test_clear_headroom_all_masic(self): + self.executor(testData['clear_hdrm_pool_wm_all_masic']) + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ['UTILITIES_UNIT_TESTING'] = "0" + os.environ["UTILITIES_UNIT_TESTING_TOPOLOGY"] = "" + print("TEARDOWN") diff --git a/tests/ntp.conf b/tests/ntp.conf new file mode 100644 index 0000000000..58bf276dce --- /dev/null +++ b/tests/ntp.conf @@ -0,0 +1,37 @@ +############################################################################### +# This file was AUTOMATICALLY GENERATED. DO NOT MODIFY. +# Controlled by ntp-config.service +############################################################################### + +# /etc/ntp.conf, configuration for ntpd; see ntp.conf(5) for help + +# To avoid ntpd from panic and exit if the drift between new time and +# current system time is large. +tinker panic 0 + +driftfile /var/lib/ntpsec/ntp.drift +leapfile /usr/share/zoneinfo/leap-seconds.list + +server 10.1.1.1 iburst +restrict 10.1.1.1 kod limited nomodify noquery + +server 10.22.1.12 iburst +restrict 10.22.1.12 kod limited nomodify noquery + + +interface ignore wildcard + + +interface listen eth0 +interface listen 127.0.0.1 + + +# Access control configuration +# By default, exchange time with everybody, but don't allow configuration. +# NTPsec doesn't establish peer associations, and so nopeer has no effect, and +# has been removed from here +restrict default kod nomodify noquery limited + +# Local users may interrogate the ntp server more closely. 
+restrict 127.0.0.1 +restrict ::1 diff --git a/tests/pbh_input/assert_show_output.py b/tests/pbh_input/assert_show_output.py index 7a701ba4bc..5538f3aada 100644 --- a/tests/pbh_input/assert_show_output.py +++ b/tests/pbh_input/assert_show_output.py @@ -78,6 +78,14 @@ """ +show_pbh_statistics_partial = """\ +TABLE RULE RX PACKETS COUNT RX BYTES COUNT +---------- ------ ------------------ ---------------- +pbh_table1 nvgre 100 200 +pbh_table2 vxlan 0 0 +""" + + show_pbh_statistics_updated="""\ TABLE RULE RX PACKETS COUNT RX BYTES COUNT ---------- ------ ------------------ ---------------- diff --git a/tests/pbh_input/counters_db_partial.json b/tests/pbh_input/counters_db_partial.json new file mode 100644 index 0000000000..aa140188c8 --- /dev/null +++ b/tests/pbh_input/counters_db_partial.json @@ -0,0 +1,11 @@ +{ + "COUNTERS:oid:0x9000000000000": { }, + "COUNTERS:oid:0x9000000000001": { + "SAI_ACL_COUNTER_ATTR_PACKETS": "300", + "SAI_ACL_COUNTER_ATTR_BYTES": "400" + }, + "ACL_COUNTER_RULE_MAP": { + "pbh_table1:nvgre": "oid:0x9000000000000", + "pbh_table2:vxlan": "oid:0x9000000000001" + } +} diff --git a/tests/pbh_test.py b/tests/pbh_test.py index 7dddfea9ca..0d68f458ee 100644 --- a/tests/pbh_test.py +++ b/tests/pbh_test.py @@ -946,6 +946,34 @@ def test_show_pbh_statistics_after_clear(self): assert result.exit_code == SUCCESS assert result.output == assert_show_output.show_pbh_statistics_zero + def test_show_pbh_statistics_after_clear_and_counters_partial(self): + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db_partial') + dbconnector.dedicated_dbs['CONFIG_DB'] = os.path.join(mock_db_path, 'full_pbh_config') + + self.remove_pbh_counters_file() + + db = Db() + runner = CliRunner() + + result = runner.invoke( + clear.cli.commands["pbh"]. + commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + + dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') + + result = runner.invoke( + show.cli.commands["pbh"]. 
+ commands["statistics"], [], obj=db + ) + + logger.debug("\n" + result.output) + logger.debug(result.exit_code) + assert result.exit_code == SUCCESS + assert result.output == assert_show_output.show_pbh_statistics_partial def test_show_pbh_statistics_after_clear_and_counters_updated(self): dbconnector.dedicated_dbs['COUNTERS_DB'] = os.path.join(mock_db_path, 'counters_db') diff --git a/tests/pfc_input/assert_show_output.py b/tests/pfc_input/assert_show_output.py new file mode 100644 index 0000000000..2406f8b49f --- /dev/null +++ b/tests/pfc_input/assert_show_output.py @@ -0,0 +1,82 @@ +pfc_asym_cannot_find_intf = """\ + +Interface Asymmetric +----------- ------------ + +""" + +pfc_cannot_find_intf = """\ +Cannot find interface Ethernet1234 +""" + +pfc_show_asymmetric_all = """\ + +Interface Asymmetric +----------- ------------ +Ethernet0 off +Ethernet4 off +Ethernet8 off +Ethernet12 off +Ethernet16 off +Ethernet20 off +Ethernet24 off +Ethernet28 off +Ethernet32 off +Ethernet36 off +Ethernet40 off +Ethernet44 off +Ethernet48 off +Ethernet52 off +Ethernet56 off +Ethernet60 off +Ethernet64 off +Ethernet68 off +Ethernet72 off +Ethernet76 off +Ethernet80 off +Ethernet84 off +Ethernet88 off +Ethernet92 off +Ethernet96 off +Ethernet100 off +Ethernet104 off +Ethernet108 off +Ethernet112 off +Ethernet116 off +Ethernet120 off +Ethernet124 off + +""" + +pfc_show_asymmetric_intf = """\ + +Interface Asymmetric +----------- ------------ +Ethernet0 off + +""" + +pfc_show_priority_all = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 + +""" + +pfc_show_priority_intf = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +pfc_config_priority_on = """\ + +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4,5 + +""" diff --git a/tests/pfc_input/pfc_test_vectors.py b/tests/pfc_input/pfc_test_vectors.py new file mode 100644 index 0000000000..20d6b59af3 --- /dev/null +++ b/tests/pfc_input/pfc_test_vectors.py @@ -0,0 +1,286 @@ +# Golden outputs +show_asym_all_asic0_masic = """\ +Namespace asic0 +Interface Asymmetric +------------ ------------ +Ethernet0 off +Ethernet4 off +Ethernet16 off +Ethernet-BP0 off +Ethernet-BP4 off + +""" + +show_asym_all_asic1_masic = """\ +Namespace asic1 +Interface Asymmetric +-------------- ------------ +Ethernet64 off +Ethernet-BP256 off +Ethernet-BP260 off + +""" + +show_asym_all_masic = """\ +Namespace asic0 +Interface Asymmetric +------------ ------------ +Ethernet0 off +Ethernet4 off +Ethernet16 off +Ethernet-BP0 off +Ethernet-BP4 off + +Namespace asic1 +Interface Asymmetric +-------------- ------------ +Ethernet64 off +Ethernet-BP256 off +Ethernet-BP260 off + +""" + +show_asym_intf_one_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ +Ethernet0 off + +""" + +show_asym_intf_all_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ +Ethernet0 off + +Namespace asic1 +Interface Asymmetric +----------- ------------ + +""" + +show_asym_intf_fake_one_masic = """\ +Namespace asic0 +Interface Asymmetric +----------- ------------ + +""" + +show_prio_all_asic0_masic = """\ +Namespace asic0 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 +Ethernet-BP260 3,4 + +""" + +show_prio_all_asic1_masic = """\ +Namespace asic1 +Interface Lossless priorities +-------------- --------------------- 
+Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 + +""" + +show_prio_all_masic = """\ +Namespace asic0 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 +Ethernet-BP260 3,4 + +Namespace asic1 +Interface Lossless priorities +-------------- --------------------- +Ethernet0 3,4 +Ethernet4 3,4 +Ethernet8 3,4 +Ethernet-BP0 3,4 +Ethernet-BP4 3,4 +Ethernet-BP256 3,4 + +""" + +show_prio_intf_one_masic = """\ +Namespace asic0 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +show_prio_intf_all_masic = """\ +Namespace asic0 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +Namespace asic1 +Interface Lossless priorities +----------- --------------------- +Ethernet0 3,4 + +""" + +show_prio_intf_fake_one_masic = """\ +Cannot find interface Ethernet1234 for Namespace asic0 +""" + +show_prio_intf_fake_all_masic = """\ +Cannot find interface Ethernet1234 for Namespace asic0 +Cannot find interface Ethernet1234 for Namespace asic1 +""" + +testData = { + 'pfc_show_asymmetric_all_asic0_masic': {'cmd': ['show', 'asymmetric', + '--namespace', 'asic0'], + 'rc': 0, + 'rc_output': show_asym_all_asic0_masic + }, + 'pfc_show_asymmetric_all_asic1_masic': {'cmd': ['show', 'asymmetric', + '--namespace', 'asic1'], + 'rc': 0, + 'rc_output': show_asym_all_asic1_masic + }, + 'pfc_show_asymmetric_all_masic': {'cmd': ['show', 'asymmetric'], + 'rc': 0, + 'rc_output': show_asym_all_masic + }, + 'pfc_show_asymmetric_intf_one_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_asym_intf_one_masic + }, + 'pfc_show_asymmetric_intf_all_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet0'], + 'rc': 0, + 'rc_output': show_asym_intf_all_masic + }, + 'pfc_show_asymmetric_intf_fake_one_masic': {'cmd': ['show', 'asymmetric', + 'Ethernet1234', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_asym_intf_fake_one_masic + }, + 'pfc_show_priority_all_asic0_masic': {'cmd': ['show', 'priority', + '--namespace', 'asic0'], + 'rc': 0, + 'rc_output': show_prio_all_asic0_masic + }, + 'pfc_show_priority_all_asic1_masic': {'cmd': ['show', 'priority', + '--namespace', 'asic1'], + 'rc': 0, + 'rc_output': show_prio_all_asic1_masic + }, + 'pfc_show_priority_all_masic': {'cmd': ['show', 'priority'], + 'rc': 0, + 'rc_output': show_prio_all_masic + }, + 'pfc_show_priority_intf_one_masic': {'cmd': ['show', 'priority', + 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_prio_intf_one_masic + }, + 'pfc_show_priority_intf_all_masic': {'cmd': ['show', 'priority', + 'Ethernet0'], + 'rc': 0, + 'rc_output': show_prio_intf_all_masic + }, + 'pfc_show_priority_intf_fake_one_masic': {'cmd': ['show', 'priority', + 'Ethernet1234', '--namespace', + 'asic0'], + 'rc': 0, + 'rc_output': show_prio_intf_fake_one_masic + }, + 'pfc_show_priority_intf_fake_all_masic': {'cmd': ['show', 'priority', + 'Ethernet1234'], + 'rc': 0, + 'rc_output': show_prio_intf_fake_all_masic + }, + 'pfc_config_asymmetric_one_masic': {'cmd': ['config', 'asymmetric', + 'on', 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT', 'Ethernet0', 'pfc_asym', 'on']] + }, + 'pfc_config_asymmetric_invalid_one_masic': {'cmd': ['config', 'asymmetric', + 'onn', 'Ethernet0', '--namespace', + 'asic0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config asymmetric 
[OPTIONS] ' + '[on|off] INTERFACE\nTry "cli config ' + 'asymmetric --help" for help.\n\n' + 'Error: Invalid value for "[on|off]": ' + 'invalid choice: onn. (choose from on, off)') + }, + 'pfc_config_asymmetric_all_masic': {'cmd': ['config', 'asymmetric', + 'on', 'Ethernet0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT', 'Ethernet0', 'pfc_asym', 'on'], + ['asic1', 'PORT', 'Ethernet0', 'pfc_asym', 'on']] + }, + 'pfc_config_asymmetric_invalid_all_masic': {'cmd': ['config', 'asymmetric', + 'onn', 'Ethernet0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config asymmetric [OPTIONS] ' + '[on|off] INTERFACE\nTry "cli config ' + 'asymmetric --help" for help.\n\n' + 'Error: Invalid value for "[on|off]": ' + 'invalid choice: onn. (choose from on, off)') + }, + 'pfc_config_priority_one_masic': {'cmd': ['config', 'priority', + 'on', 'Ethernet0', '5', + '--namespace', 'asic0'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5']] + }, + 'pfc_config_priority_invalid_one_masic': {'cmd': ['config', 'priority', + 'onn', 'Ethernet0', '5', + '--namespace', 'asic0'], + 'rc': 2, + 'rc_msg': ('Usage: cli config priority [OPTIONS] ' + '[on|off] INTERFACE [0|1|2|3|4|5|6|7]\n' + 'Try "cli config priority --help" for ' + 'help.\n\nError: Invalid value for ' + '"[on|off]": invalid choice: onn. ' + '(choose from on, off)') + }, + 'pfc_config_priority_all_masic': {'cmd': ['config', 'priority', + 'on', 'Ethernet0', '5'], + 'rc': 0, + 'cmp_args': [['asic0', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5'], + ['asic1', 'PORT_QOS_MAP', 'Ethernet0', + 'pfc_enable', '3,4,5']] + }, + 'pfc_config_priority_invalid_all_masic': {'cmd': ['config', 'priority', + 'onn', 'Ethernet0', '5'], + 'rc': 2, + 'rc_msg': ('Usage: cli config priority [OPTIONS] ' + '[on|off] INTERFACE [0|1|2|3|4|5|6|7]\n' + 'Try "cli config priority --help" for ' + 'help.\n\nError: Invalid value for ' + '"[on|off]": invalid choice: onn. 
' + '(choose from on, off)') + }, +} diff --git a/tests/pfc_test.py b/tests/pfc_test.py new file mode 100644 index 0000000000..136dab2623 --- /dev/null +++ b/tests/pfc_test.py @@ -0,0 +1,103 @@ +import os +import sys +import json +import pfc.main as pfc +from .pfc_input.assert_show_output import pfc_cannot_find_intf, pfc_show_asymmetric_all, \ + pfc_show_asymmetric_intf, pfc_show_priority_all, pfc_show_priority_intf, \ + pfc_config_priority_on, pfc_asym_cannot_find_intf + +from click.testing import CliRunner +from importlib import reload + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "pfc") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + + +class TestPfcBase(object): + @classmethod + def setup_class(cls): + print("SETUP") + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "2" + + def executor(self, cliobj, command, expected_rc=0, expected_output=None, expected_cfgdb_entries=None, + runner=CliRunner()): + result = runner.invoke(cliobj, command) + print(result.exit_code) + print(result.output) + + if result.exit_code != expected_rc: + print(result.exception) + assert result.exit_code == expected_rc + + if expected_output: + assert result.output == expected_output + + if expected_cfgdb_entries: + fd = open('/tmp/pfc_testdata.json', 'r') + cmp_data = json.load(fd) + for expected_cfgdb_entry in expected_cfgdb_entries: + (namespace, table, key, field, expected_val) = expected_cfgdb_entry + entry = cmp_data[namespace][table][key][field] + assert entry == expected_val + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["PATH"] = os.pathsep.join( + os.environ["PATH"].split(os.pathsep)[:-1] + ) + os.environ.pop("UTILITIES_UNIT_TESTING") + if os.path.isfile('/tmp/pfc_testdata.json'): + os.remove('/tmp/pfc_testdata.json') + +class TestPfc(TestPfcBase): + @classmethod + def setup_class(cls): + super().setup_class() + + from mock_tables import dbconnector + from mock_tables import mock_single_asic + reload(mock_single_asic) + dbconnector.load_database_config() + + import utilities_common + reload(utilities_common.multi_asic) + reload(pfc) + + def test_pfc_show_asymmetric_all(self): + self.executor(pfc.cli, ['show', 'asymmetric'], + expected_output=pfc_show_asymmetric_all) + + def test_pfc_show_asymmetric_intf(self): + self.executor(pfc.cli, ['show', 'asymmetric', 'Ethernet0'], + expected_output=pfc_show_asymmetric_intf) + + def test_pfc_show_asymmetric_intf_fake(self): + self.executor(pfc.cli, ['show', 'asymmetric', 'Ethernet1234'], + expected_output=pfc_asym_cannot_find_intf) + + def test_pfc_show_priority_all(self): + self.executor(pfc.cli, ['show', 'priority'], + expected_output=pfc_show_priority_all) + + def test_pfc_show_priority_intf(self): + self.executor(pfc.cli, ['show', 'priority', 'Ethernet0'], + expected_output=pfc_show_priority_intf) + + def test_pfc_show_priority_intf_fake(self): + self.executor(pfc.cli, ['show', 'priority', 'Ethernet1234'], + expected_output=pfc_cannot_find_intf) + + def test_pfc_config_asymmetric(self): + self.executor(pfc.cli, ['config', 'asymmetric', 'on', 'Ethernet0'], + # namespace, table, key, field, expected_val + expected_cfgdb_entries=[('', 'PORT', 'Ethernet0', 'pfc_asym', 'on')]) + + def test_pfc_config_priority(self): + self.executor(pfc.cli, ['config', 'priority', 'on', 'Ethernet0', '5'], + # namespace, table, key, field, expected_val + expected_cfgdb_entries=[('', 'PORT_QOS_MAP', 
'Ethernet0', 'pfc_enable', '3,4,5')]) diff --git a/tests/portstat_db/on_sup_na/chassis_state_db.json b/tests/portstat_db/on_sup_na/chassis_state_db.json new file mode 100644 index 0000000000..d2e5771098 --- /dev/null +++ b/tests/portstat_db/on_sup_na/chassis_state_db.json @@ -0,0 +1,68 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc3": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet11/1": { + "state": "N/A", + "rx_ok": "N/A", + "rx_bps": "N/A", + "rx_pps": "N/A", + "rx_util": "N/A", + "rx_err": "N/A", + "rx_drop": "N/A", + "rx_ovr": "N/A", + "tx_ok": "N/A", + "tx_bps": "N/A", + "tx_pps": "N/A", + "tx_util": "N/A", + "tx_err": "N/A", + "tx_drop": "N/A", + "tx_ovr": "N/A" + } +} \ No newline at end of file diff --git a/tests/portstat_db/on_sup_no_counters/chassis_state_db.json b/tests/portstat_db/on_sup_no_counters/chassis_state_db.json new file mode 100644 index 0000000000..5c380954c3 --- /dev/null +++ b/tests/portstat_db/on_sup_no_counters/chassis_state_db.json @@ -0,0 +1,11 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + } +} \ No newline at end of file diff --git a/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json b/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json new file mode 100644 index 0000000000..6040a80776 --- /dev/null +++ b/tests/portstat_db/on_sup_partial_lc/chassis_state_db.json @@ -0,0 +1,48 @@ +{ + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD0": { + "module_hostname": "sonic-lc1" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD1": { + "module_hostname": "sonic-lc2" + }, + "CHASSIS_MODULE_HOSTNAME_TABLE|LINE-CARD2": { + "module_hostname": "sonic-lc3" + }, + "LINECARD_PORT_STAT_MARK_TABLE|sonic-lc1": { + "timestamp": "2020-07-01 00:00:00" + }, + "LINECARD_PORT_STAT_TABLE|Ethernet1/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + }, + "LINECARD_PORT_STAT_TABLE|Ethernet2/1": { + "state": "U", + "rx_ok": 100, + "rx_bps": 10, + "rx_pps": 1, + "rx_util": 0, + "rx_err": 0, + "rx_drop": 0, + "rx_ovr": 0, + "tx_ok": 100, + "tx_bps": 10, + "tx_pps": 1, + "tx_util": 0, + "tx_err": 0, + "tx_drop": 0, + "tx_ovr": 0 + } +} \ No newline at end of file diff --git a/tests/portstat_test.py b/tests/portstat_test.py index 885c06662f..e7499b94fd 100644 --- 
a/tests/portstat_test.py +++ b/tests/portstat_test.py @@ -8,8 +8,8 @@ from .utils import get_result_and_return_code from utilities_common.cli import UserCache -root_path = os.path.dirname(os.path.abspath(__file__)) -modules_path = os.path.dirname(root_path) +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) scripts_path = os.path.join(modules_path, "scripts") intf_counters_before_clear = """\ @@ -35,11 +35,32 @@ """ intf_fec_counters = """\ - IFACE STATE FEC_CORR FEC_UNCORR FEC_SYMBOL_ERR ---------- ------- ---------- ------------ ---------------- -Ethernet0 D 130,402 3 4 -Ethernet4 N/A 110,412 1 0 -Ethernet8 N/A 100,317 0 0 + IFACE STATE FEC_CORR FEC_UNCORR FEC_SYMBOL_ERR FEC_PRE_BER FEC_POST_BER +--------- ------- ---------- ------------ ---------------- ------------- -------------- +Ethernet0 D 130,402 3 4 N/A N/A +Ethernet4 N/A 110,412 1 0 N/A N/A +Ethernet8 N/A 100,317 0 0 N/A N/A +""" + +intf_fec_counters_fec_hist = """\ +Symbol Errors Per Codeword Codewords +---------------------------- ----------- +BIN0 1000000 +BIN1 900000 +BIN2 800000 +BIN3 700000 +BIN4 600000 +BIN5 500000 +BIN6 400000 +BIN7 300000 +BIN8 0 +BIN9 0 +BIN10 0 +BIN11 0 +BIN12 0 +BIN13 0 +BIN14 0 +BIN15 0 """ intf_fec_counters_period = """\ @@ -234,6 +255,36 @@ Time Since Counters Last Cleared............... None """ +intf_counters_on_sup = """\ + IFACE STATE RX_OK RX_BPS RX_UTIL RX_ERR RX_DRP RX_OVR TX_OK TX_BPS TX_UTIL\ + TX_ERR TX_DRP TX_OVR +------------ ------- ------- --------- --------- -------- -------- -------- ------- --------- ---------\ + -------- -------- -------- + Ethernet1/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 + Ethernet2/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +Ethernet11/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +""" + +intf_counters_on_sup_no_counters = "Linecard Counter Table is not available.\n" + +intf_counters_on_sup_partial_lc = "Not all linecards have published their counter values.\n" + +intf_counters_on_sup_na = """\ + IFACE STATE RX_OK RX_BPS RX_UTIL RX_ERR RX_DRP RX_OVR TX_OK TX_BPS TX_UTIL\ + TX_ERR TX_DRP TX_OVR +------------ ------- ------- --------- --------- -------- -------- -------- ------- --------- ---------\ + -------- -------- -------- + Ethernet1/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 + Ethernet2/1 U 100 10.00 B/s 0.00% 0 0 0 100 10.00 B/s 0.00%\ + 0 0 0 +Ethernet11/1 N/A N/A N/A N/A N/A N/A N/A N/A N/A N/A\ + N/A N/A N/A +""" + TEST_PERIOD = 3 @@ -320,6 +371,15 @@ def test_show_intf_fec_counters(self): assert return_code == 0 assert result == intf_fec_counters + def test_show_intf_counters_fec_histogram(self): + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"].commands["fec-histogram"], ["Ethernet0"]) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_fec_counters_fec_hist + def test_show_intf_fec_counters_period(self): runner = CliRunner() result = runner.invoke(show.cli.commands["interfaces"].commands["counters"].commands["fec-stats"], @@ -397,13 +457,109 @@ def test_clear_intf_counters(self): assert return_code == 0 verify_after_clear(result, intf_counter_after_clear) + def test_show_intf_counters_on_sup(self): + remove_tmp_cnstat_file() + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + 
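+        # With UTILITIES_UNIT_TESTING_IS_SUP=1, portstat takes the supervisor
+        # path and renders LINECARD_PORT_STAT_TABLE rows from the mocked
+        # CHASSIS_STATE_DB; the golden output is asserted below.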
print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + + def test_show_intf_counters_on_sup_no_counters(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_no_counters/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_no_counters + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup_no_counters + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + + def test_show_intf_counters_on_sup_partial_lc(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_partial_lc/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_partial_lc + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup_partial_lc + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + + def test_show_intf_counters_on_sup_na(self): + remove_tmp_cnstat_file() + os.system("cp {} /tmp/".format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.system("cp {} {}".format(os.path.join(test_path, "portstat_db/on_sup_na/chassis_state_db.json"), + os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1" + + runner = CliRunner() + result = runner.invoke( + show.cli.commands["interfaces"].commands["counters"], []) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert result.output == intf_counters_on_sup_na + + return_code, result = get_result_and_return_code(['portstat']) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert return_code == 0 + assert result == intf_counters_on_sup_na + + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) + @classmethod def 
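The four `on_sup` tests above share one pattern: flip `UTILITIES_UNIT_TESTING_IS_SUP`, copy a fixture over `mock_tables/chassis_state_db.json`, assert, then restore both. An equivalent sketch using `shutil` and `try`/`finally`, which keeps the restore on the Python side even when an assertion fails mid-test (`test_path` is the module-level path defined at the top of this test file; the rest is illustrative):

```python
import os
import shutil

fixture = os.path.join(test_path, "portstat_db/on_sup_na/chassis_state_db.json")
target = os.path.join(test_path, "mock_tables/chassis_state_db.json")
backup = "/tmp/chassis_state_db.json"

shutil.copyfile(target, backup)        # same backup the tests make with cp
shutil.copyfile(fixture, target)
os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "1"
try:
    pass  # invoke the CLI and assert on its output here
finally:
    os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0"
    shutil.copyfile(backup, target)    # restore even if an assert raised
```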
teardown_class(cls): print("TEARDOWN") os.environ["PATH"] = os.pathsep.join( os.environ["PATH"].split(os.pathsep)[:-1]) os.environ["UTILITIES_UNIT_TESTING"] = "0" + os.environ["UTILITIES_UNIT_TESTING_IS_SUP"] = "0" remove_tmp_cnstat_file() + os.system("cp /tmp/chassis_state_db.json {}" + .format(os.path.join(test_path, "mock_tables/chassis_state_db.json"))) class TestMultiAsicPortStat(object): diff --git a/tests/qos_config_input/0/config_qos.json b/tests/qos_config_input/0/config_qos.json index 40c1903a06..5ef4b07f8d 100644 --- a/tests/qos_config_input/0/config_qos.json +++ b/tests/qos_config_input/0/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", - "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": "ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": 
"ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + 
"Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": "300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + "Ethernet28": "300m", + "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": 
"300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,41 +515,79 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - "PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - "wred_yellow_enable" : "true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": "2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file diff --git a/tests/qos_config_input/1/config_qos.json b/tests/qos_config_input/1/config_qos.json index 40c1903a06..5ef4b07f8d 100644 --- a/tests/qos_config_input/1/config_qos.json +++ b/tests/qos_config_input/1/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", - "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": "ingress_lossy_profile" + 
}, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": "ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + "type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" 
+ }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + 
"profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": "300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + "Ethernet28": "300m", + "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": "300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,41 +515,79 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - "PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - "wred_yellow_enable" : 
"true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": "2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file diff --git a/tests/qos_config_input/config_qos.json b/tests/qos_config_input/config_qos.json index fd76373983..0d44b421bd 100644 --- a/tests/qos_config_input/config_qos.json +++ b/tests/qos_config_input/config_qos.json @@ -1,52 +1,466 @@ { - "TC_TO_PRIORITY_GROUP_MAP": { - "AZURE": { - "0": "0", - "1": "0", - "2": "0", - "3": "3", - "4": "4", - "5": "0", - "6": "0", - "7": "7" + "BUFFER_PG": { + "Ethernet0|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet100|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet104|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet108|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet112|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet116|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet120|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet124|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet12|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet16|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet20|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet24|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet28|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet32|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet36|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet40|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet44|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet48|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet4|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet52|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet56|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet60|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet64|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet68|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet72|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet76|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet80|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet84|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet88|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet8|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet92|0": { + "profile": "ingress_lossy_profile" + }, + "Ethernet96|0": { + "profile": "ingress_lossy_profile" } }, - "MAP_PFC_PRIORITY_TO_QUEUE": { - "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "BUFFER_POOL": { + "egress_lossless_pool": { + "mode": "static", + "size": "12766208", + "type": "egress" + }, + "egress_lossy_pool": { + "mode": "dynamic", + "size": "7326924", + 
"type": "egress" + }, + "ingress_lossless_pool": { + "mode": "dynamic", + "size": "12766208", + "type": "ingress" } }, - "TC_TO_QUEUE_MAP": { + "BUFFER_PROFILE": { + "egress_lossless_profile": { + "pool": "egress_lossless_pool", + "size": "0", + "static_th": "12766208" + }, + "egress_lossy_profile": { + "dynamic_th": "3", + "pool": "egress_lossy_pool", + "size": "1518" + }, + "ingress_lossy_profile": { + "dynamic_th": "3", + "pool": "ingress_lossless_pool", + "size": "0" + } + }, + "BUFFER_QUEUE": { + "Ethernet0|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet0|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet0|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet100|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet100|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet104|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet104|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet108|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet108|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet112|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet112|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet116|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet116|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet120|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet120|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet124|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet124|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet12|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet12|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet16|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet16|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet20|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet20|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet24|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet24|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet28|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet28|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet32|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet32|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet36|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet36|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet40|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet40|5-6": { 
+ "profile": "egress_lossy_profile" + }, + "Ethernet44|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet44|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet44|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet48|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet48|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet4|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet4|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet52|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet52|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet56|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet56|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet60|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet60|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet64|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet64|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet68|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet68|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet72|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet72|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet76|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet76|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet80|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet80|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet84|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet84|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet88|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet88|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet8|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet8|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet92|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet92|5-6": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|0-2": { + "profile": "egress_lossy_profile" + }, + "Ethernet96|3-4": { + "profile": "egress_lossless_profile" + }, + "Ethernet96|5-6": { + "profile": "egress_lossy_profile" + } + }, + "CABLE_LENGTH": { "AZURE": { - "0": "0", - "1": "1", - "2": "2", - "3": "3", - "4": "4", - "5": "5", - "6": "6", - "7": "7" + "Ethernet0": "300m", + "Ethernet100": "300m", + "Ethernet104": "300m", + "Ethernet108": "300m", + "Ethernet112": "300m", + "Ethernet116": "300m", + "Ethernet12": "300m", + "Ethernet120": "300m", + "Ethernet124": "300m", + "Ethernet16": "300m", + "Ethernet20": "300m", + "Ethernet24": "300m", + "Ethernet28": "300m", 
+ "Ethernet32": "300m", + "Ethernet36": "300m", + "Ethernet4": "300m", + "Ethernet40": "300m", + "Ethernet44": "300m", + "Ethernet48": "300m", + "Ethernet52": "300m", + "Ethernet56": "300m", + "Ethernet60": "300m", + "Ethernet64": "300m", + "Ethernet68": "300m", + "Ethernet72": "300m", + "Ethernet76": "300m", + "Ethernet8": "300m", + "Ethernet80": "300m", + "Ethernet84": "300m", + "Ethernet88": "300m", + "Ethernet92": "300m", + "Ethernet96": "300m" } }, "DSCP_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1", - "8" : "0", - "9" : "1", + "0": "1", + "1": "1", "10": "1", "11": "1", "12": "1", @@ -57,6 +471,7 @@ "17": "1", "18": "1", "19": "1", + "2": "1", "20": "1", "21": "1", "22": "1", @@ -67,6 +482,7 @@ "27": "1", "28": "1", "29": "1", + "3": "3", "30": "1", "31": "1", "32": "1", @@ -77,6 +493,7 @@ "37": "1", "38": "1", "39": "1", + "4": "4", "40": "1", "41": "1", "42": "1", @@ -87,6 +504,7 @@ "47": "1", "48": "6", "49": "1", + "5": "2", "50": "1", "51": "1", "52": "1", @@ -97,53 +515,91 @@ "57": "1", "58": "1", "59": "1", + "6": "1", "60": "1", "61": "1", "62": "1", - "63": "1" + "63": "1", + "7": "1", + "8": "0", + "9": "1" + } + }, + "MAP_PFC_PRIORITY_TO_QUEUE": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" } }, "MPLS_TC_TO_TC_MAP": { "AZURE": { - "0" : "1", - "1" : "1", - "2" : "1", - "3" : "3", - "4" : "4", - "5" : "2", - "6" : "1", - "7" : "1" + "0": "1", + "1": "1", + "2": "1", + "3": "3", + "4": "4", + "5": "2", + "6": "1", + "7": "1" } }, + "PORT_QOS_MAP": {}, + "QUEUE": {}, "SCHEDULER": { "scheduler.0": { - "type" : "DWRR", + "type": "DWRR", "weight": "14" }, "scheduler.1": { - "type" : "DWRR", + "type": "DWRR", "weight": "15" } }, - "PORT_QOS_MAP": { + "TC_TO_PRIORITY_GROUP_MAP": { + "AZURE": { + "0": "0", + "1": "0", + "2": "0", + "3": "3", + "4": "4", + "5": "0", + "6": "0", + "7": "7" + } + }, + "TC_TO_QUEUE_MAP": { + "AZURE": { + "0": "0", + "1": "1", + "2": "2", + "3": "3", + "4": "4", + "5": "5", + "6": "6", + "7": "7" + } }, "WRED_PROFILE": { - "AZURE_LOSSLESS" : { - "wred_green_enable" : "true", - "wred_yellow_enable" : "true", - "wred_red_enable" : "true", - "ecn" : "ecn_all", - "green_max_threshold" : "2097152", - "green_min_threshold" : "1048576", - "yellow_max_threshold" : "2097152", - "yellow_min_threshold" : "1048576", - "red_max_threshold" : "2097152", - "red_min_threshold" : "1048576", - "green_drop_probability" : "5", + "AZURE_LOSSLESS": { + "ecn": "ecn_all", + "green_drop_probability": "5", + "green_max_threshold": "2097152", + "green_min_threshold": "1048576", + "red_drop_probability": "5", + "red_max_threshold": "2097152", + "red_min_threshold": "1048576", + "wred_green_enable": "true", + "wred_red_enable": "true", + "wred_yellow_enable": "true", "yellow_drop_probability": "5", - "red_drop_probability" : "5" + "yellow_max_threshold": "2097152", + "yellow_min_threshold": "1048576" } - }, - "QUEUE": { } -} +} \ No newline at end of file diff --git a/tests/queue_counter_test.py b/tests/queue_counter_test.py index 20b9516fbc..508550b9c8 100644 --- a/tests/queue_counter_test.py +++ b/tests/queue_counter_test.py @@ -22,6 +22,7 @@ show_queue_counters = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet0 UC0 0 0 0 0 @@ -55,6 +56,7 @@ Ethernet0 ALL28 N/A N/A N/A N/A Ethernet0 ALL29 N/A N/A N/A N/A +For namespace : Port 
TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 41 96 70 98 @@ -88,6 +90,7 @@ Ethernet4 ALL28 N/A N/A N/A N/A Ethernet4 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC0 0 0 0 0 @@ -123,6 +126,7 @@ """ show_queue_counters_nz = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet0 UC1 60 43 39 1 @@ -155,6 +159,7 @@ Ethernet0 ALL28 N/A N/A N/A N/A Ethernet0 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet4 UC0 41 96 70 98 @@ -188,6 +193,7 @@ Ethernet4 ALL28 N/A N/A N/A N/A Ethernet4 ALL29 N/A N/A N/A N/A +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC1 38 17 68 91 @@ -324,6 +330,7 @@ """] show_queue_counters_port = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC0 0 0 0 0 @@ -359,6 +366,7 @@ """ show_queue_counters_port_nz = """\ +For namespace : Port TxQ Counter/pkts Counter/bytes Drop/pkts Drop/bytes --------- ----- -------------- --------------- ----------- ------------ Ethernet8 UC1 38 17 68 91 @@ -1851,136 +1859,144 @@ show_queue_voq_counters = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 +For namespace : + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 54 8 93 78 -testsw|Ethernet4 VOQ1 83 96 74 9 -testsw|Ethernet4 VOQ2 15 60 61 31 -testsw|Ethernet4 VOQ3 45 52 82 94 -testsw|Ethernet4 VOQ4 55 88 89 52 -testsw|Ethernet4 VOQ5 14 70 95 79 -testsw|Ethernet4 VOQ6 68 60 66 81 -testsw|Ethernet4 VOQ7 63 4 48 76 +For namespace : + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 54 8 93 78 29 +testsw|Ethernet4 VOQ1 83 96 74 9 42 +testsw|Ethernet4 VOQ2 15 60 61 31 54 +testsw|Ethernet4 VOQ3 45 52 82 94 19 +testsw|Ethernet4 VOQ4 55 88 89 52 28 +testsw|Ethernet4 VOQ5 14 70 95 79 13 +testsw|Ethernet4 VOQ6 68 60 66 81 22 +testsw|Ethernet4 VOQ7 63 4 48 76 53 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- 
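Each queue-counter golden above now opens with a `For namespace :` header; on the single-ASIC mock setup the namespace is the default empty string, which is why nothing follows the colon. A tiny sketch of the header logic these goldens imply (the helper name is hypothetical):

```python
def namespace_header(namespace):
    # The default namespace on single-ASIC devices is the empty string,
    # so the header renders with nothing after the colon.
    return "For namespace : {}".format(namespace).rstrip()

print(namespace_header(""))       # For namespace :
print(namespace_header("asic0"))  # For namespace : asic0
```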
-------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 41 73 77 74 -testsw|Ethernet8 VOQ1 60 21 56 54 -testsw|Ethernet8 VOQ2 57 31 12 39 -testsw|Ethernet8 VOQ3 41 96 70 98 -testsw|Ethernet8 VOQ4 18 49 63 36 -testsw|Ethernet8 VOQ5 99 90 3 15 -testsw|Ethernet8 VOQ6 8 84 82 94 -testsw|Ethernet8 VOQ7 83 15 75 92 +For namespace : + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 41 73 77 74 67 +testsw|Ethernet8 VOQ1 60 21 56 54 79 +testsw|Ethernet8 VOQ2 57 31 12 39 35 +testsw|Ethernet8 VOQ3 41 96 70 98 47 +testsw|Ethernet8 VOQ4 18 49 63 36 62 +testsw|Ethernet8 VOQ5 99 90 3 15 19 +testsw|Ethernet8 VOQ6 8 84 82 94 33 +testsw|Ethernet8 VOQ7 83 15 75 92 3 """ show_queue_voq_counters_nz = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 +For namespace : + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 54 8 93 78 -testsw|Ethernet4 VOQ1 83 96 74 9 -testsw|Ethernet4 VOQ2 15 60 61 31 -testsw|Ethernet4 VOQ3 45 52 82 94 -testsw|Ethernet4 VOQ4 55 88 89 52 -testsw|Ethernet4 VOQ5 14 70 95 79 -testsw|Ethernet4 VOQ6 68 60 66 81 -testsw|Ethernet4 VOQ7 63 4 48 76 +For namespace : + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 54 8 93 78 29 +testsw|Ethernet4 VOQ1 83 96 74 9 42 +testsw|Ethernet4 VOQ2 15 60 61 31 54 +testsw|Ethernet4 VOQ3 45 52 82 94 19 +testsw|Ethernet4 VOQ4 55 88 89 52 28 +testsw|Ethernet4 VOQ5 14 70 95 79 13 +testsw|Ethernet4 VOQ6 68 60 66 81 22 +testsw|Ethernet4 VOQ7 63 4 48 76 53 - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 41 73 77 74 -testsw|Ethernet8 VOQ1 60 21 56 54 -testsw|Ethernet8 VOQ2 57 31 12 39 -testsw|Ethernet8 VOQ3 41 96 70 98 -testsw|Ethernet8 VOQ4 18 49 63 36 -testsw|Ethernet8 VOQ5 99 90 3 15 -testsw|Ethernet8 VOQ6 8 84 82 94 -testsw|Ethernet8 VOQ7 83 15 75 92 +For namespace : + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 41 73 77 74 67 +testsw|Ethernet8 VOQ1 60 21 56 54 79 +testsw|Ethernet8 VOQ2 57 31 12 39 35 +testsw|Ethernet8 VOQ3 41 96 70 98 47 +testsw|Ethernet8 VOQ4 18 49 63 36 62 +testsw|Ethernet8 VOQ5 99 90 3 15 19 +testsw|Ethernet8 VOQ6 8 84 82 94 33 +testsw|Ethernet8 VOQ7 83 15 75 92 3 """ 
show_queue_voq_counters_with_clear = ["""\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 0 0 0 0 -testsw|Ethernet0 VOQ2 0 0 0 0 -testsw|Ethernet0 VOQ3 0 0 0 0 -testsw|Ethernet0 VOQ4 0 0 0 0 -testsw|Ethernet0 VOQ5 0 0 0 0 -testsw|Ethernet0 VOQ6 0 0 0 0 -testsw|Ethernet0 VOQ7 0 0 0 0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 0 0 0 0 0 +testsw|Ethernet0 VOQ2 0 0 0 0 0 +testsw|Ethernet0 VOQ3 0 0 0 0 0 +testsw|Ethernet0 VOQ4 0 0 0 0 0 +testsw|Ethernet0 VOQ5 0 0 0 0 0 +testsw|Ethernet0 VOQ6 0 0 0 0 0 +testsw|Ethernet0 VOQ7 0 0 0 0 0 """, """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet4 VOQ0 0 0 0 0 -testsw|Ethernet4 VOQ1 0 0 0 0 -testsw|Ethernet4 VOQ2 0 0 0 0 -testsw|Ethernet4 VOQ3 0 0 0 0 -testsw|Ethernet4 VOQ4 0 0 0 0 -testsw|Ethernet4 VOQ5 0 0 0 0 -testsw|Ethernet4 VOQ6 0 0 0 0 -testsw|Ethernet4 VOQ7 0 0 0 0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet4 VOQ0 0 0 0 0 0 +testsw|Ethernet4 VOQ1 0 0 0 0 0 +testsw|Ethernet4 VOQ2 0 0 0 0 0 +testsw|Ethernet4 VOQ3 0 0 0 0 0 +testsw|Ethernet4 VOQ4 0 0 0 0 0 +testsw|Ethernet4 VOQ5 0 0 0 0 0 +testsw|Ethernet4 VOQ6 0 0 0 0 0 +testsw|Ethernet4 VOQ7 0 0 0 0 0 """, """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet8 VOQ0 0 0 0 0 -testsw|Ethernet8 VOQ1 0 0 0 0 -testsw|Ethernet8 VOQ2 0 0 0 0 -testsw|Ethernet8 VOQ3 0 0 0 0 -testsw|Ethernet8 VOQ4 0 0 0 0 -testsw|Ethernet8 VOQ5 0 0 0 0 -testsw|Ethernet8 VOQ6 0 0 0 0 -testsw|Ethernet8 VOQ7 0 0 0 0 + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet8 VOQ0 0 0 0 0 0 +testsw|Ethernet8 VOQ1 0 0 0 0 0 +testsw|Ethernet8 VOQ2 0 0 0 0 0 +testsw|Ethernet8 VOQ3 0 0 0 0 0 +testsw|Ethernet8 VOQ4 0 0 0 0 0 +testsw|Ethernet8 VOQ5 0 0 0 0 0 +testsw|Ethernet8 VOQ6 0 0 0 0 0 +testsw|Ethernet8 VOQ7 0 0 0 0 0 """ ] show_queue_port_voq_counters = """\ - Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ0 0 0 0 0 -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 +For namespace : + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ0 0 0 0 0 0 +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 """ show_queue_port_voq_counters_nz = """\ - Port Voq 
Counter/pkts Counter/bytes Drop/pkts Drop/bytes ----------------- ----- -------------- --------------- ----------- ------------ -testsw|Ethernet0 VOQ1 60 43 39 1 -testsw|Ethernet0 VOQ2 82 7 39 21 -testsw|Ethernet0 VOQ3 11 59 12 94 -testsw|Ethernet0 VOQ4 36 62 35 40 -testsw|Ethernet0 VOQ5 49 91 2 88 -testsw|Ethernet0 VOQ6 33 17 94 74 -testsw|Ethernet0 VOQ7 40 71 95 33 +For namespace : + Port Voq Counter/pkts Counter/bytes Drop/pkts Drop/bytes Credit-WD-Del/pkts +---------------- ----- -------------- --------------- ----------- ------------ -------------------- +testsw|Ethernet0 VOQ1 60 43 39 1 1 +testsw|Ethernet0 VOQ2 82 7 39 21 19 +testsw|Ethernet0 VOQ3 11 59 12 94 12 +testsw|Ethernet0 VOQ4 36 62 35 40 11 +testsw|Ethernet0 VOQ5 49 91 2 88 15 +testsw|Ethernet0 VOQ6 33 17 94 74 17 +testsw|Ethernet0 VOQ7 40 71 95 33 73 """ @@ -1988,48 +2004,56 @@ { "testsw|Ethernet0": { "VOQ0": { + "creditWDPkts": "0", "dropbytes": "0", "droppacket": "0", "totalbytes": "0", "totalpacket": "0" }, "VOQ1": { + "creditWDPkts": "1", "dropbytes": "1", "droppacket": "39", "totalbytes": "43", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "19", "dropbytes": "21", "droppacket": "39", "totalbytes": "7", "totalpacket": "82" }, "VOQ3": { + "creditWDPkts": "12", "dropbytes": "94", "droppacket": "12", "totalbytes": "59", "totalpacket": "11" }, "VOQ4": { + "creditWDPkts": "11", "dropbytes": "40", "droppacket": "35", "totalbytes": "62", "totalpacket": "36" }, "VOQ5": { + "creditWDPkts": "15", "dropbytes": "88", "droppacket": "2", "totalbytes": "91", "totalpacket": "49" }, "VOQ6": { + "creditWDPkts": "17", "dropbytes": "74", "droppacket": "94", "totalbytes": "17", "totalpacket": "33" }, "VOQ7": { + "creditWDPkts": "73", "dropbytes": "33", "droppacket": "95", "totalbytes": "71", @@ -2038,48 +2062,56 @@ }, "testsw|Ethernet4": { "VOQ0": { + "creditWDPkts": "29", "dropbytes": "78", "droppacket": "93", "totalbytes": "8", "totalpacket": "54" }, "VOQ1": { + "creditWDPkts": "42", "dropbytes": "9", "droppacket": "74", "totalbytes": "96", "totalpacket": "83" }, "VOQ2": { + "creditWDPkts": "54", "dropbytes": "31", "droppacket": "61", "totalbytes": "60", "totalpacket": "15" }, "VOQ3": { + "creditWDPkts": "19", "dropbytes": "94", "droppacket": "82", "totalbytes": "52", "totalpacket": "45" }, "VOQ4": { + "creditWDPkts": "28", "dropbytes": "52", "droppacket": "89", "totalbytes": "88", "totalpacket": "55" }, "VOQ5": { + "creditWDPkts": "13", "dropbytes": "79", "droppacket": "95", "totalbytes": "70", "totalpacket": "14" }, "VOQ6": { + "creditWDPkts": "22", "dropbytes": "81", "droppacket": "66", "totalbytes": "60", "totalpacket": "68" }, "VOQ7": { + "creditWDPkts": "53", "dropbytes": "76", "droppacket": "48", "totalbytes": "4", @@ -2088,48 +2120,56 @@ }, "testsw|Ethernet8": { "VOQ0": { + "creditWDPkts": "67", "dropbytes": "74", "droppacket": "77", "totalbytes": "73", "totalpacket": "41" }, "VOQ1": { + "creditWDPkts": "79", "dropbytes": "54", "droppacket": "56", "totalbytes": "21", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "35", "dropbytes": "39", "droppacket": "12", "totalbytes": "31", "totalpacket": "57" }, "VOQ3": { + "creditWDPkts": "47", "dropbytes": "98", "droppacket": "70", "totalbytes": "96", "totalpacket": "41" }, "VOQ4": { + "creditWDPkts": "62", "dropbytes": "36", "droppacket": "63", "totalbytes": "49", "totalpacket": "18" }, "VOQ5": { + "creditWDPkts": "19", "dropbytes": "15", "droppacket": "3", "totalbytes": "90", "totalpacket": "99" }, "VOQ6": { + "creditWDPkts": "33", "dropbytes": "94", "droppacket": "82", 
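The JSON goldens above show the schema behind the new `Credit-WD-Del/pkts` column: each `"port|voq"` entry gains a `"creditWDPkts"` field alongside the existing packet/byte/drop counters. A sketch that totals credit-watchdog deletions per port, tolerating the `N/A` values individual counters can report (`voq_json` is assumed to be the CLI's captured JSON output):

```python
import json

voq_stats = json.loads(voq_json)
for port, voqs in sorted(voq_stats.items()):
    deleted = sum(int(v["creditWDPkts"]) for v in voqs.values()
                  if v["creditWDPkts"] != "N/A")
    print("{}: {} credit-WD deleted pkts".format(port, deleted))
```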
"totalbytes": "84", "totalpacket": "8" }, "VOQ7": { + "creditWDPkts": "3", "dropbytes": "92", "droppacket": "75", "totalbytes": "15", @@ -2142,48 +2182,56 @@ { "testsw|Ethernet0": { "VOQ0": { + "creditWDPkts": "0", "dropbytes": "0", "droppacket": "0", "totalbytes": "0", "totalpacket": "0" }, "VOQ1": { + "creditWDPkts": "1", "dropbytes": "1", "droppacket": "39", "totalbytes": "43", "totalpacket": "60" }, "VOQ2": { + "creditWDPkts": "19", "dropbytes": "21", "droppacket": "39", "totalbytes": "7", "totalpacket": "82" }, "VOQ3": { + "creditWDPkts": "12", "dropbytes": "94", "droppacket": "12", "totalbytes": "59", "totalpacket": "11" }, "VOQ4": { + "creditWDPkts": "11", "dropbytes": "40", "droppacket": "35", "totalbytes": "62", "totalpacket": "36" }, "VOQ5": { + "creditWDPkts": "15", "dropbytes": "88", "droppacket": "2", "totalbytes": "91", "totalpacket": "49" }, "VOQ6": { + "creditWDPkts": "17", "dropbytes": "74", "droppacket": "94", "totalbytes": "17", "totalpacket": "33" }, "VOQ7": { + "creditWDPkts": "73", "dropbytes": "33", "droppacket": "95", "totalbytes": "71", diff --git a/tests/remote_cli_test.py b/tests/remote_cli_test.py index d9fd672102..57a220be1e 100644 --- a/tests/remote_cli_test.py +++ b/tests/remote_cli_test.py @@ -11,10 +11,11 @@ import select import socket import termios +import getpass -MULTI_LC_REXEC_OUTPUT = '''======== sonic-lc1 output: ======== +MULTI_LC_REXEC_OUTPUT = '''======== LINE-CARD0|sonic-lc1 output: ======== hello world -======== LINE-CARD2 output: ======== +======== LINE-CARD2|sonic-lc3 output: ======== hello world ''' REXEC_HELP = '''Usage: cli [OPTIONS] LINECARD_NAMES... @@ -75,17 +76,27 @@ def mock_paramiko_connection(channel): return conn +def mock_getpass(prompt="Password:", stream=None): + return "dummy" + + class TestRemoteExec(object): + __getpass = getpass.getpass + @classmethod def setup_class(cls): print("SETUP") from .mock_tables import dbconnector dbconnector.load_database_config() + getpass.getpass = mock_getpass + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + getpass.getpass = TestRemoteExec.__getpass @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) - # @mock.patch.object(linecard.Linecard, '_get_password', mock.MagicMock(return_value='dummmy')) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_command())) def test_rexec_with_module_name(self): @@ -98,7 +109,6 @@ def test_rexec_with_module_name(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', mock.MagicMock(return_value=mock_exec_command())) def test_rexec_with_hostname(self): @@ -111,7 +121,6 @@ def test_rexec_with_hostname(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(paramiko.SSHClient, 'exec_command', 
mock.MagicMock(return_value=mock_exec_error_cmd())) def test_rexec_error_with_module_name(self): @@ -133,7 +142,6 @@ def test_rexec_error(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_all(self): @@ -147,21 +155,19 @@ def test_rexec_all(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_invalid_lc(self): runner = CliRunner() - LINECARD_NAME = "sonic-lc-3" + LINECARD_NAME = "sonic-lc-100" result = runner.invoke( rexec.cli, [LINECARD_NAME, "-c", "show version"]) print(result.output) assert result.exit_code == 1, result.output - assert "Linecard sonic-lc-3 not found\n" == result.output + assert "Linecard sonic-lc-100 not found\n" == result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_unreachable_lc(self): @@ -175,7 +181,6 @@ def test_rexec_unreachable_lc(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock()) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) def test_rexec_help(self): @@ -188,7 +193,6 @@ def test_rexec_help(self): @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', 22): "None"}))) @mock.patch.object(linecard.Linecard, 'execute_cmd', mock.MagicMock(return_value="hello world")) @@ -202,7 +206,6 @@ def test_rexec_exception(self): assert "Failed to connect to sonic-lc1 with username admin\n" == result.output @mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) - @mock.patch("rcli.utils.get_password", mock.MagicMock(return_value="dummy")) @mock.patch.object(paramiko.SSHClient, 'connect', mock.MagicMock(side_effect=paramiko.ssh_exception.NoValidConnectionsError({('192.168.0.1', 22): "None"}))) def test_rexec_with_user_param(self): @@ -214,6 +217,19 @@ def test_rexec_with_user_param(self): assert result.exit_code == 1, result.output assert "Failed to connect to sonic-lc1 with username testuser\n" == result.output + 
@mock.patch("sonic_py_common.device_info.is_chassis", mock.MagicMock(return_value=True)) + @mock.patch("os.getlogin", mock.MagicMock(return_value="admin")) + def test_rexec_without_password_input(self): + runner = CliRunner() + getpass.getpass = TestRemoteExec.__getpass + LINECARD_NAME = "all" + result = runner.invoke( + rexec.cli, [LINECARD_NAME, "-c", "show version"]) + getpass.getpass = mock_getpass + print(result.output) + assert result.exit_code == 1, result.output + assert "Aborted" in result.output + class TestRemoteCLI(object): @classmethod diff --git a/tests/remote_show_test.py b/tests/remote_show_test.py new file mode 100644 index 0000000000..e1be3d0302 --- /dev/null +++ b/tests/remote_show_test.py @@ -0,0 +1,73 @@ +import mock +import subprocess +from io import BytesIO +from click.testing import CliRunner + + +def mock_rexec_command(*args): + mock_stdout = BytesIO(b"""hello world""") + print(mock_stdout.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=0, stdout=mock_stdout, stderr=BytesIO()) + + +def mock_rexec_error_cmd(*args): + mock_stderr = BytesIO(b"""Error""") + print(mock_stderr.getvalue().decode()) + return subprocess.CompletedProcess(args=[], returncode=1, stdout=BytesIO(), stderr=mock_stderr) + + +MULTI_LC_REXEC_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +hello world +''' + +MULTI_LC_ERR_OUTPUT = '''Since the current device is a chassis supervisor, this command will be executed remotely on all linecards +Error +''' + + +class TestRexecBgp(object): + @classmethod + def setup_class(cls): + pass + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) + def test_show_ip_bgp_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "summary"]) + def test_show_ip_bgp_error_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_error_cmd + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["summary"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 1 + assert MULTI_LC_ERR_OUTPUT == result.output + + @mock.patch("sonic_py_common.device_info.is_supervisor", mock.MagicMock(return_value=True)) + @mock.patch("sys.argv", ["show", "ip", "bgp", "network", "10.0.0.0/24"]) + def test_show_ip_bgp_network_rexec(self, setup_bgp_commands): + show = setup_bgp_commands + runner = CliRunner() + + _old_subprocess_run = subprocess.run + subprocess.run = mock_rexec_command + result = runner.invoke(show.cli.commands["ip"].commands["bgp"], args=["network", "10.0.0.0/24"]) + print(result.output) + subprocess.run = _old_subprocess_run + assert result.exit_code == 0 + assert MULTI_LC_REXEC_OUTPUT == result.output diff --git a/tests/sfputil_test.py b/tests/sfputil_test.py index 523848ec45..d8d13df1c0 100644 --- a/tests/sfputil_test.py +++ b/tests/sfputil_test.py @@ -20,6 +20,46 
@@ ERROR_NOT_IMPLEMENTED = 5 ERROR_INVALID_PORT = 6 +FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT = { + 'type': 'QSFP28 or later', + 'type_abbrv_name': 'QSFP28', + 'manufacturer': 'Mellanox', + 'model': 'MCP1600-C003', + 'vendor_rev': 'A2', + 'serial': 'MT1636VS10561', + 'vendor_oui': '00-02-c9', + 'vendor_date': '2016-07-18', + 'connector': 'No separable connector', + 'encoding': '64B66B', + 'ext_identifier': 'Power Class 1(1.5W max)', + 'ext_rateselect_compliance': 'QSFP+ Rate Select Version 1', + 'cable_type': 'Length Cable Assembly(m)', + 'cable_length': '3', + 'application_advertisement': 'N/A', + 'specification_compliance': "{'10/40G Ethernet Compliance Code': '40GBASE-CR4'}", + 'dom_capability': "{'Tx_power_support': 'no', 'Rx_power_support': 'no',\ + 'Voltage_support': 'no', 'Temp_support': 'no'}", + 'nominal_bit_rate': '255' +} +FLAT_MEMORY_MODULE_EEPROM = """Ethernet16: SFP EEPROM detected + Application Advertisement: N/A + Connector: No separable connector + Encoding: 64B66B + Extended Identifier: Power Class 1(1.5W max) + Extended RateSelect Compliance: QSFP+ Rate Select Version 1 + Identifier: QSFP28 or later + Length Cable Assembly(m): 3 + Nominal Bit Rate(100Mbs): 255 + Specification compliance: + 10/40G Ethernet Compliance Code: 40GBASE-CR4 + Vendor Date Code(YYYY-MM-DD Lot): 2016-07-18 + Vendor Name: Mellanox + Vendor OUI: 00-02-c9 + Vendor PN: MCP1600-C003 + Vendor Rev: A2 + Vendor SN: MT1636VS10561 +""" + class TestSfputil(object): def test_format_dict_value_to_string(self): sorted_key_table = [ @@ -570,6 +610,51 @@ def test_show_lpmode(self, mock_chassis): """ assert result.output == expected_output + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=True)) + def test_power_RJ45(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_sfp.get_presence.return_value = True + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Power disable/enable is not available for RJ45 port Ethernet0.\n' + assert result.exit_code == EXIT_FAIL + + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + def test_power(self, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.exit_code == 0 + + mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n\n' + + mock_sfp.get_presence.return_value = True + mock_sfp.set_power = MagicMock(side_effect=NotImplementedError) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'This functionality is currently not implemented for this 
platform\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.set_power = MagicMock(return_value=False) + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['power'].commands['enable'], ["Ethernet0"]) + assert result.output == 'Failed\n' + + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) @@ -585,6 +670,39 @@ def test_show_eeprom_RJ45(self, mock_chassis): expected_output = "Ethernet16: SFP EEPROM is not applicable for RJ45 port\n\n\n" assert result.output == expected_output + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sfputil.main.logical_port_name_to_physical_port_list', MagicMock(return_value=[1])) + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @pytest.mark.parametrize("exception, xcvr_api_none, expected_output", [ + (None, False, '''DOM values not supported for flat memory module\n\n'''), + (NotImplementedError, False, '''API is currently not implemented for this platform\n\n'''), + (None, True, '''API is none while getting DOM info!\n\n''') + ]) + @patch('sfputil.main.platform_chassis') + def test_show_eeprom_dom_conditions(self, mock_chassis, exception, xcvr_api_none, expected_output): + mock_sfp = MagicMock() + mock_sfp.get_presence.return_value = True + mock_sfp.get_transceiver_info.return_value = FLAT_MEMORY_MODULE_EEPROM_SFP_INFO_DICT + mock_chassis.get_sfp.return_value = mock_sfp + + if exception: + mock_chassis.get_sfp().get_xcvr_api.side_effect = exception + elif xcvr_api_none: + mock_chassis.get_sfp().get_xcvr_api.return_value = None + else: + mock_api = MagicMock() + mock_chassis.get_sfp().get_xcvr_api.return_value = mock_api + + runner = CliRunner() + result = runner.invoke(sfputil.cli.commands['show'].commands['eeprom'], ["-p", "Ethernet16", "-d"]) + + if exception or xcvr_api_none: + assert result.exit_code == ERROR_NOT_IMPLEMENTED + else: + assert result.exit_code == 0 + assert result.output == FLAT_MEMORY_MODULE_EEPROM + expected_output + @patch('sfputil.main.platform_chassis') @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=0))) def test_show_eeprom_hexdump_invalid_port(self, mock_chassis): @@ -1510,3 +1628,95 @@ def test_load_port_config(self, mock_is_multi_asic): mock_is_multi_asic.return_value = False assert sfputil.load_port_config() == True + + @patch('sfputil.main.is_port_type_rj45', MagicMock(return_value=False)) + @patch('sfputil.main.platform_chassis') + @patch('sfputil.main.ConfigDBConnector') + @patch('sfputil.main.SonicV2Connector') + @patch('sfputil.main.platform_sfputil', MagicMock(is_logical_port=MagicMock(return_value=1))) + @patch('sfputil.main.logical_port_to_physical_port_index', MagicMock(return_value=1)) + @patch('sonic_py_common.multi_asic.get_front_end_namespaces', MagicMock(return_value=[''])) + def test_debug_loopback(self, mock_sonic_v2_connector, mock_config_db_connector, mock_chassis): + mock_sfp = MagicMock() + mock_api = MagicMock() + mock_config_db_connector.return_value = MagicMock() + mock_sonic_v2_connector.return_value = MagicMock() + mock_chassis.get_sfp = MagicMock(return_value=mock_sfp) + mock_sfp.get_presence.return_value = True + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + + runner = CliRunner() + 
mock_sfp.get_presence.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: SFP EEPROM not detected\n' + mock_sfp.get_presence.return_value = True + + mock_sfp.get_xcvr_api = MagicMock(side_effect=NotImplementedError) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: This functionality is not implemented\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: enable host-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_sfp.get_xcvr_api = MagicMock(return_value=mock_api) + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: enable media-side-input loopback\n' + assert result.exit_code != ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.return_value = False + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-output", "enable"]) + assert result.output == 'Ethernet0: enable media-side-output loopback failed\n' + assert result.exit_code == EXIT_FAIL + + mock_api.set_loopback_mode.return_value = True + mock_api.set_loopback_mode.side_effect = AttributeError + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: Set loopback mode is not applicable for this module\n' + assert result.exit_code == ERROR_NOT_IMPLEMENTED + + mock_api.set_loopback_mode.side_effect = [TypeError, True] + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "host-side-input", "enable"]) + assert result.output == 'Ethernet0: Set loopback mode failed. 
Parameter is not supported\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db = MagicMock() + mock_config_db.get.side_effect = TypeError + mock_config_db_connector.return_value = mock_config_db + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: subport is not present in CONFIG_DB\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db_connector.return_value = None + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: Failed to connect to CONFIG_DB\n' + assert result.exit_code == EXIT_FAIL + + mock_config_db_connector.return_value = MagicMock() + mock_sonic_v2_connector.return_value = None + result = runner.invoke(sfputil.cli.commands['debug'].commands['loopback'], + ["Ethernet0", "media-side-input", "enable"]) + assert result.output == 'Ethernet0: Failed to connect to STATE_DB\n' + assert result.exit_code == EXIT_FAIL + + @pytest.mark.parametrize("subport, lane_count, expected_mask", [ + (1, 1, 0x1), + (1, 4, 0xf), + (2, 1, 0x2), + (2, 4, 0xf0), + (3, 2, 0x30), + (4, 1, 0x8), + ]) + def test_get_subport_lane_mask(self, subport, lane_count, expected_mask): + assert sfputil.get_subport_lane_mask(subport, lane_count) == expected_mask diff --git a/tests/show_bgp_network_test.py b/tests/show_bgp_network_test.py index f610199538..bfc23d8912 100644 --- a/tests/show_bgp_network_test.py +++ b/tests/show_bgp_network_test.py @@ -57,7 +57,8 @@ def setup_class(cls): ('bgp_v4_network_bestpath', 'bgp_v4_network_bestpath'), ('bgp_v6_network_longer_prefixes', 'bgp_v6_network_longer_prefixes'), ('bgp_v4_network', 'bgp_v4_network_longer_prefixes_error'), - ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error')], + ('bgp_v4_network', 'bgp_v6_network_longer_prefixes_error'), + ('bgp_v4_network', 'bgp_v4_network_all_asic_on_single_asic')], indirect=['setup_single_bgp_instance']) def test_bgp_network(self, setup_bgp_commands, test_vector, setup_single_bgp_instance): @@ -77,14 +78,16 @@ def setup_class(cls): @pytest.mark.parametrize( 'setup_multi_asic_bgp_instance, test_vector', - [('bgp_v4_network', 'bgp_v4_network_multi_asic'), + [('bgp_v4_network_all_asic', 'bgp_v4_network_default_multi_asic'), ('bgp_v6_network', 'bgp_v6_network_multi_asic'), ('bgp_v4_network_asic0', 'bgp_v4_network_asic0'), ('bgp_v4_network_ip_address_asic0', 'bgp_v4_network_ip_address_asic0'), ('bgp_v4_network_bestpath_asic0', 'bgp_v4_network_bestpath_asic0'), ('bgp_v6_network_asic0', 'bgp_v6_network_asic0'), ('bgp_v6_network_ip_address_asic0', 'bgp_v6_network_ip_address_asic0'), - ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0')], + ('bgp_v6_network_bestpath_asic0', 'bgp_v6_network_bestpath_asic0'), + ('bgp_v4_network_all_asic', 'bgp_v4_network_all_asic'), + ('bgp_v4_network', 'bgp_v4_network_asic_unknown')], indirect=['setup_multi_asic_bgp_instance']) def test_bgp_network(self, setup_bgp_commands, test_vector, setup_multi_asic_bgp_instance): diff --git a/tests/show_bmp_test.py b/tests/show_bmp_test.py new file mode 100644 index 0000000000..c0bc556d10 --- /dev/null +++ b/tests/show_bmp_test.py @@ -0,0 +1,178 @@ +import os +from click.testing import CliRunner +from utilities_common.db import Db + +import show.main as show + +test_path = os.path.dirname(os.path.abspath(__file__)) +mock_db_path = os.path.join(test_path, "bmp_input") + + +class TestShowBmp(object): + @classmethod + def 
setup_class(cls): + print("SETUP") + os.environ["UTILITIES_UNIT_TESTING"] = "1" + + def set_db_values(self, db, key, kvs): + for field, value in kvs.items(): + db.set(db.BMP_STATE_DB, key, field, value) + + def test_show_bmp_neighbor_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + "BGP_NEIGHBOR_TABLE|10.0.1.1", + {"peer_addr": "10.0.0.61", + "peer_asn": "64915", + "peer_rd": "300", + "peer_port": "5000", + "local_addr": "10.1.0.32", + "local_asn": "65100", + "local_port": "6000", + "sent_cap": "supports-mpbgp,supports-graceful-restart", + "recv_cap": "supports-mpbgp,supports-graceful-restart"}) + self.set_db_values(dbconnector, + "BGP_NEIGHBOR_TABLE|10.0.1.2", + {"peer_addr": "10.0.0.62", + "peer_asn": "64915", + "peer_rd": "300", + "peer_port": "5000", + "local_addr": "10.1.0.32", + "local_asn": "65100", + "local_port": "6000", + "sent_cap": "supports-mpbgp,supports-graceful-restart", + "recv_cap": "supports-mpbgp,supports-graceful-restart"}) + + expected_output = """\ +Total number of bmp neighbors: 2 +Neighbor_Address Peer_Address Peer_ASN Peer_RD Peer_Port Local_Address Local_ASN \ +Local_Port Advertised_Capabilities Received_Capabilities +------------------ -------------- ---------- --------- ----------- --------------- ----------- \ +------------ ---------------------------------------- ---------------------------------------- +10.0.0.61 10.0.0.61 64915 300 5000 10.1.0.32 65100 6000 \ +supports-mpbgp,supports-graceful-restart supports-mpbgp,supports-graceful-restart +10.0.0.62 10.0.0.62 64915 300 5000 10.1.0.32 65100 6000 \ +supports-mpbgp,supports-graceful-restart supports-mpbgp,supports-graceful-restart +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-neighbor-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + def test_show_bmp_rib_out_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + "BGP_RIB_OUT_TABLE|20c0:ef50::/64|10.0.0.57", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "fc00::7e", + "local_pref": "0", + "originator_id": "0", + "community_list": "residential", + "ext_community_list": "traffic_engineering"}) + self.set_db_values(dbconnector, + "BGP_RIB_OUT_TABLE|192.181.168.0/25|10.0.0.59", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "10.0.0.63", + "local_pref": "0", + "originator_id": "0", + "community_list": "business", + "ext_community_list": "preferential_transit"}) + + expected_output = """\ +Total number of bmp bgp-rib-out-table: 2 +Neighbor_Address NLRI Origin AS_Path Origin_AS Next_Hop Local_Pref \ +Originator_ID Community_List Ext_Community_List +------------------ ---------------- -------- ----------- ----------- ---------- ------------ \ +--------------- ---------------- -------------------- +10.0.0.57 20c0:ef50::/64 igp 65100 64600 64915 fc00::7e 0 \ +0 residential traffic_engineering +10.0.0.59 192.181.168.0/25 igp 65100 64600 64915 10.0.0.63 0 \ +0 business preferential_transit +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-rib-out-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + def 
test_show_bmp_rib_in_table(self): + runner = CliRunner() + db = Db() + dbconnector = db.db + self.set_db_values(dbconnector, + "BGP_RIB_IN_TABLE|20c0:ef50::/64|10.0.0.57", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "fc00::7e", + "local_pref": "0", + "originator_id": "0", + "community_list": "residential", + "ext_community_list": "traffic_engineering"}) + self.set_db_values(dbconnector, + "BGP_RIB_IN_TABLE|192.181.168.0/25|10.0.0.59", + {"origin": "igp", + "as_path": "65100 64600", + "origin_as": "64915", + "next_hop": "10.0.0.63", + "local_pref": "0", + "originator_id": "0", + "community_list": "business", + "ext_community_list": "preferential_transit"}) + + expected_output = """\ +Total number of bmp bgp-rib-in-table: 2 +Neighbor_Address NLRI Origin AS_Path Origin_AS Next_Hop Local_Pref \ +Originator_ID Community_List Ext_Community_List +------------------ ---------------- -------- ----------- ----------- ---------- ------------ \ +--------------- ---------------- -------------------- +10.0.0.57 20c0:ef50::/64 igp 65100 64600 64915 fc00::7e 0 \ +0 residential traffic_engineering +10.0.0.59 192.181.168.0/25 igp 65100 64600 64915 10.0.0.63 0 \ +0 business preferential_transit +""" + result = runner.invoke(show.cli.commands['bmp'].commands['bgp-rib-in-table'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + def test_tables(self): + runner = CliRunner() + db = Db() + db.cfgdb.mod_entry("BMP", "table", {'bgp_neighbor_table': 'true'}) + db.cfgdb.mod_entry("BMP", "table", {'bgp_rib_in_table': 'false'}) + db.cfgdb.mod_entry("BMP", "table", {'bgp_rib_out_table': 'true'}) + + assert db.cfgdb.get_entry('BMP', 'table')['bgp_neighbor_table'] == 'true' + assert db.cfgdb.get_entry('BMP', 'table')['bgp_rib_in_table'] == 'false' + assert db.cfgdb.get_entry('BMP', 'table')['bgp_rib_out_table'] == 'true' + + expected_output = """\ +BMP tables: +Table_Name Enabled +------------------ --------- +bgp_neighbor_table true +bgp_rib_in_table false +bgp_rib_out_table true +""" + result = runner.invoke(show.cli.commands['bmp'].commands['tables'], [], obj=db) + assert result.exit_code == 0 + resultA = result.output.strip().replace(' ', '').replace('\n', '') + resultB = expected_output.strip().replace(' ', '').replace('\n', '') + assert resultA == resultB + + @classmethod + def teardown_class(cls): + print("TEARDOWN") + os.environ["UTILITIES_UNIT_TESTING"] = "0" diff --git a/tests/show_ip_route_common.py b/tests/show_ip_route_common.py index 101b23309c..899915a1f4 100644 --- a/tests/show_ip_route_common.py +++ b/tests/show_ip_route_common.py @@ -875,3 +875,60 @@ Totals 6467 6466 """ + +SHOW_IP_ROUTE_REMOTE_LC = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued route, r - rejected route + +B> 0.0.0.0/0 [200/0] via 20.1.24.128, recursive via iBGP 04w0d12h + via 20.1.16.128, recursive via iBGP 04w0d12h + via 20.1.8.128, recursive via iBGP 04w0d12h + via 20.1.0.128, recursive via iBGP 04w0d12h +""" + +SHOW_IP_ROUTE_LC = """\ +Codes: K - kernel route, C - connected, S - static, R - RIP, + O - OSPF, I - IS-IS, B - BGP, E - EIGRP, N - 
NHRP, + T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, + F - PBR, f - OpenFabric, + > - selected route, * - FIB route, q - queued route, r - rejected route + +B>*0.0.0.0/0 [20/0] via 20.1.24.128, PortChannel13, 04w0d11h + * via 20.1.16.128, PortChannel9, 04w0d11h + * via 20.1.8.128, PortChannel5, 04w0d11h + * via 20.1.0.128, PortChannel1, 04w0d11h +""" + +SHOW_IP_ROUTE_REMOTE_LC_DEFAULT_ROUTE = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 200, metric 0, best + Last update 04w0d12h ago + * 20.1.24.128 recursive via iBGP + * 20.1.16.128 recursive via iBGP + * 20.1.8.128 recursive via iBGP + * 20.1.0.128 recursive via iBGP + +""" + +SHOW_IP_ROUTE_LC_DEFAULT_ROUTE = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 20, metric 0, best + Last update 04w0d11h ago + * 20.1.24.128, via PortChannel13 + * 20.1.16.128, via PortChannel9 + * 20.1.8.128, via PortChannel5 + * 20.1.0.128, via PortChannel1 + +""" + +SHOW_IP_ROUTE_LC_DEFAULT_ROUTE_2 = """\ +Routing entry for 0.0.0.0/0 + Known via "bgp", distance 20, metric 0, best + Last update 01:01:51 ago + * 10.0.0.7, via PortChannel106 + * 10.0.0.1, via PortChannel102 + +""" diff --git a/tests/show_test.py b/tests/show_test.py index 4cd29ac45e..819f197343 100644 --- a/tests/show_test.py +++ b/tests/show_test.py @@ -1040,6 +1040,12 @@ def test_show_ztp(self, mock_run_command): assert result.exit_code == 0 mock_run_command.assert_called_with(['ztp', 'status', '--verbose'], display_cmd=True) + @patch('show.main.run_command') + def test_show_banner(self, mock_run_command): + runner = CliRunner() + result = runner.invoke(show.cli.commands['banner']) + assert result.exit_code == 0 + def teardown(self): print('TEAR DOWN') @@ -1064,6 +1070,20 @@ def test_rc_syslog(self, mock_rc): assert result.exit_code == 0 assert '[1.1.1.1]' in result.output + @patch('builtins.open', mock_open( + read_data=open('tests/ntp.conf').read())) + def test_ntp(self): + runner = CliRunner() + + result = runner.invoke( + show.cli.commands['runningconfiguration'].commands['ntp']) + print(result.exit_code) + print(result.output) + + assert result.exit_code == 0 + assert '10.1.1.1' in result.output + assert '10.22.1.12' in result.output + @classmethod def teardown_class(cls): print('TEARDOWN') diff --git a/tests/single_asic_dropstat_test.py b/tests/single_asic_dropstat_test.py new file mode 100644 index 0000000000..c521bcfa60 --- /dev/null +++ b/tests/single_asic_dropstat_test.py @@ -0,0 +1,72 @@ +import os +import sys +from .utils import get_result_and_return_code + +test_path = os.path.dirname(os.path.abspath(__file__)) +modules_path = os.path.dirname(test_path) +scripts_path = os.path.join(modules_path, "scripts") +sys.path.insert(0, test_path) +sys.path.insert(0, modules_path) + +dropstat_result = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +--------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet0 D 10 100 0 0 80 20 +Ethernet4 N/A 0 1000 0 0 800 100 +Ethernet8 N/A 100 10 0 0 10 0 + + DEVICE SWITCH_DROPS lowercase_counter +---------------- -------------- ------------------- +sonic_drops_test 1000 0 +""" + +dropstat_result_clear_all = """\ + IFACE STATE RX_ERR RX_DROPS TX_ERR TX_DROPS DEBUG_0 DEBUG_2 +--------- ------- -------- ---------- -------- ---------- --------- --------- +Ethernet0 D 0 0 0 0 0 0 +Ethernet4 N/A 0 0 0 0 0 0 +Ethernet8 N/A 0 0 0 0 0 0 + + DEVICE SWITCH_DROPS lowercase_counter +---------------- -------------- ------------------- +sonic_drops_test 0 0 +""" + + +class 
TestSingleAsicDropstat(object): + @classmethod + def setup_class(cls): + os.environ["PATH"] += os.pathsep + scripts_path + os.environ["UTILITIES_UNIT_TESTING"] = "1" + print("SETUP") + + def test_show_dropcount_and_clear(self): + os.environ["UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE"] = "1" + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + os.environ.pop("UTILITIES_UNIT_TESTING_DROPSTAT_CLEAN_CACHE") + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_result + assert return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'clear' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == 'Cleared drop counters\n' and return_code == 0 + + return_code, result = get_result_and_return_code([ + 'dropstat', '-c', 'show' + ]) + print("return_code: {}".format(return_code)) + print("result = {}".format(result)) + assert result == dropstat_result_clear_all and return_code == 0 + + @classmethod + def teardown_class(cls): + os.environ["PATH"] = os.pathsep.join(os.environ["PATH"].split(os.pathsep)[:-1]) + os.environ.pop("UTILITIES_UNIT_TESTING") + print("TEARDOWN") diff --git a/tests/sonic_package_manager/conftest.py b/tests/sonic_package_manager/conftest.py index 10fe72cac1..3d6beae9ff 100644 --- a/tests/sonic_package_manager/conftest.py +++ b/tests/sonic_package_manager/conftest.py @@ -16,6 +16,7 @@ from sonic_package_manager.registry import RegistryResolver from sonic_package_manager.version import Version from sonic_package_manager.service_creator.creator import * +from sonic_package_manager.service_creator.creator import ETC_SYSTEMD_LOCATION @pytest.fixture @@ -133,20 +134,20 @@ def __init__(self): self.add('Azure/docker-test-6', '2.0.0', 'test-package-6', '2.0.0') self.add('Azure/docker-test-6', 'latest', 'test-package-6', '1.5.0') - def from_registry(self, repository: str, reference: str): + def from_registry(self, repository: str, reference: str, use_local_manifest=None, name=None): manifest = Manifest.marshal(self.metadata_store[repository][reference]['manifest']) components = self.metadata_store[repository][reference]['components'] yang = self.metadata_store[repository][reference]['yang'] return Metadata(manifest, components, yang) - def from_local(self, image: str): + def from_local(self, image: str, use_local_manifest=None, name=None, use_edit=None): ref = Reference.parse(image) manifest = Manifest.marshal(self.metadata_store[ref['name']][ref['tag']]['manifest']) components = self.metadata_store[ref['name']][ref['tag']]['components'] yang = self.metadata_store[ref['name']][ref['tag']]['yang'] return Metadata(manifest, components, yang) - def from_tarball(self, filepath: str) -> Manifest: + def from_tarball(self, filepath: str, use_local_manifest=None, name=None) -> Manifest: path, ref = filepath.split(':') manifest = Manifest.marshal(self.metadata_store[path][ref]['manifest']) components = self.metadata_store[path][ref]['components'] @@ -405,12 +406,12 @@ def fake_db_for_migration(fake_metadata_resolver): def sonic_fs(fs): fs.create_file('/proc/1/root') fs.create_dir(ETC_SONIC_PATH) + fs.create_dir(ETC_SYSTEMD_LOCATION) fs.create_dir(SYSTEMD_LOCATION) fs.create_dir(DOCKER_CTL_SCRIPT_LOCATION) fs.create_dir(SERVICE_MGMT_SCRIPT_LOCATION) fs.create_file(GENERATED_SERVICES_CONF_FILE) fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_FILE_TEMPLATE)) - fs.create_file(os.path.join(TEMPLATES_PATH, 
TIMER_UNIT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, SERVICE_MGMT_SCRIPT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, DOCKER_CTL_SCRIPT_TEMPLATE)) fs.create_file(os.path.join(TEMPLATES_PATH, DEBUG_DUMP_SCRIPT_TEMPLATE)) diff --git a/tests/sonic_package_manager/test_cli.py b/tests/sonic_package_manager/test_cli.py index 695d8cba58..1b7556ae68 100644 --- a/tests/sonic_package_manager/test_cli.py +++ b/tests/sonic_package_manager/test_cli.py @@ -4,6 +4,15 @@ from sonic_package_manager import main +from unittest.mock import patch, mock_open, MagicMock + +MANIFEST_LOCATION = 'fake_manifest_location' +DMFILE_NAME = 'fake_dmfile_name' +DEFAULT_MANIFEST_NAME = 'fake_default_manifest_name' +LOCAL_JSON = 'fake_local_json' +sample_manifest_json = '{"package": {"name": "test", "version": "1.0.0"}, "service": {"name": "test"}}' +fake_manifest_name = 'test-manifest' +MANIFEST_CONTENT = '{"package": {"name": "test", "version": "1.0.0"}, "service": {"name": "test"}}' def test_show_changelog(package_manager, fake_metadata_resolver): """ Test case for "sonic-package-manager package show changelog [NAME]" """ @@ -61,3 +70,217 @@ def test_show_changelog_no_changelog(package_manager): assert result.exit_code == 1 assert result.output == 'Failed to print package changelog: No changelog for package test-package\n' + + +def test_manifests_create_command_existing_manifest(package_manager): + """ Test case for "sonic-package-manager manifests create" with an existing manifest file """ + + runner = CliRunner() + + with patch('os.path.exists', side_effect=[True, False]), \ + patch('sonic_package_manager.main.PackageManager.is_installed', return_value=False), \ + patch('builtins.open', new_callable=mock_open()), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['create'], + ['test-manifest'], + input=sample_manifest_json, + obj=package_manager) + + assert 'Error: Manifest file \'test-manifest\' already exists.' 
in result.output + assert result.exit_code == 0 + + +def test_manifests_create_command_existing_package(package_manager): + """ Test case for "sonic-package-manager manifests create" with an existing installed package """ + + runner = CliRunner() + + with patch('os.path.exists', return_value=False), \ + patch('sonic_package_manager.main.PackageManager.is_installed', return_value=True), \ + patch('builtins.open', new_callable=mock_open()), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['create'], + ['test-manifest'], + input=sample_manifest_json, + obj=package_manager) + + assert 'Error: A package with the same name test-manifest is already installed' in result.output + assert result.exit_code == 0 + + +def test_manifests_update_command_error_handling(package_manager): + + runner = CliRunner() + + with patch('os.path.exists', return_value=False), \ + patch('builtins.open', new_callable=mock_open(read_data='{"key": "value"}')), \ + patch('json.load', side_effect=lambda x: MagicMock(return_value='{"key": "value"}')), \ + patch('json.dump'), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['update'], + ['non-existent-manifest', '--from-json', 'fake_json_path'], + obj=package_manager) + assert 'Local Manifest file for non-existent-manifest does not exists to update\n' in result.output + assert result.exit_code == 0 + + +def test_manifests_delete_command_deletion_cancelled(package_manager): + runner = CliRunner() + + with patch('os.path.exists', return_value=True), \ + patch('click.prompt', return_value='n'), \ + patch('os.remove') as mock_remove, \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['delete'], ['test-manifest'], obj=package_manager) + + # Check if the cancellation message is present in the result output + assert 'Deletion cancelled.' 
in result.output + # Check if os.remove is not called when the deletion is cancelled + assert not mock_remove.called + + +def test_manifests_list_command_no_manifests(package_manager): + runner = CliRunner() + + with patch('os.listdir', return_value=[]), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['list'], [], obj=package_manager) + + # Check if the appropriate message is present in the result output + assert 'No custom local manifest files found.\n' in result.output + + +def test_manifests_command(): + """ Test case for "sonic-package-manager manifests" """ + + runner = CliRunner() + result = runner.invoke(main.manifests) + assert result.exit_code == 0 + + +def test_manifests_create_command_exception(package_manager): + """Test case for "sonic-package-manager manifests create" with an exception during manifest creation""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.create_package_manifest', + side_effect=Exception("Custom error")), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['create'], ['test-manifest'], obj=package_manager) + + assert 'Error: Manifest test-manifest creation failed - Custom error' in result.output + assert result.exit_code == 0 + + +def test_manifests_update_command_exception(package_manager): + """Test case for 'sonic-package-manager manifests update' with an exception during manifest update""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.update_package_manifest', + side_effect=Exception("Custom error")), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['update'], + ['test-manifest', '--from-json', 'new_manifest.json'], + obj=package_manager) + + assert 'Error occurred while updating manifest \'test-manifest\': Custom error' in result.output + assert result.exit_code == 0 + + +def test_manifests_delete_command_exception(package_manager): + """Test case for 'sonic-package-manager manifests delete' with an exception during manifest deletion""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.delete_package_manifest', + side_effect=Exception("Custom error")), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['delete'], + ['test-manifest'], obj=package_manager) + + assert "Error: Failed to delete manifest file 'test-manifest'. Custom error" in result.output + assert result.exit_code == 0 + + +def test_manifests_show_command_file_not_found(package_manager): + """Test case for 'sonic-package-manager manifests show' with a non-existent manifest file""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.show_package_manifest', + side_effect=FileNotFoundError()), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.manifests.commands['show'], + ['nonexistent_manifest.json'], obj=package_manager) + + assert "Manifest file 'nonexistent_manifest.json' not found." 
in result.output + assert result.exit_code == 0 + + +def test_install_with_local_manifest(package_manager): + """Test case for 'install' command with use_local_manifest=True and name provided""" + + runner = CliRunner() + + with patch('os.path.exists', return_value=True), \ + patch('os.geteuid', return_value=0): + result = runner.invoke(main.install, + ['package_name', '--use-local-manifest', '-y'], + obj=package_manager) + + assert 'name argument is not provided to use local manifest' in result.output + assert result.exit_code == 0 + + +def test_install_with_nonexistent_manifest(package_manager): + """Test case for 'install' command with use_local_manifest=True and non-existent name provided""" + + runner = CliRunner() + + with patch('os.path.exists', return_value=False), \ + patch('os.geteuid', return_value=0): + result = runner.invoke( + main.install, + ['package_name', '--use-local-manifest', '--name', 'nonexistent_manifest', '-y'], + obj=package_manager) + + assert 'Local Manifest file for nonexistent_manifest does not exists to install' in result.output + assert result.exit_code == 0 + + +def test_update_command_exception(package_manager): + """Test case for 'update' command with an exception during package update""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.update', + side_effect=Exception("Custom error")), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.update, ['package_name'], obj=package_manager) + + assert 'Failed to update package package_name: Custom error' in result.output + + +def test_update_command_keyboard_interrupt(package_manager): + """Test case for 'update' command with a keyboard interrupt""" + + runner = CliRunner() + + with patch('sonic_package_manager.main.PackageManager.update', + side_effect=KeyboardInterrupt()), \ + patch('os.geteuid', return_value=0): + + result = runner.invoke(main.update, ['package_name'], obj=package_manager) + + assert 'Operation canceled by user' in result.output diff --git a/tests/sonic_package_manager/test_manager.py b/tests/sonic_package_manager/test_manager.py index 46ea3f6acb..26e838ce6d 100644 --- a/tests/sonic_package_manager/test_manager.py +++ b/tests/sonic_package_manager/test_manager.py @@ -1,13 +1,14 @@ #!/usr/bin/env python import re -from unittest.mock import Mock, call, patch - +import unittest +from unittest.mock import Mock, call, patch, mock_open import pytest import sonic_package_manager from sonic_package_manager.errors import * from sonic_package_manager.version import Version +import json @pytest.fixture(autouse=True) def mock_run_command(): @@ -323,7 +324,7 @@ def test_manager_installation_version_range(package_manager): package_manager.install(f'test-package>=1.6.0') -def test_manager_upgrade(package_manager, sonic_fs): +def test_manager_upgrade(package_manager, sonic_fs, mock_run_command): package_manager.install('test-package-6=1.5.0') package = package_manager.get_installed_package('test-package-6') @@ -332,6 +333,15 @@ def test_manager_upgrade(package_manager, sonic_fs): assert upgraded_package.entry.version == Version.parse('2.0.0') assert upgraded_package.entry.default_reference == package.entry.default_reference + mock_run_command.assert_has_calls( + [ + call(['systemctl', 'stop', 'test-package-6']), + call(['systemctl', 'disable', 'test-package-6']), + call(['systemctl', 'enable', 'test-package-6']), + call(['systemctl', 'start', 'test-package-6']), + ] + ) + def test_manager_package_reset(package_manager, sonic_fs): 
package_manager.install('test-package-6=1.5.0') @@ -352,10 +362,10 @@ def test_manager_migration(package_manager, fake_db_for_migration): call('test-package-3=1.6.0'), # test-package-4 was not present in DB at all, but it is present and installed in # fake_db_for_migration, thus asserting that it is going to be installed. - call('test-package-4=1.5.0'), + call(None, 'Azure/docker-test-4:1.5.0', name='test-package-4'), # test-package-5 1.5.0 was installed in fake_db_for_migration but the default # in current db is 1.9.0, assert that migration will install the newer version. - call('test-package-5=1.9.0'), + call(None, 'Azure/docker-test-5:1.9.0', name='test-package-5'), # test-package-6 2.0.0 was installed in fake_db_for_migration but the default # in current db is 1.5.0, assert that migration will install the newer version. call('test-package-6=2.0.0')], @@ -369,7 +379,7 @@ def __init__(self, dockerd_sock): class Image: def __init__(self, image_id): self.image_id = image_id - + def save(self, named): return ["named: {}".format(named).encode()] @@ -389,3 +399,204 @@ def test_manager_migration_dockerd(package_manager, fake_db_for_migration, mock_ package_manager.migrate_packages(fake_db_for_migration, '/var/run/docker.sock') package_manager.get_docker_client.assert_has_calls([ call('/var/run/docker.sock')], any_order=True) + + +def test_create_package_manifest_default_manifest(package_manager): + """Test case for creating a default manifest.""" + + with patch('os.path.exists', return_value=False), \ + patch('os.mkdir'), \ + patch('builtins.open', new_callable=mock_open()), \ + patch('click.echo') as mock_echo: + + package_manager.create_package_manifest("default_manifest", from_json=None) + + mock_echo.assert_called_once_with("Default Manifest creation is not allowed by user") + + +def test_create_package_manifest_existing_package(package_manager): + """Test case for creating a manifest with an existing package.""" + + with patch('os.path.exists', side_effect=[False, True]), \ + patch('sonic_package_manager.main.PackageManager.is_installed', return_value=True), \ + patch('click.echo') as mock_echo: + + package_manager.create_package_manifest("test-package", from_json=None) + + mock_echo.assert_called_once_with("Error: A package with the same name test-package is already installed") + + +def test_create_package_manifest_existing_manifest(package_manager): + """Test case for creating a manifest with an existing manifest file.""" + + with patch('os.path.exists', return_value=True), \ + patch('click.echo') as mock_echo: + + package_manager.create_package_manifest("test-manifest", from_json=None) + + mock_echo.assert_called_once_with("Error: Manifest file 'test-manifest' already exists.") + + +def test_manifests_create_command(package_manager): + with patch('click.echo') as mock_echo, \ + patch('os.path.exists') as mock_exists, \ + patch('os.mkdir'), \ + patch('builtins.open', new_callable=mock_open()), \ + patch('json.dump'), \ + patch('json.load') as mock_json_load, \ + patch('sonic_package_manager.manifest.Manifest.marshal') as mock_marshal, \ + patch('sonic_package_manager.manager.PackageManager.is_installed') as mock_is_installed, \ + patch('sonic_package_manager.manager.PackageManager.download_file') as mock_download_file: + + dummy_json = {"package": {"name": "test", "version": "1.0.0"}, "service": {"name": "test"}} + # Setup mocks + mock_exists.return_value = False + mock_is_installed.return_value = False + mock_download_file.return_value = True + mock_marshal.return_value = None + 
mock_json_load.return_value = dummy_json + + # Run the function + package_manager.create_package_manifest("test_manifest", dummy_json) + + # Assertions + mock_echo.assert_called_with("Manifest 'test_manifest' created successfully.") + + +def test_manifests_update_command(package_manager): + with patch('click.echo') as mock_echo, \ + patch('os.path.exists') as mock_exists, \ + patch('os.mkdir'), \ + patch('builtins.open', new_callable=unittest.mock.mock_open), \ + patch('json.dump'), \ + patch('json.load') as mock_json_load, \ + patch('sonic_package_manager.manifest.Manifest.marshal') as mock_marshal, \ + patch('sonic_package_manager.manager.PackageManager.is_installed') as mock_is_installed, \ + patch('sonic_package_manager.manager.PackageManager.download_file') as mock_download_file: + + dummy_json = {"package": {"name": "test", "version": "2.0.0"}, "service": {"name": "test"}} + # Setup mocks + mock_exists.return_value = True + mock_is_installed.return_value = True + mock_download_file.return_value = True + mock_marshal.return_value = None + mock_json_load.return_value = dummy_json + + # Run the function + package_manager.update_package_manifest("test_manifest", "dummy_json") + + # Assertions + mock_echo.assert_called_with("Manifest 'test_manifest' updated successfully.") + + +def test_delete_package_manifest(package_manager): + with patch('click.echo') as mock_echo, \ + patch('click.prompt') as mock_prompt, \ + patch('os.path.exists') as mock_exists, \ + patch('os.remove'): + + # Test case 1: deleting default manifest + package_manager.delete_package_manifest("default_manifest") + mock_echo.assert_called_with("Default Manifest deletion is not allowed") + mock_echo.reset_mock() # Reset the mock for the next test case + + # Test case 2: manifest file doesn't exist + mock_exists.return_value = True + mock_exists.side_effect = lambda x: False if x.endswith("test_manifest") else True + package_manager.delete_package_manifest("test_manifest") + mock_echo.assert_called_with("Error: Manifest file 'test_manifest' not found.") + mock_echo.reset_mock() + + # Test case 3: user confirms deletion + mock_exists.side_effect = lambda x: True if x.endswith("test_manifest") else False + mock_prompt.return_value = "y" + package_manager.delete_package_manifest("test_manifest") + mock_echo.assert_called_with("Manifest 'test_manifest' deleted successfully.") + mock_echo.reset_mock() + + # Test case 4: user cancels deletion + mock_prompt.return_value = "n" + package_manager.delete_package_manifest("test_manifest") + mock_echo.assert_called_with("Deletion cancelled.") + mock_echo.reset_mock() + + +def test_show_package_manifest(package_manager): + with patch('click.echo') as mock_echo, \ + patch('os.path.exists') as mock_exists, \ + patch('builtins.open', unittest.mock.mock_open()), \ + patch('json.load') as mock_json_load: + + mock_exists.return_value = True + mock_exists.side_effect = lambda x: True if x.endswith("test_manifest") else False + + dummy_json = {"package": {"name": "test", "version": "2.0.0"}, "service": {"name": "test"}} + mock_json_load.return_value = dummy_json + + package_manager.show_package_manifest("test_manifest") + mock_echo.assert_called_with(json.dumps(dummy_json, indent=4)) + + +def test_list_package_manifest(package_manager): + with patch('click.echo') as mock_echo, \ + patch('os.path.exists') as mock_exists, \ + patch('os.listdir') as mock_listdir: + + # Test case 1: no custom local manifest files found + mock_exists.return_value = True + mock_listdir.return_value = [] + 
package_manager.list_package_manifest() + mock_echo.assert_called_with("No custom local manifest files found.") + + # Test case 2: custom local manifest files found + mock_listdir.return_value = ["manifest1.json", "manifest2.json"] + package_manager.list_package_manifest() + mock_echo.assert_any_call("Custom Local Manifest files:") + mock_echo.assert_any_call("- manifest1.json") + mock_echo.assert_any_call("- manifest2.json") + + +def test_download_file_http(package_manager): + fake_remote_url = "http://www.example.com/index.html" + fake_local_path = "local_path" + with patch("requests.get") as mock_requests_get: + with patch("builtins.open", mock_open()) as mock_file: + package_manager.download_file(fake_remote_url, fake_local_path) + mock_requests_get.assert_called_once_with(fake_remote_url, stream=True) + mock_file.assert_called_once_with("local_path", "wb") + + +def test_download_file_scp(package_manager): + fake_remote_url = "scp://admin@10.x.x.x:/home/admin/sec_update.json" + fake_local_path = "local_path" + + with patch("paramiko.SSHClient") as mock_ssh_client: + with patch("scp.SCPClient"): + with patch("getpass.getpass", return_value="test_password"): + package_manager.download_file(fake_remote_url, fake_local_path) + + mock_ssh_client.assert_called_once() + mock_ssh_client.return_value.set_missing_host_key_policy.assert_called_once() + mock_ssh_client.return_value.connect.assert_called_once_with( + "10.x.x.x", + username="admin", + password="test_password" + ) + + +def test_download_file_sftp(package_manager): + fake_remote_url = "sftp://admin@10.x.x.x:/home/admin/sec_update.json" + fake_local_path = "local_path" + + with patch("paramiko.SSHClient") as mock_ssh_client: + with patch("paramiko.SFTPClient.from_transport"): + with patch("getpass.getpass", return_value="test_password"): + package_manager.download_file(fake_remote_url, fake_local_path) + + mock_ssh_client.assert_called_once() + mock_ssh_client.return_value.set_missing_host_key_policy.assert_called_once() + mock_ssh_client.return_value.connect.assert_called_once_with( + "10.x.x.x", + username="admin", + password="test_password" + ) diff --git a/tests/sonic_package_manager/test_manifest.py b/tests/sonic_package_manager/test_manifest.py index 009895991a..5eaa2f6053 100644 --- a/tests/sonic_package_manager/test_manifest.py +++ b/tests/sonic_package_manager/test_manifest.py @@ -1,9 +1,11 @@ #!/usr/bin/env python import pytest +import json +from unittest.mock import patch, mock_open from sonic_package_manager.constraint import ComponentConstraints -from sonic_package_manager.manifest import Manifest, ManifestError +from sonic_package_manager.manifest import Manifest, ManifestError, MANIFESTS_LOCATION def test_manifest_v1_defaults(): @@ -85,3 +87,33 @@ def test_manifest_v1_unmarshal(): for key, section in manifest_json_input.items(): for field, value in section.items(): assert manifest_json[key][field] == value + + +@patch("sonic_package_manager.manifest.open", new_callable=mock_open) +def test_get_manifest_from_local_file_existing_manifest(mock_open, sonic_fs): + # Create a mock manifest file + manifest_name = "test_manifest.json" + manifest_content = {"package": {"name": "test_package", "version": "1.0.0"}, + "service": {"name": "test_service"}} + mock_open.return_value.__enter__.return_value.read.return_value = json.dumps(manifest_content) + sonic_fs.create_dir(MANIFESTS_LOCATION) + + # Call the function + desired_dict = Manifest.get_manifest_from_local_file(manifest_name) + + exp_manifest_content = {"package": {"name": 
"test_manifest.json", "version": "1.0.0"}, + "service": {"name": "test_manifest.json"}} + manifest_string = json.dumps(exp_manifest_content, indent=4) + desired_output = { + 'Tag': 'master', + 'com': { + 'azure': { + 'sonic': { + 'manifest': manifest_string + } + } + } + } + + # Check if the returned dictionary matches the expected structure + assert desired_dict == desired_output diff --git a/tests/sonic_package_manager/test_metadata.py b/tests/sonic_package_manager/test_metadata.py index 96f9bbc38d..f386836a83 100644 --- a/tests/sonic_package_manager/test_metadata.py +++ b/tests/sonic_package_manager/test_metadata.py @@ -2,13 +2,14 @@ import json import contextlib -from unittest.mock import Mock, MagicMock - +from unittest.mock import Mock, MagicMock, patch +import tempfile +import os import pytest from sonic_package_manager.database import PackageEntry from sonic_package_manager.errors import MetadataError -from sonic_package_manager.manifest import Manifest +from sonic_package_manager.manifest import MANIFESTS_LOCATION, DEFAULT_MANIFEST_FILE from sonic_package_manager.metadata import MetadataResolver from sonic_package_manager.version import Version @@ -87,3 +88,125 @@ def test_metadata_construction(manifest_str): }) assert metadata.yang_modules == ['TEST', 'TEST 2'] + +@pytest.fixture +def temp_manifest_dir(): + with tempfile.TemporaryDirectory() as temp_dir: + yield temp_dir + + +@pytest.fixture +def temp_tarball(temp_manifest_dir): + tarball_path = os.path.join(temp_manifest_dir, 'image.tar') + # Create an empty tarball file for testing + open(tarball_path, 'w').close() + yield tarball_path + + +def test_metadata_resolver_local_with_name_and_use_local_manifest(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Patching the get_manifest_from_local_file method to avoid FileNotFoundError + with patch('sonic_package_manager.manifest.Manifest.get_manifest_from_local_file') as mock_get_manifest: + # Setting the side_effect to None to simulate the absence of a manifest file + mock_get_manifest.side_effect = None + with contextlib.suppress(MetadataError): + metadata_resolver.from_local('image', use_local_manifest=True, name='test_manifest', use_edit=False) + + +def test_metadata_resolver_local_manifest_file_not_exist(mock_registry_resolver, mock_docker_api, temp_manifest_dir): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Patching the get_manifest_from_local_file method to avoid FileNotFoundError + with patch('sonic_package_manager.manifest.Manifest.get_manifest_from_local_file') as mock_get_manifest: + # Setting the side_effect to None to simulate the absence of a manifest file + mock_get_manifest.side_effect = None + with pytest.raises(MetadataError): + metadata_resolver.from_local('image', use_local_manifest=True, name='test_manifest', use_edit=False) + + +def test_metadata_resolver_tarball_with_use_local_manifest(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Patching the get_manifest_from_local_file method to avoid FileNotFoundError + with patch('sonic_package_manager.manifest.Manifest.get_manifest_from_local_file') as mock_get_manifest: + # Setting the side_effect to None to simulate the absence of a manifest file + mock_get_manifest.side_effect = None + with pytest.raises(MetadataError): + metadata_resolver.from_tarball('image.tar', 
use_local_manifest=True, name='test_manifest') + + +def test_metadata_resolver_no_name_and_no_metadata_in_labels_for_remote(mock_registry_resolver, mock_docker_api): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Mocking the registry resolver's get_registry_for method to return a MagicMock + mock_registry_resolver.get_registry_for = MagicMock(return_value=Mock()) + with pytest.raises(TypeError): + metadata_resolver.from_registry('test-repository', '1.2.0') + + +def test_metadata_resolver_tarball_with_use_local_manifest_true(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + # Patching the get_manifest_from_local_file method to avoid FileNotFoundError + with patch('sonic_package_manager.manifest.Manifest.get_manifest_from_local_file') as mock_get_manifest: + # Setting the side_effect to None to simulate the absence of a manifest file + mock_get_manifest.side_effect = None + with pytest.raises(MetadataError): + metadata_resolver.from_tarball('image.tar', use_local_manifest=True) + + +def test_metadata_resolver_no_metadata_in_labels_for_tarball(mock_registry_resolver, mock_docker_api): + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + with pytest.raises(FileNotFoundError): + metadata_resolver.from_tarball('image.tar') + + +def test_metadata_resolver_local_with_name_and_use_edit(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir, + sonic_fs): + with patch('builtins.open') as mock_open, \ + patch('json.loads') as mock_json_loads: + sonic_fs.create_dir(MANIFESTS_LOCATION) # Create the directory using sonic_fs fixture + mock_open.side_effect = FileNotFoundError # Simulate FileNotFoundError when opening the manifest file + mock_json_loads.side_effect = ValueError # Simulate ValueError when parsing JSON + + # Create the default manifest file + sonic_fs.create_file(DEFAULT_MANIFEST_FILE) + sonic_fs.create_file(os.path.join(MANIFESTS_LOCATION, "test_manifest.edit")) + + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + with pytest.raises(FileNotFoundError): + metadata_resolver.from_local('image', + use_local_manifest=True, + name='test_manifest', + use_edit=True) + + mock_open.assert_called_with(os.path.join(MANIFESTS_LOCATION, 'test_manifest.edit'), 'r') + mock_json_loads.assert_not_called() # Ensure json.loads is not called + + +def test_metadata_resolver_local_with_name_and_default_manifest(mock_registry_resolver, + mock_docker_api, + temp_manifest_dir, + sonic_fs): + with patch('builtins.open') as mock_open, \ + patch('json.loads') as mock_json_loads: + sonic_fs.create_dir(MANIFESTS_LOCATION) # Create the directory using sonic_fs fixture + mock_open.side_effect = FileNotFoundError # Simulate FileNotFoundError when opening the manifest file + mock_json_loads.side_effect = ValueError # Simulate ValueError when parsing JSON + + # Create the default manifest file + sonic_fs.create_file(DEFAULT_MANIFEST_FILE) + + metadata_resolver = MetadataResolver(mock_docker_api, mock_registry_resolver) + with pytest.raises(FileNotFoundError): + metadata_resolver.from_local('image', + use_local_manifest=False, + name='test_manifest', + use_edit=True) + + mock_open.assert_called_with(DEFAULT_MANIFEST_FILE, 'r') + mock_json_loads.assert_not_called() # Ensure json.loads is not called diff --git a/tests/sonic_package_manager/test_service_creator.py b/tests/sonic_package_manager/test_service_creator.py index 
8e6edcd0f0..319dcf32ff 100644 --- a/tests/sonic_package_manager/test_service_creator.py +++ b/tests/sonic_package_manager/test_service_creator.py @@ -12,6 +12,7 @@ from sonic_package_manager.metadata import Metadata from sonic_package_manager.package import Package from sonic_package_manager.service_creator.creator import * +from sonic_package_manager.service_creator.creator import ETC_SYSTEMD_LOCATION from sonic_package_manager.service_creator.feature import FeatureRegistry @@ -106,6 +107,14 @@ def test_service_creator(sonic_fs, manifest, service_creator, package_manager): assert sonic_fs.exists(os.path.join(SERVICE_MGMT_SCRIPT_LOCATION, 'test.sh')) assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.service')) + # Create the symlinks and directories that featured creates + os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test.service')) + os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service')) + os.symlink('/dev/null', os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service')) + os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service.d')) + os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service.d')) + os.mkdir(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d')) + def read_file(name): with open(os.path.join(ETC_SONIC_PATH, name)) as file: return file.read() @@ -118,19 +127,14 @@ def read_file(name): assert generated_services_conf_content.endswith('\n') assert set(generated_services_conf_content.split()) == set(['test.service', 'test@.service']) + service_creator.remove(package) -def test_service_creator_with_timer_unit(sonic_fs, manifest, service_creator): - entry = PackageEntry('test', 'azure/sonic-test') - package = Package(entry, Metadata(manifest)) - service_creator.create(package) - - assert not sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) - - manifest['service']['delayed'] = True - package = Package(entry, Metadata(manifest)) - service_creator.create(package) - - assert sonic_fs.exists(os.path.join(SYSTEMD_LOCATION, 'test.timer')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test.service.d')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@1.service.d')) + assert not sonic_fs.exists(os.path.join(ETC_SYSTEMD_LOCATION, 'test@2.service.d')) def test_service_creator_with_debug_dump(sonic_fs, manifest, service_creator): @@ -378,27 +382,6 @@ def test_feature_update(mock_sonic_db, manifest): ], any_order=True) -def test_feature_registration_with_timer(mock_sonic_db, manifest): - manifest['service']['delayed'] = True - mock_connector = Mock() - mock_connector.get_entry = Mock(return_value={}) - mock_sonic_db.get_connectors = Mock(return_value=[mock_connector]) - mock_sonic_db.get_initial_db_connector = Mock(return_value=mock_connector) - feature_registry = FeatureRegistry(mock_sonic_db) - feature_registry.register(manifest) - mock_connector.set_entry.assert_called_with('FEATURE', 'test', { - 'state': 'disabled', - 'auto_restart': 'enabled', - 'high_mem_alert': 'disabled', - 'set_owner': 'local', - 'has_per_asic_scope': 'False', - 'has_global_scope': 'True', - 'delayed': 'True', - 'check_up_status': 'False', - 'support_syslog_rate_limit': 'False', - }) - - def test_feature_registration_with_non_default_owner(mock_sonic_db, manifest): mock_connector = 
Mock() mock_connector.get_entry = Mock(return_value={}) diff --git a/tests/ssdutil_test.py b/tests/ssdutil_test.py new file mode 100644 index 0000000000..dc27526ea7 --- /dev/null +++ b/tests/ssdutil_test.py @@ -0,0 +1,80 @@ +import os +import sys +import argparse +from unittest.mock import patch, MagicMock +import sonic_platform_base # noqa: F401 + +tests_path = os.path.dirname(os.path.abspath(__file__)) + +# Add mocked_libs path so that the file under test +# can load mocked modules from there +mocked_libs_path = os.path.join(tests_path, "mocked_libs") # noqa: E402,F401 +sys.path.insert(0, mocked_libs_path) + +from .mocked_libs import psutil # noqa: E402,F401 +from .mocked_libs.blkinfo import BlkDiskInfo # noqa: E402,F401 + +sys.modules['os.stat'] = MagicMock() +sys.modules['os.major'] = MagicMock(return_value=8) +sys.modules['sonic_platform'] = MagicMock() +sys.modules['sonic_platform_base.sonic_ssd.ssd_generic'] = MagicMock() + +import ssdutil.main as ssdutil # noqa: E402 + + +class Ssd(): + + def get_model(self): + return 'SkyNet' + + def get_firmware(self): + return 'ABC' + + def get_serial(self): + return 'T1000' + + def get_health(self): + return 5 + + def get_temperature(self): + return 3000 + + def get_vendor_output(self): + return 'SONiC Test' + + +class TestSsdutil: + + @patch('os.geteuid', MagicMock(return_value=0)) + @patch('os.stat', MagicMock(st_rdev=2049)) + @patch('os.major', MagicMock(return_value=8)) + def test_get_default_disk(self): + (default_device, disk_type) = ssdutil.get_default_disk() + + assert default_device == "/dev/sdx" + assert disk_type == 'usb' + + @patch('os.geteuid', MagicMock(return_value=0)) + @patch('os.stat', MagicMock(st_rdev=2049)) + @patch('os.major', MagicMock(return_value=8)) + @patch('psutil.disk_partitions', MagicMock(return_value=None)) + def test_get_default_disk_none_partitions(self): + (default_device, disk_type) = ssdutil.get_default_disk() + + assert default_device == "/dev/sda" + assert disk_type is None + + def test_is_number_valueerror(self): + outcome = ssdutil.is_number("nope") + assert outcome is False + + @patch('sonic_py_common.device_info.get_paths_to_platform_and_hwsku_dirs', MagicMock(return_value=("test_path", ""))) # noqa: E501 + @patch('os.geteuid', MagicMock(return_value=0)) + @patch('os.stat', MagicMock(st_rdev=2049)) + @patch('os.major', MagicMock(return_value=8)) + def test_sonic_storage_path(self): + + with patch('argparse.ArgumentParser.parse_args', MagicMock()) as mock_args: # noqa: E501 + sys.modules['sonic_platform_base.sonic_storage.ssd'] = MagicMock(return_value=Ssd()) # noqa: E501 + mock_args.return_value = argparse.Namespace(device='/dev/sda', verbose=True, vendor=True) # noqa: E501 + ssdutil.ssdutil() diff --git a/tests/stp_test.py b/tests/stp_test.py new file mode 100644 index 0000000000..44a93065cc --- /dev/null +++ b/tests/stp_test.py @@ -0,0 +1,414 @@ +import os +import re +import pytest +from click.testing import CliRunner + +import config.main as config +import show.main as show +from utilities_common.db import Db +from .mock_tables import dbconnector + + +EXPECTED_SHOW_SPANNING_TREE_OUTPUT = """\ +Spanning-tree Mode: PVST + +VLAN 500 - STP instance 0 +-------------------------------------------------------------------- +STP Bridge Parameters: +Bridge Bridge Bridge Bridge Hold LastTopology Topology +Identifier MaxAge Hello FwdDly Time Change Change +hex sec sec sec sec sec cnt +8064b86a97e24e9c 20 2 15 1 0 1 + +RootBridge RootPath DesignatedBridge RootPort Max Hel Fwd +Identifier Cost Identifier Age lo 
Dly +hex hex sec sec sec +0064b86a97e24e9c 600 806480a235f281ec Root 20 2 15 + +STP Port Parameters: +Port Prio Path Port Uplink State Designated Designated Designated +Name rity Cost Fast Fast Cost Root Bridge +Ethernet4 128 200 N N FORWARDING 400 0064b86a97e24e9c 806480a235f281ec +""" + +EXPECTED_SHOW_SPANNING_TREE_VLAN_OUTPUT = """\ + +VLAN 500 - STP instance 0 +-------------------------------------------------------------------- +STP Bridge Parameters: +Bridge Bridge Bridge Bridge Hold LastTopology Topology +Identifier MaxAge Hello FwdDly Time Change Change +hex sec sec sec sec sec cnt +8064b86a97e24e9c 20 2 15 1 0 1 + +RootBridge RootPath DesignatedBridge RootPort Max Hel Fwd +Identifier Cost Identifier Age lo Dly +hex hex sec sec sec +0064b86a97e24e9c 600 806480a235f281ec Root 20 2 15 + +STP Port Parameters: +Port Prio Path Port Uplink State Designated Designated Designated +Name rity Cost Fast Fast Cost Root Bridge +Ethernet4 128 200 N N FORWARDING 400 0064b86a97e24e9c 806480a235f281ec +""" + +EXPECTED_SHOW_SPANNING_TREE_STATISTICS_OUTPUT = """\ +VLAN 500 - STP instance 0 +-------------------------------------------------------------------- +PortNum BPDU Tx BPDU Rx TCN Tx TCN Rx +Ethernet4 10 15 15 5 +""" + +EXPECTED_SHOW_SPANNING_TREE_BPDU_GUARD_OUTPUT = """\ +PortNum Shutdown Port Shut + Configured due to BPDU guard +------------------------------------------- +Ethernet4 No NA +""" + +EXPECTED_SHOW_SPANNING_TREE_ROOT_GUARD_OUTPUT = """\ +Root guard timeout: 30 secs + +Port VLAN Current State +------------------------------------------- +Ethernet4 500 Consistent state +""" + + +class TestStp(object): + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + print("SETUP") + + # Fixture for initializing the CliRunner + @pytest.fixture(scope="module") + def runner(self): + return CliRunner() + + # Fixture for initializing the Db + @pytest.fixture(scope="module") + def db(self): + return Db() + + def test_show_spanning_tree(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert (re.sub(r'\s+', ' ', result.output.strip())) == (re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_OUTPUT.strip())) + + def test_show_spanning_tree_vlan(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"].commands["vlan"], ["500"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_VLAN_OUTPUT.strip()) + + def test_show_spanning_tree_statistics(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"].commands["statistics"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_STATISTICS_OUTPUT.strip()) + + def test_show_spanning_tree_statistics_vlan(self, runner, db): + result = runner.invoke( + show.cli.commands["spanning-tree"].commands["statistics"].commands["vlan"], ["500"], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_STATISTICS_OUTPUT.strip()) + + def test_show_spanning_tree_bpdu_guard(self, runner, db): + result = 
runner.invoke(show.cli.commands["spanning-tree"].commands["bpdu_guard"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_BPDU_GUARD_OUTPUT.strip()) + + def test_show_spanning_tree_root_guard(self, runner, db): + result = runner.invoke(show.cli.commands["spanning-tree"].commands["root_guard"], [], obj=db) + print(result.exit_code) + print(result.output) + assert result.exit_code == 0 + assert re.sub(r'\s+', ' ', result.output.strip()) == re.sub( + r'\s+', ' ', EXPECTED_SHOW_SPANNING_TREE_ROOT_GUARD_OUTPUT.strip()) + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + # Disable PVST + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + # Enable PVST + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + # Add VLAN and member + (config.config.commands["vlan"].commands["add"], ["500"], 0, None), + (config.config.commands["vlan"].commands["member"].commands["add"], ["500", "Ethernet4"], 0, None), + # Attempt to enable PVST when it is already enabled + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 2, "PVST is already configured") + ]) + def test_disable_enable_global_pvst(self, runner, db, command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if an expected output is defined + if expected_output: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + # Disable pvst + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + # Attempt enabling STP interface without global STP enabled + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], + ["Ethernet4"], 2, "Global STP is not enabled"), + # Enable pvst + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + # Configure interface priority and cost + (config.config.commands["spanning-tree"].commands["interface"].commands["priority"], + ["Ethernet4", "16"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["cost"], + ["Ethernet4", "500"], 0, None), + # Disable and enable interface spanning tree + (config.config.commands["spanning-tree"].commands["interface"].commands["disable"], ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet4"], 0, None), + # Configure portfast disable and enable + (config.config.commands["spanning-tree"].commands["interface"].commands["portfast"].commands["disable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["portfast"].commands["enable"], + ["Ethernet4"], 0, None), + # Configure uplink fast disable and enable + (config.config.commands["spanning-tree"].commands["interface"].commands["uplink_fast"].commands["disable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["uplink_fast"].commands["enable"], + ["Ethernet4"], 0, None), + # Configure BPDU guard enable and disable with shutdown + 
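# Editorial note (added comment): each tuple in this table is (command, args, + # expected_exit_code, expected_output); a None expected_output means only the + # exit code is asserted, not the message text. + 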
(config.config.commands["spanning-tree"].commands["interface"].commands["bpdu_guard"].commands["enable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["bpdu_guard"].commands["disable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["bpdu_guard"].commands["enable"], + ["Ethernet4", "--shutdown"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["bpdu_guard"].commands["disable"], + ["Ethernet4"], 0, None), + # Configure root guard enable and disable + (config.config.commands["spanning-tree"].commands["interface"].commands["root_guard"].commands["enable"], + ["Ethernet4"], 0, None), + (config.config.commands["spanning-tree"].commands["interface"].commands["root_guard"].commands["disable"], + ["Ethernet4"], 0, None), + # Invalid cost and priority values + (config.config.commands["spanning-tree"].commands["interface"].commands["cost"], ["Ethernet4", "0"], + 2, "STP interface path cost must be in range 1-200000000"), + (config.config.commands["spanning-tree"].commands["interface"].commands["cost"], ["Ethernet4", "2000000000"], + 2, "STP interface path cost must be in range 1-200000000"), + (config.config.commands["spanning-tree"].commands["interface"].commands["priority"], ["Ethernet4", "1000"], + 2, "STP interface priority must be in range 0-240"), + # Attempt to enable STP on interface with various conflicts + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet4"], + 2, "STP is already enabled for"), + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet0"], + 2, "has ip address"), + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet120"], + 2, "is a portchannel member port"), + (config.config.commands["spanning-tree"].commands["interface"].commands["enable"], ["Ethernet20"], + 2, "has no VLAN configured") + ]) + def test_stp_validate_interface_params(self, runner, db, command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if an expected output is defined + if expected_output: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["cost"], + ["500", "Ethernet4", "200"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["priority"], + ["500", "Ethernet4", "32"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["cost"], + ["500", "Ethernet4", "0"], 2, "STP interface path cost must be in range 1-200000000"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["cost"], + ["500", "Ethernet4", "2000000000"], 2, "STP interface path cost must be in range 1-200000000"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["priority"], + ["500", "Ethernet4", "1000"], 2, "STP per vlan port priority 
must be in range 0-240"), + (config.config.commands["vlan"].commands["add"], ["99"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["interface"].commands["priority"], + ["99", "Ethernet4", "16"], 2, "is not member of"), + (config.config.commands["vlan"].commands["del"], ["99"], 0, None), + (config.config.commands["vlan"].commands["member"].commands["del"], ["500", "Ethernet4"], 0, None), + (config.config.commands["vlan"].commands["del"], ["500"], 0, None) + ]) + def test_stp_validate_vlan_interface_params(self, runner, db, command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + # Output result information + print(result.exit_code) + print(result.output) + + # Check exit code + assert result.exit_code == expected_exit_code + + # If an expected output is defined, check that as well + if expected_output is not None: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + # Add VLAN and member + (config.config.commands["vlan"].commands["add"], ["500"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["hello"], ["500", "3"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["max_age"], ["500", "21"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["forward_delay"], ["500", "16"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["priority"], ["500", "4096"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["hello"], ["500", "0"], + 2, "STP hello timer must be in range 1-10"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["hello"], ["500", "20"], + 2, "STP hello timer must be in range 1-10"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["forward_delay"], ["500", "2"], + 2, "STP forward delay value must be in range 4-30"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["forward_delay"], ["500", "42"], + 2, "STP forward delay value must be in range 4-30"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["max_age"], ["500", "4"], + 2, "STP max age value must be in range 6-40"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["max_age"], ["500", "45"], + 2, "STP max age value must be in range 6-40"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["forward_delay"], ["500", "4"], + 2, "2*(forward_delay-1) >= max_age >= 2*(hello_time +1 )"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["priority"], ["500", "65536"], + 2, "STP bridge priority must be in range 0-61440"), + (config.config.commands["spanning-tree"].commands["vlan"].commands["priority"], ["500", "8000"], + 2, "STP bridge priority must be multiple of 4096"), + (config.config.commands["vlan"].commands["del"], ["500"], 0, None) + ]) + def test_stp_validate_vlan_timer_and_priority_params(self, runner, db, + command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if 
there's an expected output + if expected_output: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + # Disable PVST globally + (config.config.commands["spanning-tree"].commands["disable"], ["pvst"], 0, None), + # Add VLAN 500 and assign a member port + (config.config.commands["vlan"].commands["add"], ["500"], 0, None), + (config.config.commands["vlan"].commands["member"].commands["add"], ["500", "Ethernet4"], 0, None), + # Enable PVST globally + (config.config.commands["spanning-tree"].commands["enable"], ["pvst"], 0, None), + # Add VLAN 600 + (config.config.commands["vlan"].commands["add"], ["600"], 0, None), + # Disable and then enable spanning-tree on VLAN 600 + (config.config.commands["spanning-tree"].commands["vlan"].commands["disable"], ["600"], 0, None), + (config.config.commands["spanning-tree"].commands["vlan"].commands["enable"], ["600"], 0, None), + # Attempt to delete VLAN 600 while STP is enabled + (config.config.commands["vlan"].commands["del"], ["600"], 0, None), + # Enable STP on non-existing VLAN 1010 + (config.config.commands["spanning-tree"].commands["vlan"].commands["enable"], ["1010"], 2, "doesn't exist"), + # Disable STP on non-existing VLAN 1010 + (config.config.commands["spanning-tree"].commands["vlan"].commands["disable"], ["1010"], 2, "doesn't exist"), + ]) + def test_add_vlan_enable_pvst(self, runner, db, command, args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if an expected output is defined + if expected_output: + assert expected_output in result.output + + @pytest.mark.parametrize("command, args, expected_exit_code, expected_output", [ + # Valid cases + (config.config.commands["spanning-tree"].commands["hello"], ["3"], 0, None), + (config.config.commands["spanning-tree"].commands["forward_delay"], ["16"], 0, None), + (config.config.commands["spanning-tree"].commands["max_age"], ["22"], 0, None), + (config.config.commands["spanning-tree"].commands["priority"], ["8192"], 0, None), + (config.config.commands["spanning-tree"].commands["root_guard_timeout"], ["500"], 0, None), + # Invalid hello timer values + (config.config.commands["spanning-tree"].commands["hello"], ["0"], 2, + "STP hello timer must be in range 1-10"), + (config.config.commands["spanning-tree"].commands["hello"], ["20"], 2, + "STP hello timer must be in range 1-10"), + # Invalid forward delay values + (config.config.commands["spanning-tree"].commands["forward_delay"], ["2"], 2, + "STP forward delay value must be in range 4-30"), + (config.config.commands["spanning-tree"].commands["forward_delay"], ["50"], 2, + "STP forward delay value must be in range 4-30"), + # Invalid max age values + (config.config.commands["spanning-tree"].commands["max_age"], ["5"], 2, + "STP max age value must be in range 6-40"), + (config.config.commands["spanning-tree"].commands["max_age"], ["45"], 2, + "STP max age value must be in range 6-40"), + # Consistency check for forward delay and max age + (config.config.commands["spanning-tree"].commands["forward_delay"], ["4"], 2, + "2*(forward_delay-1) >= max_age >= 2*(hello_time +1 )"), + # Invalid root guard timeout values + (config.config.commands["spanning-tree"].commands["root_guard_timeout"], ["4"], 2, + "STP root guard timeout must be in range 5-600"), + 
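# Editorial note (added comment): both sides of the 5-600 window are probed; + # the CLI validator presumably reduces to the sketch: + #     if not (5 <= timeout <= 600): fail("STP root guard timeout must be in range 5-600") + 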
(config.config.commands["spanning-tree"].commands["root_guard_timeout"], ["700"], 2, + "STP root guard timeout must be in range 5-600"), + # Invalid priority values + (config.config.commands["spanning-tree"].commands["priority"], ["65536"], 2, + "STP bridge priority must be in range 0-61440"), + (config.config.commands["spanning-tree"].commands["priority"], ["8000"], 2, + "STP bridge priority must be multiple of 4096"), + (config.config.commands["vlan"].commands["member"].commands["del"], ["500", "Ethernet4"], 0, None), + (config.config.commands["vlan"].commands["del"], ["500"], 0, None) + ]) + def test_stp_validate_global_timer_and_priority_params(self, runner, db, command, + args, expected_exit_code, expected_output): + # Execute the command + result = runner.invoke(command, args, obj=db) + + # Print for debugging + print(result.exit_code) + print(result.output) + + # Check the exit code + assert result.exit_code == expected_exit_code + + # Check the output if an expected output is defined + if expected_output: + assert expected_output in result.output + + @classmethod + def teardown_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "0" + print("TEARDOWN") + dbconnector.load_namespace_config() + dbconnector.dedicated_dbs.clear() diff --git a/tests/suppress_pending_fib_test.py b/tests/suppress_pending_fib_test.py index 04064d306e..b4dcc7d4bc 100644 --- a/tests/suppress_pending_fib_test.py +++ b/tests/suppress_pending_fib_test.py @@ -14,7 +14,7 @@ def test_synchronous_mode(self): result = runner.invoke(config.config.commands['suppress-fib-pending'], ['enabled'], obj=db) print(result.output) assert result.exit_code == 0 - assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'enabled' + assert db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')['suppress-fib-pending'] == 'enabled' result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) assert result.exit_code == 0 @@ -23,7 +23,7 @@ def test_synchronous_mode(self): result = runner.invoke(config.config.commands['suppress-fib-pending'], ['disabled'], obj=db) print(result.output) assert result.exit_code == 0 - assert db.cfgdb.get_entry('DEVICE_METADATA' , 'localhost')['suppress-fib-pending'] == 'disabled' + assert db.cfgdb.get_entry('DEVICE_METADATA', 'localhost')['suppress-fib-pending'] == 'disabled' result = runner.invoke(show.cli.commands['suppress-fib-pending'], obj=db) assert result.exit_code == 0 diff --git a/tests/syslog_multi_asic_test.py b/tests/syslog_multi_asic_test.py index 7933edcd66..c1a136582c 100644 --- a/tests/syslog_multi_asic_test.py +++ b/tests/syslog_multi_asic_test.py @@ -279,3 +279,19 @@ def test_disable_syslog_rate_limit_feature(self, setup_cmd_module): ['database', '-n', 'asic0'] ) assert result.exit_code == 0 + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level(self, mock_run, setup_cmd_module): + _, config = setup_cmd_module + db = Db() + runner = CliRunner() + + mock_run.return_value = ('something', 0) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '-n', 'asic0'], obj=db + ) + assert result.exit_code == 0 + cfg_db = db.cfgdb_clients['asic0'] + data = cfg_db.get_entry('LOGGER', 'component') + assert data.get('LOGLEVEL') == 'DEBUG' diff --git a/tests/syslog_test.py b/tests/syslog_test.py index c1cbee1127..e77f6d0e6c 100644 --- a/tests/syslog_test.py +++ b/tests/syslog_test.py @@ -484,3 +484,73 @@ def side_effect(*args, **kwargs): 
config.config.commands["syslog"].commands["rate-limit-feature"].commands["disable"], obj=db ) assert result.exit_code == SUCCESS + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level(self, mock_run): + db = Db() + db.cfgdb.set_entry('LOGGER', 'log1', {'require_manual_refresh': 'true'}) + + runner = CliRunner() + + mock_run.return_value = ('something', 0) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG'], obj=db + ) + assert result.exit_code == SUCCESS + data = db.cfgdb.get_entry('LOGGER', 'component') + assert data.get('LOGLEVEL') == 'DEBUG' + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--pid', '123'], obj=db + ) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--container', 'pmon', '--pid', '123'], obj=db + ) + assert result.exit_code == SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'component', '-l', 'DEBUG', '--container', 'pmon', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code == SUCCESS + + @mock.patch('config.syslog.clicommon.run_command') + def test_config_log_level_negative(self, mock_run): + db = Db() + + runner = CliRunner() + + mock_run.return_value = ('something', 0) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'pmon'], obj=db + ) + assert result.exit_code != SUCCESS + + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code != SUCCESS + + mock_run.reset_mock() + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'swss', '--program', 'orchagent'], obj=db + ) + assert result.exit_code == SUCCESS + # Verify it does not send signal to orchagent if require_manual_refresh is not true + assert mock_run.call_count == 0 + + mock_run.return_value = ('something', -1) + db.cfgdb.set_entry('LOGGER', 'log1', {'require_manual_refresh': 'true'}) + result = runner.invoke( + config.config.commands["syslog"].commands["level"], + ['-i', 'log1', '-l', 'DEBUG', '--container', 'pmon', '--program', 'xcvrd'], obj=db + ) + assert result.exit_code != SUCCESS diff --git a/tests/test_sonic_installer.py b/tests/test_sonic_installer.py index 9e8438a7fc..66eb972fdf 100644 --- a/tests/test_sonic_installer.py +++ b/tests/test_sonic_installer.py @@ -86,6 +86,9 @@ def rootfs_path_mock(path): call(["sh", "-c", f"echo 'DOCKER_OPTS=\"$DOCKER_OPTS {' '.join(dockerd_opts)}\"' >> {mounted_image_folder}/etc/default/docker"]), # dockerd started with added options as host dockerd call(["chroot", mounted_image_folder, "/usr/lib/docker/docker.sh", "start"]), call(["cp", "/var/lib/sonic-package-manager/packages.json", f"{mounted_image_folder}/tmp/packages.json"]), + call(["mkdir", "-p", "/var/lib/sonic-package-manager/manifests"]), + call(["cp", "-arf", "/var/lib/sonic-package-manager/manifests", + f"{mounted_image_folder}/var/lib/sonic-package-manager"]), call(["touch", f"{mounted_image_folder}/tmp/docker.sock"]), call(["mount", "--bind", "/var/run/docker.sock", f"{mounted_image_folder}/tmp/docker.sock"]), call(["cp", f"{mounted_image_folder}/etc/resolv.conf", "/tmp/resolv.conf.backup"]), diff --git a/tests/vlan_test.py 
b/tests/vlan_test.py index 2d3c1dcf1b..fc3569b87d 100644 --- a/tests/vlan_test.py +++ b/tests/vlan_test.py @@ -1426,7 +1426,7 @@ def test_config_set_router_port_on_member_interface(self): ["Ethernet4", "10.10.10.1/24"], obj=obj) print(result.exit_code, result.output) assert result.exit_code == 0 - assert 'Interface Ethernet4 is in trunk mode and needs to be in routed mode!' in result.output + assert 'Interface Ethernet4 is a member of vlan\nAborting!\n' in result.output def test_config_vlan_add_member_of_portchannel(self): runner = CliRunner() diff --git a/tests/vnet_route_check_test.py b/tests/vnet_route_check_test.py index 092a89e2f9..10d97f21a3 100644 --- a/tests/vnet_route_check_test.py +++ b/tests/vnet_route_check_test.py @@ -341,7 +341,9 @@ def get(self, key): db_conns = {"APPL_DB": APPL_DB, "ASIC_DB": ASIC_DB, "COUNTERS_DB": CNTR_DB} -def conn_side_effect(arg, _): + + +def conn_side_effect(arg, _, __): return db_conns[arg] diff --git a/tests/vrrp_test.py b/tests/vrrp_test.py new file mode 100644 index 0000000000..bd33738fa5 --- /dev/null +++ b/tests/vrrp_test.py @@ -0,0 +1,1499 @@ +import os +from unittest import mock + +from click.testing import CliRunner + +import config.main as config +from utilities_common.db import Db +import utilities_common.bgp_util as bgp_util + + +class TestConfigVRRP(object): + _old_run_bgp_command = None + + @classmethod + def setup_class(cls): + os.environ['UTILITIES_UNIT_TESTING'] = "1" + cls._old_run_bgp_command = bgp_util.run_bgp_command + bgp_util.run_bgp_command = mock.MagicMock( + return_value=cls.mock_run_bgp_command()) + print("SETUP") + + ''' Tests for VRRPv4 and VRRPv6 ''' + + def mock_run_bgp_command(): + return "" + + def test_add_del_vrrp_instance_without_vip(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet63 9.9.9.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet63", "9.9.9.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '9.9.9.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp remove Ethernet63 7 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet63", "7"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet63 dose not configured the vrrp instance 7!" in result.output + assert result.exit_code != 0 + + # config int vrrp add Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + + # config int vrrp add Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet64 has already configured the vrrp instance 8!" 
in result.output + assert result.exit_code != 0 + + # config int vrrp add Ethernet63 7 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernet63", "7"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '7') in db.cfgdb.get_table('VRRP') + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int vrrp remove Ethernet63 7 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet63", "7"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '7') not in db.cfgdb.get_table('VRRP') + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernt64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["add"], + ["Ethernet2", "7"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernt64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet2", "7"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet63 9.9.9.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet63", "9.9.9.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet63', '9.9.9.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp6_instance_without_vip(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 100::64/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "100::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '100::64/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add 
Ethernet63 99::64/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet63", "99::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '99::64/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 add Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + + # config int vrrp6 add Ethernet63 7 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernet63", "7"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '7') in db.cfgdb.get_table('VRRP6') + + # config int vrrp6 add Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet64 has already configured the Vrrpv6 instance 8!" in result.output + assert result.exit_code != 0 + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernt64", "8"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["add"], + ["Ethernet2", "7"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp6 remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet64 100::64/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "100::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '100::64/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet63 99::64/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet63", "99::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet63', '99::64/64') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp_instance(self): + runner = CliRunner() + db = Db() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet63 9.9.9.1/24 + result = 
runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet63", "9.9.9.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '9.9.9.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet62 8.8.8.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet62", "8.8.8.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet62', '8.8.8.1/24') in db.cfgdb.get_table('INTERFACE') + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernt64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet2", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # config int vrrp ip add Ethernet64 8 10.10.10.16/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24', '10.10.10.16/24'] + + # config int vrrp ip add Ethernet62 7 8.8.8.16/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "8.8.8.16/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet62', '7') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet62', '7']['vip'] == ['8.8.8.16/24'] + + # config int vrrp ip add Ethernet62 7 8.8.8.16/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "8.8.8.16/24"], obj=obj) + print(result.exit_code, result.output) + assert "8.8.8.16/24 has already configured" in result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet62 7 0.0.0.0 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "0.0.0.0"], obj=obj) + print(result.exit_code, result.output) + assert "IPv4 address 0.0.0.0/32 is Zero" in result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet62 7 777.256.1.1/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "777.256.1.1/24"], obj=obj) + print(result.exit_code, result.output) + assert "IP address 777.256.1.1/24 is not valid" in 
result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet62 7 224.0.0.41/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "224.0.0.41/24"], obj=obj) + print(result.exit_code, result.output) + assert "IP address 224.0.0.41/24 is multicast" in result.output + assert result.exit_code != 0 + + # config int vrrp ip add Ethernet62 7 6.6.6.6 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet62", "7", "6.6.6.6"], obj=obj) + print(result.exit_code, result.output) + assert "IP address 6.6.6.6 is missing a mask." in result.output + assert result.exit_code != 0 + + # config int vrrp ip remove Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.16/24'] + + # config int vrrp ip remove Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert "10.10.10.8/24 is not configured on the vrrp instance" in result.output + assert result.exit_code != 0 + + # config int vrrp ip remove Ethernet64 8 10.10.10.888/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet64", "8", "10.10.10.888/24"], obj=obj) + print(result.exit_code, result.output) + assert "IP address is not valid:" in result.output + assert result.exit_code != 0 + + # config int vrrp ip remove Ethernet64 8 10.10.10.16/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet64", "8", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == [''] + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernt64", "8", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet2", "8", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp remove Ethernet63 9 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["remove"], + ["Ethernet63", "9", "10.10.10.16/24"], obj=obj) + print(result.exit_code, result.output) + 
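# Editorial addition (sketch): instance 9 was never created on Ethernet63, so + # a failed remove should leave CONFIG_DB untouched; assuming that holds: + assert ('Ethernet63', '9') not in db.cfgdb.get_table('VRRP') + 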
assert "10.10.10.16/24 is not configured on the vrrp instance" in result.output + assert result.exit_code != 0 + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet63 9.9.9.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet63", "9.9.9.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet63', '9.9.9.1/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet62 8.8.8.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet62", "8.8.8.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet62', '8.8.8.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp6_instance(self): + runner = CliRunner() + db = Db() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 100::1/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "100::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '100::1/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet63 99::1/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet63", "99::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet63', '99::1/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet62 88::1/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet62", "88::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet62', '88::1/64') in db.cfgdb.get_table('INTERFACE') + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernt64", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet2", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 add Ethernet64 8 100::8/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + 
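# Editorial note (added comment): VRRP6 rows are keyed by (interface, vrid) and + # 'vip' holds a list, so the second add below appends a VIP instead of + # overwriting the first one. + 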
assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['100::8/64'] + + # config int vrrp6 ipv6 add Ethernet64 8 100::16/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "100::16/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['100::8/64', '100::16/64'] + + # config int vrrp6 ipv6 add Ethernet62 7 88::16/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "88::16/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet62', '7') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet62', '7']['vip'] == ['88::16/64'] + + # config int vrrp6 ipv6 add Ethernet62 7 88::16/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "88::16/64"], obj=obj) + print(result.exit_code, result.output) + assert "88::16/64 has already configured" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 add Ethernet62 7 :: + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "::"], obj=obj) + print(result.exit_code, result.output) + assert "IPv6 address ::/128 is unspecified" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 add Ethernet62 7 785h::12/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "785h::12/64"], obj=obj) + print(result.exit_code, result.output) + assert "IP address 785h::12/64 is not valid" in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 add Ethernet62 7 88::2 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet62", "7", "88::2"], obj=obj) + print(result.exit_code, result.output) + assert "IPv6 address 88::2 is missing a mask." in result.output + assert result.exit_code != 0 + + # config int vrrp6 ipv6 remove Ethernet64 8 100::8/64 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernet64", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['100::16/64'] + + # config int vrrp6 ipv6 remove Ethernet64 8 100::8/64 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"], + ["Ethernet64", "8", "100::8/64"], obj=obj) + print(result.exit_code, result.output) + assert "100::8/64 is not configured on the Vrrpv6 instance 8!" 
in result.output
+        assert result.exit_code != 0
+
+        # config int vrrp6 ipv6 remove Ethernet64 8 100::16/64
+        result = runner.invoke(
+            config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"],
+            ["Ethernet64", "8", "100::16/64"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6')
+        # after the last VIP is removed, the vip field holds an empty string
+        assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['']
+
+        # config int vrrp6 remove Ethernet64 8
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"],
+                               ["Ethernet64", "8"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6')
+
+        # check interface_name is valid
+        result = runner.invoke(
+            config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"],
+            ["Ethernt64", "8", "100::16/64"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "'interface_name' is not valid" in result.output
+        assert result.exit_code != 0
+
+        # check interface is Router interface
+        result = runner.invoke(
+            config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"],
+            ["Ethernet2", "8", "100::16/64"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "Router Interface 'Ethernet2' not found" in result.output
+        assert result.exit_code != 0
+
+        # config int vrrp6 ipv6 remove Ethernet63 9 100::16/64
+        result = runner.invoke(
+            config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"],
+            ["Ethernet63", "9", "100::16/64"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "100::16/64 is not configured on the Vrrpv6 instance 9" in result.output
+        assert result.exit_code != 0
+
+        # config int vrrp6 ipv6 remove Ethernet64 8 88cg::2/64
+        result = runner.invoke(
+            config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["remove"],
+            ["Ethernet64", "8", "88cg::2/64"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "IPv6 address is not valid:" in result.output
+        assert result.exit_code != 0
+
+        # config int ip remove Ethernet64 100::1/64
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
+                                   ["Ethernet64", "100::1/64"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
+            assert ('Ethernet64', '100::1/64') not in db.cfgdb.get_table('INTERFACE')
+
+        # config int ip remove Ethernet63 99::1/64
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
+                                   ["Ethernet63", "99::1/64"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
+            assert ('Ethernet63', '99::1/64') not in db.cfgdb.get_table('INTERFACE')
+
+        # config int ip remove Ethernet62 88::1/64
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
+                                   ["Ethernet62", "88::1/64"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
+            assert ('Ethernet62', '88::1/64') not in db.cfgdb.get_table('INTERFACE')
+
+    def 
test_add_del_vrrp_instance_track_intf(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet5 10.10.10.5/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet5", "10.10.10.5/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet5', '10.10.10.5/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet6 10.10.10.6/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet6", "10.10.10.6/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet6', '10.10.10.6/24') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet7 10.10.10.7/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet7", "10.10.10.7/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet7', '10.10.10.7/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # check interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernt64", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet2", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernt5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "'track_interface' is not valid." 
in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet2", "20"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config interface vrrp track_interface add Ethernet64 8 Ethernet5 20 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet5') in db.cfgdb.get_table('VRRP_TRACK') + assert db.cfgdb.get_table('VRRP_TRACK')['Ethernet64', '8', 'Ethernet5']['priority_increment'] == '20' + + # config interface vrrp track_interface add Ethernet64 8 Ethernet6 30 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet6", "30"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') in db.cfgdb.get_table('VRRP_TRACK') + assert db.cfgdb.get_table('VRRP_TRACK')['Ethernet64', '8', 'Ethernet6']['priority_increment'] == '30' + + # config interface vrrp track_interface add Ethernet64 8 Ethernet6 25 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet6", "25"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') in db.cfgdb.get_table('VRRP_TRACK') + assert db.cfgdb.get_table('VRRP_TRACK')['Ethernet64', '8', 'Ethernet6']['priority_increment'] == '25' + + # config interface vrrp track_interface add Ethernet64 8 Ethernet7 80 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet7", "80"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config interface vrrp track_interface add Ethernet7 7 Ethernet5 40 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["add"], + ["Ethernet7", "7", "Ethernet5", "40"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 7 not found on interface Ethernet7" in result.output + assert result.exit_code != 0 + + # config interface vrrp track_interface remove Ethernet64 8 Ethernet6 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet6"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') not in db.cfgdb.get_table('VRRP_TRACK') + + # config interface vrrp track_interface remove Ethernet64 8 Ethernet5 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet5') not in db.cfgdb.get_table('VRRP_TRACK') + + # check interface_name is valid + result = runner.invoke( + 
config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernt64", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet2", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernt5"], obj=obj) + print(result.exit_code, result.output) + assert "'track_interface' is not valid." in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet2"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet2 is not configured on the vrrp instance 8" in result.output + assert result.exit_code != 0 + + # config interface vrrp track_interface remove Ethernet7 7 Ethernet5 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp"].commands["track_interface"].commands["remove"], + ["Ethernet7", "7", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 7 not found on interface Ethernet7" in result.output + assert result.exit_code != 0 + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int ip remove Ethernet7 10.10.10.7/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet7", "10.10.10.7/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet7', '10.10.10.7/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet6 10.10.10.6/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet6", "10.10.10.6/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet6', '10.10.10.6/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet5 10.10.10.5/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet5", "10.10.10.5/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet5', '10.10.10.5/24') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result 
= runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_add_del_vrrp6_instance_track_intf(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 100::64/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "100::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '100::64/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet5 100::5/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet5", "100::5/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet5', '100::5/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet6 100::6/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet6", "100::6/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet6', '100::6/64') in db.cfgdb.get_table('INTERFACE') + + # config int ip add Ethernet7 100::7/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet7", "100::7/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet7', '100::7/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 ipv6 add Ethernet64 8 100::1/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "100::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['100::1/64'] + + # check interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernt64", "8", "Ethernet", "20"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet2", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernt5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "'track_interface' is not valid." 
in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet2", "20"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 track_interface add Ethernet7 8 Ethernet5 20 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet7", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp6 instance 8 not found on interface Ethernet7" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 track_interface add Ethernet64 8 Ethernet5 20 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet5", "20"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet5') in db.cfgdb.get_table('VRRP6_TRACK') + assert db.cfgdb.get_table('VRRP6_TRACK')['Ethernet64', '8', 'Ethernet5']['priority_increment'] == '20' + + # config interface vrrp6 track_interface add Ethernet64 8 Ethernet6 30 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet6", "30"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') in db.cfgdb.get_table('VRRP6_TRACK') + assert db.cfgdb.get_table('VRRP6_TRACK')['Ethernet64', '8', 'Ethernet6']['priority_increment'] == '30' + + # config interface vrrp6 track_interface add Ethernet64 8 Ethernet7 80 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["add"], + ["Ethernet64", "8", "Ethernet7", "80"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config interface vrrp6 track_interface remove Ethernet64 8 Ethernet6 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet6"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet6') not in db.cfgdb.get_table('VRRP6_TRACK') + + # config interface vrrp6 track_interface remove Ethernet64 8 Ethernet5 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8', 'Ethernet5') not in db.cfgdb.get_table('VRRP6_TRACK') + + # config interface vrrp6 track_interface remove Ethernet7 8 Ethernet5 + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet7", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp6 instance 8 not found on interface Ethernet7" in result.output + assert result.exit_code != 0 + + # check interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernt64", "8", 
"Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet2", "8", "Ethernet5"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernt5"], obj=obj) + print(result.exit_code, result.output) + assert "'track_interface' is not valid." in result.output + assert result.exit_code != 0 + + # check track_interface_name is valid + result = runner.invoke( + config.config.commands["interface"].commands["vrrp6"].commands["track_interface"].commands["remove"], + ["Ethernet64", "8", "Ethernet2"], obj=obj) + print(result.exit_code, result.output) + assert "Ethernet2 is not configured on the vrrp6 instance 8" in result.output + assert result.exit_code != 0 + + # config int vrrp6 remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet7 100::7/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet7", "100::7/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet7', '100::7/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet6 100::6/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet6", "100::6/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet6', '100::6/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet5 100::5/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet5", "100::5/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet5', '100::5/64') not in db.cfgdb.get_table('INTERFACE') + + # config int ip remove Ethernet64 100::64/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "100::64/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '100::64/64') not in db.cfgdb.get_table('INTERFACE') + + def test_enable_disable_vrrp_instance_preempt(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = 
runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Ethernet64", "10.10.10.1/24"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE')
+
+        # config int vrrp ip add Ethernet64 8 10.10.10.8/24
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"],
+                               ["Ethernet64", "8", "10.10.10.8/24"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP')
+        assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24']
+
+        # check interface_name is valid
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"],
+                               ["Ethernt64", "8", "disabled"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "'interface_name' is not valid" in result.output
+        assert result.exit_code != 0
+
+        # check interface is Router interface
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"],
+                               ["Ethernet2", "8", "disabled"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "Router Interface 'Ethernet2' not found" in result.output
+        assert result.exit_code != 0
+
+        # check the vrrp instance is valid
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"],
+                               ["Ethernet64", "9", "disabled"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "vrrp instance 9 not found on interface Ethernet64" in result.output
+        assert result.exit_code != 0
+
+        # config interface vrrp pre_empt Ethernet64 8 disabled
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"],
+                               ["Ethernet64", "8", "disabled"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP')
+        assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['preempt'] == 'disabled'
+
+        # config interface vrrp pre_empt Ethernet64 8 enabled
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["pre_empt"],
+                               ["Ethernet64", "8", "enabled"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP')
+        assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['preempt'] == 'enabled'
+
+        # config int vrrp remove Ethernet64 8
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"],
+                               ["Ethernet64", "8"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP')
+
+        # config int ip remove Ethernet64 10.10.10.1/24
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
+                                   ["Ethernet64", "10.10.10.1/24"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
+            assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE')
+
+    def test_enable_disable_vrrp6_instance_preempt(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db': db.cfgdb}
+
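+        # same flow as the VRRPv4 pre_empt test above: validate arguments, then toggle preemption off and on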
"10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10::8/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 ipv6 add Ethernet64 8 10::1/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "10::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['10::1/64'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernt64", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernet2", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp6 instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernet64", "9", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert "Vrrpv6 instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 pre_empt Ethernet64 8 disabled + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernet64", "8", "disabled"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['preempt'] == 'disabled' + + # config interface vrrp vrrp pre_empt Ethernet64 8 enabled + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["pre_empt"], + ["Ethernet64", "8", "enabled"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['preempt'] == 'enabled' + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet64 10::8/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10::8/64') not in db.cfgdb.get_table('INTERFACE') + + def test_config_vrrp_instance_adv_interval(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', 
+
+        # config int vrrp ip add Ethernet64 8 10.10.10.8/24
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"],
+                               ["Ethernet64", "8", "10.10.10.8/24"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP')
+        assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24']
+
+        # check interface_name is valid
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"],
+                               ["Ethernt64", "8", "2"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "'interface_name' is not valid" in result.output
+        assert result.exit_code != 0
+
+        # check interface is Router interface
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"],
+                               ["Ethernet2", "8", "2"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "Router Interface 'Ethernet2' not found" in result.output
+        assert result.exit_code != 0
+
+        # check the vrrp instance is valid
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"],
+                               ["Ethernet64", "9", "2"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "vrrp instance 9 not found on interface Ethernet64" in result.output
+        assert result.exit_code != 0
+
+        # config interface vrrp adv_interval Ethernet64 8 2
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"],
+                               ["Ethernet64", "8", "2"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP')
+        assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['adv_interval'] == '2'
+
+        # config interface vrrp adv_interval Ethernet64 8 500
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["adv_interval"],
+                               ["Ethernet64", "8", "500"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code != 0
+
+        # config int vrrp remove Ethernet64 8
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"],
+                               ["Ethernet64", "8"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP')
+
+        # config int ip remove Ethernet64 10.10.10.1/24
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
+                                   ["Ethernet64", "10.10.10.1/24"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
+            assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE')
+
+    def test_config_vrrp6_instance_adv_interval(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db': db.cfgdb}
+
+        # config int ip add Ethernet64 10::8/64
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Ethernet64", "10::8/64"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '10::8/64') in db.cfgdb.get_table('INTERFACE')
+
+        # config int vrrp6 ipv6 add Ethernet64 8 10::1/64
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"],
+                               ["Ethernet64", "8", "10::1/64"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6')
+        assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['10::1/64']
+
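+        # the checks below mirror the VRRPv4 adv_interval test above, including the value 500, which the CLI rejects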
+        # check interface_name is valid
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"],
+                               ["Ethernt64", "8", "2"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "'interface_name' is not valid" in result.output
+        assert result.exit_code != 0
+
+        # check interface is Router interface
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"],
+                               ["Ethernet2", "8", "2"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "Router Interface 'Ethernet2' not found" in result.output
+        assert result.exit_code != 0
+
+        # check the vrrp6 instance is valid
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"],
+                               ["Ethernet64", "9", "2"], obj=obj)
+        print(result.exit_code, result.output)
+        assert "Vrrpv6 instance 9 not found on interface Ethernet64" in result.output
+        assert result.exit_code != 0
+
+        # config interface vrrp6 adv_interval Ethernet64 8 2
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"],
+                               ["Ethernet64", "8", "2"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6')
+        assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['adv_interval'] == '2'
+
+        # config interface vrrp6 adv_interval Ethernet64 8 500
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["adv_interval"],
+                               ["Ethernet64", "8", "500"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code != 0
+
+        # config int vrrp6 remove Ethernet64 8
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"],
+                               ["Ethernet64", "8"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6')
+
+        # config int ip remove Ethernet64 10::8/64
+        with mock.patch('utilities_common.cli.run_command') as mock_run_command:
+            result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"],
+                                   ["Ethernet64", "10::8/64"], obj=obj)
+            print(result.exit_code, result.output)
+            assert result.exit_code == 0
+            assert mock_run_command.call_count == 1
+            assert ('Ethernet64', '10::8/64') not in db.cfgdb.get_table('INTERFACE')
+
+    def test_config_vrrp_instance_priority(self):
+        db = Db()
+        runner = CliRunner()
+        obj = {'config_db': db.cfgdb}
+
+        # config int ip add Ethernet64 10.10.10.1/24
+        result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"],
+                               ["Ethernet64", "10.10.10.1/24"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE')
+
+        # config int vrrp ip add Ethernet64 8 10.10.10.8/24
+        result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"],
+                               ["Ethernet64", "8", "10.10.10.8/24"], obj=obj)
+        print(result.exit_code, result.output)
+        assert result.exit_code == 0
+        assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP')
+        assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24']
+
+        # check interface_name is valid
+        result = 
runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernt64", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernet2", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernet64", "9", "150"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp priority Ethernet64 8 150 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernet64", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['priority'] == '150' + + # config interface vrrp priority Ethernet64 8 256 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["priority"], + ["Ethernet64", "8", "256"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') + + def test_config_vrrp6_instance_priority(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10::8/64 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10::8/64') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp6 ipv6 add Ethernet64 8 10::1/64 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["ipv6"].commands["add"], + ["Ethernet64", "8", "10::1/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['vip'] == ['10::1/64'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernt64", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = 
runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernet2", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernet64", "9", "150"], obj=obj) + print(result.exit_code, result.output) + assert "Vrrpv6 instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp6 priority Ethernet64 8 150 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernet64", "8", "150"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP6') + assert db.cfgdb.get_table('VRRP6')['Ethernet64', '8']['priority'] == '150' + + # config interface vrrp priority Ethernet64 8 256 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["priority"], + ["Ethernet64", "8", "256"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config int vrrp6 remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp6"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP6') + + # config int ip remove Ethernet64 10::8/64 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10::8/64"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10::8/64') not in db.cfgdb.get_table('INTERFACE') + + def test_config_vrrp_instance_version(self): + db = Db() + runner = CliRunner() + obj = {'config_db': db.cfgdb} + + # config int ip add Ethernet64 10.10.10.1/24 + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["add"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '10.10.10.1/24') in db.cfgdb.get_table('INTERFACE') + + # config int vrrp ip add Ethernet64 8 10.10.10.8/24 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["ip"].commands["add"], + ["Ethernet64", "8", "10.10.10.8/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['vip'] == ['10.10.10.8/24'] + + # check interface_name is valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernt64", "8", "3"], obj=obj) + print(result.exit_code, result.output) + assert "'interface_name' is not valid" in result.output + assert result.exit_code != 0 + + # check interface is Router interface + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernet2", "8", "3"], obj=obj) + print(result.exit_code, result.output) + assert "Router Interface 'Ethernet2' not found" in result.output + assert result.exit_code != 0 + + # check the vrrp instance is 
valid + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernet64", "9", "3"], obj=obj) + print(result.exit_code, result.output) + assert "vrrp instance 9 not found on interface Ethernet64" in result.output + assert result.exit_code != 0 + + # config interface vrrp version Ethernet64 8 3 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernet64", "8", "3"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') in db.cfgdb.get_table('VRRP') + assert db.cfgdb.get_table('VRRP')['Ethernet64', '8']['version'] == '3' + + # config interface vrrp version Ethernet64 8 1 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["version"], + ["Ethernet64", "8", "1"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code != 0 + + # config int vrrp remove Ethernet64 8 + result = runner.invoke(config.config.commands["interface"].commands["vrrp"].commands["remove"], + ["Ethernet64", "8"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert ('Ethernet64', '8') not in db.cfgdb.get_table('VRRP') + + # config int ip remove Ethernet64 10.10.10.1/24 + with mock.patch('utilities_common.cli.run_command') as mock_run_command: + result = runner.invoke(config.config.commands["interface"].commands["ip"].commands["remove"], + ["Ethernet64", "10.10.10.1/24"], obj=obj) + print(result.exit_code, result.output) + assert result.exit_code == 0 + assert mock_run_command.call_count == 1 + assert ('Ethernet64', '10.10.10.1/24') not in db.cfgdb.get_table('INTERFACE') diff --git a/tests/watermarkstat_test.py b/tests/watermarkstat_test.py index dc419ae3b9..6a2ebfa2cf 100644 --- a/tests/watermarkstat_test.py +++ b/tests/watermarkstat_test.py @@ -1,11 +1,9 @@ import os import sys import pytest - import show.main as show from click.testing import CliRunner - -from .wm_input.wm_test_vectors import * +from wm_input.wm_test_vectors import testData test_path = os.path.dirname(os.path.abspath(__file__)) modules_path = os.path.dirname(test_path) @@ -84,12 +82,14 @@ def executor(self, testcase): else: exec_cmd = show.cli.commands[input['cmd'][0]].commands[input['cmd'][1]] - result = runner.invoke(exec_cmd, []) + args = [] if 'args' not in input else input['args'] + result = runner.invoke(exec_cmd, args) print(result.exit_code) print(result.output) - assert result.exit_code == 0 + expected_code = 0 if 'rc' not in input else input['rc'] + assert result.exit_code == expected_code assert result.output == input['rc_output'] @classmethod diff --git a/tests/wm_input/wm_test_vectors.py b/tests/wm_input/wm_test_vectors.py index 93d9faa4cb..f0a80cf9cb 100644 --- a/tests/wm_input/wm_test_vectors.py +++ b/tests/wm_input/wm_test_vectors.py @@ -1,3 +1,373 @@ +show_pg_wm_shared_output_one_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n") + +show_pg_wm_shared_output_all_masic = ( + "Ingress 
shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- ----- " + "----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 " + "PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_wm_hdrm_output_one_masic = ( + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_wm_hdrm_output_all_masic = ( + "Ingress headroom per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + " Ethernet4 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 100 101 102 103 104 105 106 107" + " 108 109 110 111 112 113 114 115\n" + "Ethernet-BP260 200 201 202 203 204 205 206 207" + " 208 209 210 211 212 213 214 215\n") + +show_pg_persistent_wm_shared_output_one_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_shared_output_all_masic = ( + "Ingress shared pool occupancy per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + " Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n" + "Ethernet-BP0 0 0 0 
0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0 0\n" + "Ingress shared pool occupancy per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7" + " PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_hdrm_output_one_masic = ( + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207" + " 500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_pg_persistent_wm_hdrm_output_all_masic = ( + "Ingress headroom per PG: (Namespace asic0)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6" + " PG7 PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "------------ ----- ----- ----- ----- ----- ----- ----- ----- -----" + " ----- ------ ------ ------ ------ ------ ------\n" + " Ethernet0 200 201 202 203 204 205 206 207 500" + " 501 502 503 504 505 506 507\n" + " Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A N/A" + " N/A N/A N/A N/A N/A N/A N/A\n" + "Ethernet-BP0 0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0\n" + "Ethernet-BP4 0 0 0 0 0 0 0 0 0" + " 0 0 0 0 0 0 0\n" + "Ingress headroom per PG: (Namespace asic1)\n" + " Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 " + "PG8 PG9 PG10 PG11 PG12 PG13 PG14 PG15\n" + "-------------- ----- ----- ----- ----- ----- ----- ----- ----- " + "----- ----- ------ ------ ------ ------ ------ ------\n" + "Ethernet-BP256 200 201 202 203 204 205 206 207 " + "500 501 502 503 504 505 506 507\n" + "Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A " + "N/A N/A N/A N/A N/A N/A N/A N/A\n") + +show_queue_wm_unicast_output_one_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ------- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 2057328 20 2 0 0 2 2 28 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_unicast_output_all_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic0) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +------------ ----- ----- ----- ----- ----- ----- ----- ----- + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ------- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 2057328 20 2 0 0 2 2 28 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_unicast_output_one_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_unicast_output_all_masic = """\ +Egress shared pool occupancy per unicast queue: (Namespace asic0) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +------------ 
----- ----- ----- ----- ----- ----- ----- ----- + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per unicast queue: (Namespace asic1) + Port UC0 UC1 UC2 UC3 UC4 UC5 UC6 UC7 +-------------- ----- ----- ----- ----- ----- ----- ----- ----- +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_multicast_output_one_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +""" + +show_queue_wm_multicast_output_all_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +Egress shared pool occupancy per multicast queue: (Namespace asic1) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +-------------- ----- ----- ------ ------- ------ ------ ------ ------ +Ethernet-BP256 2 0 5 2057328 208 20 228 2 +Ethernet-BP260 0 0 0 0 0 0 0 0 +""" + +show_queue_pwm_multicast_output_one_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +""" + +show_queue_pwm_multicast_output_all_masic = """\ +Egress shared pool occupancy per multicast queue: (Namespace asic0) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +------------ ----- ----- ------ ------ ------ ------ ------ ------ + Ethernet0 N/A N/A N/A N/A N/A N/A N/A N/A + Ethernet4 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP0 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP4 0 0 0 0 0 0 0 0 +Egress shared pool occupancy per multicast queue: (Namespace asic1) + Port MC8 MC9 MC10 MC11 MC12 MC13 MC14 MC15 +-------------- ----- ----- ------ ------ ------ ------ ------ ------ +Ethernet-BP256 N/A N/A N/A N/A N/A N/A N/A N/A +Ethernet-BP260 0 0 0 0 0 0 0 0 +""" + +show_queue_wm_all_output_one_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_wm_all_output_all_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic0) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +------------ ------ ------ ------- ------- ------- ------- ------- ------- + Ethernet0 0 0 0 0 0 0 0 0 + Ethernet4 0 0 0 0 0 0 0 0 +Ethernet-BP0 0 0 0 0 0 0 0 0 +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 
+Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_all_output_one_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_queue_pwm_all_output_all_masic = """\ +Egress shared pool occupancy per all queues: (Namespace asic0) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +------------ ------ ------ ------- ------- ------- ------- ------- ------- + Ethernet0 0 0 0 0 0 0 0 0 + Ethernet4 0 0 0 0 0 0 0 0 +Ethernet-BP0 0 0 0 0 0 0 0 0 +Ethernet-BP4 N/A N/A N/A N/A N/A N/A N/A N/A +Egress shared pool occupancy per all queues: (Namespace asic1) + Port ALL8 ALL9 ALL10 ALL11 ALL12 ALL13 ALL14 ALL15 +-------------- ------ ------ ------- ------- ------- ------- ------- ------- +Ethernet-BP256 0 0 0 0 0 0 0 0 +Ethernet-BP260 N/A N/A N/A N/A N/A N/A N/A N/A +""" + +show_buffer_pool_wm_output_one_masic = """\ +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 3000 +""" + +show_buffer_pool_wm_output_all_masic = """\ +Shared pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 3000 +""" + +show_buffer_pool_pwm_output_one_masic = """\ +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_buffer_pool_pwm_output_all_masic = """\ +Shared pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Shared pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_hdrm_pool_wm_output_one_masic = """\ +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 432640 +""" + +show_hdrm_pool_wm_output_all_masic = """\ +Headroom pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool 432640 +""" + +show_hdrm_pool_pwm_output_one_masic = """\ +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +show_hdrm_pool_pwm_output_all_masic = """\ +Headroom pool maximum occupancy: (Namespace asic0) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +Headroom pool maximum occupancy: (Namespace asic1) + Pool Bytes +--------------------- ------- +ingress_lossless_pool N/A +""" + +clear_hdrm_pool_wm_output_one_masic = """\ +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic0 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +""" + +clear_hdrm_pool_wm_output_all_masic = """\ +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic0 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +Channel: WATERMARK_CLEAR_REQUEST accessed in namespace: asic1 +Message published to WATERMARK_CLEAR_REQUEST: ["USER","PG_HEADROOM"] +""" + show_pg_wm_shared_output="""\ Ingress shared pool occupancy per PG: Port PG0 PG1 PG2 PG3 PG4 PG5 PG6 PG7 @@ -124,56 +494,198 @@ 
'rc_output': show_pg_wm_hdrm_output } ], - 'show_pg_pwm_shared' : [ {'cmd' : ['priority-group', 'persistent-watermark', 'shared'], - 'rc_output': show_pg_persistent_wm_shared_output - } - ], - 'show_pg_pwm_hdrm' : [ {'cmd' : ['priority-group', 'persistent-watermark', 'headroom'], - 'rc_output': show_pg_persistent_wm_hdrm_output - } - ], - 'show_q_wm_unicast' : [ {'cmd' : ['queue', 'watermark', 'unicast'], - 'rc_output': show_queue_wm_unicast_output + 'show_pg_pwm_shared': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'rc_output': show_pg_persistent_wm_shared_output } ], - 'show_q_pwm_unicast' : [ {'cmd' : ['queue', 'persistent-watermark', 'unicast'], - 'rc_output': show_queue_pwm_unicast_output - } + 'show_pg_pwm_hdrm': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'rc_output': show_pg_persistent_wm_hdrm_output + } + ], + 'show_q_wm_unicast': [{'cmd': ['queue', 'watermark', 'unicast'], + 'rc_output': show_queue_wm_unicast_output + } + ], + 'show_q_pwm_unicast': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'rc_output': show_queue_pwm_unicast_output + } + ], + 'show_q_wm_multicast': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output + } ], - 'show_q_wm_multicast' : [ {'cmd' : ['queue', 'watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_output - } - ], - 'show_q_wm_multicast_neg' : [ { 'cmd' : ['queue', 'watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_neg_output - } + 'show_q_wm_multicast_neg': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_neg_output + } ], - 'show_q_pwm_multicast' : [ {'cmd' : ['queue', 'persistent-watermark', 'multicast'], - 'rc_output': show_queue_wm_multicast_output - } - ], - 'show_q_wm_all' : [ {'cmd' : ['queue', 'watermark', 'all'], - 'rc_output': show_queue_wm_all_output - } - ], - 'show_q_pwm_all' : [ {'cmd' : ['queue', 'persistent-watermark', 'all'], - 'rc_output': show_queue_pwm_all_output - } - ], - 'show_buffer_pool_wm' : [ {'cmd' : ['buffer_pool', 'watermark'], - 'rc_output': show_buffer_pool_wm_output - } + 'show_q_pwm_multicast': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output + } ], - 'show_buffer_pool_pwm' : [ {'cmd' : ['buffer_pool', 'persistent-watermark'], - 'rc_output': show_buffer_pool_persistent_wm_output - } - ], - 'show_hdrm_pool_wm' : [ {'cmd' : ['headroom-pool', 'watermark'], - 'rc_output': show_hdrm_pool_wm_output + 'show_q_wm_all': [{'cmd': ['queue', 'watermark', 'all'], + 'rc_output': show_queue_wm_all_output + } + ], + 'show_q_pwm_all': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'rc_output': show_queue_pwm_all_output + } + ], + 'show_buffer_pool_wm': [{'cmd': ['buffer_pool', 'watermark'], + 'rc_output': show_buffer_pool_wm_output } - ], - 'show_hdrm_pool_pwm' : [ {'cmd' : ['headroom-pool', 'persistent-watermark'], - 'rc_output': show_hdrm_pool_persistent_wm_output + ], + 'show_buffer_pool_pwm': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'rc_output': show_buffer_pool_persistent_wm_output } - ] + ], + 'show_hdrm_pool_wm': [{'cmd': ['headroom-pool', 'watermark'], + 'rc_output': show_hdrm_pool_wm_output + } + ], + 'show_hdrm_pool_pwm': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'rc_output': show_hdrm_pool_persistent_wm_output + } + ], + 'show_pg_wm_shared_one_masic': [{'cmd': ['priority-group', 'watermark', 'shared'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_pg_wm_shared_output_one_masic + 
} + ], + 'show_pg_wm_shared_all_masic': [{'cmd': ['priority-group', 'watermark', 'shared'], + 'rc_output': show_pg_wm_shared_output_all_masic + } + ], + 'show_pg_wm_hdrm_one_masic': [{'cmd': ['priority-group', 'watermark', 'headroom'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_wm_hdrm_output_one_masic + } + ], + 'show_pg_wm_hdrm_all_masic': [{'cmd': ['priority-group', 'watermark', 'headroom'], + 'rc_output': show_pg_wm_hdrm_output_all_masic + } + ], + 'show_pg_pwm_shared_one_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_persistent_wm_shared_output_one_masic + } + ], + 'show_pg_pwm_shared_all_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'shared'], + 'rc_output': show_pg_persistent_wm_shared_output_all_masic + } + ], + 'show_pg_pwm_hdrm_one_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_pg_persistent_wm_hdrm_output_one_masic + } + ], + 'show_pg_pwm_hdrm_all_masic': [{'cmd': ['priority-group', 'persistent-watermark', 'headroom'], + 'rc_output': show_pg_persistent_wm_hdrm_output_all_masic + } + ], + 'show_q_wm_unicast_one_masic': [{'cmd': ['queue', 'watermark', 'unicast'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_wm_unicast_output_one_masic + } + ], + 'show_q_wm_unicast_all_masic': [{'cmd': ['queue', 'watermark', 'unicast'], + 'rc_output': show_queue_wm_unicast_output_all_masic + } + ], + 'show_q_pwm_unicast_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_pwm_unicast_output_one_masic + } + ], + 'show_q_pwm_unicast_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'unicast'], + 'rc_output': show_queue_pwm_unicast_output_all_masic + } + ], + 'show_q_wm_multicast_one_masic': [{'cmd': ['queue', 'watermark', 'multicast'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_queue_wm_multicast_output_one_masic + } + ], + 'show_q_wm_multicast_all_masic': [{'cmd': ['queue', 'watermark', 'multicast'], + 'rc_output': show_queue_wm_multicast_output_all_masic + } + ], + 'show_q_pwm_multicast_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'args': ['--namespace', 'asic0'], + 'rc_output': show_queue_pwm_multicast_output_one_masic + } + ], + 'show_q_pwm_multicast_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'multicast'], + 'rc_output': show_queue_pwm_multicast_output_all_masic + } + ], + 'show_q_wm_all_one_masic': [{'cmd': ['queue', 'watermark', 'all'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_wm_all_output_one_masic + } + ], + 'show_q_wm_all_all_masic': [{'cmd': ['queue', 'watermark', 'all'], + 'rc_output': show_queue_wm_all_output_all_masic + } + ], + 'show_q_pwm_all_one_masic': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_queue_pwm_all_output_one_masic + } + ], + 'show_q_pwm_all_all_masic': [{'cmd': ['queue', 'persistent-watermark', 'all'], + 'rc_output': show_queue_pwm_all_output_all_masic + } + ], + 'show_buffer_pool_wm_one_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_buffer_pool_wm_output_one_masic + } + ], + 'show_buffer_pool_wm_all_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'rc_output': show_buffer_pool_wm_output_all_masic + } + ], + 'show_buffer_pool_pwm_one_masic': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 
'args': ['--namespace', 'asic1'], + 'rc_output': show_buffer_pool_pwm_output_one_masic + } + ], + 'show_buffer_pool_pwm_all_masic': [{'cmd': ['buffer_pool', 'persistent-watermark'], + 'rc_output': show_buffer_pool_pwm_output_all_masic + } + ], + 'show_hdrm_pool_wm_one_masic': [{'cmd': ['headroom-pool', 'watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_hdrm_pool_wm_output_one_masic + } + ], + 'show_hdrm_pool_wm_all_masic': [{'cmd': ['headroom-pool', 'watermark'], + 'rc_output': show_hdrm_pool_wm_output_all_masic + } + ], + 'show_hdrm_pool_pwm_one_masic': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'args': ['--namespace', 'asic1'], + 'rc_output': show_hdrm_pool_pwm_output_one_masic + } + ], + 'show_hdrm_pool_pwm_all_masic': [{'cmd': ['headroom-pool', 'persistent-watermark'], + 'rc_output': show_hdrm_pool_pwm_output_all_masic + } + ], + 'show_invalid_namespace_masic': [{'cmd': ['buffer_pool', 'watermark'], + 'args': ['--namespace', 'asic14'], + 'rc': 2, + 'rc_output': '' + } + ], + 'clear_hdrm_pool_wm_one_masic': [{'cmd': ['clear', 'watermarkstat', '-t', + 'pg_headroom', '-n', 'asic0', '-c'], + 'rc_output': clear_hdrm_pool_wm_output_one_masic + } + ], + 'clear_hdrm_pool_wm_all_masic': [{'cmd': ['clear', 'watermarkstat', '-t', + 'pg_headroom', '-c'], + 'rc_output': clear_hdrm_pool_wm_output_all_masic + } + ] } diff --git a/tests/wol_test.py b/tests/wol_test.py deleted file mode 100644 index 011676eeac..0000000000 --- a/tests/wol_test.py +++ /dev/null @@ -1,229 +0,0 @@ -import click -import io -import pytest -import wol.main as wol -from click.testing import CliRunner -from unittest.mock import patch, MagicMock - -ETHER_TYPE_WOL = b'\x08\x42' -BROADCAST_MAC = wol.MacAddress('ff:ff:ff:ff:ff:ff') - -SAMPLE_INTERFACE_ETH0 = "Ethernet0" -SAMPLE_INTERFACE_VLAN1000 = "Vlan1000" -SAMPLE_INTERFACE_PO100 = "PortChannel100" - -SAMPLE_ETH0_MAC = wol.MacAddress('11:33:55:77:99:bb') -SAMPLE_VLAN1000_MAC = wol.MacAddress('22:44:66:88:aa:cc') -SAMPLE_PO100_MAC = wol.MacAddress('33:55:77:99:bb:dd') -SAMPLE_TARGET_MAC = wol.MacAddress('44:66:88:aa:cc:ee') -SAMPLE_TARGET_MAC_LIST = [wol.MacAddress('44:66:88:aa:cc:ee'), wol.MacAddress('55:77:99:bb:dd:ff')] - -SAMPLE_MAGIC_PACKET_UNICAST = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 -SAMPLE_MAGIC_PACKET_BROADCAST = BROADCAST_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - - -class TestMacAddress(): - def test_init(self): - # Test Case 1: Test with a valid MAC address - assert wol.MacAddress('00:11:22:33:44:55').address == b'\x00\x11\x22\x33\x44\x55' - # Test Case 2: Test with an invalid MAC address - with pytest.raises(ValueError) as exc_info: - wol.MacAddress('INVALID_MAC_ADDRESS') - assert exc_info.value.message == "invalid MAC address" - with pytest.raises(ValueError) as exc_info: - wol.MacAddress('00:11:22:33:44') - assert exc_info.value.message == "invalid MAC address" - - def test_str(self): - assert str(wol.MacAddress('00:01:0a:a0:aa:ee')) == '00:01:0a:a0:aa:ee' - assert str(wol.MacAddress('ff:ff:ff:ff:ff:ff')) == 'ff:ff:ff:ff:ff:ff' - - def test_eq(self): - # Test Case 1: Test with two equal MAC addresses - assert wol.MacAddress('00:11:22:33:44:55') == wol.MacAddress('00:11:22:33:44:55') - # Test Case 2: Test with two unequal MAC addresses - assert wol.MacAddress('00:11:22:33:44:55') != wol.MacAddress('55:44:33:22:11:00') - - def test_to_bytes(self): - assert 
wol.MacAddress('00:11:22:33:44:55').to_bytes() == b'\x00\x11\x22\x33\x44\x55' - - -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def test_build_magic_packet(): - # Test Case 1: Test build magic packet basic - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=b'') == expected_output - # Test Case 2: Test build magic packet with broadcast flag - expected_output = BROADCAST_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=True, password=b'') == expected_output - # Test Case 3: Test build magic packet with 4-byte password - password = b'\x12\x34' - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 + password - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=password) == expected_output - # Test Case 4: Test build magic packet with 6-byte password - password = b'\x12\x34\x56\x78\x9a\xbc' - expected_output = SAMPLE_TARGET_MAC.to_bytes() + SAMPLE_ETH0_MAC.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + SAMPLE_TARGET_MAC.to_bytes() * 16 + password - assert wol.build_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, broadcast=False, password=password) == expected_output - - -def test_send_magic_packet(): - # Test Case 1: Test send magic packet with count is 1 - with patch('socket.socket') as mock_socket: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=1, interval=0, verbose=False) - mock_socket.return_value.bind.assert_called_once_with((SAMPLE_INTERFACE_ETH0, 0)) - mock_socket.return_value.send.assert_called_once_with(SAMPLE_MAGIC_PACKET_UNICAST) - # Test Case 2: Test send magic packet with count is 3 - with patch('socket.socket') as mock_socket: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=3, interval=0, verbose=False) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 3 - # Test Case 3: Test send magic packet with interval is 1000 - with patch('socket.socket') as mock_socket, \ - patch('time.sleep') as mock_sleep: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, count=3, interval=1000, verbose=False) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 3 - assert mock_sleep.call_count == 2 # sleep twice between 3 packets - mock_sleep.assert_called_with(1) - # Test Case 4: Test send magic packet with verbose is True - expected_verbose_output = f"Sending 5 magic packet to {SAMPLE_TARGET_MAC} via interface {SAMPLE_INTERFACE_ETH0}\n" + \ - f"1st magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"2nd magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"3rd magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"4th magic packet sent to {SAMPLE_TARGET_MAC}\n" + \ - f"5th magic packet sent to {SAMPLE_TARGET_MAC}\n" - with patch('socket.socket') as mock_socket, patch('time.sleep'), patch('sys.stdout', new_callable=io.StringIO) as mock_stdout: - wol.send_magic_packet(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, 
SAMPLE_MAGIC_PACKET_UNICAST, count=5, interval=1000, verbose=True) - assert mock_socket.return_value.bind.call_count == 1 - assert mock_socket.return_value.send.call_count == 5 - assert mock_stdout.getvalue() == expected_verbose_output - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -def test_validate_interface(): - # Test Case 1: Test with a valid SONiC interface name - assert wol.validate_interface(None, None, SAMPLE_INTERFACE_ETH0) == SAMPLE_INTERFACE_ETH0 - # Test Case 2: Test with an invalid SONiC interface name - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_interface(None, None, "INVALID_SONIC_INTERFACE") - assert exc_info.value.message == "invalid SONiC interface name INVALID_SONIC_INTERFACE" - # Test Case 3: Test with an valid SONiC interface name, but the interface operstat is down - with patch('wol.main.get_interface_operstate', MagicMock(return_value="down")): - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_interface(None, None, SAMPLE_INTERFACE_ETH0) - assert exc_info.value.message == f"interface {SAMPLE_INTERFACE_ETH0} is not up" - - -def test_parse_target_mac(): - # Test Case 1: Test with a single valid target MAC address - wol.parse_target_mac(None, None, str(SAMPLE_TARGET_MAC)) == [SAMPLE_TARGET_MAC] - # Test Case 2: Test with a list of valid target MAC addresses - mac_list = [SAMPLE_ETH0_MAC, SAMPLE_VLAN1000_MAC, SAMPLE_PO100_MAC] - assert wol.parse_target_mac(None, None, ",".join([str(x) for x in mac_list])) == mac_list - # Test Case 3: Test with a single invalid target MAC address - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_target_mac(None, None, "INVALID_MAC_ADDRESS") - assert exc_info.value.message == "invalid MAC address INVALID_MAC_ADDRESS" - # Test Case 4: Test with a list of target MAC addresses, one of them is invalid - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_target_mac(None, None, ",".join([str(SAMPLE_ETH0_MAC), "INVALID_MAC_ADDRESS"])) - assert exc_info.value.message == "invalid MAC address INVALID_MAC_ADDRESS" - - -def test_parse_password(): - # Test Case 1: Test with an empty password - assert wol.parse_password(None, None, "") == b'' - # Test Case 2: Test with a valid 4-byte password - assert wol.parse_password(None, None, "1.2.3.4") == b'\x01\x02\x03\x04' - # Test Case 3: Test with an invalid 4-byte password - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "1.2.3.999") - assert exc_info.value.message == "invalid password 1.2.3.999" - # Test Case 4: Test with a valid 6-byte password - assert wol.parse_password(None, None, str(SAMPLE_TARGET_MAC)) == SAMPLE_TARGET_MAC.to_bytes() - # Test Case 5: Test with an invalid 6-byte password - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "11:22:33:44:55:999") - assert exc_info.value.message == "invalid password 11:22:33:44:55:999" - # Test Case 6: Test with an invalid password string - with pytest.raises(click.BadParameter) as exc_info: - wol.parse_password(None, None, "INVALID_PASSWORD") - assert exc_info.value.message == "invalid password INVALID_PASSWORD" - - -def test_validate_count_interval(): - # Test Case 1: input valid count and interval - assert wol.validate_count_interval(1, 1000) == (1, 1000) - # Test Case 2: Test with both count and interval are not provided - assert wol.validate_count_interval(None, None) == (1, 0) - # Test Case 3: 
Test count and interval not provided together - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_count_interval(3, None) - assert exc_info.value.message == "count and interval must be used together" - with pytest.raises(click.BadParameter) as exc_info: - wol.validate_count_interval(None, 1000) - assert exc_info.value.message == "count and interval must be used together" - # Test Case 4: Test with count or interval not in valid range - # This restriction is validated by click.IntRange(), so no need to call the command line function - runner = CliRunner() - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '100', '-i', '1000']) - assert 'Invalid value for "-c": 100 is not in the valid range of 1 to 5.' in result.stdout - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '3', '-i', '100000']) - assert 'Invalid value for "-i": 100000 is not in the valid range of 0 to 2000.' in result.stdout - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) -@patch('wol.main.is_root', MagicMock(return_value=True)) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def test_wol_send_magic_packet_call_count(): - """ - Test the count of send_magic_packet() function call in wol is correct. - """ - runner = CliRunner() - # Test Case 1: Test with only required arguments - # 1.1 Single Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert result.exit_code == 0 - mock_send_magic_packet.assert_called_once_with(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, 1, 0, False) - # 1.2 Multiple Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, ','.join([str(v) for v in SAMPLE_TARGET_MAC_LIST])]) - assert result.exit_code == 0 - assert mock_send_magic_packet.call_count == 2 - # Test Case 2: Test with specified count and interval - # 2.1 Single Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC), '-c', '5', '-i', '1000']) - assert result.exit_code == 0 - mock_send_magic_packet.assert_called_once_with(SAMPLE_INTERFACE_ETH0, SAMPLE_TARGET_MAC, SAMPLE_MAGIC_PACKET_UNICAST, 5, 1000, False) - # 2.2 Multiple Target Mac - with patch('wol.main.send_magic_packet') as mock_send_magic_packet: - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, ','.join([str(v) for v in SAMPLE_TARGET_MAC_LIST]), '-c', '5', '-i', '1000']) - assert result.exit_code == 0 - assert mock_send_magic_packet.call_count == 2 - - -@patch('netifaces.interfaces', MagicMock(return_value=[SAMPLE_INTERFACE_ETH0])) -@patch('wol.main.is_root', MagicMock(return_value=True)) -@patch('wol.main.get_interface_operstate', MagicMock(return_value="up")) -@patch('wol.main.get_interface_mac', MagicMock(return_value=SAMPLE_ETH0_MAC)) -def test_wol_send_magic_packet_throw_exception(): - """ - Test the exception handling of send_magic_packet() function in wol. 
- """ - runner = CliRunner() - # Test Case 1: Test with OSError exception (interface flap) - with patch('wol.main.send_magic_packet', MagicMock(side_effect=OSError("[Errno 100] Network is down"))): - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert "Exception: [Errno 100] Network is down" in result.stdout - # Test Case 2: Test with other exception - with patch('wol.main.send_magic_packet', MagicMock(side_effect=Exception("Exception message"))): - result = runner.invoke(wol.wol, [SAMPLE_INTERFACE_ETH0, str(SAMPLE_TARGET_MAC)]) - assert "Exception: Exception message" in result.stdout diff --git a/utilities_common/bgp.py b/utilities_common/bgp.py new file mode 100644 index 0000000000..640be87ee0 --- /dev/null +++ b/utilities_common/bgp.py @@ -0,0 +1,23 @@ +from swsscommon.swsscommon import CFG_BGP_DEVICE_GLOBAL_TABLE_NAME as CFG_BGP_DEVICE_GLOBAL # noqa + +# +# BGP constants ------------------------------------------------------------------------------------------------------- +# + +BGP_DEVICE_GLOBAL_KEY = "STATE" + +SYSLOG_IDENTIFIER = "bgp-cli" + + +# +# BGP helpers --------------------------------------------------------------------------------------------------------- +# + + +def to_str(state): + """ Convert boolean to string representation """ + if state == "true": + return "enabled" + elif state == "false": + return "disabled" + return state diff --git a/utilities_common/bgp_util.py b/utilities_common/bgp_util.py index 65f9a59496..cb49123c4b 100644 --- a/utilities_common/bgp_util.py +++ b/utilities_common/bgp_util.py @@ -197,7 +197,8 @@ def get_neighbor_dict_from_table(db, table_name): return neighbor_dict -def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, vtysh_shell_cmd=constants.VTYSH_COMMAND): +def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, + vtysh_shell_cmd=constants.VTYSH_COMMAND, exit_on_fail=True): bgp_instance_id = [] output = None if bgp_namespace is not multi_asic.DEFAULT_NAMESPACE: @@ -208,16 +209,16 @@ def run_bgp_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, vtysh output, ret = clicommon.run_command(cmd, return_cmd=True) if ret != 0: click.echo(output.rstrip('\n')) - sys.exit(ret) + output = "" if not exit_on_fail else sys.exit(ret) except Exception: ctx = click.get_current_context() - ctx.fail("Unable to get summary from bgp {}".format(bgp_instance_id)) + ctx.fail("Unable to get summary from bgp {}".format(bgp_instance_id)) if exit_on_fail else None return output -def run_bgp_show_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE): - output = run_bgp_command(vtysh_cmd, bgp_namespace, constants.RVTYSH_COMMAND) +def run_bgp_show_command(vtysh_cmd, bgp_namespace=multi_asic.DEFAULT_NAMESPACE, exit_on_fail=True): + output = run_bgp_command(vtysh_cmd, bgp_namespace, constants.RVTYSH_COMMAND, exit_on_fail) # handle the the alias mode in the following code if output is not None: if clicommon.get_interface_naming_mode() == "alias" and re.search("show ip|ipv6 route", vtysh_cmd): @@ -298,6 +299,10 @@ def display_bgp_summary(bgp_summary, af): af: IPV4 or IPV6 ''' + + # "Neighbhor" is a known typo, + # but fix it will impact lots of automation scripts that the community users may have developed for years + # for now, let's keep it as it is. 
headers = ["Neighbhor", "V", "AS", "MsgRcvd", "MsgSent", "TblVer", "InQ", "OutQ", "Up/Down", "State/PfxRcd", "NeighborName"] diff --git a/utilities_common/cli.py b/utilities_common/cli.py index 63336377a8..c8a314b704 100644 --- a/utilities_common/cli.py +++ b/utilities_common/cli.py @@ -731,8 +731,7 @@ def run_command(command, display_cmd=False, ignore_error=False, return_cmd=False # with a list for next hops if (get_interface_naming_mode() == "alias" and not command_str.startswith("intfutil") and not re.search( "show ip|ipv6 route", command_str)): - run_command_in_alias_mode(command, shell=shell) - sys.exit(0) + return run_command_in_alias_mode(command, shell=shell) proc = subprocess.Popen(command, shell=shell, text=True, stdout=subprocess.PIPE) diff --git a/utilities_common/flock.py b/utilities_common/flock.py new file mode 100644 index 0000000000..c8faa8bfd9 --- /dev/null +++ b/utilities_common/flock.py @@ -0,0 +1,89 @@ +"""File lock utilities.""" +import click +import fcntl +import functools +import inspect +import os +import sys +import time + +from sonic_py_common import logger + + +log = logger.Logger() + + +def acquire_flock(fd, timeout=-1): + """Acquire the flock.""" + flags = fcntl.LOCK_EX + if timeout >= 0: + flags |= fcntl.LOCK_NB + else: + timeout = 0 + + start_time = current_time = time.time() + ret = False + while current_time - start_time <= timeout: + try: + fcntl.flock(fd, flags) + except (IOError, OSError): + ret = False + else: + ret = True + break + current_time = time.time() + if timeout != 0: + time.sleep(0.2) + return ret + + +def release_flock(fd): + """Release the flock.""" + fcntl.flock(fd, fcntl.LOCK_UN) + + +def try_lock(lock_file, timeout=-1): + """Decorator to try lock file using fcntl.flock.""" + def _decorator(func): + @functools.wraps(func) + def _wrapper(*args, **kwargs): + bypass_lock = False + + # Get the bypass_lock argument from the function signature + func_signature = inspect.signature(func) + has_bypass_lock = "bypass_lock" in func_signature.parameters + if has_bypass_lock: + func_ba = func_signature.bind(*args, **kwargs) + func_ba.apply_defaults() + bypass_lock = func_ba.arguments["bypass_lock"] + + if bypass_lock: + click.echo(f"Bypass lock on {lock_file}") + return func(*args, **kwargs) + else: + fd = os.open(lock_file, os.O_CREAT | os.O_RDWR) + if acquire_flock(fd, timeout): + click.echo(f"Acquired lock on {lock_file}") + os.truncate(fd, 0) + # Write pid and the function name to the lock file as a record + os.write(fd, f"{func.__name__}, pid {os.getpid()}\n".encode()) + try: + return func(*args, **kwargs) + finally: + release_flock(fd) + click.echo(f"Released lock on {lock_file}") + os.truncate(fd, 0) + os.close(fd) + else: + click.echo(f"Failed to acquire lock on {lock_file}") + lock_owner = os.read(fd, 1024).decode() + if not lock_owner: + lock_owner = "unknown" + log.log_notice( + (f"{func.__name__} failed to acquire lock on {lock_file}," + f" which is taken by {lock_owner}") + ) + os.close(fd) + sys.exit(1) + return _wrapper + return _decorator diff --git a/utilities_common/general.py b/utilities_common/general.py index 6ed70a46a1..97155532ca 100644 --- a/utilities_common/general.py +++ b/utilities_common/general.py @@ -2,8 +2,11 @@ import importlib.util import sys -from sonic_py_common.multi_asic import is_multi_asic +from sonic_py_common import multi_asic from swsscommon import swsscommon +FEATURE_TABLE = "FEATURE" +FEATURE_HAS_PER_ASIC_SCOPE = 'has_per_asic_scope' +FEATURE_HAS_GLOBAL_SCOPE = 'has_global_scope' def 
load_module_from_source(module_name, file_path):
     """
@@ -25,7 +28,7 @@ def load_db_config():
         - database_global.json for multi asic
         - database_config.json for single asic
     '''
-    if is_multi_asic():
+    if multi_asic.is_multi_asic():
         if not swsscommon.SonicDBConfig.isGlobalInit():
             swsscommon.SonicDBConfig.load_sonic_global_db_config()
     else:
@@ -39,6 +42,28 @@ def get_optional_value_for_key_in_config_tbl(config_db, port, key, table):
         return None
     value = info_dict.get(key, None)
     return value
+
+def get_feature_state_data(config_db, feature):
+    '''
+    Get the feature state from the FEATURE table in CONFIG_DB.
+    Returns (global_scope, per_asic_scope):
+    - if the feature state is disabled, return "False" for both global_scope and per_asic_scope
+    - if the platform is not multi-asic, return the feature state for global_scope ("True"/"False") and
+      "False" for asic_scope
+    '''
+    global_scope = "False"
+    asic_scope = "False"
+    info_dict = {}
+    info_dict = config_db.get_entry(FEATURE_TABLE, feature)
+    if info_dict is None:
+        return global_scope, asic_scope
+    if multi_asic.is_multi_asic():
+        if info_dict['state'].lower() == "enabled":
+            global_scope = info_dict[FEATURE_HAS_GLOBAL_SCOPE]
+            asic_scope = info_dict[FEATURE_HAS_PER_ASIC_SCOPE]
+    else:
+        if info_dict['state'].lower() == "enabled":
+            global_scope = "True"
+    return global_scope, asic_scope
diff --git a/utilities_common/multi_asic.py b/utilities_common/multi_asic.py
index b1f24e12e8..4ebd728031 100644
--- a/utilities_common/multi_asic.py
+++ b/utilities_common/multi_asic.py
@@ -3,7 +3,6 @@
 
 import click
 import netifaces
-import pyroute2
 from natsort import natsorted
 from sonic_py_common import multi_asic, device_info
 from utilities_common import constants
@@ -170,6 +169,7 @@ def multi_asic_args(parser=None):
     return parser
 
 def multi_asic_get_ip_intf_from_ns(namespace):
+    import pyroute2
     if namespace != constants.DEFAULT_NAMESPACE:
         pyroute2.netns.pushns(namespace)
     interfaces = natsorted(netifaces.interfaces())
@@ -181,6 +181,7 @@ def multi_asic_get_ip_intf_from_ns(namespace):
 
 
 def multi_asic_get_ip_intf_addr_from_ns(namespace, iface):
+    import pyroute2
     if namespace != constants.DEFAULT_NAMESPACE:
         pyroute2.netns.pushns(namespace)
     ipaddresses = netifaces.ifaddresses(iface)
diff --git a/utilities_common/netstat.py b/utilities_common/netstat.py
index 5f17c1f4c6..e32e28c745 100755
--- a/utilities_common/netstat.py
+++ b/utilities_common/netstat.py
@@ -108,6 +118,16 @@ def format_prate(rate):
     return "{:.2f}".format(float(rate))+'/s'
 
 
+def format_fec_ber(rate):
+    """
+    Format the FEC BER value in scientific notation.
+    """
+    if rate == STATUS_NA:
+        return STATUS_NA
+    else:
+        return "{:.2e}".format(float(rate))
+
+
 def format_util(brate, port_rate):
     """
     Calculate the util.
@@ -118,3 +128,12 @@ def format_util(brate, port_rate):
 
     util = brate/(float(port_rate)*1000*1000/8.0)*100
     return "{:.2f}%".format(util)
+
+
+def format_util_directly(util):
+    """
+    Format an already-computed utilization percentage.
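+
+    Example (hypothetical values): format_util_directly('12.5') returns
+    '12.50%', while format_util_directly('N/A') returns 'N/A'.
+    """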
+ """ + if util == STATUS_NA: + return STATUS_NA + else: + return "{:.2f}%".format(float(util)) diff --git a/utilities_common/portstat.py b/utilities_common/portstat.py new file mode 100644 index 0000000000..d28584682a --- /dev/null +++ b/utilities_common/portstat.py @@ -0,0 +1,672 @@ +import datetime +import time +from collections import OrderedDict, namedtuple + +from natsort import natsorted +from tabulate import tabulate +from sonic_py_common import multi_asic +from sonic_py_common import device_info +from swsscommon.swsscommon import SonicV2Connector, CounterTable, PortCounter + +from utilities_common import constants +import utilities_common.multi_asic as multi_asic_util +from utilities_common.netstat import ns_diff, table_as_json, format_brate, format_prate, \ + format_util, format_number_with_comma, format_util_directly, \ + format_fec_ber + +""" +The order and count of statistics mentioned below needs to be in sync with the values in portstat script +So, any fields added/deleted in here should be reflected in portstat script also +""" +NStats = namedtuple("NStats", "rx_ok, rx_err, rx_drop, rx_ovr, tx_ok,\ + tx_err, tx_drop, tx_ovr, rx_byt, tx_byt,\ + rx_64, rx_65_127, rx_128_255, rx_256_511, rx_512_1023,\ + rx_1024_1518, rx_1519_2047, rx_2048_4095, rx_4096_9216, rx_9217_16383,\ + rx_uca, rx_mca, rx_bca, rx_all,\ + tx_64, tx_65_127, tx_128_255, tx_256_511, tx_512_1023, tx_1024_1518,\ + tx_1519_2047, tx_2048_4095, tx_4096_9216, tx_9217_16383,\ + tx_uca, tx_mca, tx_bca, tx_all,\ + rx_jbr, rx_frag, rx_usize, rx_ovrrun,\ + fec_corr, fec_uncorr, fec_symbol_err") +header_all = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', + 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_std = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_UTIL', 'RX_ERR', 'RX_DRP', 'RX_OVR', + 'TX_OK', 'TX_BPS', 'TX_UTIL', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_errors_only = ['IFACE', 'STATE', 'RX_ERR', 'RX_DRP', 'RX_OVR', 'TX_ERR', 'TX_DRP', 'TX_OVR'] +header_fec_only = ['IFACE', 'STATE', 'FEC_CORR', 'FEC_UNCORR', 'FEC_SYMBOL_ERR', 'FEC_PRE_BER', 'FEC_POST_BER'] +header_rates_only = ['IFACE', 'STATE', 'RX_OK', 'RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_OK', 'TX_BPS', 'TX_PPS', 'TX_UTIL'] + +rates_key_list = ['RX_BPS', 'RX_PPS', 'RX_UTIL', 'TX_BPS', 'TX_PPS', 'TX_UTIL', 'FEC_PRE_BER', 'FEC_POST_BER'] +ratestat_fields = ("rx_bps", "rx_pps", "rx_util", "tx_bps", "tx_pps", "tx_util", "fec_pre_ber", "fec_post_ber") +RateStats = namedtuple("RateStats", ratestat_fields) + +""" +The order and count of statistics mentioned below needs to be in sync with the values in portstat script +So, any fields added/deleted in here should be reflected in portstat script also +""" +BUCKET_NUM = 45 +counter_bucket_dict = { + 0: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS'], + 1: ['SAI_PORT_STAT_IF_IN_ERRORS'], + 2: ['SAI_PORT_STAT_IF_IN_DISCARDS'], + 3: ['SAI_PORT_STAT_ETHER_RX_OVERSIZE_PKTS'], + 4: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_NON_UCAST_PKTS'], + 5: ['SAI_PORT_STAT_IF_OUT_ERRORS'], + 6: ['SAI_PORT_STAT_IF_OUT_DISCARDS'], + 7: ['SAI_PORT_STAT_ETHER_TX_OVERSIZE_PKTS'], + 8: ['SAI_PORT_STAT_IF_IN_OCTETS'], + 9: ['SAI_PORT_STAT_IF_OUT_OCTETS'], + 10: ['SAI_PORT_STAT_ETHER_IN_PKTS_64_OCTETS'], + 11: ['SAI_PORT_STAT_ETHER_IN_PKTS_65_TO_127_OCTETS'], + 12: ['SAI_PORT_STAT_ETHER_IN_PKTS_128_TO_255_OCTETS'], + 13: ['SAI_PORT_STAT_ETHER_IN_PKTS_256_TO_511_OCTETS'], + 14: ['SAI_PORT_STAT_ETHER_IN_PKTS_512_TO_1023_OCTETS'], + 15: 
['SAI_PORT_STAT_ETHER_IN_PKTS_1024_TO_1518_OCTETS'], + 16: ['SAI_PORT_STAT_ETHER_IN_PKTS_1519_TO_2047_OCTETS'], + 17: ['SAI_PORT_STAT_ETHER_IN_PKTS_2048_TO_4095_OCTETS'], + 18: ['SAI_PORT_STAT_ETHER_IN_PKTS_4096_TO_9216_OCTETS'], + 19: ['SAI_PORT_STAT_ETHER_IN_PKTS_9217_TO_16383_OCTETS'], + 20: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS'], + 21: ['SAI_PORT_STAT_IF_IN_MULTICAST_PKTS'], + 22: ['SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], + 23: ['SAI_PORT_STAT_IF_IN_UCAST_PKTS', 'SAI_PORT_STAT_IF_IN_MULTICAST_PKTS', + 'SAI_PORT_STAT_IF_IN_BROADCAST_PKTS'], + 24: ['SAI_PORT_STAT_ETHER_OUT_PKTS_64_OCTETS'], + 25: ['SAI_PORT_STAT_ETHER_OUT_PKTS_65_TO_127_OCTETS'], + 26: ['SAI_PORT_STAT_ETHER_OUT_PKTS_128_TO_255_OCTETS'], + 27: ['SAI_PORT_STAT_ETHER_OUT_PKTS_256_TO_511_OCTETS'], + 28: ['SAI_PORT_STAT_ETHER_OUT_PKTS_512_TO_1023_OCTETS'], + 29: ['SAI_PORT_STAT_ETHER_OUT_PKTS_1024_TO_1518_OCTETS'], + 30: ['SAI_PORT_STAT_ETHER_OUT_PKTS_1519_TO_2047_OCTETS'], + 31: ['SAI_PORT_STAT_ETHER_OUT_PKTS_2048_TO_4095_OCTETS'], + 32: ['SAI_PORT_STAT_ETHER_OUT_PKTS_4096_TO_9216_OCTETS'], + 33: ['SAI_PORT_STAT_ETHER_OUT_PKTS_9217_TO_16383_OCTETS'], + 34: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS'], + 35: ['SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS'], + 36: ['SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], + 37: ['SAI_PORT_STAT_IF_OUT_UCAST_PKTS', 'SAI_PORT_STAT_IF_OUT_MULTICAST_PKTS', + 'SAI_PORT_STAT_IF_OUT_BROADCAST_PKTS'], + 38: ['SAI_PORT_STAT_ETHER_STATS_JABBERS'], + 39: ['SAI_PORT_STAT_ETHER_STATS_FRAGMENTS'], + 40: ['SAI_PORT_STAT_ETHER_STATS_UNDERSIZE_PKTS'], + 41: ['SAI_PORT_STAT_IP_IN_RECEIVES'], + 42: ['SAI_PORT_STAT_IF_IN_FEC_CORRECTABLE_FRAMES'], + 43: ['SAI_PORT_STAT_IF_IN_FEC_NOT_CORRECTABLE_FRAMES'], + 44: ['SAI_PORT_STAT_IF_IN_FEC_SYMBOL_ERRORS'] +} + +STATUS_NA = 'N/A' + +RATES_TABLE_PREFIX = "RATES:" + +COUNTER_TABLE_PREFIX = "COUNTERS:" +COUNTERS_PORT_NAME_MAP = "COUNTERS_PORT_NAME_MAP" + +PORT_STATUS_TABLE_PREFIX = "PORT_TABLE:" +PORT_STATE_TABLE_PREFIX = "PORT_TABLE|" +PORT_OPER_STATUS_FIELD = "oper_status" +PORT_ADMIN_STATUS_FIELD = "admin_status" +PORT_STATUS_VALUE_UP = 'UP' +PORT_STATUS_VALUE_DOWN = 'DOWN' +PORT_SPEED_FIELD = "speed" + +PORT_STATE_UP = 'U' +PORT_STATE_DOWN = 'D' +PORT_STATE_DISABLED = 'X' + +LINECARD_PORT_STAT_TABLE = 'LINECARD_PORT_STAT_TABLE' +LINECARD_PORT_STAT_MARK_TABLE = 'LINECARD_PORT_STAT_MARK_TABLE' +CHASSIS_MIDPLANE_INFO_TABLE = 'CHASSIS_MIDPLANE_TABLE' + + +class Portstat(object): + def __init__(self, namespace, display_option): + self.db = None + self.multi_asic = multi_asic_util.MultiAsic(display_option, namespace) + if device_info.is_supervisor(): + self.db = SonicV2Connector(use_unix_socket_path=False) + self.db.connect(self.db.CHASSIS_STATE_DB, False) + + def get_cnstat_dict(self): + self.cnstat_dict = OrderedDict() + self.cnstat_dict['time'] = datetime.datetime.now() + self.ratestat_dict = OrderedDict() + if device_info.is_supervisor(): + self.collect_stat_from_lc() + else: + self.collect_stat() + return self.cnstat_dict, self.ratestat_dict + + def collect_stat_from_lc(self): + # Retrieve the current counter values from all LCs + + # Clear stale records + self.db.delete_all_by_pattern(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_TABLE + "*") + self.db.delete_all_by_pattern(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_MARK_TABLE + "*") + + # Check how many linecards are connected + tempdb = SonicV2Connector(use_unix_socket_path=False) + tempdb.connect(tempdb.STATE_DB, False) + linecard_midplane_keys = tempdb.keys(tempdb.STATE_DB, CHASSIS_MIDPLANE_INFO_TABLE + "*") + lc_count = 0 + if not 
linecard_midplane_keys:
+            # The LC has not published its counters, which could be due to chassis_port_counter_monitor.service not running
+            print("No linecards are connected!")
+            return
+        else:
+            for key in linecard_midplane_keys:
+                linecard_status = tempdb.get(tempdb.STATE_DB, key, "access")
+                if linecard_status == "True":
+                    lc_count += 1
+
+        # Notify the Linecards to publish their counter values instantly
+        self.db.set(self.db.CHASSIS_STATE_DB, "GET_LINECARD_COUNTER|pull", "enable", "true")
+        time.sleep(2)
+
+        # Check if all LCs have published counters
+        linecard_names = self.db.keys(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_MARK_TABLE + "*")
+        linecard_port_aliases = self.db.keys(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_TABLE + "*")
+        if not linecard_port_aliases:
+            # The LC has not published its counters, which could be due to chassis_port_counter_monitor.service not running
+            print("Linecard Counter Table is not available.")
+            return
+        if len(linecard_names) != lc_count:
+            print("Not all linecards have published their counter values.")
+            return
+
+        # Create the dictionaries to store the counter values
+        cnstat_dict = OrderedDict()
+        cnstat_dict['time'] = datetime.datetime.now()
+        ratestat_dict = OrderedDict()
+
+        # Get the counter values from CHASSIS_STATE_DB
+        for key in linecard_port_aliases:
+            rx_ok = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_ok")
+            rx_bps = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_bps")
+            rx_pps = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_pps")
+            rx_util = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_util")
+            rx_err = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_err")
+            rx_drop = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_drop")
+            rx_ovr = self.db.get(self.db.CHASSIS_STATE_DB, key, "rx_ovr")
+            tx_ok = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_ok")
+            tx_bps = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_bps")
+            tx_pps = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_pps")
+            tx_util = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_util")
+            tx_err = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_err")
+            tx_drop = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_drop")
+            tx_ovr = self.db.get(self.db.CHASSIS_STATE_DB, key, "tx_ovr")
+            fec_pre_ber = self.db.get(self.db.CHASSIS_STATE_DB, key, "fec_pre_ber")
+            fec_post_ber = self.db.get(self.db.CHASSIS_STATE_DB, key, "fec_post_ber")
+            port_alias = key.split("|")[-1]
+            cnstat_dict[port_alias] = NStats._make([rx_ok, rx_err, rx_drop, rx_ovr, tx_ok, tx_err, tx_drop, tx_ovr] +
+                                                   [STATUS_NA] * (len(NStats._fields) - 8))._asdict()
+            ratestat_dict[port_alias] = RateStats._make([rx_bps, rx_pps, rx_util, tx_bps,
+                                                         tx_pps, tx_util, fec_pre_ber, fec_post_ber])
+        self.cnstat_dict.update(cnstat_dict)
+        self.ratestat_dict.update(ratestat_dict)
+
+    @multi_asic_util.run_on_multi_asic
+    def collect_stat(self):
+        """
+        Collect the statistics from all the ASICs present on the
+        device and store them in a dict
+        """
+
+        cnstat_dict, ratestat_dict = self.get_cnstat()
+        self.cnstat_dict.update(cnstat_dict)
+        self.ratestat_dict.update(ratestat_dict)
+
+    def get_cnstat(self):
+        """
+        Get the counters info from database.
+        """
+        def get_counters(port):
+            """
+            Get the counters from the specific table.
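+
+            Each position in the returned NStats maps to a bucket in
+            counter_bucket_dict: the value is the sum of all SAI counters
+            listed for that bucket, and it becomes 'N/A' as soon as any one
+            of those counters is missing from COUNTERS_DB. For example
+            (hypothetical values), with SAI_PORT_STAT_IF_IN_UCAST_PKTS = 10
+            and SAI_PORT_STAT_IF_IN_NON_UCAST_PKTS = 2, bucket 0 (rx_ok)
+            yields '12'.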
+ """ + fields = ["0"]*BUCKET_NUM + + _, fvs = counter_table.get(PortCounter(), port) + fvs = dict(fvs) + for pos, cntr_list in counter_bucket_dict.items(): + for counter_name in cntr_list: + if counter_name not in fvs: + fields[pos] = STATUS_NA + elif fields[pos] != STATUS_NA: + fields[pos] = str(int(fields[pos]) + int(float(fvs[counter_name]))) + + cntr = NStats._make(fields)._asdict() + return cntr + + def get_rates(table_id): + """ + Get the rates from specific table. + """ + fields = ["0", "0", "0", "0", "0", "0", "0", "0"] + for pos, name in enumerate(rates_key_list): + full_table_id = RATES_TABLE_PREFIX + table_id + counter_data = self.db.get(self.db.COUNTERS_DB, full_table_id, name) + if counter_data is None: + fields[pos] = STATUS_NA + elif fields[pos] != STATUS_NA: + fields[pos] = float(counter_data) + cntr = RateStats._make(fields) + return cntr + + # Get the info from database + counter_port_name_map = self.db.get_all(self.db.COUNTERS_DB, COUNTERS_PORT_NAME_MAP) + # Build a dictionary of the stats + cnstat_dict = OrderedDict() + cnstat_dict['time'] = datetime.datetime.now() + ratestat_dict = OrderedDict() + counter_table = CounterTable(self.db.get_redis_client(self.db.COUNTERS_DB)) + if counter_port_name_map is None: + return cnstat_dict, ratestat_dict + for port in natsorted(counter_port_name_map): + port_name = port.split(":")[0] + if self.multi_asic.skip_display(constants.PORT_OBJ, port_name): + continue + cnstat_dict[port] = get_counters(port) + ratestat_dict[port] = get_rates(counter_port_name_map[port]) + return cnstat_dict, ratestat_dict + + def get_port_speed(self, port_name): + """ + Get the port speed + """ + # Get speed from APPL_DB + state_db_table_id = PORT_STATE_TABLE_PREFIX + port_name + app_db_table_id = PORT_STATUS_TABLE_PREFIX + port_name + for ns in self.multi_asic.get_ns_list_based_on_options(): + self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + speed = self.db.get(self.db.STATE_DB, state_db_table_id, PORT_SPEED_FIELD) + oper_status = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_OPER_STATUS_FIELD) + if speed is None or speed == STATUS_NA or oper_status != "up": + speed = self.db.get(self.db.APPL_DB, app_db_table_id, PORT_SPEED_FIELD) + if speed is not None: + return int(speed) + return STATUS_NA + + def get_port_state(self, port_name): + """ + Get the port state + """ + if device_info.is_supervisor(): + self.db.connect(self.db.CHASSIS_STATE_DB, False) + return self.db.get(self.db.CHASSIS_STATE_DB, LINECARD_PORT_STAT_TABLE + "|" + port_name, "state") + + full_table_id = PORT_STATUS_TABLE_PREFIX + port_name + for ns in self.multi_asic.get_ns_list_based_on_options(): + self.db = multi_asic.connect_to_all_dbs_for_ns(ns) + admin_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_ADMIN_STATUS_FIELD) + oper_state = self.db.get(self.db.APPL_DB, full_table_id, PORT_OPER_STATUS_FIELD) + + if admin_state is None or oper_state is None: + continue + if admin_state.upper() == PORT_STATUS_VALUE_DOWN: + return PORT_STATE_DISABLED + elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_UP: + return PORT_STATE_UP + elif admin_state.upper() == PORT_STATUS_VALUE_UP and oper_state.upper() == PORT_STATUS_VALUE_DOWN: + return PORT_STATE_DOWN + else: + return STATUS_NA + return STATUS_NA + + def cnstat_print(self, cnstat_dict, ratestat_dict, intf_list, use_json, print_all, + errors_only, fec_stats_only, rates_only, detail=False): + """ + Print the cnstat. 
+ """ + + if intf_list and detail: + self.cnstat_intf_diff_print(cnstat_dict, {}, intf_list) + return None + + table = [] + header = None + + for key in natsorted(cnstat_dict.keys()): + if key == 'time': + continue + if intf_list and key not in intf_list: + continue + port_speed = self.get_port_speed(key) + data = cnstat_dict[key] + rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(rates_key_list))) + if print_all: + header = header_all + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(data["tx_err"]), + format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + elif errors_only: + header = header_errors_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_err"]), + format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + elif fec_stats_only: + header = header_fec_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data['fec_corr']), + format_number_with_comma(data['fec_uncorr']), + format_number_with_comma(data['fec_symbol_err']), + format_fec_ber(rates.fec_pre_ber), + format_fec_ber(rates.fec_post_ber))) + elif rates_only: + header = header_rates_only + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + header = header_std + table.append((key, self.get_port_state(key), + format_number_with_comma(data["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(data["rx_err"]), + format_number_with_comma(data["rx_drop"]), + format_number_with_comma(data["rx_ovr"]), + format_number_with_comma(data["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(data["tx_err"]), + format_number_with_comma(data["tx_drop"]), + format_number_with_comma(data["tx_ovr"]))) + if table: + if use_json: + print(table_as_json(table, header)) + else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") + + def cnstat_intf_diff_print(self, cnstat_new_dict, cnstat_old_dict, intf_list): + """ + Print 
the difference between two cnstat results for interface. + """ + + for key in natsorted(cnstat_new_dict.keys()): + cntr = cnstat_new_dict.get(key) + if key == 'time': + continue + + if key in cnstat_old_dict: + old_cntr = cnstat_old_dict.get(key) + else: + old_cntr = NStats._make([0] * BUCKET_NUM)._asdict() + + if intf_list and key not in intf_list: + continue + + print("Packets Received 64 Octets..................... {}".format(ns_diff(cntr['rx_64'], + old_cntr['rx_64']))) + print("Packets Received 65-127 Octets................. {}".format(ns_diff(cntr['rx_65_127'], + old_cntr['rx_65_127']))) + print("Packets Received 128-255 Octets................ {}".format(ns_diff(cntr['rx_128_255'], + old_cntr['rx_128_255']))) + print("Packets Received 256-511 Octets................ {}".format(ns_diff(cntr['rx_256_511'], + old_cntr['rx_256_511']))) + print("Packets Received 512-1023 Octets............... {}".format(ns_diff(cntr['rx_512_1023'], + old_cntr['rx_512_1023']))) + print("Packets Received 1024-1518 Octets.............. {}".format(ns_diff(cntr['rx_1024_1518'], + old_cntr['rx_1024_1518']))) + print("Packets Received 1519-2047 Octets.............. {}".format(ns_diff(cntr['rx_1519_2047'], + old_cntr['rx_1519_2047']))) + print("Packets Received 2048-4095 Octets.............. {}".format(ns_diff(cntr['rx_2048_4095'], + old_cntr['rx_2048_4095']))) + print("Packets Received 4096-9216 Octets.............. {}".format(ns_diff(cntr['rx_4096_9216'], + old_cntr['rx_4096_9216']))) + print("Packets Received 9217-16383 Octets............. {}".format(ns_diff(cntr['rx_9217_16383'], + old_cntr['rx_9217_16383']))) + + print("") + print("Total Packets Received Without Errors.......... {}".format(ns_diff(cntr['rx_all'], + old_cntr['rx_all']))) + print("Unicast Packets Received....................... {}".format(ns_diff(cntr['rx_uca'], + old_cntr['rx_uca']))) + print("Multicast Packets Received..................... {}".format(ns_diff(cntr['rx_mca'], + old_cntr['rx_mca']))) + print("Broadcast Packets Received..................... {}".format(ns_diff(cntr['rx_bca'], + old_cntr['rx_bca']))) + + print("") + print("Jabbers Received............................... {}".format(ns_diff(cntr['rx_jbr'], + old_cntr['rx_jbr']))) + print("Fragments Received............................. {}".format(ns_diff(cntr['rx_frag'], + old_cntr['rx_frag']))) + print("Undersize Received............................. {}".format(ns_diff(cntr['rx_usize'], + old_cntr['rx_usize']))) + print("Overruns Received.............................. {}".format(ns_diff(cntr["rx_ovrrun"], + old_cntr["rx_ovrrun"]))) + + print("") + print("Packets Transmitted 64 Octets.................. {}".format(ns_diff(cntr['tx_64'], + old_cntr['tx_64']))) + print("Packets Transmitted 65-127 Octets.............. {}".format(ns_diff(cntr['tx_65_127'], + old_cntr['tx_65_127']))) + print("Packets Transmitted 128-255 Octets............. {}".format(ns_diff(cntr['tx_128_255'], + old_cntr['tx_128_255']))) + print("Packets Transmitted 256-511 Octets............. {}".format(ns_diff(cntr['tx_256_511'], + old_cntr['tx_256_511']))) + print("Packets Transmitted 512-1023 Octets............ {}".format(ns_diff(cntr['tx_512_1023'], + old_cntr['tx_512_1023']))) + print("Packets Transmitted 1024-1518 Octets........... {}".format(ns_diff(cntr['tx_1024_1518'], + old_cntr['tx_1024_1518']))) + print("Packets Transmitted 1519-2047 Octets........... {}".format(ns_diff(cntr['tx_1519_2047'], + old_cntr['tx_1519_2047']))) + print("Packets Transmitted 2048-4095 Octets........... 
{}".format(ns_diff(cntr['tx_2048_4095'], + old_cntr['tx_2048_4095']))) + print("Packets Transmitted 4096-9216 Octets........... {}".format(ns_diff(cntr['tx_4096_9216'], + old_cntr['tx_4096_9216']))) + print("Packets Transmitted 9217-16383 Octets.......... {}".format(ns_diff(cntr['tx_9217_16383'], + old_cntr['tx_9217_16383']))) + + print("") + print("Total Packets Transmitted Successfully......... {}".format(ns_diff(cntr['tx_all'], + old_cntr['tx_all']))) + print("Unicast Packets Transmitted.................... {}".format(ns_diff(cntr['tx_uca'], + old_cntr['tx_uca']))) + print("Multicast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_mca'], + old_cntr['tx_mca']))) + print("Broadcast Packets Transmitted.................. {}".format(ns_diff(cntr['tx_bca'], + old_cntr['tx_bca']))) + + print("Time Since Counters Last Cleared............... " + str(cnstat_old_dict.get('time'))) + + def cnstat_diff_print(self, cnstat_new_dict, cnstat_old_dict, + ratestat_dict, intf_list, use_json, + print_all, errors_only, fec_stats_only, + rates_only, detail=False): + """ + Print the difference between two cnstat results. + """ + + if intf_list and detail: + self.cnstat_intf_diff_print(cnstat_new_dict, cnstat_old_dict, intf_list) + return None + + table = [] + header = None + + for key in natsorted(cnstat_new_dict.keys()): + cntr = cnstat_new_dict.get(key) + if key == 'time': + continue + old_cntr = None + if key in cnstat_old_dict: + old_cntr = cnstat_old_dict.get(key) + + rates = ratestat_dict.get(key, RateStats._make([STATUS_NA] * len(ratestat_fields))) + + if intf_list and key not in intf_list: + continue + port_speed = self.get_port_speed(key) + + if print_all: + header = header_all + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["rx_err"]), + format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + elif errors_only: + header = header_errors_only + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + 
ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr["rx_err"]), + format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + elif fec_stats_only: + header = header_fec_only + if old_cntr is not None: + table.append((key, self.get_port_state(key), + ns_diff(cntr['fec_corr'], old_cntr['fec_corr']), + ns_diff(cntr['fec_uncorr'], old_cntr['fec_uncorr']), + ns_diff(cntr['fec_symbol_err'], old_cntr['fec_symbol_err']))) + else: + table.append((key, self.get_port_state(key), + format_number_with_comma(cntr['fec_corr']), + format_number_with_comma(cntr['fec_uncorr']), + format_number_with_comma(cntr['fec_symbol_err']))) + + elif rates_only: + header = header_rates_only + if old_cntr is not None: + table.append((key, + self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + table.append((key, + self.get_port_state(key), + format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_prate(rates.rx_pps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_prate(rates.tx_pps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util))) + else: + header = header_std + if old_cntr is not None: + table.append((key, + self.get_port_state(key), + ns_diff(cntr["rx_ok"], old_cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + ns_diff(cntr["rx_err"], old_cntr["rx_err"]), + ns_diff(cntr["rx_drop"], old_cntr["rx_drop"]), + ns_diff(cntr["rx_ovr"], old_cntr["rx_ovr"]), + ns_diff(cntr["tx_ok"], old_cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + ns_diff(cntr["tx_err"], old_cntr["tx_err"]), + ns_diff(cntr["tx_drop"], old_cntr["tx_drop"]), + ns_diff(cntr["tx_ovr"], old_cntr["tx_ovr"]))) + else: + table.append((key, + self.get_port_state(key), + format_number_with_comma(cntr["rx_ok"]), + format_brate(rates.rx_bps), + format_util(rates.rx_bps, port_speed) + if rates.rx_util == STATUS_NA else format_util_directly(rates.rx_util), + format_number_with_comma(cntr["rx_err"]), + format_number_with_comma(cntr["rx_drop"]), + format_number_with_comma(cntr["rx_ovr"]), + format_number_with_comma(cntr["tx_ok"]), + format_brate(rates.tx_bps), + format_util(rates.tx_bps, port_speed) + if rates.tx_util == STATUS_NA else format_util_directly(rates.tx_util), + format_number_with_comma(cntr["tx_err"]), + format_number_with_comma(cntr["tx_drop"]), + format_number_with_comma(cntr["tx_ovr"]))) + if table: + if use_json: + print(table_as_json(table, header)) + 
else: + print(tabulate(table, header, tablefmt='simple', stralign='right')) + if (multi_asic.is_multi_asic() or device_info.is_chassis()) and not use_json: + print("\nReminder: Please execute 'show interface counters -d all' to include internal links\n") diff --git a/wol/main.py b/wol/main.py deleted file mode 100644 index 3b569a3a4f..0000000000 --- a/wol/main.py +++ /dev/null @@ -1,202 +0,0 @@ -#!/usr/bin/env python3 - -""" -use wol to generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - -Usage: wol_click [OPTIONS] INTERFACE TARGET_MAC - - Generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - -Options: - -b Use broadcast MAC address instead of target device's MAC - address as Destination MAC Address in Ethernet Frame Header. - [default: False] - -p password An optional 4 or 6 byte password, in ethernet hex format or - quad-dotted decimal [default: ] - -c count For each target MAC address, the count of magic packets to - send. count must between 1 and 5. This param must use with -i. - [default: 1] - -i interval Wait interval milliseconds between sending each magic packet. - interval must between 0 and 2000. This param must use with -c. - [default: 0] - -v Verbose output [default: False] - -h, --help Show this message and exit. - -Examples: - wol Ethernet10 00:11:22:33:44:55 - wol Ethernet10 00:11:22:33:44:55 -b - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 00:22:44:66:88:aa - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 192.168.1.1 -c 3 -i 2000 -""" - -import binascii -import click -import copy -import netifaces -import os -import socket -import time - -CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) -EPILOG = """\b -Examples: - wol Ethernet10 00:11:22:33:44:55 - wol Ethernet10 00:11:22:33:44:55 -b - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 00:22:44:66:88:aa - wol Vlan1000 00:11:22:33:44:55,11:33:55:77:99:bb -p 192.168.1.1 -c 3 -i 2000 -""" -ORDINAL_NUMBER = ["0", "1st", "2nd", "3rd", "4th", "5th"] -ETHER_TYPE_WOL = b'\x08\x42' - - -class MacAddress(object): - """ - Class to handle MAC addresses and perform operations on them. - - Attributes: - - address: bytes - """ - - def __init__(self, address: str): - """ - Constructor to instantiate the MacAddress class. - - Parameters: - - address: str - The MAC address in the format '01:23:45:67:89:AB' or '01-23-45-67-89-AB'. - - Raises: - - ValueError: - Throws an error if the provided address is not in the correct format. 
- """ - try: - self.address = binascii.unhexlify(address.replace(':', '').replace('-', '')) - except binascii.Error: - raise ValueError("invalid MAC address") - if len(self.address) != 6: - raise ValueError("invalid MAC address") - - def __str__(self): - return ":".join(["%02x" % v for v in self.address]) - - def __eq__(self, other): - return self.address == other.address - - def to_bytes(self): - return copy.copy(self.address) - - -BROADCAST_MAC = MacAddress('ff:ff:ff:ff:ff:ff') - - -def is_root(): - return os.geteuid() == 0 - - -def get_interface_operstate(interface): - with open('/sys/class/net/{}/operstate'.format(interface), 'r') as f: - return f.read().strip().lower() - - -def get_interface_mac(interface): - return MacAddress(netifaces.ifaddresses(interface)[netifaces.AF_LINK][0].get('addr')) - - -def build_magic_packet(interface, target_mac, broadcast, password): - dst_mac = BROADCAST_MAC if broadcast else target_mac - src_mac = get_interface_mac(interface) - return dst_mac.to_bytes() + src_mac.to_bytes() + ETHER_TYPE_WOL \ - + b'\xff' * 6 + target_mac.to_bytes() * 16 + password - - -def send_magic_packet(interface, target_mac, pkt, count, interval, verbose): - if verbose: - print("Sending {} magic packet to {} via interface {}".format(count, target_mac, interface)) - sock = socket.socket(socket.AF_PACKET, socket.SOCK_RAW) - sock.bind((interface, 0)) - for i in range(count): - sock.send(pkt) - if verbose: - print("{} magic packet sent to {}".format(ORDINAL_NUMBER[i + 1], target_mac)) - if i + 1 != count: - time.sleep(interval / 1000) - sock.close() - - -def validate_interface(ctx, param, value): - if value not in netifaces.interfaces(): - raise click.BadParameter("invalid SONiC interface name {}".format(value)) - if get_interface_operstate(value) != 'up': - raise click.BadParameter("interface {} is not up".format(value)) - return value - - -def parse_target_mac(ctx, param, value): - mac_list = [] - for mac in value.split(','): - try: - mac_list.append(MacAddress(mac)) - except ValueError: - raise click.BadParameter("invalid MAC address {}".format(mac)) - return mac_list - - -def parse_password(ctx, param, value): - if len(value) == 0: - return b'' # Empty password is valid. - elif len(value) <= 15: # The length of a valid IPv4 address is less or equal to 15. - try: - password = socket.inet_aton(value) - except OSError: - raise click.BadParameter("invalid password format") - else: # The length of a valid MAC address is 17. - try: - password = MacAddress(value).to_bytes() - except ValueError: - raise click.BadParameter("invalid password format") - if len(password) not in [4, 6]: - raise click.BadParameter("password must be 4 or 6 bytes or empty") - return password - - -def validate_count_interval(count, interval): - if count is None and interval is None: - return 1, 0 # By default, count=1 and interval=0. - if count is None or interval is None: - raise click.BadParameter("count and interval must be used together") - # The values are confirmed in valid range by click.IntRange(). 
- return count, interval - - -@click.command(context_settings=CONTEXT_SETTINGS, epilog=EPILOG) -@click.argument('interface', type=click.STRING, callback=validate_interface) -@click.argument('target_mac', type=click.STRING, callback=parse_target_mac) -@click.option('-b', 'broadcast', is_flag=True, show_default=True, default=False, - help="Use broadcast MAC address instead of target device's MAC address as Destination MAC Address in Ethernet Frame Header.") -@click.option('-p', 'password', type=click.STRING, show_default=True, default='', callback=parse_password, metavar='password', - help='An optional 4 or 6 byte password, in ethernet hex format or quad-dotted decimal') -@click.option('-c', 'count', type=click.IntRange(1, 5), metavar='count', show_default=True, # default=1, - help='For each target MAC address, the count of magic packets to send. count must between 1 and 5. This param must use with -i.') -@click.option('-i', 'interval', type=click.IntRange(0, 2000), metavar='interval', # show_default=True, default=0, - help="Wait interval milliseconds between sending each magic packet. interval must between 0 and 2000. This param must use with -c.") -@click.option('-v', 'verbose', is_flag=True, show_default=True, default=False, - help='Verbose output') -def wol(interface, target_mac, broadcast, password, count, interval, verbose): - """ - Generate and send Wake-On-LAN (WOL) "Magic Packet" to specific interface - """ - count, interval = validate_count_interval(count, interval) - - if not is_root(): - raise click.ClickException("root priviledge is required to run this script") - - for mac in target_mac: - pkt = build_magic_packet(interface, mac, broadcast, password) - try: - send_magic_packet(interface, mac, pkt, count, interval, verbose) - except Exception as e: - raise click.ClickException(f'Exception: {e}') - - -if __name__ == '__main__': - wol()
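
The `cnstat_diff_print` logic added above follows a snapshot-diff pattern: the current counter dict is compared against a previously saved snapshot, and a port missing from the old snapshot is diffed against a zero baseline (the `NStats._make([0] * BUCKET_NUM)` fallback), so the diff degrades gracefully to the raw counter value. A condensed sketch of that pattern, with simplified dict shapes (the real code uses namedtuples, and its `ns_diff` also tolerates 'N/A' values):

```python
# Condensed sketch of the snapshot-diff pattern used by cnstat_diff_print
# above. Dict shapes are simplified relative to the real code.
from natsort import natsorted


def ns_diff(new, old):
    """Difference of two counters, formatted with thousands separators."""
    return format(int(new) - int(old), ',d')


def diff_counters(new_snap, old_snap, intf_list=None):
    rows = []
    for key in natsorted(new_snap):
        if key == 'time' or (intf_list and key not in intf_list):
            continue
        # A port absent from the old snapshot is diffed against zeroes,
        # mirroring the NStats._make([0] * BUCKET_NUM) fallback above.
        old = old_snap.get(key, {f: 0 for f in new_snap[key]})
        rows.append((key,
                     ns_diff(new_snap[key]['rx_ok'], old['rx_ok']),
                     ns_diff(new_snap[key]['tx_ok'], old['tx_ok'])))
    return rows


print(diff_counters({'Ethernet0': {'rx_ok': 1500, 'tx_ok': 900}},
                    {'Ethernet0': {'rx_ok': 1000, 'tx_ok': 400}}))
# -> [('Ethernet0', '500', '500')]
```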
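
For reference on the `wol/main.py` removal: the deleted `build_magic_packet` assembled a standard Wake-on-LAN frame, i.e. destination MAC (target or broadcast), source MAC, EtherType `0x0842`, a synchronization stream of six `0xFF` bytes, the target MAC repeated 16 times, and an optional 4- or 6-byte password. A minimal standalone sketch of that layout (helper names here are illustrative, not sonic-utilities APIs):

```python
# Minimal sketch of the Wake-on-LAN frame layout built by the deleted
# wol/main.py. Helper names are illustrative.
import binascii

ETHER_TYPE_WOL = b'\x08\x42'


def mac_to_bytes(mac):
    """'00:11:22:33:44:55' or '00-11-22-33-44-55' -> 6 raw bytes."""
    raw = binascii.unhexlify(mac.replace(':', '').replace('-', ''))
    if len(raw) != 6:
        raise ValueError("invalid MAC address")
    return raw


def build_magic_packet(src_mac, target_mac, broadcast=False, password=b''):
    dst = b'\xff' * 6 if broadcast else mac_to_bytes(target_mac)
    # Payload: 6 x 0xFF sync stream, target MAC x 16, optional password.
    payload = b'\xff' * 6 + mac_to_bytes(target_mac) * 16 + password
    return dst + mac_to_bytes(src_mac) + ETHER_TYPE_WOL + payload


pkt = build_magic_packet('aa:bb:cc:dd:ee:ff', '00:11:22:33:44:55')
assert len(pkt) == 6 + 6 + 2 + 6 + 16 * 6  # 116 bytes with an empty password
```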
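
The root-privilege check in the deleted tool exists because sending a raw Ethernet frame on Linux requires an `AF_PACKET`/`SOCK_RAW` socket bound to the egress interface, which needs `CAP_NET_RAW` (typically root). A sketch of that send path (Linux-only; assumes `frame` was built as in the sketch above):

```python
# Sketch of the raw-frame send path (Linux-only): AF_PACKET sockets need
# CAP_NET_RAW, which is why the deleted wol command insisted on root.
import socket


def send_frame(interface, frame):
    with socket.socket(socket.AF_PACKET, socket.SOCK_RAW) as sock:
        sock.bind((interface, 0))  # bind to the egress interface by name
        sock.send(frame)
```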