diff --git a/setup-container.sh b/setup-container.sh index d549d304669..4f6ae69a0ef 100755 --- a/setup-container.sh +++ b/setup-container.sh @@ -5,7 +5,7 @@ declare -r SCRIPT_PATH="$(readlink -f "${0}")" declare -r SCRIPT_DIR="$(dirname "${SCRIPT_PATH}")" declare -r DOCKER_REGISTRY="sonicdev-microsoft.azurecr.io:443" -declare -r DOCKER_SONIC_MGMT="docker-sonic-mgmt" +declare -r DOCKER_SONIC_MGMT="docker-sonic-mgmt:latest" declare -r LOCAL_IMAGE_NAME="docker-sonic-mgmt-$(echo "${USER}" | tr '[:upper:]' '[:lower:]')" declare -r LOCAL_IMAGE_TAG="master" declare -r LOCAL_IMAGE="${LOCAL_IMAGE_NAME}:${LOCAL_IMAGE_TAG}" diff --git a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml index 186cd7345b8..b5924ecb0e0 100644 --- a/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml +++ b/tests/common/plugins/conditional_mark/tests_mark_conditions.yaml @@ -320,6 +320,18 @@ dash/test_dash_disable_enable_eni.py: conditions: - "asic_type in ['vs'] and https://github.com/sonic-net/sonic-mgmt/issues/16407" +dash/test_dash_privatelink.py: + skip: + reason: "Currently dash tests are not supported on KVM" + conditions: + - "asic_type in ['vs'] and https://github.com/sonic-net/sonic-mgmt/issues/16407" + +dash/test_dash_vnet.py: + skip: + reason: "Currently dash tests are not supported on KVM" + conditions: + - "asic_type in ['vs'] and https://github.com/sonic-net/sonic-mgmt/issues/16407" + dash/test_relaxed_match_negative.py: skip: reason: "Currently dash tests are not supported on KVM" diff --git a/tests/dash/configs/privatelink_config.py b/tests/dash/configs/privatelink_config.py new file mode 100644 index 00000000000..e0f2a98041e --- /dev/null +++ b/tests/dash/configs/privatelink_config.py @@ -0,0 +1,137 @@ +from dash_api.eni_pb2 import State +from dash_api.route_type_pb2 import ActionType, EncapType, RoutingType + +VNET = "vnet" +VNET_ENCAP = "vnet_encap" +VNET_DIRECT = "vnet_direct" 
+PRIVATELINK = "privatelink" +DECAP = "decap" + +APPLIANCE_VIP = "10.1.0.5" +VM1_PA = "25.1.1.1" # VM host physical address +VM1_CA = "10.0.0.11" # VM customer address +VM_CA_SUBNET = "10.0.0.0/16" +PE_PA = "101.1.2.3" # private endpoint physical address +PE_CA = "10.2.0.100" # private endpoint customer address +PE_CA_SUBNET = "10.2.0.0/16" +PL_ENCODING_IP = "::56b2:0:ff71:0:0" +PL_ENCODING_MASK = "::ffff:ffff:ffff:0:0" +PL_OVERLAY_SIP = "fd41:108:20:abc:abc::0" +PL_OVERLAY_SIP_MASK = "ffff:ffff:ffff:ffff:ffff:ffff::" +PL_OVERLAY_DIP = "2603:10e1:100:2::3401:203" +PL_OVERLAY_DIP_MASK = "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff" + +APPLIANCE_ID = "100" +VM_VNI = "4321" +ENCAP_VNI = 100 +VNET1 = "Vnet1" +VNET1_VNI = "45654" +VNET1_GUID = "559c6ce8-26ab-4193-b946-ccc6e8f930b2" +ENI_MAC = "F4:93:9F:EF:C4:7E" +ENI_MAC_STRING = ENI_MAC.replace(":", "") +REMOTE_MAC = "43:BE:65:25:FA:67" +REMOTE_MAC_STRING = REMOTE_MAC.replace(":", "") +ENI_ID = "497f23d7-f0ac-4c99-a98f-59b470e8c7bd" +ROUTE_GROUP1 = "RouteGroup1" +ROUTE_GROUP2 = "RouteGroup2" +ROUTE_GROUP1_GUID = "48af6ce8-26cc-4293-bfa6-0126e8fcdeb2" +ROUTE_GROUP2_GUID = "58cf62e0-22cc-4693-baa6-012358fcdec9" + + +APPLIANCE_CONFIG = { + f"DASH_APPLIANCE_TABLE:{APPLIANCE_ID}": { + "sip": APPLIANCE_VIP, + "vm_vni": VM_VNI + } +} + +VNET_CONFIG = { + f"DASH_VNET_TABLE:{VNET1}": { + "vni": VNET1_VNI, + "guid": VNET1_GUID + } +} + +ENI_CONFIG = { + f"DASH_ENI_TABLE:{ENI_ID}": { + "vnet": VNET1, + "underlay_ip": VM1_PA, + "mac_address": ENI_MAC, + "eni_id": ENI_ID, + "admin_state": State.STATE_ENABLED, + "pl_underlay_sip": APPLIANCE_VIP, + "pl_sip_encoding": f"{PL_ENCODING_IP}/{PL_ENCODING_MASK}" + } +} + +PE_VNET_MAPPING_CONFIG = { + f"DASH_VNET_MAPPING_TABLE:{VNET1}:{PE_CA}": { + "routing_type": RoutingType.ROUTING_TYPE_PRIVATELINK, + "underlay_ip": PE_PA, + "overlay_sip_prefix": f"{PL_OVERLAY_SIP}/{PL_OVERLAY_SIP_MASK}", + "overlay_dip_prefix": f"{PL_OVERLAY_DIP}/{PL_OVERLAY_DIP_MASK}", + } +} + +VM1_VNET_MAPPING_CONFIG = { + 
f"DASH_VNET_MAPPING_TABLE:{VNET1}:{VM1_CA}": { + "routing_type": RoutingType.ROUTING_TYPE_VNET, + "underlay_ip": VM1_PA, + } +} + +PE_SUBNET_ROUTE_CONFIG = { + f"DASH_ROUTE_TABLE:{ROUTE_GROUP1}:{PE_CA_SUBNET}": { + "routing_type": RoutingType.ROUTING_TYPE_VNET, + "vnet": VNET1, + } +} + +VM_SUBNET_ROUTE_CONFIG = { + f"DASH_ROUTE_TABLE:{ROUTE_GROUP1}:{VM_CA_SUBNET}": { + "routing_type": RoutingType.ROUTING_TYPE_VNET, + "vnet": VNET1, + } +} + +ROUTING_TYPE_VNET_CONFIG = { + f"DASH_ROUTING_TYPE_TABLE:{VNET}": { + "items": [ + { + "action_name": "action1", + "action_type": ActionType.ACTION_TYPE_STATICENCAP, + "encap_type": EncapType.ENCAP_TYPE_VXLAN, + }, + ] + } +} + +ROUTING_TYPE_PL_CONFIG = { + f"DASH_ROUTING_TYPE_TABLE:{PRIVATELINK}": { + "items": [ + { + "action_name": "action1", + "action_type": ActionType.ACTION_TYPE_4_to_6 + }, + { + "action_name": "action2", + "action_type": ActionType.ACTION_TYPE_STATICENCAP, + "encap_type": EncapType.ENCAP_TYPE_NVGRE, + "vni": ENCAP_VNI + } + ] + } +} + +ROUTE_GROUP1_CONFIG = { + f"DASH_ROUTE_GROUP_TABLE:{ROUTE_GROUP1}": { + "guid": ROUTE_GROUP1_GUID, + "version": "rg_version" + } +} + +ENI_ROUTE_GROUP1_CONFIG = { + f"DASH_ENI_ROUTE_TABLE:{ENI_ID}": { + "group_id": ROUTE_GROUP1 + } +} diff --git a/tests/dash/conftest.py b/tests/dash/conftest.py index 445fba4ae85..9072d7f7215 100644 --- a/tests/dash/conftest.py +++ b/tests/dash/conftest.py @@ -5,10 +5,11 @@ import time from ipaddress import ip_interface -from constants import ENI, VM_VNI, VNET1_VNI, VNET2_VNI, REMOTE_CA_IP, LOCAL_CA_IP, REMOTE_ENI_MAC,\ - LOCAL_ENI_MAC, REMOTE_CA_PREFIX, LOOPBACK_IP, DUT_MAC, LOCAL_PA_IP, LOCAL_PTF_INTF, LOCAL_PTF_MAC,\ +from constants import ENI, VM_VNI, VNET1_VNI, VNET2_VNI, REMOTE_CA_IP, LOCAL_CA_IP, REMOTE_ENI_MAC, \ + LOCAL_ENI_MAC, REMOTE_CA_PREFIX, LOOPBACK_IP, DUT_MAC, LOCAL_PA_IP, LOCAL_PTF_INTF, LOCAL_PTF_MAC, \ REMOTE_PA_IP, REMOTE_PTF_INTF, REMOTE_PTF_MAC, REMOTE_PA_PREFIX, VNET1_NAME, VNET2_NAME, ROUTING_ACTION, \ - 
ROUTING_ACTION_TYPE, LOOKUP_OVERLAY_IP, ACL_GROUP, ACL_STAGE + ROUTING_ACTION_TYPE, LOOKUP_OVERLAY_IP, ACL_GROUP, ACL_STAGE, LOCAL_DUT_INTF, REMOTE_DUT_INTF, \ + REMOTE_PTF_SEND_INTF, REMOTE_PTF_RECV_INTF from dash_utils import render_template_to_host, apply_swssconfig_file from gnmi_utils import generate_gnmi_cert, apply_gnmi_cert, recover_gnmi_cert, apply_gnmi_file from dash_acl import AclGroup, DEFAULT_ACL_GROUP, WAIT_AFTER_CONFIG, DefaultAclRule @@ -114,12 +115,47 @@ def get_intf_from_ip(local_ip, config_facts): if str(intf_ip.ip) == local_ip: return intf, intf_ip + for intf, config in list(config_facts["PORTCHANNEL_INTERFACE"].items()): + for ip in config: + intf_ip = ip_interface(ip) + if str(intf_ip.ip) == local_ip: + return intf, intf_ip + @pytest.fixture(params=["no-underlay-route", "with-underlay-route"]) def use_underlay_route(request): return request.param == "with-underlay-route" +@pytest.fixture(scope="module") +def dash_pl_config(duthost, config_facts, minigraph_facts): + dash_info = { + DUT_MAC: config_facts["DEVICE_METADATA"]["localhost"]["mac"], + LOCAL_CA_IP: "10.2.2.2", + } + + neigh_table = duthost.switch_arptable()['ansible_facts']['arptable'] + for neigh_ip, config in list(config_facts["BGP_NEIGHBOR"].items()): + if ip_interface(neigh_ip).version == 4: + if LOCAL_PTF_INTF not in dash_info and config["name"].endswith("T0"): + intf, _ = get_intf_from_ip(config['local_addr'], config_facts) + dash_info[LOCAL_PTF_INTF] = minigraph_facts["minigraph_ptf_indices"][intf] + dash_info[LOCAL_DUT_INTF] = intf + dash_info[LOCAL_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"] + if REMOTE_PTF_INTF not in dash_info and config["name"].endswith("T2"): + intf, _ = get_intf_from_ip(config['local_addr'], config_facts) + intfs = list(config_facts["PORTCHANNEL_MEMBER"][intf].keys()) + dash_info[REMOTE_PTF_SEND_INTF] = minigraph_facts["minigraph_ptf_indices"][intfs[0]] + dash_info[REMOTE_PTF_RECV_INTF] = [minigraph_facts["minigraph_ptf_indices"][i] for i in 
intfs] + dash_info[REMOTE_DUT_INTF] = intf + dash_info[REMOTE_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"] + + if REMOTE_PTF_INTF in dash_info and LOCAL_PTF_INTF in dash_info: + break + + return dash_info + + @pytest.fixture(scope="function") def dash_config_info(duthost, config_facts, minigraph_facts, tbinfo): dash_info = { @@ -148,11 +184,13 @@ def dash_config_info(duthost, config_facts, minigraph_facts, tbinfo): # Take neighbor 1 as local PA, take neighbor 2 as remote PA if ip_interface(neigh_ip).version == 4: if LOCAL_PA_IP not in dash_info: - dash_info[LOCAL_PA_IP] = neigh_ip intf, _ = get_intf_from_ip(config['local_addr'], config_facts) + if "PortChannel" in intf: + continue + dash_info[LOCAL_PA_IP] = neigh_ip dash_info[LOCAL_PTF_INTF] = minigraph_facts["minigraph_ptf_indices"][intf] dash_info[LOCAL_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"] - if topo == 'dpu-1' and REMOTE_PA_IP not in dash_info: + if (topo == 'dpu-1' or topo == "t1-28-lag") and REMOTE_PA_IP not in dash_info: # For DPU with only one single port, we just have one neighbor (neighbor 1). # So, we take neighbor 1 as the local PA. 
For the remote PA, # we take the original neighbor 2's IP as the remote PA IP, @@ -170,8 +208,10 @@ def dash_config_info(duthost, config_facts, minigraph_facts, tbinfo): dash_info[REMOTE_PA_PREFIX] = fake_neighbor_2_prefix break elif REMOTE_PA_IP not in dash_info: - dash_info[REMOTE_PA_IP] = neigh_ip intf, intf_ip = get_intf_from_ip(config['local_addr'], config_facts) + if "PortChannel" in intf: + continue + dash_info[REMOTE_PA_IP] = neigh_ip dash_info[REMOTE_PTF_INTF] = minigraph_facts["minigraph_ptf_indices"][intf] dash_info[REMOTE_PTF_MAC] = neigh_table["v4"][neigh_ip]["macaddress"] dash_info[REMOTE_PA_PREFIX] = str(intf_ip.network) @@ -354,3 +394,8 @@ def acl_default_rule(localhost, duthost, ptfhost, dash_config_info): default_acl_rule.teardown() del default_acl_group time.sleep(WAIT_AFTER_CONFIG) + + +@pytest.fixture(scope="module") +def dpu_index(): + return 0 diff --git a/tests/dash/constants.py b/tests/dash/constants.py index d5105c1001d..b289d88f9cc 100644 --- a/tests/dash/constants.py +++ b/tests/dash/constants.py @@ -22,6 +22,10 @@ REMOTE_PA_PREFIX = "remote_pa_prefix" LOCAL_PTF_INTF = "local_ptf_intf" REMOTE_PTF_INTF = "remote_ptf_intf" +LOCAL_DUT_INTF = "local_dut_intf" +REMOTE_DUT_INTF = "remote_dut_intf" +REMOTE_PTF_SEND_INTF = "remote_ptf_send_intf" +REMOTE_PTF_RECV_INTF = "remote_ptf_recv_intf" ROUTING_ACTION = "routing_action" ROUTING_ACTION_TYPE = "routing_action_type" LOOKUP_OVERLAY_IP = "lookup_overlay_ip" diff --git a/tests/dash/gnmi_utils.py b/tests/dash/gnmi_utils.py index cf38c8de92d..7a30587f91f 100644 --- a/tests/dash/gnmi_utils.py +++ b/tests/dash/gnmi_utils.py @@ -1,12 +1,12 @@ -import logging import json +import logging +import math import time import uuid -import math from functools import lru_cache -import pytest import proto_utils +import pytest logger = logging.getLogger(__name__) @@ -23,7 +23,8 @@ def __init__(self, duthost): self.gnmi_client_cert = "gnmiclient.crt" self.gnmi_client_key = "gnmiclient.key" 
self.gnmi_server_start_wait_time = 30 - self.enable_zmq = duthost.shell("netstat -na | grep -w 8100", module_ignore_errors=True)['rc'] == 0 + # self.enable_zmq = duthost.shell("netstat -na | grep -w 8100", module_ignore_errors=True)['rc'] == 0 + self.enable_zmq = True cmd = "docker images | grep -w sonic-gnmi" if duthost.shell(cmd, module_ignore_errors=True)['rc'] == 0: cmd = "docker ps | grep -w gnmi" @@ -267,6 +268,7 @@ def gnmi_set(duthost, ptfhost, delete_list, update_list, replace_list): cmd += '--xpath ' + xpath cmd += ' ' cmd += '--value ' + xvalue + logger.info(f"PTF GNMI command: {cmd}") output = ptfhost.shell(cmd, module_ignore_errors=True) error = "GRPC error\n" if error in output['stdout']: @@ -318,6 +320,41 @@ def gnmi_get(duthost, ptfhost, path_list): raise Exception("error:" + msg) + +def apply_messages( + localhost, + duthost, + ptfhost, + messages, + dpu_index, + set_db=True, + wait_after_apply=5, + max_updates_in_single_cmd=1024, +): + env = GNMIEnvironment(duthost) + update_list = [] + delete_list = [] + for i, (key, config_dict) in enumerate(messages.items()): + message = proto_utils.parse_dash_proto(key, config_dict) + keys = key.split(":", 1) + gnmi_key = keys[0] + "[key=" + keys[1] + "]" + filename = f"update{i}" + + if set_db: + if proto_utils.ENABLE_PROTO: + path = f"/APPL_DB/dpu{dpu_index}/{gnmi_key}:$/root/{filename}" + else: + path = f"/APPL_DB/dpu{dpu_index}/{gnmi_key}:@/root/{filename}" + with open(env.work_dir + filename, "wb") as file: + file.write(message.SerializeToString()) + update_list.append(path) + else: + path = f"/APPL_DB/dpu{dpu_index}/{gnmi_key}" + delete_list.append(path) + + write_gnmi_files(localhost, duthost, ptfhost, env, delete_list, update_list, max_updates_in_single_cmd) + time.sleep(wait_after_apply) + + def apply_gnmi_file(localhost, duthost, ptfhost, dest_path=None, config_json=None, wait_after_apply=5, max_updates_in_single_cmd=1024): """ @@ -365,9 +402,9 @@ def apply_gnmi_file(localhost, duthost, ptfhost, 
dest_path=None, config_json=Non keys = k.split(":", 1) k = keys[0] + "[key=" + keys[1] + "]" if proto_utils.ENABLE_PROTO: - path = "/APPL_DB/localhost/%s:$/root/%s" % (k, filename) + path = "/APPL_DB/dpu1/%s:$/root/%s" % (k, filename) else: - path = "/APPL_DB/localhost/%s:@/root/%s" % (k, filename) + path = "/APPL_DB/dpu1/%s:@/root/%s" % (k, filename) update_list.append(path) elif operation["OP"] == "DEL": for k, v in operation.items(): @@ -375,10 +412,15 @@ def apply_gnmi_file(localhost, duthost, ptfhost, dest_path=None, config_json=Non continue keys = k.split(":", 1) k = keys[0] + "[key=" + keys[1] + "]" - path = "/APPL_DB/localhost/%s" % (k) + path = "/APPL_DB/dpu1/%s" % (k) delete_list.append(path) else: logger.info("Invalid operation %s" % operation["OP"]) + write_gnmi_files(localhost, duthost, ptfhost, env, delete_list, update_list, max_updates_in_single_cmd) + time.sleep(wait_after_apply) + + +def write_gnmi_files(localhost, duthost, ptfhost, env, delete_list, update_list, max_updates_in_single_cmd): localhost.shell(f'tar -zcvf /tmp/updates.tar.gz -C {env.work_dir} .') ptfhost.copy(src='/tmp/updates.tar.gz', dest='~') ptfhost.shell('tar -xf updates.tar.gz') @@ -404,4 +446,3 @@ def _devide_list(operation_list): ptfhost.shell('rm -f updates.tar.gz') localhost.shell(f'rm -f {env.work_dir}update*') ptfhost.shell('rm -f update*') - time.sleep(wait_after_apply) diff --git a/tests/dash/packets.py b/tests/dash/packets.py index dbb5ec606a1..9d7438a6c64 100644 --- a/tests/dash/packets.py +++ b/tests/dash/packets.py @@ -1,28 +1,38 @@ +import logging +import sys +import time from ipaddress import ip_address import ptf.packet as scapy -import scapy.utils as scapy_utils -from ptf.mask import Mask import ptf.testutils as testutils -from ptf.dataplane import match_exp_pkt +import scapy.utils as scapy_utils +from configs import privatelink_config as pl from constants import * # noqa: F403 -import logging -import sys -import time -from tests.common.helpers.assertions import 
pytest_assert +from ptf.dataplane import match_exp_pkt +from ptf.mask import Mask from six import StringIO +from tests.common.helpers.assertions import pytest_assert logger = logging.getLogger(__name__) -def generate_inner_packet(packet_type): +def generate_inner_packet(packet_type, ipv6=False): if packet_type == 'udp': - return testutils.simple_udp_packet + if ipv6: + return testutils.simple_udpv6_packet + else: + return testutils.simple_udp_packet elif packet_type == 'tcp': - return testutils.simple_tcp_packet + if ipv6: + return testutils.simple_tcpv6_packet + else: + return testutils.simple_tcp_packet elif packet_type == 'echo_request' or packet_type == 'echo_reply': - return testutils.simple_icmp_packet + if ipv6: + return testutils.simple_icmpv6_packet + else: + return testutils.simple_icmp_packet return None @@ -34,6 +44,180 @@ def set_icmp_sub_type(packet, packet_type): packet[scapy.ICMP].type = 0 +def get_bits(ip): + addr = ip_address(ip) + return int(addr) + + +def get_pl_overlay_sip(orig_sip, ol_sip, ol_mask, pl_sip_encoding, pl_sip_mask): + pkt_sip = get_bits(orig_sip) + ol_sip_ip = get_bits(ol_sip) + ol_sip_mask = get_bits(ol_mask) + pl_encoding_ip = get_bits(pl_sip_encoding) + pl_encoding_mask = get_bits(pl_sip_mask) + + overlay_sip = ( + ((pkt_sip & ~ol_sip_mask) | ol_sip_ip) & ~pl_encoding_mask + ) | pl_encoding_ip + return str(ip_address(overlay_sip)) + + +def get_pl_overlay_dip(orig_dip, ol_dip, ol_mask): + pkt_dip = get_bits(orig_dip) + ol_dip_ip = get_bits(ol_dip) + ol_dip_mask = get_bits(ol_mask) + + overlay_dip = (pkt_dip & ~ol_dip_mask) | ol_dip_ip + return str(ip_address(overlay_dip)) + + +def inbound_pl_packets(config, inner_packet_type='udp', vxlan_udp_dport=4789): + inner_sip = get_pl_overlay_dip( # not a typo, inner DIP/SIP are reversed for inbound direction + pl.PE_CA, + pl.PL_OVERLAY_DIP, + pl.PL_OVERLAY_DIP_MASK + ) + + inner_dip = get_pl_overlay_sip( + pl.VM1_CA, + pl.PL_OVERLAY_SIP, + pl.PL_OVERLAY_SIP_MASK, + pl.PL_ENCODING_IP, + 
pl.PL_ENCODING_MASK + ) + + inner_packet = generate_inner_packet(inner_packet_type, ipv6=True)( + eth_src=pl.REMOTE_MAC, + eth_dst=pl.ENI_MAC, + ipv6_src=inner_sip, + ipv6_dst=inner_dip, + ) + + sport = inner_packet[scapy.UDP].sport + dport = inner_packet[scapy.UDP].dport + inner_packet[scapy.UDP].sport = dport + inner_packet[scapy.UDP].dport = sport + + gre_packet = testutils.simple_gre_packet( + eth_dst=config[DUT_MAC], + ip_src=pl.PE_PA, + ip_dst=pl.APPLIANCE_VIP, + gre_key_present=True, + gre_key=int(pl.VNET1_VNI) << 8, + inner_frame=inner_packet, + ) + + exp_inner_packet = generate_inner_packet(inner_packet_type)( + eth_src=pl.ENI_MAC, + eth_dst=pl.REMOTE_MAC, + ip_src=pl.PE_CA, + ip_dst=pl.VM1_CA, + ip_id=0, + ) + + exp_inner_packet[scapy.UDP].sport = dport + exp_inner_packet[scapy.UDP].dport = sport + exp_inner_packet[scapy.UDP].load = inner_packet[scapy.UDP].load + + exp_vxlan_packet = testutils.simple_vxlan_packet( + eth_src=config[DUT_MAC], + eth_dst=config[LOCAL_PTF_MAC], + ip_src=pl.APPLIANCE_VIP, + ip_dst=pl.VM1_PA, + ip_ttl=254, + ip_id=0, + udp_dport=vxlan_udp_dport, + vxlan_vni=int(pl.VM_VNI), + inner_frame=exp_inner_packet + ) + + masked_exp_packet = Mask(exp_vxlan_packet) + masked_exp_packet.set_do_not_care_packet(scapy.Ether, "src") + masked_exp_packet.set_do_not_care_packet(scapy.Ether, "dst") + masked_exp_packet.set_do_not_care_packet(scapy.UDP, "sport") + masked_exp_packet.set_do_not_care_packet(scapy.UDP, "chksum") + + return gre_packet, masked_exp_packet + + +def outbound_pl_packets(config, outer_encap, inner_packet_type='udp', vxlan_udp_dport=4789, vxlan_udp_sport=1234): + inner_packet = generate_inner_packet(inner_packet_type)( + eth_src=pl.REMOTE_MAC, + eth_dst=pl.ENI_MAC, + ip_src=pl.VM1_CA, + ip_dst=pl.PE_CA, + ) + + if outer_encap == 'vxlan': + outer_packet = testutils.simple_vxlan_packet( + eth_src=config[LOCAL_PTF_MAC], + eth_dst=config[DUT_MAC], + ip_src=pl.VM1_PA, + ip_dst=pl.APPLIANCE_VIP, + udp_dport=vxlan_udp_dport, + 
udp_sport=vxlan_udp_sport, + with_udp_chksum=False, + vxlan_vni=int(pl.VM_VNI), + inner_frame=inner_packet + ) + elif outer_encap == 'gre': + outer_packet = testutils.simple_gre_packet( + eth_src=config[LOCAL_PTF_MAC], + eth_dst=config[DUT_MAC], + ip_src=pl.VM1_PA, + ip_dst=pl.APPLIANCE_VIP, + gre_key_present=True, + gre_key=int(pl.VM_VNI) << 8, + inner_frame=inner_packet + ) + else: + logger.error(f"Unsupported encap type: {outer_encap}") + return None + + exp_overlay_sip = get_pl_overlay_sip( + inner_packet[scapy.IP].src, + pl.PL_OVERLAY_SIP, + pl.PL_OVERLAY_SIP_MASK, + pl.PL_ENCODING_IP, + pl.PL_ENCODING_MASK + ) + + exp_overlay_dip = get_pl_overlay_dip( + inner_packet[scapy.IP].dst, + pl.PL_OVERLAY_DIP, + pl.PL_OVERLAY_DIP_MASK + ) + + logger.info(f"Expecting overlay SIP: {exp_overlay_sip}") + logger.info(f"Expecting overlay DIP: {exp_overlay_dip}") + + exp_inner_packet = scapy.Ether() / scapy.IPv6() / scapy.UDP() + exp_inner_packet[scapy.Ether].src = pl.ENI_MAC + exp_inner_packet[scapy.Ether].dst = pl.REMOTE_MAC + exp_inner_packet[scapy.IPv6].src = exp_overlay_sip + exp_inner_packet[scapy.IPv6].dst = exp_overlay_dip + exp_inner_packet[scapy.UDP] = inner_packet[scapy.UDP] + + exp_encap_packet = testutils.simple_gre_packet( + eth_dst=config[REMOTE_PTF_MAC], + eth_src=config[DUT_MAC], + ip_src=pl.APPLIANCE_VIP, + ip_dst=pl.PE_PA, + gre_key_present=True, + gre_key=pl.ENCAP_VNI << 8, + inner_frame=exp_inner_packet, + ip_id=0, + ip_ttl=254, + ) + + masked_exp_packet = Mask(exp_encap_packet) + masked_exp_packet.set_do_not_care_packet(scapy.Ether, "src") + masked_exp_packet.set_do_not_care_packet(scapy.Ether, "dst") + masked_exp_packet.set_do_not_care_packet(scapy.IP, "chksum") + + return outer_packet, masked_exp_packet + + def inbound_vnet_packets(dash_config_info, inner_extra_conf={}, inner_packet_type='udp', vxlan_udp_dport=4789): inner_packet = generate_inner_packet(inner_packet_type)( eth_src=dash_config_info[REMOTE_ENI_MAC], diff --git 
a/tests/dash/proto_utils.py b/tests/dash/proto_utils.py index 23a6e0b458e..31678706816 100644 --- a/tests/dash/proto_utils.py +++ b/tests/dash/proto_utils.py @@ -1,27 +1,111 @@ +import base64 +import ipaddress import re import socket -import ipaddress import uuid -import pytest +from ipaddress import ip_address +import pytest +from dash_api.acl_group_pb2 import AclGroup +from dash_api.acl_in_pb2 import AclIn +from dash_api.acl_out_pb2 import AclOut +from dash_api.acl_rule_pb2 import AclRule, Action from dash_api.appliance_pb2 import Appliance -from dash_api.vnet_pb2 import Vnet from dash_api.eni_pb2 import Eni, State +from dash_api.eni_route_pb2 import EniRoute +from dash_api.prefix_tag_pb2 import PrefixTag from dash_api.qos_pb2 import Qos +from dash_api.route_group_pb2 import RouteGroup from dash_api.route_pb2 import Route from dash_api.route_rule_pb2 import RouteRule +from dash_api.route_type_pb2 import (ActionType, RouteType, RouteTypeItem, + RoutingType) +from dash_api.types_pb2 import IpPrefix, IpVersion, ValueOrRange from dash_api.vnet_mapping_pb2 import VnetMapping -from dash_api.route_type_pb2 import RoutingType, ActionType, RouteType, RouteTypeItem -from dash_api.types_pb2 import IpVersion, IpPrefix, ValueOrRange -from dash_api.acl_group_pb2 import AclGroup -from dash_api.acl_out_pb2 import AclOut -from dash_api.acl_in_pb2 import AclIn -from dash_api.acl_rule_pb2 import AclRule, Action -from dash_api.prefix_tag_pb2 import PrefixTag - +from dash_api.vnet_pb2 import Vnet +from google.protobuf.descriptor import FieldDescriptor +from google.protobuf.json_format import ParseDict ENABLE_PROTO = True +PB_INT_TYPES = set([ + FieldDescriptor.TYPE_INT32, + FieldDescriptor.TYPE_INT64, + FieldDescriptor.TYPE_UINT32, + FieldDescriptor.TYPE_UINT64, + FieldDescriptor.TYPE_FIXED64, + FieldDescriptor.TYPE_FIXED32, + FieldDescriptor.TYPE_SFIXED32, + FieldDescriptor.TYPE_SFIXED64, + FieldDescriptor.TYPE_SINT32, + FieldDescriptor.TYPE_SINT64 +]) + +PB_CLASS_MAP = { + 
"APPLIANCE": Appliance, + "VNET": Vnet, + "ENI": Eni, + "VNET_MAPPING": VnetMapping, + "ROUTE": Route, + "ROUTING_TYPE": RouteType, + "ROUTE_GROUP": RouteGroup, + "ENI_ROUTE": EniRoute, +} + + +def parse_ip_address(ip_str): + ip_addr = ip_address(ip_str) + if ip_addr.version == 4: + encoded_val = socket.htonl(int(ip_addr)) + else: + encoded_val = base64.b64encode(ip_addr.packed) + + return {f"ipv{ip_addr.version}": encoded_val} + + +def parse_ip_prefix(ip_prefix_str): + ip_addr, mask = ip_prefix_str.split("/") + return {"ip": parse_ip_address(ip_addr), "mask": parse_ip_address(ip_address(mask))} + + +def parse_byte_field(orig_val): + return base64.b64encode(bytes.fromhex(orig_val.replace(":", ""))) + + +def parse_guid(guid_str): + return {"value": parse_byte_field(uuid.UUID(guid_str).hex)} + + +def parse_dash_proto(key: str, proto_dict: dict): + """ + Custom parser for DASH configs to allow writing configs + in a more human-readable format + """ + table_name = re.search(r"DASH_(\w+)_TABLE", key).group(1) + message = PB_CLASS_MAP[table_name]() + field_map = message.DESCRIPTOR.fields_by_name + new_dict = {} + for key, value in proto_dict.items(): + if field_map[key].type == field_map[key].TYPE_MESSAGE: + + if field_map[key].message_type.name == "IpAddress": + new_dict[key] = parse_ip_address(value) + elif field_map[key].message_type.name == "IpPrefix": + new_dict[key] = parse_ip_prefix(value) + elif field_map[key].message_type.name == "Guid": + new_dict[key] = parse_guid(value) + + elif field_map[key].type == field_map[key].TYPE_BYTES: + new_dict[key] = parse_byte_field(value) + + elif field_map[key].type in PB_INT_TYPES: + new_dict[key] = int(value) + + if key not in new_dict: + new_dict[key] = value + + return ParseDict(new_dict, message) + def appliance_from_json(json_obj): pb = Appliance() diff --git a/tests/dash/test_dash_privatelink.py b/tests/dash/test_dash_privatelink.py new file mode 100644 index 00000000000..f326a3c8fb2 --- /dev/null +++ 
b/tests/dash/test_dash_privatelink.py @@ -0,0 +1,118 @@ +import json +import logging +from ipaddress import ip_interface + +import configs.privatelink_config as pl +import ptf.testutils as testutils +import pytest +from constants import LOCAL_PTF_INTF, LOCAL_DUT_INTF, REMOTE_DUT_INTF, REMOTE_PTF_RECV_INTF, REMOTE_PTF_SEND_INTF +from gnmi_utils import apply_messages +from packets import outbound_pl_packets, inbound_pl_packets + +logger = logging.getLogger(__name__) + +pytestmark = [ + pytest.mark.topology('t1'), + pytest.mark.skip_check_dut_health +] + + +""" +Test prerequisites: +- DPU needs the Appliance VIP configured as its loopback IP +- Assign IPs to DPU-NPU dataplane interfaces +- Default route on DPU to NPU +""" + + +def get_dpu_dataplane_port(duthost, dpu_index): + platform = duthost.facts["platform"] + platform_json = json.loads(duthost.shell(f"cat /usr/share/sonic/device/{platform}/platform.json")["stdout"]) + try: + interface = list(platform_json["DPUS"][f"dpu{dpu_index}"]["interface"].keys())[0] + except KeyError: + interface = f"Ethernet-BP{dpu_index}" + + logger.info(f"DPU dataplane interface: {interface}") + return interface + + +def get_interface_ip(duthost, interface): + cmd = f"ip addr show {interface} | grep -w inet | awk '{{print $2}}'" + output = duthost.shell(cmd)["stdout"].strip() + return ip_interface(output) + + +@pytest.fixture(scope="module") +def dpu_ip(duthost, dpu_index): + dpu_port = get_dpu_dataplane_port(duthost, dpu_index) + npu_interface_ip = get_interface_ip(duthost, dpu_port) + return npu_interface_ip.ip + 1 + + +@pytest.fixture(scope="module", autouse=True) +def add_npu_static_routes(duthost, dpu_ip, dash_pl_config, skip_config, skip_cleanup): + if not skip_config: + cmds = [] + vm_nexthop_ip = get_interface_ip(duthost, dash_pl_config[LOCAL_DUT_INTF]).ip + 1 + pe_nexthop_ip = get_interface_ip(duthost, dash_pl_config[REMOTE_DUT_INTF]).ip + 1 + cmds.append(f"ip route replace {pl.APPLIANCE_VIP}/32 via {dpu_ip}") + cmds.append(f"ip 
route replace {pl.VM1_PA}/32 via {vm_nexthop_ip}") + cmds.append(f"ip route replace {pl.PE_PA}/32 via {pe_nexthop_ip}") + logger.info(f"Adding static routes: {cmds}") + duthost.shell_cmds(cmds=cmds) + + yield + + if not skip_config and not skip_cleanup: + cmds = [] + cmds.append(f"ip route del {pl.APPLIANCE_VIP}/32 via {dpu_ip}") + cmds.append(f"ip route del {pl.VM1_PA}/32 via {vm_nexthop_ip}") + cmds.append(f"ip route del {pl.PE_PA}/32 via {pe_nexthop_ip}") + logger.info(f"Removing static routes: {cmds}") + duthost.shell_cmds(cmds=cmds) + + +@pytest.fixture(autouse=True) +def common_setup_teardown(localhost, duthost, ptfhost, dpu_index, skip_config): + if skip_config: + return + + logger.info(pl.ROUTING_TYPE_PL_CONFIG) + base_config_messages = { + **pl.APPLIANCE_CONFIG, + **pl.ROUTING_TYPE_PL_CONFIG, + **pl.VNET_CONFIG, + **pl.ENI_CONFIG, + **pl.PE_VNET_MAPPING_CONFIG, + **pl.ROUTE_GROUP1_CONFIG + } + logger.info(base_config_messages) + + apply_messages(localhost, duthost, ptfhost, base_config_messages, dpu_index) + + route_messages = { + **pl.PE_SUBNET_ROUTE_CONFIG, + **pl.VM_SUBNET_ROUTE_CONFIG + } + logger.info(route_messages) + apply_messages(localhost, duthost, ptfhost, route_messages, dpu_index) + + logger.info(pl.ENI_ROUTE_GROUP1_CONFIG) + apply_messages(localhost, duthost, ptfhost, pl.ENI_ROUTE_GROUP1_CONFIG, dpu_index) + + +@pytest.mark.parametrize("encap_proto", ["vxlan", "gre"]) +def test_privatelink_basic_transform( + ptfadapter, + dash_pl_config, + encap_proto +): + vm_to_dpu_pkt, exp_dpu_to_pe_pkt = outbound_pl_packets(dash_pl_config, outer_encap=encap_proto) + pe_to_dpu_pkt, exp_dpu_to_vm_pkt = inbound_pl_packets(dash_pl_config) + + ptfadapter.dataplane.flush() + testutils.send(ptfadapter, dash_pl_config[LOCAL_PTF_INTF], vm_to_dpu_pkt, 1) + testutils.verify_packet_any_port(ptfadapter, exp_dpu_to_pe_pkt, dash_pl_config[REMOTE_PTF_RECV_INTF]) + testutils.send(ptfadapter, dash_pl_config[REMOTE_PTF_SEND_INTF], pe_to_dpu_pkt, 1) + 
testutils.verify_packet(ptfadapter, exp_dpu_to_vm_pkt, dash_pl_config[LOCAL_PTF_INTF])