From 65d62d982eae072bf02fc80fe4c0a67c91af3c88 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Fri, 30 Aug 2024 17:50:40 +0200 Subject: [PATCH 01/69] nhrpd: fix sh ip nhrp opennhrp json counter "show ip nhrp opennhrp json" counter is incorrect. > root@nhs1:/# vtysh -c 'sh ip nhrp opennhrp json' | jq . > { > "attr": { > "status": "ok", > "entriesCount": 1 <=== NOK > }, > "table": [ > { > "type": "dynamic", > "up": true, > "used": false, > "protocolAddress": "172.16.1.4", > "protocolAddressSize": 32, > "nbmaAddress": "192.168.2.4" > }, > { > "type": "dynamic", > "up": true, > "used": false, > "protocolAddress": "172.16.1.5", > "protocolAddressSize": 32, > "nbmaAddress": "192.168.2.5" > } > ] > } > Fixes: 87b9e98203 ("nhrpd: add json support to show nhrp vty commands") Signed-off-by: Louis Scalbert --- nhrpd/nhrp_vty.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c index f2025769609e..0fc3dee98d37 100644 --- a/nhrpd/nhrp_vty.c +++ b/nhrpd/nhrp_vty.c @@ -934,6 +934,8 @@ static void show_ip_opennhrp_cache(struct nhrp_cache *c, void *pctx) if (ctx->afi != family2afi(sockunion_family(&c->remote_addr))) return; + ctx->count++; + sockunion2str(&c->remote_addr, buf[0], sizeof(buf[0])); if (c->cur.peer) sockunion2str(&c->cur.peer->vc->remote.nbma, buf[1], @@ -1031,7 +1033,6 @@ DEFUN(show_ip_nhrp, show_ip_nhrp_cmd, else json_object_string_add(json_vrf, "status", "ok"); - ctx.count++; FOR_ALL_INTERFACES (vrf, ifp) nhrp_cache_foreach(ifp, show_ip_opennhrp_cache, &ctx); } From e2db455c0144f2732e0dd681942f2c489e4b5d85 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Fri, 30 Aug 2024 17:53:31 +0200 Subject: [PATCH 02/69] nhrpd: normalize sh ip nhrp opennhrp output The command outputs too much "\n". Normalize it. Signed-off-by: Louis Scalbert --- nhrpd/nhrp_vty.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nhrpd/nhrp_vty.c b/nhrpd/nhrp_vty.c index 0fc3dee98d37..5db1327b024d 100644 --- a/nhrpd/nhrp_vty.c +++ b/nhrpd/nhrp_vty.c @@ -934,6 +934,8 @@ static void show_ip_opennhrp_cache(struct nhrp_cache *c, void *pctx) if (ctx->afi != family2afi(sockunion_family(&c->remote_addr))) return; + if (ctx->count && !ctx->json) + vty_out(ctx->vty, "\n"); ctx->count++; sockunion2str(&c->remote_addr, buf[0], sizeof(buf[0])); @@ -988,8 +990,6 @@ static void show_ip_opennhrp_cache(struct nhrp_cache *c, void *pctx) if (sockunion_family(&c->cur.remote_nbma_natoa) != AF_UNSPEC) vty_out(ctx->vty, "NBMA-NAT-OA-Address: %s\n", buf[2]); - - vty_out(ctx->vty, "\n\n"); } DEFUN(show_ip_nhrp, show_ip_nhrp_cmd, From a6cffa842be384a290c75b8255712f2a23fa4bca Mon Sep 17 00:00:00 2001 From: zhou-run Date: Thu, 24 Oct 2024 11:09:37 +0800 Subject: [PATCH 03/69] isisd: When the ISIS types of the routers do not match on a P2P link, the neighbor status remains UP Test Scenario: RouterA and RouterB are in the same routing domain and have configured a P2P link. RouterA is configured with "is-type level-1" while RouterB is configured with "is-type level-1-2". They establish a level-1 UP neighborship. In this scenario, we expect that when RouterB's configuration is switched to "is-type level-2-only", the neighborship status on both RouterA and RouterB would be non-UP. However, RouterB still shows the neighbor as UP. Upon receiving a P2P Hello packet, the function "process_p2p_hello" is invoked. 
According to the ISO/IEC 10589 protocol specification, section 8.2.5.2 a) and tables 5 and 7, if the "iih->circ_type" of the neighbor's hello packet does not match one's own "circuit->is_type," we may choose to take no action. When establishing a neighborship for the first time, the neighbor's status can remain in the "Initializing" state. However, if the neighborship has already been established and one's own "circuit->is_type" changes, the neighbor's UP status cannot be reset. Therefore, when processing P2P Hello packets, we should be cognizant of changes in our own link adjacency type. Topotest has identified a core issue during testing. (gdb) bt "#0 0xb7efe579 in __kernel_vsyscall () \#1 0xb79f62f7 in ?? () \#2 0xbf981dd0 in ?? () \#3 \#4 0xb79f7722 in ?? () \#5 0xb7ed8634 in _DYNAMIC () from /home/z15467/isis_core/usr/lib/i386-linux-gnu/frr/libfrr.so.0.0.0 \#6 0x0001003c in ?? () \#7 0x00010000 in ?? () \#8 0xb7df3322 in _frr_mtx_lock (mutex=) at ../lib/frr_pthread.h:255 \#9 event_timer_remain_msec (thread=0x10000) at ../lib/event.c:734 \#10 event_timer_remain_msec (thread=) at ../lib/event.c:727 \#11 0x004fb4aa in _send_hello_sched (circuit=, threadp=0x2189de0, level=1, delay=) at ../isisd/isis_pdu.c:2116 \#12 0x004e8dbc in isis_circuit_up (circuit=) at ../isisd/isis_circuit.c:734 \#13 0x004ea8f7 in isis_csm_state_change (event=, circuit=, arg=) at ../isisd/isis_csm.c:98 \#14 0x004ea23f in isis_circuit_circ_type_set (circuit=, circ_type=) at ../isisd/isis_circuit.c:1578 \#15 0x0053aefa in lib_interface_isis_network_type_modify (args=) at ../isisd/isis_nb_config.c:4190 \#16 0xb7dbcc8d in nb_callback_modify (errmsg_len=8192, errmsg=0xbf982afc "", resource=0x2186220, dnode=, event=NB_EV_APPLY, nb_node=0x1fafe70, context=) at ../lib/northbound.c:1550 \#17 nb_callback_configuration (context=, event=NB_EV_APPLY, change=, errmsg=, errmsg_len=) at ../lib/northbound.c:1900 \#18 0xb7dbd646 in nb_transaction_process (errmsg_len=, errmsg=, transaction=, event=) at ../lib/northbound.c:2028 \#19 nb_candidate_commit_apply (transaction=, save_transaction=, transaction_id=, errmsg=, errmsg_len=) at ../lib/northbound.c:1368 \#20 0xb7dbdd68 in nb_candidate_commit (context=..., candidate=, save_transaction=, comment=, transaction_id=, errmsg=, errmsg_len=) at ../lib/northbound.c:1401 \#21 0xb7dc0cff in nb_cli_classic_commit (vty=vty@entry=0x21d6940) at ../lib/northbound_cli.c:57 \#22 0xb7dc0f46 in nb_cli_apply_changes_internal (vty=vty@entry=0x21d6940, xpath_base=xpath_base@entry=0xbf986b7c "/frr-interface:lib/interface[name='r5-eth0']", clear_pending=clear_pending@entry=false) at ../lib/northbound_cli.c:184 \#23 0xb7dc130b in nb_cli_apply_changes (vty=, xpath_base_fmt=) at ../lib/northbound_cli.c:240 \#24 0x00542c1d in isis_network_magic (self=, argc=, argv=, no=, vty=) at ../isisd/isis_cli.c:3101 \#25 isis_network (self=, vty=, argc=, argv=) at ./isisd/isis_cli_clippy.c:5499 \#26 0xb7d6d8f1 in cmd_execute_command_real (vline=vline@entry=0x219afa0, vty=vty@entry=0x21d6940, cmd=cmd@entry=0x0, up_level=) at ../lib/command.c:1003 \#27 0xb7d6d9e0 in cmd_execute_command (vline=, vty=, cmd=, vtysh=) at ../lib/command.c:1061 \#28 0xb7d6dc60 in cmd_execute (vty=, cmd=, matched=, vtysh=) at ../lib/command.c:1228 \#29 0xb7dfb58a in vty_command (vty=vty@entry=0x21d6940, buf=0x21e0ff0 ' ' , "isis network point-to-point") at ../lib/vty.c:625 \#30 0xb7dfc560 in vty_execute (vty=vty@entry=0x21d6940) at ../lib/vty.c:1388 \#31 0xb7dfdc8d in vtysh_read (thread=) at ../lib/vty.c:2400 \#32 0xb7df4d47 in event_call 
(thread=) at ../lib/event.c:2019 \#33 0xb7d9a831 in frr_run (master=) at ../lib/libfrr.c:1232 \#34 0x004e4758 in main (argc=7, argv=0xbf989a24, envp=0xbf989a44) at ../isisd/isis_main.c:354 (gdb) f 9 \#9 event_timer_remain_msec (thread=0x10000) at ../lib/event.c:734 734 ../lib/event.c: No such file or directory. (gdb) p pthread No symbol "pthread" in current context. (gdb) p thread $1 = (struct event *) 0x10000 When LAN links and P2P links share the` circuit->u` of a neighbor, if one link is no longer in use and the union is not cleared, the other link is unable to pass the non-empty check, resulting in accessing an invalid pointer. Unfortunately, for non-DIS devices in LAN links, `circuit->u.bc.run_dr_elect[x]` is essentially always 1, but in `isis_circuit_down()`,` circuit->u.bc.run_dr_elect[x] `will not be cleared because `circuit->u.bc.is_dr[x]` is always 0. Consequently, when switching to a P2P link, `isis_circuit_circ_type_set()` does not reset the link in a non-C_STATE_UP state, leading to subsequent accesses of `circuit->u.p2p.t_send_p2p_hello` resulting in a non-empty yet invalid address. I believe that in `isis_circuit_down()`, the LAN link should unconditionally clear `circuit->u.bc.run_dr_elect[x]`. Signed-off-by: zhou-run --- isisd/isis_circuit.c | 2 + isisd/isis_pdu.c | 3 +- tests/topotests/isis_topo1/test_isis_topo1.py | 161 ++++++++++++++++++ 3 files changed, 165 insertions(+), 1 deletion(-) diff --git a/isisd/isis_circuit.c b/isisd/isis_circuit.c index fa1ce3007f8a..9a967bc1e3b9 100644 --- a/isisd/isis_circuit.c +++ b/isisd/isis_circuit.c @@ -851,11 +851,13 @@ void isis_circuit_down(struct isis_circuit *circuit) isis_dr_resign(circuit, 1); circuit->u.bc.is_dr[0] = 0; } + circuit->u.bc.run_dr_elect[0] = 0; memset(circuit->u.bc.l1_desig_is, 0, ISIS_SYS_ID_LEN + 1); if (circuit->u.bc.is_dr[1]) { isis_dr_resign(circuit, 2); circuit->u.bc.is_dr[1] = 0; } + circuit->u.bc.run_dr_elect[1] = 0; memset(circuit->u.bc.l2_desig_is, 0, ISIS_SYS_ID_LEN + 1); memset(circuit->u.bc.snpa, 0, ETH_ALEN); diff --git a/isisd/isis_pdu.c b/isisd/isis_pdu.c index 23238d314a20..c2ada459eb1c 100644 --- a/isisd/isis_pdu.c +++ b/isisd/isis_pdu.c @@ -231,7 +231,8 @@ static int process_p2p_hello(struct iih_info *iih) return ISIS_OK; } } - if (!adj || adj->level != iih->calculated_type) { + if (!adj || adj->level != iih->calculated_type || + !(iih->circuit->is_type & iih->circ_type)) { if (!adj) { adj = isis_new_adj(iih->sys_id, NULL, iih->calculated_type, iih->circuit); diff --git a/tests/topotests/isis_topo1/test_isis_topo1.py b/tests/topotests/isis_topo1/test_isis_topo1.py index 1cec2f16f057..f80f2e435f5d 100644 --- a/tests/topotests/isis_topo1/test_isis_topo1.py +++ b/tests/topotests/isis_topo1/test_isis_topo1.py @@ -12,6 +12,7 @@ """ test_isis_topo1.py: Test ISIS topology. """ +import time import datetime import functools import json @@ -314,6 +315,107 @@ def test_isis_neighbor_json(): ), assertmsg +def test_isis_neighbor_state(): + "Check that the neighbor states remain normal when the ISIS type is switched." + + tgen = get_topogen() + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Checking 'show isis neighbor state on a p2p link'") + + # Establish a P2P link + # When the IS-IS type of r3 is set to level-1-2 and the IS-IS type of r5 is set to level-1, + # it is expected that all neighbors exist and are in the Up state + r3 = tgen.gears["r3"] + r3.vtysh_cmd( + """ + configure + router isis 1 + no redistribute ipv4 connected level-1 + no redistribute ipv4 connected level-2 + no redistribute ipv6 connected level-1 + no redistribute ipv6 connected level-2 + interface r3-eth1 + no isis circuit-type + isis network point-to-point + end + """ + ) + r5 = tgen.gears["r5"] + r5.vtysh_cmd( + """ + configure + router isis 1 + no redistribute ipv4 connected level-1 + no redistribute ipv6 connected level-1 + no redistribute ipv4 table 20 level-1 + interface r5-eth0 + no isis circuit-type + isis network point-to-point + end + """ + ) + result = _check_isis_neighbor_json("r3", "r5", True, "Up") + assert result is True, result + result = _check_isis_neighbor_json("r5", "r3", True, "Up") + assert result is True, result + + # Remove the configuration that affects the switch of IS-IS type. + # Configure the IS-IS type of r3 to transition from level-1-2 to level-2-only, + # while maintaining the IS-IS type of r5 as level-1. + # In this scenario, + # the expectation is that some neighbors do not exist or are in the Initializing state + r3.vtysh_cmd( + """ + configure + router isis 1 + is-type level-2-only + end + """ + ) + result = _check_isis_neighbor_json("r3", "r5", False, "Initializing") + assert result is True, result + result = _check_isis_neighbor_json("r5", "r3", False, "Initializing") + assert result is True, result + + # Restore to initial configuration + logger.info("Checking 'restore to initial configuration'") + r3.vtysh_cmd( + """ + configure + interface r3-eth1 + isis circuit-type level-1 + no isis network point-to-point + router isis 1 + no is-type + redistribute ipv4 connected level-1 + redistribute ipv4 connected level-2 + redistribute ipv6 connected level-1 + redistribute ipv6 connected level-2 + end + """ + ) + r5.vtysh_cmd( + """ + configure + interface r5-eth0 + isis circuit-type level-1 + no isis network point-to-point + router isis 1 + redistribute ipv4 connected level-1 + redistribute ipv6 connected level-1 + redistribute ipv4 table 20 level-1 + end + """ + ) + result = _check_isis_neighbor_json("r3", "r5", True, "Up") + assert result is True, result + result = _check_isis_neighbor_json("r5", "r3", True, "Up") + assert result is True, result + + def test_isis_database_json(): "Check json struct in show isis database json" @@ -623,6 +725,65 @@ def test_isis_hello_padding_during_adjacency_formation(): assert result is True, result +def _check_isis_neighbor_json( + self, neighbor, neighbor_expected, neighbor_state_expected +): + tgen = get_topogen() + router = tgen.gears[self] + logger.info( + f"check_isis_neighbor_json {router} {neighbor} {neighbor_expected} {neighbor_state_expected}" + ) + + result = _check_isis_neighbor_exist(self, neighbor) + if result == True: + return _check_isis_neighbor_state(self, neighbor, neighbor_state_expected) + elif neighbor_expected == True: + return "{} with expected neighbor {} got none ".format(router.name, neighbor) + else: + return True + + +@retry(retry_timeout=60) +def _check_isis_neighbor_exist(self, neighbor): + tgen = get_topogen() + router = tgen.gears[self] + logger.info(f"check_isis_neighbor_exist {router} {neighbor}") + neighbor_json = 
router.vtysh_cmd("show isis neighbor json", isjson=True) + + circuits = neighbor_json.get("areas", [])[0].get("circuits", []) + for circuit in circuits: + if "adj" in circuit and circuit["adj"] == neighbor: + return True + + return "The neighbor {} of router {} has not been learned yet ".format( + neighbor, router.name + ) + + +@retry(retry_timeout=5) +def _check_isis_neighbor_state(self, neighbor, neighbor_state_expected): + tgen = get_topogen() + router = tgen.gears[self] + logger.info( + f"check_isis_neighbor_state {router} {neighbor} {neighbor_state_expected}" + ) + neighbor_json = router.vtysh_cmd( + "show isis neighbor {} json".format(neighbor), isjson=True + ) + + circuits = neighbor_json.get("areas", [])[0].get("circuits", []) + for circuit in circuits: + interface = circuit.get("interface", {}) + if "state" in interface: + neighbor_state = interface["state"] + if neighbor_state == neighbor_state_expected: + return True + + return "{} peer with expected neighbor_state {} got {} ".format( + router.name, neighbor_state_expected, neighbor_state + ) + + @retry(retry_timeout=10) def check_last_iih_packet_for_padding(router, expect_padding): logfilename = "{}/{}".format(router.gearlogdir, "isisd.log") From 20c3756f30f69ae71d9d7acefb925dfb9d526303 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Wed, 2 Oct 2024 16:53:05 -0300 Subject: [PATCH 04/69] bfdd: disable echo socket when not using it Lets avoid a performance penalty in forwarding when not using the BFD echo feature. The echo socket uses raw packet capturing along with a BPF filter which causes performance issues. While here change code to use `-1` for closed sockets instead of valid FD number `0`. Signed-off-by: Rafael Zalamena --- bfdd/bfd.c | 151 ++++++++++++++++++++++++++++++++++++----------------- bfdd/bfd.h | 2 + 2 files changed, 105 insertions(+), 48 deletions(-) diff --git a/bfdd/bfd.c b/bfdd/bfd.c index eb9c3003135f..f32bc2598bdc 100644 --- a/bfdd/bfd.c +++ b/bfdd/bfd.c @@ -1172,6 +1172,9 @@ void bfd_set_echo(struct bfd_session *bs, bool echo) if (bs->bdc == NULL) ptm_bfd_echo_stop(bs); } + + if (bs->vrf && bs->vrf->info) + bfd_vrf_toggle_echo(bs->vrf->info); } void bfd_set_shutdown(struct bfd_session *bs, bool shutdown) @@ -1800,6 +1803,69 @@ void bfd_profiles_remove(void) bfd_profile_free(bp); } +struct __bfd_session_echo { + /* VRF peers must match */ + struct vrf *vrf; + /* Echo enabled or not */ + bool enabled; +}; + +static int __bfd_session_has_echo(struct hash_bucket *hb, void *arg) +{ + const struct bfd_session *session = hb->data; + struct __bfd_session_echo *has_echo = arg; + + if (session->vrf != has_echo->vrf) + return HASHWALK_CONTINUE; + if (!CHECK_FLAG(session->flags, BFD_SESS_FLAG_ECHO)) + return HASHWALK_CONTINUE; + + has_echo->enabled = true; + return HASHWALK_ABORT; +} + +void bfd_vrf_toggle_echo(struct bfd_vrf_global *bfd_vrf) +{ + struct __bfd_session_echo has_echo = { + .enabled = false, + .vrf = bfd_vrf->vrf, + }; + + /* Check for peers using echo */ + hash_walk(bfd_id_hash, __bfd_session_has_echo, &has_echo); + + /* + * No peers using echo, close all echo sockets. + */ + if (!has_echo.enabled) { + if (bfd_vrf->bg_echo != -1) { + event_cancel(&bfd_vrf->bg_ev[4]); + close(bfd_vrf->bg_echo); + bfd_vrf->bg_echo = -1; + } + + if (bfd_vrf->bg_echov6 != -1) { + event_cancel(&bfd_vrf->bg_ev[5]); + close(bfd_vrf->bg_echov6); + bfd_vrf->bg_echov6 = -1; + } + return; + } + + /* + * At least one peer using echo, open echo sockets. 
+ */ + if (bfd_vrf->bg_echo == -1) + bfd_vrf->bg_echo = bp_echo_socket(bfd_vrf->vrf); + if (bfd_vrf->bg_echov6 == -1) + bfd_vrf->bg_echov6 = bp_echov6_socket(bfd_vrf->vrf); + + if (bfd_vrf->bg_ev[4] == NULL && bfd_vrf->bg_echo != -1) + event_add_read(master, bfd_recv_cb, bfd_vrf, bfd_vrf->bg_echo, &bfd_vrf->bg_ev[4]); + if (bfd_vrf->bg_ev[5] == NULL && bfd_vrf->bg_echov6 != -1) + event_add_read(master, bfd_recv_cb, bfd_vrf, bfd_vrf->bg_echov6, &bfd_vrf->bg_ev[5]); +} + /* * Profile related hash functions. */ @@ -1842,9 +1908,23 @@ static void bfd_profile_detach(struct bfd_profile *bp) */ static int bfd_vrf_new(struct vrf *vrf) { + struct bfd_vrf_global *bvrf; + if (bglobal.debug_zebra) zlog_debug("VRF Created: %s(%u)", vrf->name, vrf->vrf_id); + bvrf = XCALLOC(MTYPE_BFDD_VRF, sizeof(struct bfd_vrf_global)); + bvrf->vrf = vrf; + vrf->info = bvrf; + + /* Invalidate all sockets */ + bvrf->bg_shop = -1; + bvrf->bg_mhop = -1; + bvrf->bg_shop6 = -1; + bvrf->bg_mhop6 = -1; + bvrf->bg_echo = -1; + bvrf->bg_echov6 = -1; + return 0; } @@ -1853,70 +1933,53 @@ static int bfd_vrf_delete(struct vrf *vrf) if (bglobal.debug_zebra) zlog_debug("VRF Deletion: %s(%u)", vrf->name, vrf->vrf_id); + XFREE(MTYPE_BFDD_VRF, vrf->info); + return 0; } static int bfd_vrf_enable(struct vrf *vrf) { - struct bfd_vrf_global *bvrf; - - /* a different name */ - if (!vrf->info) { - bvrf = XCALLOC(MTYPE_BFDD_VRF, sizeof(struct bfd_vrf_global)); - bvrf->vrf = vrf; - vrf->info = (void *)bvrf; - - /* Disable sockets if using data plane. */ - if (bglobal.bg_use_dplane) { - bvrf->bg_shop = -1; - bvrf->bg_mhop = -1; - bvrf->bg_shop6 = -1; - bvrf->bg_mhop6 = -1; - bvrf->bg_echo = -1; - bvrf->bg_echov6 = -1; - } - } else - bvrf = vrf->info; + struct bfd_vrf_global *bvrf = vrf->info; if (bglobal.debug_zebra) zlog_debug("VRF enable add %s id %u", vrf->name, vrf->vrf_id); - if (!bvrf->bg_shop) + /* Don't open sockets when using data plane */ + if (bglobal.bg_use_dplane) + goto skip_sockets; + + if (bvrf->bg_shop == -1) bvrf->bg_shop = bp_udp_shop(vrf); - if (!bvrf->bg_mhop) + if (bvrf->bg_mhop == -1) bvrf->bg_mhop = bp_udp_mhop(vrf); - if (!bvrf->bg_shop6) + if (bvrf->bg_shop6 == -1) bvrf->bg_shop6 = bp_udp6_shop(vrf); - if (!bvrf->bg_mhop6) + if (bvrf->bg_mhop6 == -1) bvrf->bg_mhop6 = bp_udp6_mhop(vrf); - if (!bvrf->bg_echo) - bvrf->bg_echo = bp_echo_socket(vrf); - if (!bvrf->bg_echov6) - bvrf->bg_echov6 = bp_echov6_socket(vrf); - if (!bvrf->bg_ev[0] && bvrf->bg_shop != -1) + if (bvrf->bg_ev[0] == NULL && bvrf->bg_shop != -1) event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop, &bvrf->bg_ev[0]); - if (!bvrf->bg_ev[1] && bvrf->bg_mhop != -1) + if (bvrf->bg_ev[1] == NULL && bvrf->bg_mhop != -1) event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop, &bvrf->bg_ev[1]); - if (!bvrf->bg_ev[2] && bvrf->bg_shop6 != -1) + if (bvrf->bg_ev[2] == NULL && bvrf->bg_shop6 != -1) event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_shop6, &bvrf->bg_ev[2]); - if (!bvrf->bg_ev[3] && bvrf->bg_mhop6 != -1) + if (bvrf->bg_ev[3] == NULL && bvrf->bg_mhop6 != -1) event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_mhop6, &bvrf->bg_ev[3]); - if (!bvrf->bg_ev[4] && bvrf->bg_echo != -1) - event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echo, - &bvrf->bg_ev[4]); - if (!bvrf->bg_ev[5] && bvrf->bg_echov6 != -1) - event_add_read(master, bfd_recv_cb, bvrf, bvrf->bg_echov6, - &bvrf->bg_ev[5]); + /* Toggle echo if VRF was disabled. 
*/ + bfd_vrf_toggle_echo(bvrf); + +skip_sockets: if (vrf->vrf_id != VRF_DEFAULT) { bfdd_zclient_register(vrf->vrf_id); bfdd_sessions_enable_vrf(vrf); } + return 0; } @@ -1948,17 +2011,9 @@ static int bfd_vrf_disable(struct vrf *vrf) socket_close(&bvrf->bg_echo); socket_close(&bvrf->bg_shop); socket_close(&bvrf->bg_mhop); - if (bvrf->bg_shop6 != -1) - socket_close(&bvrf->bg_shop6); - if (bvrf->bg_mhop6 != -1) - socket_close(&bvrf->bg_mhop6); - socket_close(&bvrf->bg_echo); - if (bvrf->bg_echov6 != -1) - socket_close(&bvrf->bg_echov6); - - /* free context */ - XFREE(MTYPE_BFDD_VRF, bvrf); - vrf->info = NULL; + socket_close(&bvrf->bg_shop6); + socket_close(&bvrf->bg_mhop6); + socket_close(&bvrf->bg_echov6); return 0; } diff --git a/bfdd/bfd.h b/bfdd/bfd.h index 2f83b245eb28..d4d14ffce675 100644 --- a/bfdd/bfd.h +++ b/bfdd/bfd.h @@ -610,6 +610,8 @@ void bfd_sessions_remove_manual(void); void bfd_profiles_remove(void); void bfd_rtt_init(struct bfd_session *bfd); +extern void bfd_vrf_toggle_echo(struct bfd_vrf_global *bfd_vrf); + /** * Set the BFD session echo state. * From 6afd56da969821359ee1fdfbfd33259587f58184 Mon Sep 17 00:00:00 2001 From: Acee Lindem Date: Fri, 15 Nov 2024 18:58:49 +0000 Subject: [PATCH 05/69] ospfd: OSPF multi-instance default origination fixes When originating a default AS-External LSA in one OSPF instance, it wasn't working if the criteria route was installed by another OSPF instance. This required more flexible processing of the OSPF external route information. Also fix problem multi-instance display for "show ip ospf database ...". Signed-off-by: Acee Lindem --- ospfd/ospf_asbr.c | 70 ++++++++++++++++++++++++++++++++++++++++++++++ ospfd/ospf_asbr.h | 4 +++ ospfd/ospf_lsa.c | 7 +---- ospfd/ospf_vty.c | 3 ++ ospfd/ospf_zebra.c | 30 ++++++++++---------- ospfd/ospf_zebra.h | 3 ++ 6 files changed, 96 insertions(+), 21 deletions(-) diff --git a/ospfd/ospf_asbr.c b/ospfd/ospf_asbr.c index b47c39008819..738ac6d8cf05 100644 --- a/ospfd/ospf_asbr.c +++ b/ospfd/ospf_asbr.c @@ -168,6 +168,38 @@ void ospf_external_info_delete(struct ospf *ospf, uint8_t type, } } +/* + * ospf_external_info_delete_multi_instance + * + * Delete instances of the external route information for a given route type. + * The preserve_instance parameter may be used to prevent the current instance + * from being deleted. + */ +void ospf_external_info_delete_multi_instance(struct ospf *ospf, uint8_t type, struct prefix_ipv4 p, + unsigned long preserve_instance) +{ + struct route_node *rn; + struct ospf_external *ext; + struct list *ext_list; + struct listnode *node; + + ext_list = ospf->external[type]; + if (!ext_list) + return; + + for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext)) { + if (ext->instance != preserve_instance) { + rn = route_node_lookup(EXTERNAL_INFO(ext), (struct prefix *)&p); + if (rn) { + ospf_external_info_free(rn->info); + rn->info = NULL; + route_unlock_node(rn); + route_unlock_node(rn); + } + } + } +} + struct external_info *ospf_external_info_lookup(struct ospf *ospf, uint8_t type, unsigned short instance, struct prefix_ipv4 *p) @@ -189,6 +221,44 @@ struct external_info *ospf_external_info_lookup(struct ospf *ospf, uint8_t type, return NULL; } +/* + * ospf_external_info_default_lookup + * + * For default information criteria, we really don't care about the + * source of the route and there only should be one. 
+ */ +struct external_info *ospf_external_info_default_lookup(struct ospf *ospf) +{ + struct ospf_external *ext; + struct external_info *ei; + struct list *ext_list; + struct listnode *node; + struct route_node *rn; + struct prefix_ipv4 p = { + .family = AF_INET, + .prefixlen = 0, + .prefix.s_addr = INADDR_ANY, + }; + + ext_list = ospf->external[DEFAULT_ROUTE]; + if (!ext_list) + return (NULL); + + for (ALL_LIST_ELEMENTS_RO(ext_list, node, ext)) { + rn = route_node_lookup(EXTERNAL_INFO(ext), (struct prefix *)&p); + if (rn) { + route_unlock_node(rn); + if (rn->info) { + ei = rn->info; + if (ei->type != ZEBRA_ROUTE_OSPF || ei->instance != ospf->instance) + return ei; + } + } + } + + return NULL; +} + struct ospf_lsa *ospf_external_info_find_lsa(struct ospf *ospf, struct prefix_ipv4 *p) { diff --git a/ospfd/ospf_asbr.h b/ospfd/ospf_asbr.h index 6158d65f22e8..648a5a11aee2 100644 --- a/ospfd/ospf_asbr.h +++ b/ospfd/ospf_asbr.h @@ -109,6 +109,10 @@ ospf_external_info_add(struct ospf *, uint8_t, unsigned short, route_tag_t, uint32_t metric); extern void ospf_external_info_delete(struct ospf *, uint8_t, unsigned short, struct prefix_ipv4); +extern void ospf_external_info_delete_multi_instance(struct ospf *ospf, uint8_t type, + struct prefix_ipv4 p, + unsigned long preserve_instance); +#define OSPF_DELETE_ANY_INSTANCE 0xffffffff extern struct external_info *ospf_external_info_lookup(struct ospf *, uint8_t, unsigned short, struct prefix_ipv4 *); diff --git a/ospfd/ospf_lsa.c b/ospfd/ospf_lsa.c index 135048789858..73542233976e 100644 --- a/ospfd/ospf_lsa.c +++ b/ospfd/ospf_lsa.c @@ -2407,15 +2407,10 @@ struct ospf_lsa *ospf_nssa_lsa_refresh(struct ospf_area *area, static struct external_info *ospf_default_external_info(struct ospf *ospf) { int type; - struct prefix_ipv4 p; struct external_info *default_ei; int ret = 0; - p.family = AF_INET; - p.prefix.s_addr = 0; - p.prefixlen = 0; - - default_ei = ospf_external_info_lookup(ospf, DEFAULT_ROUTE, 0, &p); + default_ei = ospf_external_info_default_lookup(ospf); if (!default_ei) return NULL; diff --git a/ospfd/ospf_vty.c b/ospfd/ospf_vty.c index 0457b1333753..27528f659432 100644 --- a/ospfd/ospf_vty.c +++ b/ospfd/ospf_vty.c @@ -7347,6 +7347,9 @@ DEFPY (show_ip_ospf_database, struct in_addr *adv_router_p = NULL; json_object *json = NULL; + if (instance_id != ospf_instance) + return CMD_NOT_MY_INSTANCE; + if (uj) json = json_object_new_object(); if (lsid_str) diff --git a/ospfd/ospf_zebra.c b/ospfd/ospf_zebra.c index c7cba1e20fee..b718d498ae0d 100644 --- a/ospfd/ospf_zebra.c +++ b/ospfd/ospf_zebra.c @@ -1292,15 +1292,14 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS) * originate)ZEBRA_ROUTE_MAX is used to delete the ex-info. * Resolved this inconsistency by maintaining same route type. 
*/ - if ((is_default_prefix(&pgen)) && (api.type != ZEBRA_ROUTE_OSPF)) + if ((is_default_prefix(&pgen)) && + ((api.type != ZEBRA_ROUTE_OSPF) || (api.instance != ospf->instance))) rt_type = DEFAULT_ROUTE; if (IS_DEBUG_OSPF(zebra, ZEBRA_REDISTRIBUTE)) - zlog_debug("%s: cmd %s from client %s: vrf %s(%u), p %pFX, metric %d", - __func__, zserv_command_string(cmd), - zebra_route_string(api.type), - ospf_vrf_id_to_name(vrf_id), vrf_id, &api.prefix, - api.metric); + zlog_debug("%s: cmd %s from client %s-%d: vrf %s(%u), p %pFX, metric %d", __func__, + zserv_command_string(cmd), zebra_route_string(api.type), api.instance, + ospf_vrf_id_to_name(vrf_id), vrf_id, &api.prefix, api.metric); if (cmd == ZEBRA_REDISTRIBUTE_ROUTE_ADD) { /* XXX|HACK|TODO|FIXME: @@ -1315,16 +1314,17 @@ static int ospf_zebra_read_route(ZAPI_CALLBACK_ARGS) api.tag = ospf->dtag[rt_type]; /* - * Given zebra sends update for a prefix via ADD message, it - * should - * be considered as an implicit DEL for that prefix with other - * source - * types. + * Given zebra sends an update for a prefix via an ADD message, it + * will be considered as an impilict DELETE for that prefix for other + * types and instances other than the type and instance associated with + * the prefix. */ - for (i = 0; i <= ZEBRA_ROUTE_MAX; i++) - if (i != rt_type) - ospf_external_info_delete(ospf, i, api.instance, - p); + for (i = 0; i <= ZEBRA_ROUTE_MAX; i++) { + unsigned long preserve_instance; + + preserve_instance = (i == rt_type) ? api.instance : OSPF_DELETE_ANY_INSTANCE; + ospf_external_info_delete_multi_instance(ospf, i, p, preserve_instance); + } ei = ospf_external_info_add(ospf, rt_type, api.instance, p, ifindex, nexthop, api.tag, diff --git a/ospfd/ospf_zebra.h b/ospfd/ospf_zebra.h index 86a5678fc4fa..b83524303fa7 100644 --- a/ospfd/ospf_zebra.h +++ b/ospfd/ospf_zebra.h @@ -47,6 +47,9 @@ extern uint8_t ospf_distance_apply(struct ospf *ospf, struct prefix_ipv4 *, struct ospf_route *); extern struct ospf_external *ospf_external_lookup(struct ospf *, uint8_t, unsigned short); + +extern struct external_info *ospf_external_info_default_lookup(struct ospf *ospf); + extern struct ospf_external *ospf_external_add(struct ospf *, uint8_t, unsigned short); From 82f434940207c5d2ed0e52f0d27998fb56129064 Mon Sep 17 00:00:00 2001 From: Acee Lindem Date: Mon, 18 Nov 2024 17:05:31 +0000 Subject: [PATCH 06/69] tests: Add topotest for OSPF multi-instance default origination. This change adds a topotest to test various case of OSPF multi-instance origination including cases where the criteria route is from another instance of OSPF, as well as the same OSPF instance (where a default should not be originated). Signed-off-by: Acee Lindem --- .../topotests/ospf_multi_instance/r1/frr.conf | 19 + .../topotests/ospf_multi_instance/r2/frr.conf | 37 ++ .../topotests/ospf_multi_instance/r3/frr.conf | 19 + .../test_ospf_multi_instance.py | 403 ++++++++++++++++++ 4 files changed, 478 insertions(+) create mode 100644 tests/topotests/ospf_multi_instance/r1/frr.conf create mode 100644 tests/topotests/ospf_multi_instance/r2/frr.conf create mode 100644 tests/topotests/ospf_multi_instance/r3/frr.conf create mode 100644 tests/topotests/ospf_multi_instance/test_ospf_multi_instance.py diff --git a/tests/topotests/ospf_multi_instance/r1/frr.conf b/tests/topotests/ospf_multi_instance/r1/frr.conf new file mode 100644 index 000000000000..c341a7176a09 --- /dev/null +++ b/tests/topotests/ospf_multi_instance/r1/frr.conf @@ -0,0 +1,19 @@ +! 
+hostname r1 +password zebra +log file /tmp/r1-frr.log +ip forwarding +! +interface lo + ip address 1.1.1.1/32 + ip ospf area 0 +! +interface r1-eth0 + ip address 10.1.1.1/24 + ip ospf area 0 +! +! +router ospf + ospf router-id 1.1.1.1 + distance 20 +! diff --git a/tests/topotests/ospf_multi_instance/r2/frr.conf b/tests/topotests/ospf_multi_instance/r2/frr.conf new file mode 100644 index 000000000000..8501e0edc07d --- /dev/null +++ b/tests/topotests/ospf_multi_instance/r2/frr.conf @@ -0,0 +1,37 @@ +! +hostname r2 +password zebra +! debug ospf event +! debug ospf lsa +! debug ospf default-information +! debug ospf zebra redistribute + +ip forwarding +! +interface lo1 + ip address 2.2.2.1/32 + ip ospf 1 area 0 + no shut +! +interface lo2 + ip address 2.2.2.2/32 + ip ospf 2 area 0 + no shut +! +interface r2-eth0 + ip address 10.1.1.2/24 + ip ospf 1 area 0 +! +interface r2-eth1 + ip address 10.1.2.2/24 + ip ospf 2 area 0 +! +router ospf 1 + ospf router-id 2.2.2.1 + distance 20 +! +router ospf 2 + ospf router-id 2.2.2.2 + distance 20 +! + diff --git a/tests/topotests/ospf_multi_instance/r3/frr.conf b/tests/topotests/ospf_multi_instance/r3/frr.conf new file mode 100644 index 000000000000..97a3e19c9b10 --- /dev/null +++ b/tests/topotests/ospf_multi_instance/r3/frr.conf @@ -0,0 +1,19 @@ +! +hostname r3 +password zebra +log file /tmp/r3-frr.log +ip forwarding +! +interface lo + ip address 3.3.3.1/32 + ip ospf area 0 +! +interface r3-eth0 + ip address 10.1.2.3/24 + ip ospf area 0 +! +! +router ospf + ospf router-id 3.3.3.1 + distance 20 +! diff --git a/tests/topotests/ospf_multi_instance/test_ospf_multi_instance.py b/tests/topotests/ospf_multi_instance/test_ospf_multi_instance.py new file mode 100644 index 000000000000..de44140c09dc --- /dev/null +++ b/tests/topotests/ospf_multi_instance/test_ospf_multi_instance.py @@ -0,0 +1,403 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_ospf_multi_instance.py +# +# Copyright (c) 2024 LabN Consulting +# Acee Lindem +# + +import os +import sys +from functools import partial +import pytest + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +from lib.common_config import ( + step, + create_interface_in_kernel, +) + + +""" +test_ospf_metric_propagation.py: Test OSPF/BGP metric propagation +""" + +TOPOLOGY = """ + + +---------+ +--------------------+ +---------+ + | r1 | | r2 | r2 | | r3 | + | | | ospf 1 | ospf 2 | | | + | 1.1.1.1 | eth0 eth0| 2.2.2.1 | 2.2.2.2 |eth1 eth0| 3.3.3.1 | + | +-------------+ | +-------------+ | + | | 10.1.1.0/24 | | | 10.1.2.0/24 | | + +---------+ +--------------------+ +---------+ + + +""" + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# Required to instantiate the topology builder class. 
+ +pytestmark = [pytest.mark.ospfd, pytest.mark.bgpd] + + +def build_topo(tgen): + "Build function" + + # Create 3 routers + tgen.add_router("r1") + tgen.add_router("r2") + tgen.add_router("r3") + + # Interconect router 1, 2 (0) + switch = tgen.add_switch("s1-1-2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + # Interconect router 2, 3 (1) + switch = tgen.add_switch("s2-2-3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + # Add more loopbacks to r2 + create_interface_in_kernel( + tgen, "r2", "lo1", "2.2.2.1", netmask="255.255.255.255", create=True + ) + create_interface_in_kernel( + tgen, "r2", "lo2", "2.2.2.2", netmask="255.255.255.255", create=True + ) + + +def setup_module(mod): + logger.info("OSPF Multi-Instance:\n {}".format(TOPOLOGY)) + + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + # Starting Routers + router_list = tgen.routers() + + for rname, router in router_list.items(): + logger.info("Loading router %s" % rname) + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + # Initialize all routers. + tgen.start_router() + + +def teardown_module(): + "Teardown the pytest environment" + tgen = get_topogen() + tgen.stop_topology() + + +def test_multi_instance_default_origination(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip("Skipped because of router(s) failure") + + step("Configure a local default route") + r1 = tgen.gears["r1"] + r1.vtysh_cmd("conf t\nip route 0.0.0.0/0 Null0") + + step("Verify the R1 configuration and install of 'ip route 0.0.0.0/0 Null0'") + prefix_suppression_cfg = ( + tgen.net["r1"] + .cmd('vtysh -c "show running" | grep "^ip route 0.0.0.0/0 Null0"') + .rstrip() + ) + assertmsg = "'ip route 0.0.0.0/0 Null0' applied, but not present in configuration" + assert prefix_suppression_cfg == "ip route 0.0.0.0/0 Null0", assertmsg + + input_dict = { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "static", + "nexthops": [ + { + "blackhole": True, + } + ], + } + ] + } + test_func = partial( + topotest.router_json_cmp, r1, "show ip route 0.0.0.0/0 json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "0.0.0.0/0 not installed on router r1" + assert result is None, assertmsg + + step( + "Verify the R1 configuration and advertisement of 'default-information originate'" + ) + r1.vtysh_cmd("conf t\nrouter ospf\n default-information originate") + + input_dict = { + "asExternalLinkStates": [ + { + "lsaType": "AS-external-LSA", + "linkStateId": "0.0.0.0", + "advertisingRouter": "1.1.1.1", + "networkMask": 0, + "metricType": "E2 (Larger than any link state path)", + "metric": 10, + "forwardAddress": "0.0.0.0", + "externalRouteTag": 0, + } + ] + } + test_func = partial( + topotest.router_json_cmp, r1, "show ip ospf database json", input_dict + ) + + r2 = tgen.gears["r2"] + step("Verify the OSPF instance 1 installation of default route on router 2") + input_dict = { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "ospf", + "instance": 1, + "nexthops": [ + { + "ip": "10.1.1.1", + "interfaceName": "r2-eth0", + } + ], + } + ] + } + test_func = partial( + topotest.router_json_cmp, r2, "show ip route 0.0.0.0/0 json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "0.0.0.0/0 not installed on router r2" + assert result is None, assertmsg + + step("Configure OSPF 'default-intformation originate' 
on router r2 instance 2") + r2.vtysh_cmd("conf t\nrouter ospf 2\n default-information originate") + + step("Verify r2 instance 2 AS-External default origination") + input_dict = { + "ospfInstance": 2, + "routerId": "2.2.2.2", + "asExternalLinkStates": [ + { + "lsaType": "AS-external-LSA", + "linkStateId": "0.0.0.0", + "advertisingRouter": "2.2.2.2", + "networkMask": 0, + "metricType": "E2 (Larger than any link state path)", + "tos": 0, + "metric": 10, + "forwardAddress": "0.0.0.0", + "externalRouteTag": 0, + } + ], + } + test_func = partial( + topotest.router_json_cmp, + r2, + "show ip ospf 2 database external json", + input_dict, + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "AS-External default not originated by router r2 OSPF instance 2" + assert result is None, assertmsg + + step("Update the OSPF instance 2 distance so it will be preferred over instance 1") + r2.vtysh_cmd("conf t\nrouter ospf 2\n distance 15") + + step("Generate a default route from OSPF on r3") + r3 = tgen.gears["r3"] + r3.vtysh_cmd("conf t\nrouter ospf\n default-information originate") + r3.vtysh_cmd("conf t\nip route 0.0.0.0/0 Null0") + + step("Verify r3 AS-External default origination on r2") + input_dict = { + "ospfInstance": 2, + "routerId": "2.2.2.2", + "asExternalLinkStates": [ + { + "lsaType": "AS-external-LSA", + "linkStateId": "0.0.0.0", + "advertisingRouter": "3.3.3.1", + "length": 36, + "networkMask": 0, + "metricType": "E2 (Larger than any link state path)", + "tos": 0, + "metric": 10, + "forwardAddress": "0.0.0.0", + "externalRouteTag": 0, + } + ], + } + test_func = partial( + topotest.router_json_cmp, + r2, + "show ip ospf 2 database external json", + input_dict, + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "AS-External default not originated by router r3 OSPF" + assert result is None, assertmsg + + step("Verify r3's default installed by OSPF instance 2 is preferred on r2") + input_dict = { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "ospf", + "instance": 2, + "distance": 15, + "nexthops": [ + { + "ip": "10.1.2.3", + "interfaceName": "r2-eth1", + } + ], + } + ] + } + test_func = partial( + topotest.router_json_cmp, r2, "show ip route 0.0.0.0/0 json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "0.0.0.0/0 from r3 not installed on router r2" + assert result is None, assertmsg + + step( + "Verify that r2's OSPF instance 2 AS-External LSA default is flushed due to default from r3" + ) + input_dict = { + "ospfInstance": 2, + "routerId": "2.2.2.2", + "asExternalLinkStates": [ + { + "lsaAge": 3600, + "lsaType": "AS-external-LSA", + "linkStateId": "0.0.0.0", + "advertisingRouter": "2.2.2.2", + "networkMask": 0, + "metricType": "E2 (Larger than any link state path)", + "tos": 0, + "metric": 10, + "forwardAddress": "0.0.0.0", + "externalRouteTag": 0, + } + ], + } + test_func = partial( + topotest.router_json_cmp, + r2, + "show ip ospf 2 database external json", + input_dict, + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "AS-External default not flushed by router r2 OSPF instance 2" + assert result is None, assertmsg + + step("Remove r3's default route and verify that its advertisement is flushed") + r3.vtysh_cmd("conf t\nno ip route 0.0.0.0/0 Null0") + input_dict = { + "routerId": "3.3.3.1", + "asExternalLinkStates": [ + { + "lsaAge": 3600, + "lsaType": "AS-external-LSA", + "linkStateId": 
"0.0.0.0", + "advertisingRouter": "3.3.3.1", + "networkMask": 0, + "metricType": "E2 (Larger than any link state path)", + "tos": 0, + "metric": 10, + "forwardAddress": "0.0.0.0", + "externalRouteTag": 0, + } + ], + } + test_func = partial( + topotest.router_json_cmp, r3, "show ip ospf database external json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "AS-External default not flushed by router r3 OSPF" + assert result is None, assertmsg + + step( + "Verify that r2's OSPF instance 2 AS-External default is advertised and installed by r3" + ) + input_dict = { + "routerId": "3.3.3.1", + "asExternalLinkStates": [ + { + "lsaType": "AS-external-LSA", + "linkStateId": "0.0.0.0", + "advertisingRouter": "2.2.2.2", + "networkMask": 0, + "metricType": "E2 (Larger than any link state path)", + "tos": 0, + "metric": 10, + "forwardAddress": "0.0.0.0", + "externalRouteTag": 0, + } + ], + } + test_func = partial( + topotest.router_json_cmp, r3, "show ip ospf database external json", input_dict + ) + assertmsg = "AS-External default not originated by r2 OSPF instance 2" + assert result is None, assertmsg + + step("Verify r2's OSPF instance 2 is AS-External default is installed on r3") + input_dict = { + "0.0.0.0/0": [ + { + "prefix": "0.0.0.0/0", + "prefixLen": 0, + "protocol": "ospf", + "distance": 20, + "nexthops": [ + { + "ip": "10.1.2.2", + "interfaceName": "r3-eth0", + } + ], + } + ] + } + test_func = partial( + topotest.router_json_cmp, r3, "show ip route 0.0.0.0/0 json", input_dict + ) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1) + assertmsg = "0.0.0.0/0 from router r2 not installed on r3" + assert result is None, assertmsg + + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) From 8cc6359fdc849def02716cc473662a4e616e97e9 Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Tue, 19 Nov 2024 16:25:12 +0200 Subject: [PATCH 07/69] bgpd: Disable sending ROV extended community by default https://datatracker.ietf.org/doc/html/rfc8097 defines ROV extended community, but https://datatracker.ietf.org/doc/draft-ietf-sidrops-avoid-rpki-state-in-bgp is against sending it by default even for iBGP peers. Let's do this practice and reverse it. Signed-off-by: Donatas Abraitis --- bgpd/bgp_vty.c | 4 +--- bgpd/bgpd.c | 10 ++-------- doc/user/bgp.rst | 2 +- tests/topotests/bgp_rpki_topo1/r2/bgpd.conf | 1 + 4 files changed, 5 insertions(+), 12 deletions(-) diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index f4426a5a681d..cba1cdaf1a91 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -19132,9 +19132,7 @@ static void bgp_config_write_peer_af(struct vty *vty, struct bgp *bgp, if (peergroup_af_flag_check(peer, afi, safi, PEER_FLAG_SEND_EXT_COMMUNITY_RPKI)) - vty_out(vty, - " no neighbor %s send-community extended rpki\n", - addr); + vty_out(vty, " neighbor %s send-community extended rpki\n", addr); } /* Default information */ diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index aa2bd5c3719c..01a12337eaaf 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -1565,19 +1565,13 @@ struct peer *peer_new(struct bgp *bgp) /* Set default flags. 
*/ FOREACH_AFI_SAFI (afi, safi) { SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_COMMUNITY); - SET_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_SEND_EXT_COMMUNITY); - SET_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_SEND_EXT_COMMUNITY_RPKI); + SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_EXT_COMMUNITY); SET_FLAG(peer->af_flags[afi][safi], PEER_FLAG_SEND_LARGE_COMMUNITY); SET_FLAG(peer->af_flags_invert[afi][safi], PEER_FLAG_SEND_COMMUNITY); - SET_FLAG(peer->af_flags_invert[afi][safi], - PEER_FLAG_SEND_EXT_COMMUNITY); - SET_FLAG(peer->af_flags_invert[afi][safi], - PEER_FLAG_SEND_EXT_COMMUNITY_RPKI); + SET_FLAG(peer->af_flags_invert[afi][safi], PEER_FLAG_SEND_EXT_COMMUNITY); SET_FLAG(peer->af_flags_invert[afi][safi], PEER_FLAG_SEND_LARGE_COMMUNITY); peer->addpath_type[afi][safi] = BGP_ADDPATH_NONE; diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index 0c7fcecb9b96..c0db7f2b875f 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -1803,7 +1803,7 @@ Configuring Peers Send the extended RPKI communities to the peer. RPKI extended community can be send only to iBGP and eBGP-OAD peers. - Default: enabled. + Default: disabled. .. clicmd:: neighbor PEER weight WEIGHT diff --git a/tests/topotests/bgp_rpki_topo1/r2/bgpd.conf b/tests/topotests/bgp_rpki_topo1/r2/bgpd.conf index 4de177dc2586..e5dc7f65f93b 100644 --- a/tests/topotests/bgp_rpki_topo1/r2/bgpd.conf +++ b/tests/topotests/bgp_rpki_topo1/r2/bgpd.conf @@ -9,6 +9,7 @@ router bgp 65002 neighbor 192.168.4.4 timers connect 1 address-family ipv4 unicast neighbor 192.168.4.4 next-hop-self + neighbor 192.168.4.4 send-community extended rpki exit-address-family ! router bgp 65002 vrf vrf10 From f5115307888dc8ca4b6369d1b705686d3c689d23 Mon Sep 17 00:00:00 2001 From: Mark Stapp Date: Tue, 19 Nov 2024 14:54:06 -0500 Subject: [PATCH 08/69] bfdd: retain remote dplane client socket When using bfd in remote-dataplane client mode, don't close a new client socket if we're going to try to use it. 
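Put differently, on a successful connect the callback should bootstrap the new client socket and return, rather than falling through to the reschedule path that tears the socket back down. A minimal, self-contained sketch of that "early return vs. fall-through to cleanup" pattern (every name in this sketch is a placeholder for illustration only, not actual bfdd code):

    /* Illustrative only: the control-flow pattern the fix applies. */
    static int  fake_connect(void)    { return 1; }   /* pretend connect succeeded */
    static void fake_bootstrap(int s) { (void)s; }    /* pretend to start using it */
    static void fake_teardown(void)   { }             /* pretend to close and retry */

    static void client_connect(void)
    {
            int sock = fake_connect();

            if (sock < 0)
                    goto reschedule_connect;

            fake_bootstrap(sock);
            /* Without this return, control falls through to the cleanup
             * label below and the freshly bootstrapped socket is closed
             * again. */
            return;

    reschedule_connect:
            fake_teardown();
    }

    int main(void)
    {
            client_connect();
            return 0;
    }

The one-line return added in the diff below plays the same role as the early return in this sketch.
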
Signed-off-by: Mark Stapp --- bfdd/dplane.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/bfdd/dplane.c b/bfdd/dplane.c index 7f55f3407302..b1a32fb15059 100644 --- a/bfdd/dplane.c +++ b/bfdd/dplane.c @@ -948,6 +948,9 @@ static void bfd_dplane_client_connect(struct event *t) _bfd_dplane_client_bootstrap(bdc); } + /* Continue with the connection */ + return; + reschedule_connect: EVENT_OFF(bdc->inbufev); EVENT_OFF(bdc->outbufev); From bd9331e0ae02e36ab008c50313facc3295ce93a3 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Fri, 1 Nov 2024 19:09:37 +0000 Subject: [PATCH 09/69] pimd,yang: Add PIMD northbound/yang for AutoRP mapping agent Reuses the candidate selection logic from BSR configuration Signed-off-by: Nathan Bahr --- pimd/pim_nb.c | 52 +++++++ pimd/pim_nb.h | 14 ++ pimd/pim_nb_config.c | 347 +++++++++++++++++++++++++++++++++++++------ yang/frr-pim-rp.yang | 57 ++++++- 4 files changed, 425 insertions(+), 45 deletions(-) diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c index 1dc66be82d7d..cb0f5e8255c8 100644 --- a/pimd/pim_nb.c +++ b/pimd/pim_nb.c @@ -447,6 +447,58 @@ const struct frr_yang_module_info frr_pim_rp_info = { .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_destroy, } }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/send-rp-discovery", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_send_rp_discovery_modify, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-scope", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_scope_modify, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-interval", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_interval_modify, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-holdtime", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_holdtime_modify, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/address", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/interface", + .cbs = { + .modify = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy, + } + }, + { + .xpath = 
"/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/if-loopback", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy, + } + }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/if-any", + .cbs = { + .create = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create, + .destroy = routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy, + } + }, { .xpath = NULL, }, diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h index b45af3d589da..49a94f363e34 100644 --- a/pimd/pim_nb.h +++ b/pimd/pim_nb.h @@ -191,6 +191,20 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp struct nb_cb_modify_args *args); int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_destroy( struct nb_cb_destroy_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_send_rp_discovery_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_scope_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_interval_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_holdtime_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create( + struct nb_cb_create_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify( + struct nb_cb_modify_args *args); +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy( + struct nb_cb_destroy_args *args); /* frr-cand-bsr */ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_candidate_bsr_create( diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index 87338f37c0ea..636669985790 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -41,12 +41,24 @@ int funcname(struct argtype *args) \ } \ MACRO_REQUIRE_SEMICOLON() +#define pim6_autorp_err(funcname, argtype) \ + int funcname(struct argtype *args) \ + { \ + snprintf(args->errmsg, args->errmsg_len, \ + "Trying to configure AutoRP in pim6d. 
" \ + "AutoRP does not exist for IPv6."); \ + return NB_ERR_VALIDATION; \ + } \ + MACRO_REQUIRE_SEMICOLON() + #define yang_dnode_get_pimaddr yang_dnode_get_ipv6 #else /* PIM_IPV != 6 */ #define pim6_msdp_err(funcname, argtype) \ MACRO_REQUIRE_SEMICOLON() +#define pim6_autorp_err(funcname, argtype) MACRO_REQUIRE_SEMICOLON() + #define yang_dnode_get_pimaddr yang_dnode_get_ipv4 #endif /* PIM_IPV != 6 */ @@ -490,6 +502,26 @@ static void change_query_max_response_time(struct interface *ifp, #endif /* PIM_IPV == 4 */ } +static void yang_addrsel(struct cand_addrsel *addrsel, const struct lyd_node *node) +{ + memset(addrsel->cfg_ifname, 0, sizeof(addrsel->cfg_ifname)); + addrsel->cfg_addr = PIMADDR_ANY; + + if (yang_dnode_exists(node, "if-any")) { + addrsel->cfg_mode = CAND_ADDR_ANY; + } else if (yang_dnode_exists(node, "address")) { + addrsel->cfg_mode = CAND_ADDR_EXPLICIT; + yang_dnode_get_pimaddr(&addrsel->cfg_addr, node, "address"); + } else if (yang_dnode_exists(node, "interface")) { + addrsel->cfg_mode = CAND_ADDR_IFACE; + strlcpy(addrsel->cfg_ifname, yang_dnode_get_string(node, "interface"), + sizeof(addrsel->cfg_ifname)); + } else if (yang_dnode_exists(node, "if-loopback")) { + addrsel->cfg_mode = CAND_ADDR_LO; + } + addrsel->cfg_enable = true; +} + int routing_control_plane_protocols_name_validate( struct nb_cb_create_args *args) { @@ -2794,13 +2826,77 @@ int pim_embedded_rp_maximum_rps_modify(struct nb_cb_modify_args *args) } } +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_destroy, + nb_cb_destroy_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_scope_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_scope_destroy, + nb_cb_destroy_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_interval_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_interval_destroy, + nb_cb_destroy_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_holdtime_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_holdtime_destroy, + nb_cb_destroy_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_create, + nb_cb_create_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_destroy, + nb_cb_destroy_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_group_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_group_destroy, + nb_cb_destroy_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_modify, + nb_cb_modify_args); +pim6_autorp_err( + 
routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_destroy, + nb_cb_destroy_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_send_rp_discovery_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_scope_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_interval_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_holdtime_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create, + nb_cb_create_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify, + nb_cb_modify_args); +pim6_autorp_err( + routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy, + nb_cb_destroy_args); + +#if PIM_IPV == 4 /* * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/discovery-enabled */ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; bool enabled; @@ -2820,14 +2916,12 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp pim_autorp_stop_discovery(pim); break; } -#endif return NB_OK; } int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_discovery_enabled_destroy( struct nb_cb_destroy_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; bool enabled; @@ -2846,7 +2940,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp pim_autorp_start_discovery(pim); break; } -#endif return NB_OK; } @@ -2857,7 +2950,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_scope_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; uint8_t scope; @@ -2872,15 +2964,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp pim = vrf->info; scope = yang_dnode_get_uint8(args->dnode, NULL); pim_autorp_announce_scope(pim, scope); + break; } -#endif return NB_OK; } int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_scope_destroy( struct nb_cb_destroy_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; @@ -2893,8 +2984,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp vrf = nb_running_get_entry(args->dnode, NULL, true); pim = vrf->info; pim_autorp_announce_scope(pim, 0); + break; } -#endif return NB_OK; } @@ -2905,7 +2996,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_interval_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct vrf 
*vrf; struct pim_instance *pim; uint16_t interval; @@ -2920,15 +3010,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp pim = vrf->info; interval = yang_dnode_get_uint16(args->dnode, NULL); pim_autorp_announce_interval(pim, interval); + break; } -#endif return NB_OK; } int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_interval_destroy( struct nb_cb_destroy_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; @@ -2941,8 +3030,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp vrf = nb_running_get_entry(args->dnode, NULL, true); pim = vrf->info; pim_autorp_announce_interval(pim, 0); + break; } -#endif return NB_OK; } @@ -2953,7 +3042,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_holdtime_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; uint16_t holdtime; @@ -2968,15 +3056,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp pim = vrf->info; holdtime = yang_dnode_get_uint16(args->dnode, NULL); pim_autorp_announce_holdtime(pim, holdtime); + break; } -#endif return NB_OK; } int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_announce_holdtime_destroy( struct nb_cb_destroy_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; @@ -2990,8 +3077,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp pim = vrf->info; /* 0 is a valid value, so -1 indicates deleting (go back to default) */ pim_autorp_announce_holdtime(pim, -1); + break; } -#endif return NB_OK; } @@ -3002,7 +3089,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_create( struct nb_cb_create_args *args) { -#if PIM_IPV == 4 switch (args->event) { case NB_EV_VALIDATE: case NB_EV_PREPARE: @@ -3010,14 +3096,12 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp case NB_EV_APPLY: break; } -#endif return NB_OK; } int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_destroy( struct nb_cb_destroy_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; pim_addr rp_addr; @@ -3035,7 +3119,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp return NB_ERR_INCONSISTENCY; break; } -#endif return NB_OK; } @@ -3046,7 +3129,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_group_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; struct prefix group; @@ -3064,15 +3146,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp yang_dnode_get_prefix(&group, args->dnode, NULL); apply_mask(&group); pim_autorp_add_candidate_rp_group(pim, rp_addr, group); + break; } -#endif return NB_OK; } int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_group_destroy( struct nb_cb_destroy_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; struct prefix 
group; @@ -3091,8 +3172,8 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp apply_mask(&group); if (!pim_autorp_rm_candidate_rp_group(pim, rp_addr, group)) return NB_ERR_INCONSISTENCY; + break; } -#endif return NB_OK; } @@ -3103,7 +3184,6 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_modify( struct nb_cb_modify_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; pim_addr rp_addr; @@ -3120,15 +3200,14 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp plist = yang_dnode_get_string(args->dnode, NULL); yang_dnode_get_pimaddr(&rp_addr, args->dnode, "../rp-address"); pim_autorp_add_candidate_rp_plist(pim, rp_addr, plist); + break; } -#endif return NB_OK; } int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_candidate_rp_list_prefix_list_destroy( struct nb_cb_destroy_args *args) { -#if PIM_IPV == 4 struct vrf *vrf; struct pim_instance *pim; pim_addr rp_addr; @@ -3148,31 +3227,211 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp return NB_ERR_INCONSISTENCY; break; } -#endif return NB_OK; } -static void yang_addrsel(struct cand_addrsel *addrsel, - const struct lyd_node *node) +/* + * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/send-rp-discovery + */ +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_send_rp_discovery_modify( + struct nb_cb_modify_args *args) { - memset(addrsel->cfg_ifname, 0, sizeof(addrsel->cfg_ifname)); - addrsel->cfg_addr = PIMADDR_ANY; + struct vrf *vrf; + struct pim_instance *pim; - if (yang_dnode_exists(node, "if-any")) { - addrsel->cfg_mode = CAND_ADDR_ANY; - } else if (yang_dnode_exists(node, "address")) { - addrsel->cfg_mode = CAND_ADDR_EXPLICIT; - yang_dnode_get_pimaddr(&addrsel->cfg_addr, node, "address"); - } else if (yang_dnode_exists(node, "interface")) { - addrsel->cfg_mode = CAND_ADDR_IFACE; - strlcpy(addrsel->cfg_ifname, - yang_dnode_get_string(node, "interface"), - sizeof(addrsel->cfg_ifname)); - } else if (yang_dnode_exists(node, "if-loopback")) { - addrsel->cfg_mode = CAND_ADDR_LO; + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (pim && pim->autorp) { + pim->autorp->send_rp_discovery = yang_dnode_get_bool(args->dnode, NULL); + pim_autorp_send_discovery_apply(pim->autorp); + } else + return NB_ERR_INCONSISTENCY; + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-scope + */ +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_scope_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (pim && pim->autorp) + pim->autorp->discovery_scope = yang_dnode_get_uint8(args->dnode, NULL); + else + return NB_ERR_INCONSISTENCY; + break; } + 
+ return NB_OK; +} + +/* + * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-interval + */ +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_interval_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (pim && pim->autorp) + pim->autorp->discovery_interval = yang_dnode_get_uint16(args->dnode, NULL); + else + return NB_ERR_INCONSISTENCY; + break; + } + + return NB_OK; +} + +/* + * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/discovery-holdtime + */ +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_discovery_holdtime_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (pim && pim->autorp) + pim->autorp->discovery_holdtime = yang_dnode_get_uint16(args->dnode, NULL); + else + return NB_ERR_INCONSISTENCY; + break; + } + + return NB_OK; +} + +static int pim_autorp_mapping_agent_addrsel(struct pim_autorp *autorp, + const struct lyd_node *mapping_agent_node, + struct vrf *vrf) +{ + yang_addrsel(&autorp->mapping_agent_addrsel, mapping_agent_node); + if (cand_addrsel_update(&autorp->mapping_agent_addrsel, vrf)) + pim_autorp_send_discovery_apply(autorp); + return NB_OK; +} + +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/address + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/interface + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/if-loopback + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/frr-pim-rp:rp/auto-rp/mapping-agent/if-any + */ +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_create( + struct nb_cb_create_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + const struct lyd_node *mapping_agent_node; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + mapping_agent_node = yang_dnode_get_parent(args->dnode, "mapping-agent"); + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (pim && pim->autorp) + return pim_autorp_mapping_agent_addrsel(pim->autorp, mapping_agent_node, + vrf); + else + return NB_ERR_INCONSISTENCY; + break; + } + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_modify( + struct nb_cb_modify_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + const struct lyd_node *mapping_agent_node; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + 
mapping_agent_node = yang_dnode_get_parent(args->dnode, "mapping-agent"); + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (pim && pim->autorp) + return pim_autorp_mapping_agent_addrsel(pim->autorp, mapping_agent_node, + vrf); + else + return NB_ERR_INCONSISTENCY; + break; + } + return NB_OK; +} + +int routing_control_plane_protocols_control_plane_protocol_pim_address_family_rp_auto_rp_mapping_agent_addrsel_destroy( + struct nb_cb_destroy_args *args) +{ + struct vrf *vrf; + struct pim_instance *pim; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (pim && pim->autorp) + pim->autorp->mapping_agent_addrsel.cfg_enable = false; + else + return NB_ERR_INCONSISTENCY; + break; + } + + return NB_OK; } +#endif /* PIM_IPV == 4 (for AutoRP)*/ static int candidate_bsr_addrsel(struct bsm_scope *scope, const struct lyd_node *cand_bsr_node) diff --git a/yang/frr-pim-rp.yang b/yang/frr-pim-rp.yang index cbc6e87b805b..5558b0888d73 100644 --- a/yang/frr-pim-rp.yang +++ b/yang/frr-pim-rp.yang @@ -21,6 +21,10 @@ module frr-pim-rp { prefix frr-route-types; } + import frr-interface { + prefix "frr-interface"; + } + organization "FRRouting"; @@ -178,7 +182,7 @@ module frr-pim-rp { leaf announce-interval { type uint16; description - "The time between sending C-RP announcement packets."; + "The time between sending C-RP announcement packets (seconds)."; } leaf announce-holdtime { @@ -216,6 +220,57 @@ module frr-pim-rp { } } } // candidate-rp-list + + container mapping-agent { + leaf send-rp-discovery { + type boolean; + default false; + description + "Make this router an AutoRP mapping agent"; + } + + leaf discovery-scope { + type uint8; + default 31; + description + "The TTL of the discovery packet"; + } + + leaf discovery-interval { + type uint16 { + range "1 .. 65535"; + } + default 60; + description + "The time between sending discovery advertisements (seconds)"; + } + + leaf discovery-holdtime { + type uint16 { + range "0 .. 65535"; + } + default 180; + description + "The hold time in seconds advertised in the discovery packet."; + } + + choice source-address-or-interface { + description "Source address to use for mapping agent operation"; + default if-loopback; + leaf address { + type inet:ip-address; + } + leaf interface { + type frr-interface:interface-ref; + } + leaf if-loopback { + type empty; + } + leaf if-any { + type empty; + } + } + } // mapping-agent } // auto-rp } // auto-rp-container From a060c72530b96181ca736e8c42257b558e1b8dd3 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Fri, 1 Nov 2024 19:11:14 +0000 Subject: [PATCH 10/69] pimd: Implement autorp mapping agent CLI Also exposes BSR cand_addrsel methods for use in AutoRP. 
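For reference, the command shapes touched by this patch, paraphrased from
the DEFPY strings below (the exact source-selection tokens for
send-rp-discovery are not spelled out here, see the command help strings):

> show ip pim [vrf NAME|all] autorp [discovery|candidate|mapping-agent] [json]
> autorp send-rp-discovery [source ...]
> autorp send-rp-discovery {scope (0-255) | interval (1-65535) | holdtime (0-65535)}

The separate "show ip pim vrf all autorp" command is removed; "vrf all" is
now handled by the single show command above.
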
Signed-off-by: Nathan Bahr --- pimd/pim_bsm.c | 4 +- pimd/pim_bsm.h | 5 +- pimd/pim_cmd.c | 114 +++++++++++++++----------- pimd/pim_cmd_common.c | 185 +++++++++++++++--------------------------- pimd/pim_cmd_common.h | 16 ++-- 5 files changed, 149 insertions(+), 175 deletions(-) diff --git a/pimd/pim_bsm.c b/pimd/pim_bsm.c index a44e4e08f3d5..75104141ae8e 100644 --- a/pimd/pim_bsm.c +++ b/pimd/pim_bsm.c @@ -1769,14 +1769,14 @@ static inline pim_addr if_highest_addr(pim_addr cur, struct interface *ifp) return cur; } -static void cand_addrsel_clear(struct cand_addrsel *asel) +void cand_addrsel_clear(struct cand_addrsel *asel) { asel->run = false; asel->run_addr = PIMADDR_ANY; } /* returns whether address or active changed */ -static bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf) +bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf) { bool is_any = false, prev_run = asel->run; struct interface *ifp = NULL; diff --git a/pimd/pim_bsm.h b/pimd/pim_bsm.h index 1eacc1be5770..b1582d0dfa87 100644 --- a/pimd/pim_bsm.h +++ b/pimd/pim_bsm.h @@ -64,7 +64,7 @@ enum cand_addr { CAND_ADDR_EXPLICIT, }; -/* used separately for Cand-RP and Cand-BSR */ +/* used separately for Cand-RP, Cand-BSR, and AutoRP mapping agent */ struct cand_addrsel { bool cfg_enable; enum cand_addr cfg_mode : 8; @@ -369,6 +369,9 @@ void pim_cand_rp_trigger(struct bsm_scope *scope); void pim_cand_rp_grp_add(struct bsm_scope *scope, const prefix_pim *p); void pim_cand_rp_grp_del(struct bsm_scope *scope, const prefix_pim *p); +void cand_addrsel_clear(struct cand_addrsel *asel); +bool cand_addrsel_update(struct cand_addrsel *asel, struct vrf *vrf); + void pim_cand_addrs_changed(void); int pim_crp_process(struct interface *ifp, pim_sgaddr *src_dst, uint8_t *buf, diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 934da2d53e67..732dd2971c8d 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -2822,65 +2822,49 @@ DEFPY (show_ip_pim_rp_vrf_all, DEFPY (show_ip_pim_autorp, show_ip_pim_autorp_cmd, - "show ip pim [vrf NAME] autorp [json$json]", + "show ip pim [vrf ] autorp [discovery|candidate|mapping-agent]$component [json$json]", SHOW_STR IP_STR PIM_STR VRF_CMD_HELP_STR + "All VRF's\n" "PIM AutoRP information\n" + "RP Discovery details\n" + "Candidate RP details\n" + "Mapping Agent details\n" JSON_STR) { - struct vrf *v; json_object *json_parent = NULL; - - v = vrf_lookup_by_name(vrf ? 
vrf : VRF_DEFAULT_NAME); - if (!v || !v->info) { - if (!json) - vty_out(vty, "%% Unable to find pim instance\n"); - return CMD_WARNING; - } + struct vrf *v; if (json) json_parent = json_object_new_object(); - pim_autorp_show_autorp(vty, v->info, json_parent); - - if (json) - vty_json(vty, json_parent); - - return CMD_SUCCESS; -} - -DEFPY (show_ip_pim_autorp_vrf_all, - show_ip_pim_autorp_vrf_all_cmd, - "show ip pim vrf all autorp [json$json]", - SHOW_STR - IP_STR - PIM_STR - VRF_CMD_HELP_STR - "PIM AutoRP information\n" - JSON_STR) -{ - struct vrf *vrf; - json_object *json_parent = NULL; - json_object *json_vrf = NULL; + if (vrf && strmatch(vrf, "all")) { + json_object *json_vrf = NULL; - if (json) - json_parent = json_object_new_object(); + RB_FOREACH (v, vrf_name_head, &vrfs_by_name) { + if (!v || !v->info) + continue; - RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { - if (vrf->info) { - if (!json) - vty_out(vty, "VRF: %s\n", vrf->name); - else + if (json) json_vrf = json_object_new_object(); + else + vty_out(vty, "VRF: %s\n", v->name); - pim_autorp_show_autorp(vty, vrf->info, json_vrf); + pim_autorp_show_autorp(vty, v->info, component, json_vrf); if (json) - json_object_object_add(json_parent, vrf->name, - json_vrf); + json_object_object_add(json_parent, v->name, json_vrf); + } + } else { + v = vrf_lookup_by_name(vrf ? vrf : VRF_DEFAULT_NAME); + if (!v || !v->info) { + if (!json) + vty_out(vty, "%% Unable to find pim instance\n"); + return CMD_WARNING; } + pim_autorp_show_autorp(vty, v->info, component, json_parent); } if (json) @@ -4609,13 +4593,17 @@ DEFPY (pim_autorp_announce_rp, "Prefix list\n" "List name\n") { - return pim_process_autorp_candidate_rp_cmd(vty, no, rpaddr_str, (grp_str ? grp : NULL), - plist); + if (grp_str && (!pim_addr_is_multicast(grp->prefix) || grp->prefixlen < 4)) { + vty_out(vty, "%% group prefix %pFX is not a valid multicast range\n", grp); + return CMD_WARNING_CONFIG_FAILED; + } + + return pim_process_autorp_candidate_rp_cmd(vty, no, rpaddr_str, grp_str, plist); } DEFPY (pim_autorp_announce_scope_int, pim_autorp_announce_scope_int_cmd, - "[no] autorp announce ![{scope (1-255) | interval (1-65535) | holdtime (0-65535)}]", + "[no] autorp announce {scope (1-255) | interval (1-65535) | holdtime (0-65535)}", NO_STR "AutoRP\n" "AutoRP Candidate RP announcement\n" @@ -4626,11 +4614,44 @@ DEFPY (pim_autorp_announce_scope_int, "Announcement holdtime\n" "Time in seconds\n") { - return pim_process_autorp_announce_scope_int_cmd(vty, no, scope_str, - interval_str, + return pim_process_autorp_announce_scope_int_cmd(vty, no, scope_str, interval_str, holdtime_str); } +DEFPY (pim_autorp_send_rp_discovery, + pim_autorp_send_rp_discovery_cmd, + "[no] autorp send-rp-discovery [source
]", + NO_STR + "AutoRP\n" + "Enable AutoRP mapping agent\n" + "Specify AutoRP discovery source\n" + "Local address\n" + IP_ADDR_STR + "Local Interface (uses highest address)\n" + IFNAME_STR + "Highest loopback address (default)\n" + "Highest address of any interface\n") +{ + return pim_process_autorp_send_rp_discovery_cmd(vty, no, any, loopback, ifname, address_str); +} + +DEFPY (pim_autorp_send_rp_discovery_scope_int, + pim_autorp_send_rp_discovery_scope_int_cmd, + "[no] autorp send-rp-discovery {scope (0-255) | interval (1-65535) | holdtime (0-65535)}", + NO_STR + "AutoRP\n" + "Enable AutoRP mapping agent\n" + "Packet scope (TTL)\n" + "TTL value\n" + "Discovery TX interval\n" + "Time in seconds\n" + "Announcement holdtime\n" + "Time in seconds\n") +{ + return pim_process_autorp_send_rp_discovery_scope_int_cmd(vty, no, scope_str, interval_str, + holdtime_str); +} + DEFPY (pim_bsr_candidate_bsr, pim_bsr_candidate_bsr_cmd, "[no] bsr candidate-bsr [{priority (0-255)|source
}]", @@ -8855,6 +8876,8 @@ void pim_cmd_init(void) install_element(PIM_NODE, &pim_autorp_discovery_cmd); install_element(PIM_NODE, &pim_autorp_announce_rp_cmd); install_element(PIM_NODE, &pim_autorp_announce_scope_int_cmd); + install_element(PIM_NODE, &pim_autorp_send_rp_discovery_cmd); + install_element(PIM_NODE, &pim_autorp_send_rp_discovery_scope_int_cmd); install_element(PIM_NODE, &no_pim_ssm_prefix_list_cmd); install_element(PIM_NODE, &no_pim_ssm_prefix_list_name_cmd); install_element(PIM_NODE, &pim_ssm_prefix_list_cmd); @@ -9010,7 +9033,6 @@ void pim_cmd_init(void) install_element(VIEW_NODE, &show_ip_pim_rp_cmd); install_element(VIEW_NODE, &show_ip_pim_rp_vrf_all_cmd); install_element(VIEW_NODE, &show_ip_pim_autorp_cmd); - install_element(VIEW_NODE, &show_ip_pim_autorp_vrf_all_cmd); install_element(VIEW_NODE, &show_ip_pim_bsr_cmd); install_element(VIEW_NODE, &show_ip_multicast_cmd); install_element(VIEW_NODE, &show_ip_multicast_vrf_all_cmd); diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index 02ddea8252d6..1476845a5d59 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -630,139 +630,88 @@ int pim_process_no_autorp_cmd(struct vty *vty) return nb_cli_apply_changes(vty, NULL); } -int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, - const char *rpaddr_str, - const struct prefix_ipv4 *grp, - const char *plist) +int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, const char *rpaddr_str, + const char *grp, const char *plist) { - char xpath[XPATH_MAXLEN]; - char grpstr[64]; - if (no) { - if ((grp && !is_default_prefix((const struct prefix *)grp)) || plist) { + if (grp || plist) { /* If any single values are set, only destroy those */ - if (grp && !is_default_prefix((const struct prefix *)grp)) { - snprintfrr(xpath, sizeof(xpath), - "%s/candidate-rp-list[rp-address='%s']/group", - FRR_PIM_AUTORP_XPATH, rpaddr_str); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, - NULL); - } - if (plist) { - snprintfrr(xpath, sizeof(xpath), - "%s/candidate-rp-list[rp-address='%s']/prefix-list", - FRR_PIM_AUTORP_XPATH, rpaddr_str); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, - NULL); - } - } else { + if (grp) + nb_cli_enqueue_change(vty, "./group", NB_OP_DESTROY, NULL); + if (plist) + nb_cli_enqueue_change(vty, "./prefix-list", NB_OP_DESTROY, NULL); + } else /* No values set, remove the entire RP */ - snprintfrr(xpath, sizeof(xpath), - "%s/candidate-rp-list[rp-address='%s']", - FRR_PIM_AUTORP_XPATH, rpaddr_str); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - } + nb_cli_enqueue_change(vty, ".", NB_OP_DESTROY, NULL); } else { - if ((grp && !is_default_prefix((const struct prefix *)grp)) || plist) { - snprintfrr(xpath, sizeof(xpath), - "%s/candidate-rp-list[rp-address='%s']", - FRR_PIM_AUTORP_XPATH, rpaddr_str); - nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); - if (grp && !is_default_prefix((const struct prefix *)grp)) { - snprintfrr(xpath, sizeof(xpath), - "%s/candidate-rp-list[rp-address='%s']/group", - FRR_PIM_AUTORP_XPATH, rpaddr_str); - nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, - prefix2str(grp, grpstr, - sizeof(grpstr))); - } - if (plist) { - snprintfrr(xpath, sizeof(xpath), - "%s/candidate-rp-list[rp-address='%s']/prefix-list", - FRR_PIM_AUTORP_XPATH, rpaddr_str); - nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, - plist); - } - } else { - return CMD_WARNING_CONFIG_FAILED; - } + nb_cli_enqueue_change(vty, ".", NB_OP_CREATE, NULL); + if (grp) + nb_cli_enqueue_change(vty, "./group", NB_OP_MODIFY, grp); + if (plist) 
+ nb_cli_enqueue_change(vty, "./prefix-list", NB_OP_MODIFY, plist); } - return nb_cli_apply_changes(vty, NULL); + return nb_cli_apply_changes(vty, "%s/candidate-rp-list[rp-address='%s']", + FRR_PIM_AUTORP_XPATH, rpaddr_str); } -int pim_process_autorp_announce_scope_int_cmd(struct vty *vty, bool no, - const char *scope, - const char *interval, - const char *holdtime) +int pim_process_autorp_announce_scope_int_cmd(struct vty *vty, bool no, const char *scope, + const char *interval, const char *holdtime) { - char xpath[XPATH_MAXLEN]; + /* At least one value is required, so set/delete anything defined */ + enum nb_operation op = (no ? NB_OP_DESTROY : NB_OP_MODIFY); + + if (scope) + nb_cli_enqueue_change(vty, "./announce-scope", op, scope); + if (interval) + nb_cli_enqueue_change(vty, "./announce-interval", op, interval); + if (holdtime) + nb_cli_enqueue_change(vty, "./announce-holdtime", op, holdtime); + return nb_cli_apply_changes(vty, "%s", FRR_PIM_AUTORP_XPATH); +} + +int pim_process_autorp_send_rp_discovery_cmd(struct vty *vty, bool no, bool any, bool loopback, + const char *ifname, const char *addr) +{ + /* Just take any "no" version of this command as disable the mapping agent */ + nb_cli_enqueue_change(vty, "./send-rp-discovery", NB_OP_MODIFY, (no ? "false" : "true")); if (no) { - if (scope || interval || holdtime) { - /* If any single values are set, only destroy those */ - if (scope) { - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, - "announce-scope"); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, - NULL); - } - if (interval) { - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, - "announce-interval"); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, - NULL); - } - if (holdtime) { - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, - "announce-holdtime"); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, - NULL); - } - } else { - /* No values set, remove all */ - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, "announce-scope"); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, "announce-interval"); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, "announce-holdtime"); - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - } + nb_cli_enqueue_change(vty, "./if-any", NB_OP_DESTROY, NULL); + nb_cli_enqueue_change(vty, "./interface", NB_OP_DESTROY, NULL); + nb_cli_enqueue_change(vty, "./address", NB_OP_DESTROY, NULL); + nb_cli_enqueue_change(vty, "./if-loopback", NB_OP_DESTROY, NULL); } else { - if (scope || interval || holdtime) { - if (scope) { - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, - "announce-scope"); - nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, - scope); - } - if (interval) { - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, - "announce-interval"); - nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, - interval); - } - if (holdtime) { - snprintfrr(xpath, sizeof(xpath), "%s/%s", - FRR_PIM_AUTORP_XPATH, - "announce-holdtime"); - nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, - holdtime); - } - } else { - return CMD_WARNING_CONFIG_FAILED; - } + /* Enabling mapping agent. Loopback is default, so any non-no for of the command will + * enable the mapping agent. 
+ */ + if (any) + nb_cli_enqueue_change(vty, "./if-any", NB_OP_CREATE, NULL); + else if (ifname) + nb_cli_enqueue_change(vty, "./interface", NB_OP_MODIFY, ifname); + else if (addr) + nb_cli_enqueue_change(vty, "./address", NB_OP_MODIFY, addr); + else + nb_cli_enqueue_change(vty, "./if-loopback", NB_OP_CREATE, NULL); } - return nb_cli_apply_changes(vty, NULL); + return nb_cli_apply_changes(vty, "%s/%s", FRR_PIM_AUTORP_XPATH, "mapping-agent"); +} + +int pim_process_autorp_send_rp_discovery_scope_int_cmd(struct vty *vty, bool no, const char *scope, + const char *interval, const char *holdtime) +{ + /* At least one value is required, so only set/delete the values specified */ + enum nb_operation op = (no ? NB_OP_DESTROY : NB_OP_MODIFY); + + if (scope) + nb_cli_enqueue_change(vty, "./discovery-scope", op, scope); + if (interval) + nb_cli_enqueue_change(vty, "./discovery-interval", op, interval); + if (holdtime) + nb_cli_enqueue_change(vty, "./discovery-holdtime", op, holdtime); + + return nb_cli_apply_changes(vty, "%s/%s", FRR_PIM_AUTORP_XPATH, "mapping-agent"); } bool pim_sgaddr_match(pim_sgaddr item, pim_sgaddr match) diff --git a/pimd/pim_cmd_common.h b/pimd/pim_cmd_common.h index d7c97e31d45d..a067647113b1 100644 --- a/pimd/pim_cmd_common.h +++ b/pimd/pim_cmd_common.h @@ -37,14 +37,14 @@ int pim_process_no_rp_plist_cmd(struct vty *vty, const char *rp_str, const char *prefix_list); int pim_process_autorp_cmd(struct vty *vty); int pim_process_no_autorp_cmd(struct vty *vty); -int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, - const char *rpaddr_str, - const struct prefix_ipv4 *grp, - const char *plist); -int pim_process_autorp_announce_scope_int_cmd(struct vty *vty, bool no, - const char *scope, - const char *interval, - const char *holdtime); +int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, const char *rpaddr_str, + const char *grp, const char *plist); +int pim_process_autorp_announce_scope_int_cmd(struct vty *vty, bool no, const char *scope, + const char *interval, const char *holdtime); +int pim_process_autorp_send_rp_discovery_cmd(struct vty *vty, bool no, bool any, bool loopback, + const char *ifname, const char *addr); +int pim_process_autorp_send_rp_discovery_scope_int_cmd(struct vty *vty, bool no, const char *scope, + const char *interval, const char *holdtime); int pim_process_ip_pim_cmd(struct vty *vty); int pim_process_no_ip_pim_cmd(struct vty *vty); int pim_process_ip_pim_passive_cmd(struct vty *vty, bool enable); From b6aa71b59bfed4b7c706cd2d6bbf77baaaf7d084 Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Fri, 1 Nov 2024 19:14:47 +0000 Subject: [PATCH 11/69] pimd: Implement autorp mapping agent Fully flushed out the AutoRP implementation now with the AutoRP mapping agent. This touched most of AutoRP in order to have common reuse of containers for each section of AutoRP operation (Candidate RP announcement, Mapping agent, Discovery). Many debugs had guards added and many more debug logs added. 
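As a worked illustration of the discovery resolution rules documented in
autorp_build_disc_rps() below (the RP addresses here are made up purely for
the example):

> RP 10.0.0.1 announces 224.2.0.0/16 and RP 10.0.0.2 announces 224.2.2.0/24
>   -> both mappings are advertised (longer and shorter prefixes from
>      different RPs co-exist).
> RP 10.0.0.1 and RP 10.0.0.2 both announce 224.2.0.0/16
>   -> only the mapping from RP 10.0.0.2 is advertised (the higher RP
>      address wins).
> RP 10.0.0.1 announces both 224.2.0.0/16 and 224.2.2.0/24
>   -> only 224.2.0.0/16 is advertised (no redundant, more-specific
>      prefixes from the same RP).
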
Signed-off-by: Nathan Bahr --- pimd/pim_autorp.c | 1559 +++++++++++++++++++++++++++++++++------------ pimd/pim_autorp.h | 75 ++- pimd/pimd.c | 4 + 3 files changed, 1218 insertions(+), 420 deletions(-) diff --git a/pimd/pim_autorp.c b/pimd/pim_autorp.c index d36b792e394d..3b46e7fb2bde 100644 --- a/pimd/pim_autorp.c +++ b/pimd/pim_autorp.c @@ -19,6 +19,7 @@ #include "lib/json.h" #include "pimd.h" +#include "pim_util.h" #include "pim_iface.h" #include "pim_rp.h" #include "pim_sock.h" @@ -26,61 +27,90 @@ #include "pim_autorp.h" DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP, "PIM AutoRP info"); -DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_RP, "PIM AutoRP advertised RP info"); -DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_CRP, "PIM AutoRP candidate RP info"); +DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_RP, "PIM AutoRP discovered RP info"); DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_ANNOUNCE, "PIM AutoRP announcement packet"); +DEFINE_MTYPE_STATIC(PIMD, PIM_AUTORP_GRPPFIX, "PIM AutoRP group prefix list"); static const char *PIM_AUTORP_ANNOUNCEMENT_GRP = "224.0.1.39"; static const char *PIM_AUTORP_DISCOVERY_GRP = "224.0.1.40"; static const in_port_t PIM_AUTORP_PORT = 496; -static int pim_autorp_rp_cmp(const struct pim_autorp_rp *l, - const struct pim_autorp_rp *r) +static int pim_autorp_rp_cmp(const struct pim_autorp_rp *l, const struct pim_autorp_rp *r) { return pim_addr_cmp(l->addr, r->addr); } -DECLARE_SORTLIST_UNIQ(pim_autorp_rp, struct pim_autorp_rp, list, - pim_autorp_rp_cmp); +DECLARE_SORTLIST_UNIQ(pim_autorp_rp, struct pim_autorp_rp, item, pim_autorp_rp_cmp); -static void pim_autorp_rp_free(struct pim_autorp_rp *rp) +static int pim_autorp_grppfix_cmp(const struct pim_autorp_grppfix *l, + const struct pim_autorp_grppfix *r) { - event_cancel(&rp->hold_timer); + return prefix_cmp(&l->grp, &r->grp); +} - /* Clean up installed RP info */ - if (pim_rp_del(rp->autorp->pim, rp->addr, rp->grp, - (strlen(rp->grplist) ? rp->grplist : NULL), - RP_SRC_AUTORP)) - if (PIM_DEBUG_AUTORP) - zlog_err("%s: Failed to delete RP %pI4", __func__, - &rp->addr); +DECLARE_SORTLIST_UNIQ(pim_autorp_grppfix, struct pim_autorp_grppfix, item, pim_autorp_grppfix_cmp); - XFREE(MTYPE_PIM_AUTORP_RP, rp); +static void pim_autorp_grppfix_free(struct pim_autorp_grppfix_head *head) +{ + struct pim_autorp_grppfix *grp; + + while ((grp = pim_autorp_grppfix_pop(head))) + XFREE(MTYPE_PIM_AUTORP_GRPPFIX, grp); } -static void pim_autorp_rplist_free(struct pim_autorp_rp_head *head) +static void pim_autorp_rp_free(struct pim_autorp_rp *rp, bool installed) { - struct pim_autorp_rp *rp; + event_cancel(&rp->hold_timer); - while ((rp = pim_autorp_rp_pop(head))) - pim_autorp_rp_free(rp); + /* Clean up installed RP info */ + if (installed) { + if (pim_rp_del(rp->autorp->pim, rp->addr, rp->grp, + (strlen(rp->grplist) ? 
rp->grplist : NULL), RP_SRC_AUTORP)) { + zlog_warn("%s: Failed to delete RP %pI4", __func__, &rp->addr); + } + + if (strlen(rp->grplist)) { + struct prefix_list *pl; + + pl = prefix_list_lookup(AFI_IP, rp->grplist); + if (pl) + prefix_list_delete(pl); + } + } + + pim_autorp_grppfix_free(&rp->grp_pfix_list); + pim_autorp_grppfix_fini(&rp->grp_pfix_list); + + XFREE(MTYPE_PIM_AUTORP_RP, rp); } -static void pim_autorp_rplist_cfree(struct pim_autorp_rp_head *head) +static void pim_autorp_rplist_free(struct pim_autorp_rp_head *head, bool installed) { struct pim_autorp_rp *rp; while ((rp = pim_autorp_rp_pop(head))) - XFREE(MTYPE_PIM_AUTORP_CRP, rp); + pim_autorp_rp_free(rp, installed); } static void pim_autorp_free(struct pim_autorp *autorp) { - pim_autorp_rplist_free(&(autorp->discovery_rp_list)); + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Freeing PIM AutoRP", __func__); + + pim_autorp_rplist_free(&(autorp->discovery_rp_list), true); pim_autorp_rp_fini(&(autorp->discovery_rp_list)); - pim_autorp_rplist_cfree(&(autorp->candidate_rp_list)); + pim_autorp_rplist_free(&(autorp->candidate_rp_list), false); pim_autorp_rp_fini(&(autorp->candidate_rp_list)); + + pim_autorp_rplist_free(&(autorp->mapping_rp_list), false); + pim_autorp_rp_fini(&(autorp->mapping_rp_list)); + + pim_autorp_rplist_free(&(autorp->advertised_rp_list), false); + pim_autorp_rp_fini(&(autorp->advertised_rp_list)); + + if (autorp->announce_pkt) + XFREE(MTYPE_PIM_AUTORP_ANNOUNCE, autorp->announce_pkt); } static bool pim_autorp_join_groups(struct interface *ifp) @@ -97,26 +127,22 @@ static bool pim_autorp_join_groups(struct interface *ifp) inet_pton(PIM_AF, PIM_AUTORP_DISCOVERY_GRP, &grp); if (pim_socket_join(autorp->sock, grp, pim_ifp->primary_address, ifp->ifindex, pim_ifp)) { - zlog_err("Failed to join group %pI4 on interface %s", &grp, - ifp->name); + zlog_warn("Failed to join group %pI4 on interface %s", &grp, ifp->name); return false; } - /* TODO: Future Mapping agent implementation - * Join announcement group for AutoRP mapping agent - * inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &grp); - * if (pim_socket_join(pim->autorp->sock, grp, - * pim_ifp->primary_address, - * ifp->ifindex, pim_ifp)) { - * zlog_err("Failed to join group %pI4 on interface %s", - * &grp, ifp->name); - * return errno; - * } - */ + zlog_info("%s: Joined AutoRP discovery group %pPA on interface %s", __func__, &grp, + ifp->name); - if (PIM_DEBUG_AUTORP) - zlog_debug("%s: Joined AutoRP groups on interface %s", __func__, - ifp->name); + inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &grp); + if (pim_socket_join(pim->autorp->sock, grp, pim_ifp->primary_address, ifp->ifindex, + pim_ifp)) { + zlog_warn("Failed to join group %pI4 on interface %s", &grp, ifp->name); + return errno; + } + + zlog_info("%s: Joined AutoRP announcement group %pPA on interface %s", __func__, &grp, + ifp->name); return true; } @@ -135,31 +161,26 @@ static bool pim_autorp_leave_groups(struct interface *ifp) inet_pton(PIM_AF, PIM_AUTORP_DISCOVERY_GRP, &grp); if (pim_socket_leave(autorp->sock, grp, pim_ifp->primary_address, ifp->ifindex, pim_ifp)) { - zlog_err("Failed to leave group %pI4 on interface %s", &grp, - ifp->name); + zlog_warn("Failed to leave group %pI4 on interface %s", &grp, ifp->name); return false; } - /* TODO: Future Mapping agent implementation - * Leave announcement group for AutoRP mapping agent - * inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &grp); - * if (pim_socket_leave(pim->autorp->sock, grp, - * pim_ifp->primary_address, - * ifp->ifindex, pim_ifp)) { - * 
zlog_err("Failed to leave group %pI4 on interface %s", - * &grp, ifp->name); - * return errno; - * } - */ + zlog_info("%s: Left AutoRP discovery group %pPA on interface %s", __func__, &grp, ifp->name); - if (PIM_DEBUG_AUTORP) - zlog_debug("%s: Left AutoRP groups on interface %s", __func__, - ifp->name); + inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &grp); + if (pim_socket_leave(pim->autorp->sock, grp, pim_ifp->primary_address, ifp->ifindex, + pim_ifp)) { + zlog_warn("Failed to leave group %pI4 on interface %s", &grp, ifp->name); + return errno; + } + + zlog_info("%s: Left AutoRP announcement group %pPA on interface %s", __func__, &grp, + ifp->name); return true; } -static bool pim_autorp_setup(struct pim_autorp *autorp) +static bool pim_autorp_setup(int fd) { #if defined(HAVE_IP_PKTINFO) int data; @@ -170,35 +191,39 @@ static bool pim_autorp_setup(struct pim_autorp *autorp) .sin_addr = { .s_addr = INADDR_ANY }, .sin_port = htons(PIM_AUTORP_PORT) }; - setsockopt_so_recvbuf(autorp->sock, 1024 * 1024 * 8); + setsockopt_so_recvbuf(fd, 1024 * 1024 * 8); #if defined(HAVE_IP_PKTINFO) /* Linux and Solaris IP_PKTINFO */ data = 1; - if (setsockopt(autorp->sock, PIM_IPPROTO, IP_PKTINFO, &data, data_len)) { - zlog_err("Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s", - autorp->sock, errno, safe_strerror(errno)); + if (setsockopt(fd, PIM_IPPROTO, IP_PKTINFO, &data, data_len)) { + zlog_warn("%s: Could not set IP_PKTINFO on socket fd=%d: errno=%d: %s", __func__, + fd, errno, safe_strerror(errno)); return false; } #endif - if (set_nonblocking(autorp->sock) < 0) { - zlog_err("Could not set non blocking on socket fd=%d: errno=%d: %s", - autorp->sock, errno, safe_strerror(errno)); + if (set_nonblocking(fd) < 0) { + zlog_warn("%s: Could not set non blocking on socket fd=%d: errno=%d: %s", __func__, + fd, errno, safe_strerror(errno)); + return false; + } + + if (sockopt_reuseaddr(fd)) { + zlog_warn("%s: Could not set reuse addr on socket fd=%d: errno=%d: %s", __func__, + fd, errno, safe_strerror(errno)); return false; } - if (sockopt_reuseaddr(autorp->sock)) { - zlog_err("Could not set reuse addr on socket fd=%d: errno=%d: %s", - autorp->sock, errno, safe_strerror(errno)); + if (setsockopt_ipv4_multicast_loop(fd, 1) < 0) { + zlog_warn("%s: Could not enable multicast loopback on socket fd=%d: errno=%d: %s", + __func__, fd, errno, safe_strerror(errno)); return false; } - if (bind(autorp->sock, (const struct sockaddr *)&autorp_addr, - sizeof(autorp_addr)) < 0) { - zlog_err("Could not bind socket: %pSUp, fd=%d, errno=%d, %s", - (union sockunion *)&autorp_addr, autorp->sock, errno, - safe_strerror(errno)); + if (bind(fd, (const struct sockaddr *)&autorp_addr, sizeof(autorp_addr)) < 0) { + zlog_warn("%s: Could not bind socket: %pSUp, fd=%d, errno=%d, %s", __func__, + (union sockunion *)&autorp_addr, fd, errno, safe_strerror(errno)); return false; } @@ -208,20 +233,148 @@ static bool pim_autorp_setup(struct pim_autorp *autorp) return true; } -static bool pim_autorp_announcement(struct pim_autorp *autorp, uint8_t rpcnt, - uint16_t holdtime, char *buf, - size_t buf_size) +static void autorp_ma_rp_holdtime(struct event *evt) { - /* TODO: Future Mapping agent implementation - * Implement AutoRP mapping agent logic using received announcement messages - */ + /* Mapping agent RP hold time expired, remove the RP */ + struct pim_autorp_rp *rp = EVENT_ARG(evt); + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP hold time expired, RP removed from mapping agent: addr=%pI4, grp=%pFX, grplist=%s", + __func__, 
&rp->addr, &rp->grp, + (strlen(rp->grplist) ? rp->grplist : "NONE")); + + pim_autorp_rp_del(&(rp->autorp->mapping_rp_list), rp); + pim_autorp_rp_free(rp, false); +} + +static bool autorp_recv_announcement(struct pim_autorp *autorp, uint8_t rpcnt, uint16_t holdtime, + char *buf, size_t buf_size) +{ + int i, j; + struct autorp_pkt_rp *rp; + struct autorp_pkt_grp *grp; + size_t offset = 0; + pim_addr rp_addr; + struct pim_autorp_rp *ma_rp; + struct pim_autorp_rp *trp; + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Processing AutoRP Announcement (rpcnt=%u, holdtime=%u)", __func__, + rpcnt, holdtime); + + for (i = 0; i < rpcnt; ++i) { + if ((buf_size - offset) < AUTORP_RPLEN) { + zlog_warn("%s: Failed to parse AutoRP Announcement RP, invalid buffer size (%u < %u)", + __func__, (uint32_t)(buf_size - offset), AUTORP_RPLEN); + return false; + } + + rp = (struct autorp_pkt_rp *)(buf + offset); + offset += AUTORP_RPLEN; + + rp_addr.s_addr = rp->addr; + + /* Ignore RP's limited to PIM version 1 or with an unknown version */ + if (rp->pimver == AUTORP_PIM_V1 || rp->pimver == AUTORP_PIM_VUNKNOWN) { + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Ignoring unsupported PIM version (%u) in AutoRP Announcement for RP %pI4", + __func__, rp->pimver, (in_addr_t *)&(rp->addr)); + /* Update the offset to skip past the groups advertised for this RP */ + offset += (AUTORP_GRPLEN * rp->grpcnt); + continue; + } + + if (rp->grpcnt == 0) { + /* No groups?? */ + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Announcement message has no groups for RP %pI4", + __func__, (in_addr_t *)&(rp->addr)); + continue; + } + + if ((buf_size - offset) < AUTORP_GRPLEN) { + zlog_warn("%s: Buffer underrun parsing groups for RP %pI4", __func__, + (in_addr_t *)&(rp->addr)); + return false; + } + + /* Store all announced RP's, calculate what to send in discovery when discovery is sent. 
*/ + ma_rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(struct pim_autorp_rp)); + memcpy(&(ma_rp->addr), &rp_addr, sizeof(pim_addr)); + trp = pim_autorp_rp_add(&(autorp->mapping_rp_list), ma_rp); + if (trp == NULL) { + /* RP was brand new, finish initializing */ + ma_rp->autorp = autorp; + ma_rp->holdtime = holdtime; + ma_rp->hold_timer = NULL; + ma_rp->grplist[0] = '\0'; + memset(&(ma_rp->grp), 0, sizeof(ma_rp->grp)); + pim_autorp_grppfix_init(&ma_rp->grp_pfix_list); + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: New candidate RP learned (%pPA)", __func__, + &rp_addr); + } else { + /* Returned an existing entry, free allocated RP */ + XFREE(MTYPE_PIM_AUTORP_RP, ma_rp); + ma_rp = trp; + /* Free the existing group prefix list, in case the advertised groups changed */ + pim_autorp_grppfix_free(&ma_rp->grp_pfix_list); + } + + /* Cancel any existing timer and restart it */ + event_cancel(&ma_rp->hold_timer); + if (holdtime > 0) + event_add_timer(router->master, autorp_ma_rp_holdtime, ma_rp, + ma_rp->holdtime, &(ma_rp->hold_timer)); + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Parsing %u group(s) for candidate RP %pPA", __func__, + rp->grpcnt, &rp_addr); + + for (j = 0; j < rp->grpcnt; ++j) { + /* grp is already pointing at the first group in the buffer */ + struct pim_autorp_grppfix *lgrp; + struct pim_autorp_grppfix *tgrp; + + if ((buf_size - offset) < AUTORP_GRPLEN) { + zlog_warn("%s: Failed parsing AutoRP announcement, RP(%pI4), invalid buffer size (%u < %u)", + __func__, &rp_addr, (uint32_t)(buf_size - offset), + AUTORP_GRPLEN); + return false; + } + + grp = (struct autorp_pkt_grp *)(buf + offset); + offset += AUTORP_GRPLEN; + + lgrp = XCALLOC(MTYPE_PIM_AUTORP_GRPPFIX, sizeof(struct pim_autorp_grppfix)); + lgrp->grp.family = AF_INET; + lgrp->grp.prefixlen = grp->masklen; + lgrp->grp.u.prefix4.s_addr = grp->addr; + lgrp->negative = grp->negprefix; + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: %s%pFX added to candidate RP %pPA", __func__, + (lgrp->negative ? "!" : ""), &lgrp->grp, &rp_addr); + + tgrp = pim_autorp_grppfix_add(&ma_rp->grp_pfix_list, lgrp); + if (tgrp != NULL) { + /* This should never happen but if there was an existing entry just free the + * allocated group prefix + */ + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: %pFX was duplicated in AutoRP announcement", + __func__, &lgrp->grp); + XFREE(MTYPE_PIM_AUTORP_GRPPFIX, lgrp); + } + } + } + if (PIM_DEBUG_AUTORP) - zlog_debug("%s: AutoRP processed announcement message", - __func__); + zlog_debug("%s: AutoRP processed announcement message", __func__); return true; } -static void autorp_rp_holdtime(struct event *evt) +static void autorp_cand_rp_holdtime(struct event *evt) { /* RP hold time expired, remove the RP */ struct pim_autorp_rp *rp = EVENT_ARG(evt); @@ -232,79 +385,320 @@ static void autorp_rp_holdtime(struct event *evt) (strlen(rp->grplist) ? 
rp->grplist : "NONE")); pim_autorp_rp_del(&(rp->autorp->discovery_rp_list), rp); - pim_autorp_rp_free(rp); + pim_autorp_rp_free(rp, true); } -static bool pim_autorp_add_rp(struct pim_autorp *autorp, pim_addr rpaddr, - struct prefix grp, char *listname, - uint16_t holdtime) +static bool pim_autorp_add_rp(struct pim_autorp *autorp, pim_addr rpaddr, struct prefix grp, + char *listname, uint16_t holdtime) { struct pim_autorp_rp *rp; struct pim_autorp_rp *trp = NULL; int ret; ret = pim_rp_new(autorp->pim, rpaddr, grp, listname, RP_SRC_AUTORP); + /* There may not be a path to the RP right now, but that doesn't mean it failed to add the RP */ if (ret != PIM_SUCCESS && ret != PIM_RP_NO_PATH) { - zlog_err("%s: Failed to add new RP addr=%pI4, grp=%pFX, grplist=%s", - __func__, &rpaddr, &grp, - (listname ? listname : "NONE")); + zlog_warn("%s: Failed to add active RP addr=%pI4, grp=%pFX, grplist=%s", __func__, + &rpaddr, &grp, (listname ? listname : "NONE")); return false; } - if (PIM_DEBUG_AUTORP) - zlog_debug("%s: Added new AutoRP learned RP addr=%pI4, grp=%pFX, grplist=%s", - __func__, &rpaddr, &grp, - (listname ? listname : "NONE")); - rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(*rp)); rp->autorp = autorp; memcpy(&(rp->addr), &rpaddr, sizeof(pim_addr)); - prefix_copy(&(rp->grp), &grp); - if (listname) - snprintf(rp->grplist, sizeof(rp->grplist), "%s", listname); - else - rp->grplist[0] = '\0'; - - rp->holdtime = holdtime; - rp->hold_timer = NULL; trp = pim_autorp_rp_add(&(autorp->discovery_rp_list), rp); if (trp == NULL) { /* RP was brand new */ trp = pim_autorp_rp_find(&(autorp->discovery_rp_list), (const struct pim_autorp_rp *)rp); + /* Make sure the timer is NULL so the cancel below doesn't mess up */ + trp->hold_timer = NULL; + zlog_info("%s: Added new AutoRP learned RP addr=%pI4, grp=%pFX, grplist=%s", + __func__, &rpaddr, &grp, (listname ? 
listname : "NONE")); } else { - /* RP already existed */ + /* RP already existed, free the temp one */ XFREE(MTYPE_PIM_AUTORP_RP, rp); - event_cancel(&trp->hold_timer); - - /* We know the address matches, but these values may have changed */ - trp->holdtime = holdtime; - prefix_copy(&(trp->grp), &grp); - if (listname) { - snprintf(trp->grplist, sizeof(trp->grplist), "%s", - listname); - } else { - trp->grplist[0] = '\0'; - } } + /* Cancel any existing timer before restarting it */ + event_cancel(&trp->hold_timer); + trp->holdtime = holdtime; + prefix_copy(&(trp->grp), &grp); + if (listname) + snprintf(trp->grplist, sizeof(trp->grplist), "%s", listname); + else + trp->grplist[0] = '\0'; + if (holdtime > 0) { - event_add_timer(router->master, autorp_rp_holdtime, trp, - holdtime, &(trp->hold_timer)); + event_add_timer(router->master, autorp_cand_rp_holdtime, trp, holdtime, + &(trp->hold_timer)); if (PIM_DEBUG_AUTORP) zlog_debug("%s: Started %u second hold timer for RP %pI4", __func__, holdtime, &trp->addr); - } else { - /* If hold time is zero, make sure there doesn't exist a hold timer for it already */ - event_cancel(&trp->hold_timer); } return true; } -static bool pim_autorp_discovery(struct pim_autorp *autorp, uint8_t rpcnt, - uint16_t holdtime, char *buf, size_t buf_size) +static size_t autorp_build_disc_rps(struct pim_autorp *autorp, uint8_t *buf, size_t buf_sz, + size_t *sz) +{ + /* Header has already been added, fill in starting with the address of RP1 + * buf_sz is the max size of the buf + * sz is the current size of the packet, update as buf is filled + * return the total number of RP's added + * + * + * We need to resolve the announced RP's following these rules: + * 1) Co-existence of longer and shorter group prefixes, from different RPs. E.g. when RP1 + * announces 224.2.*.*, and RP2 announces 224.2.2.*, both are accepted; + * 2) For announcements for identical group prefixes from two different RPs, the one from the + * RP with the higher IP address is accepted; + * 3) No duplicates are sent to the AUTORP-DISCOVERY address. E.g. if an RP announces both + * 224.2.2.* and 224.2.*.*, the former group-prefix is not sent and only 224.2.*.* is sent + * to the AUTORP-DISCOVERY address. + * + * + * The approach to resolution, first loop the stored RP's and extract the group prefixes, stored + * in a sorted list, sorted from least specific to most 0.0.0.0/0 -> 239.255.255.255/32. Each + * group prefix will then store the RP advertising that group prefix, this will resolve 2. + * The next step is to then loop the group prefix list and store them back into a list sorted by + * RP address, where the least specific group address will be stored, resolving 3. 1 is more + * about what is allowed, and in the example above the different prefixes will be unique in the + * list of group prefixes, and when they go back into RP's, they are also from different RP's + * and will therefore be sent. 
+ */ + + struct pim_autorp_rp *rp; + struct pim_autorp_rp *trp; + struct pim_autorp_grppfix *grp; + struct pim_autorp_grppfix *grp2; + struct pim_autorp_grppfix *tgrp; + struct pim_autorp_grppfix_head grplist; + bool skip = false; + size_t rpcnt = 0; + size_t bsz = 0; + + /* Initialize the lists, grplist is temporary, disc rp list is stored long term for + * show output, so make sure it's empty + */ + pim_autorp_grppfix_init(&grplist); + pim_autorp_rplist_free(&autorp->advertised_rp_list, false); + + /* Loop the advertised RP's and their group prefixes and make a unique list of group prefixes, + * keeping just the highest IP RP for each group prefix + */ + frr_each (pim_autorp_rp, &autorp->mapping_rp_list, rp) { + frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grp) { + grp2 = XCALLOC(MTYPE_PIM_AUTORP_GRPPFIX, sizeof(struct pim_autorp_grppfix)); + prefix_copy(&grp2->grp, &grp->grp); + grp2->negative = grp->negative; + grp2->rp = rp->addr; + tgrp = pim_autorp_grppfix_add(&grplist, grp2); + if (tgrp != NULL) { + /* Returned an existing entry. Use the highest RP addr and free allocated object */ + if (IPV4_ADDR_CMP(&tgrp->rp, &grp2->rp)) + tgrp->rp = grp2->rp; + XFREE(MTYPE_PIM_AUTORP_GRPPFIX, grp2); + } + } + } + + /* Now loop the unique group prefixes and put it back into an RP list */ + frr_each (pim_autorp_grppfix, &grplist, grp) { + rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(struct pim_autorp_rp)); + rp->addr = grp->rp; + trp = pim_autorp_rp_add(&autorp->advertised_rp_list, rp); + if (trp == NULL) { + /* RP was brand new, finish initializing */ + rp->autorp = NULL; + rp->holdtime = 0; + rp->hold_timer = NULL; + rp->grplist[0] = '\0'; + memset(&(rp->grp), 0, sizeof(rp->grp)); + pim_autorp_grppfix_init(&rp->grp_pfix_list); + } else { + /* Returned an existing entry, free allocated RP */ + XFREE(MTYPE_PIM_AUTORP_RP, rp); + rp = trp; + } + + /* Groups are in order from least specific to most, so go through the existing + * groups for this RP and see if the current group is within the prefix of one that + * is already in the list, if so, skip it, if not, add it + * If one is a positive match and the other is negative, then still include it. 
+ */ + skip = false; + frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grp2) { + if (prefix_match(&grp2->grp, &grp->grp) && grp->negative == grp2->negative) { + skip = true; + break; + } + } + + if (skip) + continue; + + /* add the group to the RP's group list */ + grp2 = XCALLOC(MTYPE_PIM_AUTORP_GRPPFIX, sizeof(struct pim_autorp_grppfix)); + prefix_copy(&grp2->grp, &grp->grp); + grp2->negative = grp->negative; + tgrp = pim_autorp_grppfix_add(&rp->grp_pfix_list, grp2); + assert(tgrp == NULL); + } + + /* Done with temporary group prefix list, so free and finish */ + pim_autorp_grppfix_free(&grplist); + pim_autorp_grppfix_fini(&grplist); + + /* Now finally we can loop the disc rp list and build the packet */ + frr_each (pim_autorp_rp, &autorp->advertised_rp_list, rp) { + struct autorp_pkt_rp *brp; + struct autorp_pkt_grp *bgrp; + size_t rp_sz; + size_t grpcnt; + + grpcnt = pim_autorp_grppfix_count(&rp->grp_pfix_list); + rp_sz = sizeof(struct autorp_pkt_rp) + (grpcnt * sizeof(struct autorp_pkt_grp)); + if (buf_sz < *sz + rp_sz) { + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Failed to pack AutoRP discovery packet, buffer overrun, (%u < %u)", + __func__, (uint32_t)buf_sz, (uint32_t)(*sz + rp_sz)); + break; + } + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Add RP %pI4 (grpcnt=%u) to discovery message", __func__, + &rp->addr, (uint32_t)grpcnt); + + rpcnt++; + + brp = (struct autorp_pkt_rp *)(buf + bsz); + bsz += sizeof(struct autorp_pkt_rp); + + /* Since this is an in_addr, assume it's already the right byte order */ + brp->addr = rp->addr.s_addr; + brp->pimver = AUTORP_PIM_V2; + brp->reserved = 0; + brp->grpcnt = grpcnt; + + frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grp) { + bgrp = (struct autorp_pkt_grp *)(buf + bsz); + bsz += sizeof(struct autorp_pkt_grp); + + bgrp->addr = grp->grp.u.prefix4.s_addr; + bgrp->masklen = grp->grp.prefixlen; + bgrp->negprefix = grp->negative; + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Add group %s%pFX for RP %pI4 to discovery message", + __func__, (grp->negative ? "!" 
: ""), &grp->grp, + &rp->addr); + } + + /* Update the size with this RP now that it is packed */ + *sz += bsz; + } + + return rpcnt; +} + +static size_t autorp_build_disc_packet(struct pim_autorp *autorp, uint8_t *buf, size_t buf_sz) +{ + size_t sz = 0; + struct autorp_pkt_hdr *hdr; + + if (buf_sz >= AUTORP_HDRLEN) { + hdr = (struct autorp_pkt_hdr *)buf; + hdr->version = AUTORP_VERSION; + hdr->type = AUTORP_DISCOVERY_TYPE; + hdr->holdtime = htons(autorp->discovery_holdtime); + hdr->reserved = 0; + sz += AUTORP_HDRLEN; + hdr->rpcnt = autorp_build_disc_rps(autorp, buf + sizeof(struct autorp_pkt_hdr), + (buf_sz - AUTORP_HDRLEN), &sz); + if (hdr->rpcnt == 0) + sz = 0; + } + return sz; +} + +static void autorp_send_discovery(struct event *evt) +{ + struct pim_autorp *autorp = EVENT_ARG(evt); + struct sockaddr_in discGrp; + size_t disc_sz; + size_t buf_sz = 65535; + uint8_t buf[65535] = { 0 }; + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP sending discovery info", __func__); + + /* Mark true, even if nothing is sent */ + autorp->mapping_agent_active = true; + disc_sz = autorp_build_disc_packet(autorp, buf, buf_sz); + + if (disc_sz > 0) { + discGrp.sin_family = AF_INET; + discGrp.sin_port = htons(PIM_AUTORP_PORT); + inet_pton(PIM_AF, PIM_AUTORP_DISCOVERY_GRP, &discGrp.sin_addr); + + if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_TTL, + &(autorp->discovery_scope), sizeof(autorp->discovery_scope)) == 0) { + if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_IF, + &(autorp->mapping_agent_addrsel.run_addr), + sizeof(autorp->mapping_agent_addrsel.run_addr)) == 0) { + if (sendto(autorp->sock, buf, disc_sz, 0, + (struct sockaddr *)&discGrp, sizeof(discGrp)) > 0) { + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP discovery message sent", + __func__); + } else if (PIM_DEBUG_AUTORP) + zlog_warn("%s: Failed to send AutoRP discovery message, errno=%d, %s", + __func__, errno, safe_strerror(errno)); + } else if (PIM_DEBUG_AUTORP) + zlog_warn("%s: Failed to set Multicast Interface for sending AutoRP discovery message, errno=%d, %s", + __func__, errno, safe_strerror(errno)); + } else if (PIM_DEBUG_AUTORP) + zlog_warn("%s: Failed to set Multicast TTL for sending AutoRP discovery message, errno=%d, %s", + __func__, errno, safe_strerror(errno)); + } + + /* Start the new timer for the entire send discovery interval */ + event_add_timer(router->master, autorp_send_discovery, autorp, autorp->discovery_interval, + &(autorp->send_discovery_timer)); +} + +static void autorp_send_discovery_on(struct pim_autorp *autorp) +{ + int interval = 5; + + /* Send the first discovery shortly after being enabled. + * If the configured interval is less than 5 seconds, then just use that. 
+ */ + if (interval > autorp->discovery_interval) + interval = autorp->discovery_interval; + + if (autorp->send_discovery_timer) + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP discovery sending enabled in %u seconds", __func__, + interval); + + event_add_timer(router->master, autorp_send_discovery, autorp, interval, + &(autorp->send_discovery_timer)); +} + +static void autorp_send_discovery_off(struct pim_autorp *autorp) +{ + if (autorp->send_discovery_timer) + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP discovery sending disabled", __func__); + event_cancel(&(autorp->send_discovery_timer)); +} + +static bool autorp_recv_discovery(struct pim_autorp *autorp, uint8_t rpcnt, uint16_t holdtime, + char *buf, size_t buf_size, pim_addr src) { int i, j; struct autorp_pkt_rp *rp; @@ -318,65 +712,122 @@ static bool pim_autorp_discovery(struct pim_autorp *autorp, uint8_t rpcnt, int64_t seq = 1; bool success = true; + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Received AutoRP discovery message (src=%pI4, rpcnt=%u, holdtime=%u)", + __func__, &src, rpcnt, holdtime); + + if (autorp->send_rp_discovery && + (pim_addr_cmp(autorp->mapping_agent_addrsel.run_addr, src) < 0)) { + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP send discovery suppressed -- Discovery received with higher IP address", + __func__); + + /* Cancel the existing send timer and restart for 3X the send discovery interval */ + event_cancel(&(autorp->send_discovery_timer)); + event_add_timer(router->master, autorp_send_discovery, autorp, + (autorp->discovery_interval * 3), &(autorp->send_discovery_timer)); + + /* Clear the last sent discovery RP's, since it is no longer valid */ + pim_autorp_rplist_free(&autorp->advertised_rp_list, false); + /* Unset flag indicating we are active */ + autorp->mapping_agent_active = false; + } + for (i = 0; i < rpcnt; ++i) { - if ((buf_size - offset) < AUTORP_RPLEN) + if ((buf_size - offset) < AUTORP_RPLEN) { + zlog_warn("%s: Failed to parse AutoRP discovery message, invalid buffer size (%u < %u)", + __func__, (uint32_t)(buf_size - offset), AUTORP_RPLEN); return false; + } rp = (struct autorp_pkt_rp *)(buf + offset); offset += AUTORP_RPLEN; rp_addr.s_addr = rp->addr; + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Parsing RP %pI4 (grpcnt=%u)", __func__, + (in_addr_t *)&rp->addr, rp->grpcnt); + /* Ignore RP's limited to PIM version 1 or with an unknown version */ - if (rp->pimver == PIM_V1 || rp->pimver == PIM_VUNKNOWN) { - zlog_warn("%s: Ignoring unsupported PIM version in AutoRP Discovery for RP %pI4", - __func__, (in_addr_t *)&(rp->addr)); + if (rp->pimver == AUTORP_PIM_V1 || rp->pimver == AUTORP_PIM_VUNKNOWN) { + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Ignoring unsupported PIM version in AutoRP Discovery for RP %pI4", + __func__, (in_addr_t *)&(rp->addr)); /* Update the offset to skip past the groups advertised for this RP */ offset += (AUTORP_GRPLEN * rp->grpcnt); continue; } - if (rp->grpcnt == 0) { /* No groups?? 
*/ - zlog_warn("%s: Discovery message has no groups for RP %pI4", - __func__, (in_addr_t *)&(rp->addr)); + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Discovery message has no groups for RP %pI4", + __func__, (in_addr_t *)&(rp->addr)); continue; } - if ((buf_size - offset) < AUTORP_GRPLEN) { - zlog_warn("%s: Buffer underrun parsing groups for RP %pI4", - __func__, (in_addr_t *)&(rp->addr)); + /* Make sure there is enough buffer to parse all the groups */ + if ((buf_size - offset) < (AUTORP_GRPLEN * rp->grpcnt)) { + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Buffer underrun parsing groups for RP %pI4 (%u < %u)", + __func__, (in_addr_t *)&(rp->addr), + (uint32_t)(buf_size - offset), + (uint32_t)(AUTORP_GRPLEN * rp->grpcnt)); return false; } + /* Get the first group so we can check for a negative prefix */ + /* Don't add to offset yet to make the multiple group loop easier */ grp = (struct autorp_pkt_grp *)(buf + offset); - offset += AUTORP_GRPLEN; if (rp->grpcnt == 1 && grp->negprefix == 0) { /* Only one group with positive prefix, we can use the standard RP API */ + offset += AUTORP_GRPLEN; grppfix.family = AF_INET; grppfix.prefixlen = grp->masklen; grppfix.u.prefix4.s_addr = grp->addr; - if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, NULL, - holdtime)) + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Parsing group %s%pFX for RP %pI4", __func__, + (grp->negprefix ? "!" : ""), &grppfix, + (in_addr_t *)&rp->addr); + + if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, NULL, holdtime)) success = false; } else { - /* More than one grp, or the only group is a negative prefix, need to make a prefix list for this RP */ - snprintfrr(plname, sizeof(plname), "__AUTORP_%pI4__", - &rp_addr); + /* More than one grp, or the only group is a negative prefix. + * Need to make a prefix list for this RP + */ + snprintfrr(plname, sizeof(plname), "__AUTORP_%pI4__", &rp_addr); + pl = prefix_list_lookup(AFI_IP, plname); + + if (pl) { + /* Existing prefix list found, delete it first */ + /* TODO: Instead of deleting completely, maybe we can just clear it and re-add entries */ + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Found existing prefix list %s, replacing it", + __func__, plname); + prefix_list_delete(pl); + } + + /* Now get a new prefix list */ pl = prefix_list_get(AFI_IP, 0, plname); for (j = 0; j < rp->grpcnt; ++j) { - /* grp is already pointing at the first group in the buffer */ + /* This will just set grp to the same pointer on the first loop, but offset will + * be updated correctly while parsing + */ + grp = (struct autorp_pkt_grp *)(buf + offset); + offset += AUTORP_GRPLEN; + ple = prefix_list_entry_new(); ple->pl = pl; ple->seq = seq; seq += 5; memset(&ple->prefix, 0, sizeof(ple->prefix)); prefix_list_entry_update_start(ple); - ple->type = (grp->negprefix ? PREFIX_DENY - : PREFIX_PERMIT); + ple->type = (grp->negprefix ? PREFIX_DENY : PREFIX_PERMIT); ple->prefix.family = AF_INET; ple->prefix.prefixlen = grp->masklen; ple->prefix.u.prefix4.s_addr = grp->addr; @@ -385,60 +836,59 @@ static bool pim_autorp_discovery(struct pim_autorp *autorp, uint8_t rpcnt, ple->le = 32; prefix_list_entry_update_finish(ple); - if ((buf_size - offset) < AUTORP_GRPLEN) - return false; - - grp = (struct autorp_pkt_grp *)(buf + offset); - offset += AUTORP_GRPLEN; + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Parsing group %s%pFX for RP %pI4", __func__, + (grp->negprefix ? "!" 
: ""), &ple->prefix, + (in_addr_t *)&rp->addr); } - if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, plname, - holdtime)) + if (!pim_autorp_add_rp(autorp, rp_addr, grppfix, plname, holdtime)) success = false; } } - if (PIM_DEBUG_AUTORP) - zlog_debug("%s: Processed AutoRP Discovery message", __func__); - return success; } -static bool pim_autorp_msg(struct pim_autorp *autorp, char *buf, size_t buf_size) +static bool autorp_recv_msg(struct pim_autorp *autorp, char *buf, size_t buf_size, pim_addr src) { struct autorp_pkt_hdr *h; - if (buf_size < AUTORP_HDRLEN) + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Received AutoRP message", __func__); + + if (buf_size < AUTORP_HDRLEN) { + zlog_warn("%s: Invalid AutoRP Header size (%u < %u)", __func__, (uint32_t)buf_size, + AUTORP_HDRLEN); return false; + } h = (struct autorp_pkt_hdr *)buf; - if (h->version != AUTORP_VERSION) + if (h->version != AUTORP_VERSION) { + zlog_warn("%s: Unsupported AutoRP version (%u != %u)", __func__, h->version, + AUTORP_VERSION); return false; + } - if (h->type == AUTORP_ANNOUNCEMENT_TYPE && - !pim_autorp_announcement(autorp, h->rpcnt, htons(h->holdtime), - buf + AUTORP_HDRLEN, - buf_size - AUTORP_HDRLEN)) - return false; + if (h->type == AUTORP_ANNOUNCEMENT_TYPE) + return autorp_recv_announcement(autorp, h->rpcnt, htons(h->holdtime), + buf + AUTORP_HDRLEN, buf_size - AUTORP_HDRLEN); - if (h->type == AUTORP_DISCOVERY_TYPE && - !pim_autorp_discovery(autorp, h->rpcnt, htons(h->holdtime), - buf + AUTORP_HDRLEN, buf_size - AUTORP_HDRLEN)) - return false; + if (h->type == AUTORP_DISCOVERY_TYPE) + return autorp_recv_discovery(autorp, h->rpcnt, htons(h->holdtime), + buf + AUTORP_HDRLEN, buf_size - AUTORP_HDRLEN, src); - if (PIM_DEBUG_AUTORP) - zlog_debug("%s: Processed AutoRP packet", __func__); + zlog_warn("%s: Unknown AutoRP message type (%u)", __func__, h->type); - return true; + return false; } static void autorp_read(struct event *t); static void autorp_read_on(struct pim_autorp *autorp) { - event_add_read(router->master, autorp_read, autorp, autorp->sock, - &(autorp->read_event)); + event_add_read(router->master, autorp_read, autorp, autorp->sock, &(autorp->read_event)); if (PIM_DEBUG_AUTORP) zlog_debug("%s: AutoRP socket read enabled", __func__); } @@ -456,26 +906,35 @@ static void autorp_read(struct event *evt) int fd = evt->u.fd; char buf[10000]; int rd; + struct sockaddr_storage from; + socklen_t fromlen = sizeof(from); + pim_addr src; if (PIM_DEBUG_AUTORP) zlog_debug("%s: Reading from AutoRP socket", __func__); while (1) { - rd = pim_socket_recvfromto(fd, (uint8_t *)buf, sizeof(buf), - NULL, NULL, NULL, NULL, NULL); + rd = pim_socket_recvfromto(fd, (uint8_t *)buf, sizeof(buf), &from, &fromlen, NULL, + NULL, NULL); if (rd <= 0) { if (errno == EINTR) continue; if (errno == EWOULDBLOCK || errno == EAGAIN) break; + zlog_warn("%s: Failure reading rd=%d: fd=%d: errno=%d: %s", __func__, rd, + fd, errno, safe_strerror(errno)); + goto err; + } - zlog_warn("%s: Failure reading rd=%d: fd=%d: errno=%d: %s", - __func__, rd, fd, errno, safe_strerror(errno)); + if (from.ss_family == AF_INET) + src.s_addr = ((struct sockaddr_in *)&from)->sin_addr.s_addr; + else { + zlog_warn("%s: AutoRP message is not IPV4", __func__); goto err; } - if (!pim_autorp_msg(autorp, buf, rd)) - zlog_err("%s: Failure parsing AutoRP message", __func__); + if (!autorp_recv_msg(autorp, buf, rd, src)) + zlog_warn("%s: Failure parsing AutoRP message", __func__); /* Keep reading until would block */ } @@ -493,23 +952,23 @@ static bool pim_autorp_socket_enable(struct 
pim_autorp *autorp) frr_with_privs (&pimd_privs) { fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_IP); if (fd < 0) { - zlog_warn("Could not create autorp socket: errno=%d: %s", - errno, safe_strerror(errno)); + zlog_warn("Could not create autorp socket: errno=%d: %s", errno, + safe_strerror(errno)); return false; } - autorp->sock = fd; - if (!pim_autorp_setup(autorp)) { - zlog_warn("Could not setup autorp socket fd=%d: errno=%d: %s", - fd, errno, safe_strerror(errno)); + if (!pim_autorp_setup(fd)) { + zlog_warn("Could not setup autorp socket fd=%d: errno=%d: %s", fd, errno, + safe_strerror(errno)); close(fd); - autorp->sock = -1; return false; } } + autorp->sock = fd; + if (PIM_DEBUG_AUTORP) - zlog_debug("%s: AutoRP socket enabled", __func__); + zlog_debug("%s: AutoRP socket enabled (fd=%u)", __func__, fd); return true; } @@ -517,8 +976,8 @@ static bool pim_autorp_socket_enable(struct pim_autorp *autorp) static bool pim_autorp_socket_disable(struct pim_autorp *autorp) { if (close(autorp->sock)) { - zlog_warn("Failure closing autorp socket: fd=%d errno=%d: %s", - autorp->sock, errno, safe_strerror(errno)); + zlog_warn("Failure closing autorp socket: fd=%d errno=%d: %s", autorp->sock, errno, + safe_strerror(errno)); return false; } @@ -542,13 +1001,15 @@ static void autorp_send_announcement(struct event *evt) announceGrp.sin_port = htons(PIM_AUTORP_PORT); inet_pton(PIM_AF, PIM_AUTORP_ANNOUNCEMENT_GRP, &announceGrp.sin_addr); - if (autorp->annouce_pkt_sz >= MIN_AUTORP_PKT_SZ) { + if (autorp->announce_pkt_sz >= MIN_AUTORP_PKT_SZ) { + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Sending AutoRP announcement", __func__); + if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_TTL, - &(autorp->announce_scope), - sizeof(autorp->announce_scope)) < 0) { - if (PIM_DEBUG_AUTORP) - zlog_err("%s: Failed to set Multicast TTL for sending AutoRP announcement message, errno=%d, %s", - __func__, errno, safe_strerror(errno)); + &(autorp->announce_scope), sizeof(autorp->announce_scope)) < 0) { + zlog_warn("%s: Failed to set Multicast TTL for sending AutoRP announcement message, errno=%d, %s", + __func__, errno, safe_strerror(errno)); + return; } FOR_ALL_INTERFACES (autorp->pim->vrf, ifp) { @@ -556,57 +1017,56 @@ static void autorp_send_announcement(struct event *evt) /* Only send on active interfaces with full pim enabled, non-passive * and have a primary address set. 
*/ - if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && - pim_ifp && pim_ifp->pim_enable && - !pim_ifp->pim_passive_enable && + if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp && + pim_ifp->pim_enable && !pim_ifp->pim_passive_enable && !pim_addr_is_any(pim_ifp->primary_address)) { - if (setsockopt(autorp->sock, IPPROTO_IP, - IP_MULTICAST_IF, + if (setsockopt(autorp->sock, IPPROTO_IP, IP_MULTICAST_IF, &(pim_ifp->primary_address), - sizeof(pim_ifp->primary_address)) < - 0) { - if (PIM_DEBUG_AUTORP) - zlog_err("%s: Failed to set Multicast Interface for sending AutoRP announcement message, errno=%d, %s", - __func__, errno, - safe_strerror(errno)); + sizeof(pim_ifp->primary_address)) < 0) { + zlog_warn("%s: Failed to set Multicast Interface for sending AutoRP announcement message, errno=%d, %s", + __func__, errno, safe_strerror(errno)); + continue; } - if (sendto(autorp->sock, autorp->annouce_pkt, - autorp->annouce_pkt_sz, 0, + + if (sendto(autorp->sock, autorp->announce_pkt, + autorp->announce_pkt_sz, 0, (struct sockaddr *)&announceGrp, - sizeof(announceGrp)) <= 0) { - if (PIM_DEBUG_AUTORP) - zlog_err("%s: Failed to send AutoRP announcement message, errno=%d, %s", - __func__, errno, - safe_strerror(errno)); - } + sizeof(announceGrp)) <= 0) + zlog_warn("%s: Failed to send AutoRP announcement message, errno=%d, %s", + __func__, errno, safe_strerror(errno)); } } } /* Start the new timer for the entire announce interval */ - event_add_timer(router->master, autorp_send_announcement, autorp, - autorp->announce_interval, &(autorp->announce_timer)); + event_add_timer(router->master, autorp_send_announcement, autorp, autorp->announce_interval, + &(autorp->announce_timer)); } static void autorp_announcement_on(struct pim_autorp *autorp) { int interval = 5; - if (interval > autorp->announce_interval) { - /* If the configured interval is less than 5 seconds, then just use that */ + /* Send the first announcement shortly after being enabled. + * If the configured interval is less than 5 seconds, then just use that. 
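The announcement send loop above follows the usual pattern for scoped IPv4 multicast: set IP_MULTICAST_TTL to the configured scope, select the egress interface via IP_MULTICAST_IF using one of its addresses, then sendto() the group and port. A minimal standalone sketch of that pattern, using placeholder group/port arguments rather than the FRR constants (this helper is not an FRR API):

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <unistd.h>

/* Illustrative helper: send one datagram to grp:port, scoped by TTL,
 * egressing via the interface that owns src_if.
 */
int send_scoped(const void *pkt, size_t len, uint8_t scope,
		struct in_addr src_if, const char *grp, uint16_t port)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	ssize_t rc;

	if (fd < 0)
		return -1;

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(port);
	inet_pton(AF_INET, grp, &dst.sin_addr);

	/* TTL doubles as the announcement/discovery scope */
	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_TTL, &scope, sizeof(scope));
	/* Pick the egress interface by one of its addresses */
	setsockopt(fd, IPPROTO_IP, IP_MULTICAST_IF, &src_if, sizeof(src_if));

	rc = sendto(fd, pkt, len, 0, (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return rc < 0 ? -1 : 0;
}

The patch differs from this sketch in that it keeps one long-lived socket and loops over every PIM-enabled, non-passive interface, logging a warning and continuing on per-interface setsockopt/sendto failures rather than aborting the whole send.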
+ */ + if (interval > autorp->announce_interval) interval = autorp->announce_interval; - } - event_add_timer(router->master, autorp_send_announcement, autorp, - interval, &(autorp->announce_timer)); - if (PIM_DEBUG_AUTORP) - zlog_debug("%s: AutoRP announcement sending enabled", __func__); + + if (autorp->announce_timer == NULL) + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP announcement sending enabled", __func__); + + event_add_timer(router->master, autorp_send_announcement, autorp, interval, + &(autorp->announce_timer)); } static void autorp_announcement_off(struct pim_autorp *autorp) { + if (autorp->announce_timer != NULL) + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP announcement sending disabled", __func__); event_cancel(&(autorp->announce_timer)); - if (PIM_DEBUG_AUTORP) - zlog_debug("%s: AutoRP announcement sending disabled", __func__); } /* Pack the groups of the RP @@ -614,34 +1074,31 @@ static void autorp_announcement_off(struct pim_autorp *autorp) * buf - Pointer to the buffer where to start packing groups * returns - Total group count packed */ -static uint8_t pim_autorp_new_announcement_rp_grps(struct pim_autorp_rp *rp, - uint8_t *buf) +static uint8_t pim_autorp_new_announcement_rp_grps(struct pim_autorp_rp *rp, uint8_t *buf) { - struct prefix_list *plist; - struct prefix_list_entry *ple; struct autorp_pkt_grp *grpp = (struct autorp_pkt_grp *)buf; uint8_t cnt = 0; - in_addr_t taddr; if (is_default_prefix(&(rp->grp))) { /* No group so pack from the prefix list * The grplist should be set and the prefix list exist with at least one group address */ + struct prefix_list *plist; + struct prefix_list_entry *ple; + plist = prefix_list_lookup(AFI_IP, rp->grplist); for (ple = plist->head; ple; ple = ple->next) { - taddr = ntohl(ple->prefix.u.prefix4.s_addr); - if ((taddr & 0xF0000000) == 0xE0000000) { + if (pim_addr_is_multicast(ple->prefix.u.prefix4) && + ple->prefix.prefixlen >= 4) { grpp->addr = ple->prefix.u.prefix4.s_addr; grpp->masklen = ple->prefix.prefixlen; - grpp->negprefix = - (ple->type == PREFIX_PERMIT ? 0 : 1); + grpp->negprefix = (ple->type == PREFIX_PERMIT ? 
0 : 1); grpp->reserved = 0; ++cnt; - grpp = (struct autorp_pkt_grp - *)(buf + - (sizeof(struct autorp_pkt_grp) * - cnt)); + grpp = (struct autorp_pkt_grp *)(buf + + (sizeof(struct autorp_pkt_grp) * + cnt)); } } @@ -661,20 +1118,16 @@ static uint8_t pim_autorp_new_announcement_rp_grps(struct pim_autorp_rp *rp, * buf - Pointer to the buffer where to start packing the RP * returns - Buffer pointer pointing to the start of the next RP */ -static uint8_t *pim_autorp_new_announcement_rp(struct pim_autorp_rp *rp, - uint8_t *buf) +static uint8_t *pim_autorp_new_announcement_rp(struct pim_autorp_rp *rp, uint8_t *buf) { struct autorp_pkt_rp *brp = (struct autorp_pkt_rp *)buf; /* Since this is an in_addr, assume it's already the right byte order */ brp->addr = rp->addr.s_addr; - brp->pimver = PIM_V2; + brp->pimver = AUTORP_PIM_V2; brp->reserved = 0; - brp->grpcnt = - pim_autorp_new_announcement_rp_grps(rp, - buf + sizeof(struct autorp_pkt_rp)); - return buf + sizeof(struct autorp_pkt_rp) + - (brp->grpcnt * sizeof(struct autorp_pkt_grp)); + brp->grpcnt = pim_autorp_new_announcement_rp_grps(rp, buf + sizeof(struct autorp_pkt_rp)); + return buf + sizeof(struct autorp_pkt_rp) + (brp->grpcnt * sizeof(struct autorp_pkt_grp)); } /* Pack the candidate RP's on the announcement packet @@ -683,36 +1136,45 @@ static uint8_t *pim_autorp_new_announcement_rp(struct pim_autorp_rp *rp, * bufsz - Output parameter to track size of packed bytes * returns - Total count of RP's packed */ -static int pim_autorp_new_announcement_rps(struct pim_autorp *autorp, - uint8_t *buf, uint16_t *bufsz) +static int pim_autorp_new_announcement_rps(struct pim_autorp *autorp, uint8_t *buf, uint16_t *bufsz) { int cnt = 0; struct pim_autorp_rp *rp; /* Keep the original buffer pointer to calculate final size after packing */ uint8_t *obuf = buf; - struct prefix_list *plist; - struct prefix_list_entry *ple; - in_addr_t taddr; frr_each_safe (pim_autorp_rp, &(autorp->candidate_rp_list), rp) { - /* We must have an rp address and either group or list in order to pack this RP, so skip this one */ + /* We must have an rp address and either group or list in order to pack this RP, + * so skip this one + */ + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: Evaluating AutoRP candidate %pI4, group range %pFX, group list %s", + __func__, &rp->addr, &rp->grp, rp->grplist); + if (pim_addr_is_any(rp->addr) || - (is_default_prefix(&(rp->grp)) && strlen(rp->grplist) == 0)) + (is_default_prefix(&rp->grp) && strlen(rp->grplist) == 0)) continue; - /* Group is net set, so list must be set, make sure the prefix list exists and has valid multicast groups */ - if (is_default_prefix(&(rp->grp))) { + /* Make sure that either group prefix is set, or that the prefix list exists and has at + * least one valid multicast prefix in it. Only multicast prefixes will be used. 
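Both forms of the group filter accept exactly the prefixes that fall inside 224.0.0.0/4: the removed explicit mask test and the new pim_addr_is_multicast() plus prefixlen >= 4 check. A small standalone illustration of that equivalence using the libc IN_MULTICAST() macro (the sample prefixes are arbitrary):

#include <arpa/inet.h>
#include <assert.h>
#include <netinet/in.h>

static int is_mcast_range(const char *addr, int prefixlen)
{
	struct in_addr a;

	inet_pton(AF_INET, addr, &a);
	/* Old check: (ntohl(a.s_addr) & 0xF0000000) == 0xE0000000
	 * New check: the address is multicast and the mask is at least /4,
	 * so the covered range stays within 224.0.0.0/4.
	 */
	return IN_MULTICAST(ntohl(a.s_addr)) && prefixlen >= 4;
}

int main(void)
{
	assert(is_mcast_range("224.0.1.0", 24)); /* inside 224.0.0.0/4 */
	assert(!is_mcast_range("10.0.0.0", 8));  /* unicast range, rejected */
	return 0;
}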
+ */ + if (is_default_prefix(&rp->grp)) { + struct prefix_list *plist; + struct prefix_list_entry *ple; + plist = prefix_list_lookup(AFI_IP, rp->grplist); if (plist == NULL) continue; plist = prefix_list_lookup(AFI_IP, rp->grplist); for (ple = plist->head; ple; ple = ple->next) { - taddr = ntohl(ple->prefix.u.prefix4.s_addr); - if ((taddr & 0xF0000000) == 0xE0000000) + if (pim_addr_is_multicast(ple->prefix.u.prefix4) && + ple->prefix.prefixlen >= 4) break; } - /* If we went through the entire list without finding a multicast prefix, then skip this RP */ + /* If we went through the entire list without finding a multicast prefix, + * then skip this RP + */ if (ple == NULL) continue; } @@ -721,6 +1183,10 @@ static int pim_autorp_new_announcement_rps(struct pim_autorp *autorp, ++cnt; /* This will return the buffer pointer at the location to start packing the next RP */ buf = pim_autorp_new_announcement_rp(rp, buf); + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP candidate %pI4 added to announcement", __func__, + &rp->addr); } if (cnt > 0) @@ -729,7 +1195,9 @@ static int pim_autorp_new_announcement_rps(struct pim_autorp *autorp, return cnt; } -/* Build the new announcement packet. If there is a packet to send, restart the send timer with a short wait */ +/* Build the new announcement packet. If there is a packet to send, restart the send timer + * with a short wait + */ static void pim_autorp_new_announcement(struct pim_instance *pim) { struct pim_autorp *autorp = pim->autorp; @@ -739,70 +1207,87 @@ static void pim_autorp_new_announcement(struct pim_instance *pim) /* First disable any existing send timer */ autorp_announcement_off(autorp); - if (!autorp->annouce_pkt) { - /* - * First time building, allocate the space - * Allocate the max packet size of 65536 so we don't need to resize later. - * This should be ok since we are only allocating the memory once for a single packet (potentially per vrf) - */ - autorp->annouce_pkt = XCALLOC(MTYPE_PIM_AUTORP_ANNOUNCE, 65536); - } + /* + * First time building, allocate the space + * Allocate the max packet size of 65536 so we don't need to resize later. 
+ * This should be ok since we are only allocating the memory once for a single packet + * (potentially per vrf) + */ + if (!autorp->announce_pkt) + autorp->announce_pkt = XCALLOC(MTYPE_PIM_AUTORP_ANNOUNCE, 65536); - autorp->annouce_pkt_sz = 0; + autorp->announce_pkt_sz = 0; holdtime = autorp->announce_holdtime; - if (holdtime == DEFAULT_ANNOUNCE_HOLDTIME) + if (holdtime == DEFAULT_AUTORP_ANNOUNCE_HOLDTIME) holdtime = autorp->announce_interval * 3; if (holdtime > UINT16_MAX) holdtime = UINT16_MAX; - hdr = (struct autorp_pkt_hdr *)autorp->annouce_pkt; + hdr = (struct autorp_pkt_hdr *)autorp->announce_pkt; hdr->version = AUTORP_VERSION; hdr->type = AUTORP_ANNOUNCEMENT_TYPE; hdr->holdtime = htons((uint16_t)holdtime); hdr->reserved = 0; - hdr->rpcnt = - pim_autorp_new_announcement_rps(autorp, - autorp->annouce_pkt + - sizeof(struct autorp_pkt_hdr), - &(autorp->annouce_pkt_sz)); + hdr->rpcnt = pim_autorp_new_announcement_rps(autorp, + autorp->announce_pkt + + sizeof(struct autorp_pkt_hdr), + &(autorp->announce_pkt_sz)); /* Still need to add on the size of the header */ - autorp->annouce_pkt_sz += sizeof(struct autorp_pkt_hdr); + autorp->announce_pkt_sz += sizeof(struct autorp_pkt_hdr); /* Only turn on the announcement timer if we have a packet to send */ - if (autorp->annouce_pkt_sz >= MIN_AUTORP_PKT_SZ) + if (autorp->announce_pkt_sz >= MIN_AUTORP_PKT_SZ) autorp_announcement_on(autorp); } +void pim_autorp_prefix_list_update(struct pim_instance *pim, struct prefix_list *plist) +{ + struct pim_autorp_rp *rp = NULL; + struct pim_autorp *autorp = NULL; + + autorp = pim->autorp; + if (autorp == NULL) + return; + + /* Search for a candidate RP using this prefix list */ + frr_each_safe (pim_autorp_rp, &(autorp->candidate_rp_list), rp) { + if (strmatch(rp->grplist, plist->name)) + break; + } + + /* If we broke out of the loop early because we found a match, then rebuild the announcement */ + if (rp != NULL) + pim_autorp_new_announcement(pim); +} + bool pim_autorp_rm_candidate_rp(struct pim_instance *pim, pim_addr rpaddr) { struct pim_autorp *autorp = pim->autorp; struct pim_autorp_rp *rp; struct pim_autorp_rp find = { .addr = rpaddr }; - rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), - (const struct pim_autorp_rp *)&find); + rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find); if (!rp) return false; pim_autorp_rp_del(&(autorp->candidate_rp_list), rp); - pim_autorp_rp_free(rp); + pim_autorp_rp_free(rp, false); pim_autorp_new_announcement(pim); return true; } -void pim_autorp_add_candidate_rp_group(struct pim_instance *pim, - pim_addr rpaddr, struct prefix group) +void pim_autorp_add_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr, + struct prefix group) { struct pim_autorp *autorp = pim->autorp; struct pim_autorp_rp *rp; struct pim_autorp_rp find = { .addr = rpaddr }; - rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), - (const struct pim_autorp_rp *)&find); + rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find); if (!rp) { - rp = XCALLOC(MTYPE_PIM_AUTORP_CRP, sizeof(*rp)); + rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(*rp)); memset(rp, 0, sizeof(struct pim_autorp_rp)); rp->autorp = autorp; memcpy(&(rp->addr), &rpaddr, sizeof(pim_addr)); @@ -817,15 +1302,13 @@ void pim_autorp_add_candidate_rp_group(struct pim_instance *pim, pim_autorp_new_announcement(pim); } -bool pim_autorp_rm_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr, - struct prefix group) +bool pim_autorp_rm_candidate_rp_group(struct 
pim_instance *pim, pim_addr rpaddr, struct prefix group) { struct pim_autorp *autorp = pim->autorp; struct pim_autorp_rp *rp; struct pim_autorp_rp find = { .addr = rpaddr }; - rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), - (const struct pim_autorp_rp *)&find); + rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find); if (!rp) return false; @@ -834,17 +1317,15 @@ bool pim_autorp_rm_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr, return true; } -void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim, - pim_addr rpaddr, const char *plist) +void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, const char *plist) { struct pim_autorp *autorp = pim->autorp; struct pim_autorp_rp *rp; struct pim_autorp_rp find = { .addr = rpaddr }; - rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), - (const struct pim_autorp_rp *)&find); + rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find); if (!rp) { - rp = XCALLOC(MTYPE_PIM_AUTORP_CRP, sizeof(*rp)); + rp = XCALLOC(MTYPE_PIM_AUTORP_RP, sizeof(*rp)); memset(rp, 0, sizeof(struct pim_autorp_rp)); rp->autorp = autorp; memcpy(&(rp->addr), &rpaddr, sizeof(pim_addr)); @@ -859,15 +1340,13 @@ void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim, pim_autorp_new_announcement(pim); } -bool pim_autorp_rm_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, - const char *plist) +bool pim_autorp_rm_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, const char *plist) { struct pim_autorp *autorp = pim->autorp; struct pim_autorp_rp *rp; struct pim_autorp_rp find = { .addr = rpaddr }; - rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), - (const struct pim_autorp_rp *)&find); + rp = pim_autorp_rp_find(&(autorp->candidate_rp_list), (const struct pim_autorp_rp *)&find); if (!rp) return false; @@ -880,7 +1359,7 @@ void pim_autorp_announce_scope(struct pim_instance *pim, uint8_t scope) { struct pim_autorp *autorp = pim->autorp; - scope = (scope == 0 ? DEFAULT_ANNOUNCE_SCOPE : scope); + scope = (scope == 0 ? DEFAULT_AUTORP_ANNOUNCE_SCOPE : scope); if (autorp->announce_scope != scope) { autorp->announce_scope = scope; pim_autorp_new_announcement(pim); @@ -891,7 +1370,7 @@ void pim_autorp_announce_interval(struct pim_instance *pim, uint16_t interval) { struct pim_autorp *autorp = pim->autorp; - interval = (interval == 0 ? DEFAULT_ANNOUNCE_INTERVAL : interval); + interval = (interval == 0 ? 
DEFAULT_AUTORP_ANNOUNCE_INTERVAL : interval); if (autorp->announce_interval != interval) { autorp->announce_interval = interval; pim_autorp_new_announcement(pim); @@ -908,6 +1387,16 @@ void pim_autorp_announce_holdtime(struct pim_instance *pim, int32_t holdtime) } } +void pim_autorp_send_discovery_apply(struct pim_autorp *autorp) +{ + if (!autorp->mapping_agent_addrsel.run || !autorp->send_rp_discovery) { + autorp_send_discovery_off(autorp); + return; + } + + autorp_send_discovery_on(autorp); +} + void pim_autorp_add_ifp(struct interface *ifp) { /* Add a new interface for autorp @@ -923,17 +1412,15 @@ void pim_autorp_add_ifp(struct interface *ifp) struct pim_interface *pim_ifp; pim_ifp = ifp->info; - if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp && - pim_ifp->pim_enable) { + if (CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE) && pim_ifp && pim_ifp->pim_enable) { pim = pim_ifp->pim; if (pim && pim->autorp && pim->autorp->do_discovery) { if (PIM_DEBUG_AUTORP) zlog_debug("%s: Adding interface %s to AutoRP, joining AutoRP groups", __func__, ifp->name); - if (!pim_autorp_join_groups(ifp)) { - zlog_err("Could not join AutoRP groups, errno=%d, %s", - errno, safe_strerror(errno)); - } + if (!pim_autorp_join_groups(ifp)) + zlog_warn("Could not join AutoRP groups, errno=%d, %s", errno, + safe_strerror(errno)); } } } @@ -954,10 +1441,9 @@ void pim_autorp_rm_ifp(struct interface *ifp) if (PIM_DEBUG_AUTORP) zlog_debug("%s: Removing interface %s from AutoRP, leaving AutoRP groups", __func__, ifp->name); - if (!pim_autorp_leave_groups(ifp)) { - zlog_err("Could not leave AutoRP groups, errno=%d, %s", - errno, safe_strerror(errno)); - } + if (!pim_autorp_leave_groups(ifp)) + zlog_warn("Could not leave AutoRP groups, errno=%d, %s", errno, + safe_strerror(errno)); } } } @@ -1008,14 +1494,22 @@ void pim_autorp_init(struct pim_instance *pim) autorp->read_event = NULL; autorp->announce_timer = NULL; autorp->do_discovery = false; + autorp->send_discovery_timer = NULL; + autorp->send_rp_discovery = false; pim_autorp_rp_init(&(autorp->discovery_rp_list)); pim_autorp_rp_init(&(autorp->candidate_rp_list)); - autorp->announce_scope = DEFAULT_ANNOUNCE_SCOPE; - autorp->announce_interval = DEFAULT_ANNOUNCE_INTERVAL; - autorp->announce_holdtime = DEFAULT_ANNOUNCE_HOLDTIME; + pim_autorp_rp_init(&(autorp->mapping_rp_list)); + pim_autorp_rp_init(&autorp->advertised_rp_list); + autorp->announce_scope = DEFAULT_AUTORP_ANNOUNCE_SCOPE; + autorp->announce_interval = DEFAULT_AUTORP_ANNOUNCE_INTERVAL; + autorp->announce_holdtime = DEFAULT_AUTORP_ANNOUNCE_HOLDTIME; + autorp->discovery_scope = DEFAULT_AUTORP_DISCOVERY_SCOPE; + autorp->discovery_interval = DEFAULT_AUTORP_DISCOVERY_INTERVAL; + autorp->discovery_holdtime = DEFAULT_AUTORP_DISCOVERY_HOLDTIME; + cand_addrsel_clear(&(autorp->mapping_agent_addrsel)); if (!pim_autorp_socket_enable(autorp)) { - zlog_err("%s: AutoRP failed to initialize", __func__); + zlog_warn("%s: AutoRP failed to initialize", __func__); return; } @@ -1032,24 +1526,20 @@ void pim_autorp_finish(struct pim_instance *pim) struct pim_autorp *autorp = pim->autorp; autorp_read_off(autorp); + autorp_announcement_off(autorp); + autorp_send_discovery_off(autorp); pim_autorp_free(autorp); - if (pim_autorp_socket_disable(autorp)) { - if (PIM_DEBUG_AUTORP) - zlog_debug("%s: AutoRP Finished", __func__); - } else - zlog_err("%s: AutoRP failed to finish", __func__); - + pim_autorp_socket_disable(autorp); XFREE(MTYPE_PIM_AUTORP, pim->autorp); + + if (PIM_DEBUG_AUTORP) + zlog_debug("%s: AutoRP Finished", 
__func__); } int pim_autorp_config_write(struct pim_instance *pim, struct vty *vty) { struct pim_autorp_rp *rp; struct pim_autorp *autorp = pim->autorp; - char interval_str[16] = { 0 }; - char scope_str[16] = { 0 }; - char holdtime_str[32] = { 0 }; - char grp_str[64] = { 0 }; int writes = 0; if (!autorp->do_discovery) { @@ -1057,24 +1547,17 @@ int pim_autorp_config_write(struct pim_instance *pim, struct vty *vty) ++writes; } - if (autorp->announce_interval != DEFAULT_ANNOUNCE_INTERVAL) { - snprintf(interval_str, sizeof(interval_str), " interval %u", - autorp->announce_interval); - } - - if (autorp->announce_scope != DEFAULT_ANNOUNCE_SCOPE) { - snprintf(scope_str, sizeof(scope_str), " scope %u", - autorp->announce_scope); - } - - if (autorp->announce_holdtime != DEFAULT_ANNOUNCE_HOLDTIME) { - snprintf(holdtime_str, sizeof(holdtime_str), " holdtime %u", - autorp->announce_holdtime); - } - - if (strlen(interval_str) || strlen(scope_str) || strlen(holdtime_str)) { - vty_out(vty, " autorp announce%s%s%s\n", interval_str, - scope_str, holdtime_str); + if (autorp->announce_interval != DEFAULT_AUTORP_ANNOUNCE_INTERVAL || + autorp->announce_scope != DEFAULT_AUTORP_ANNOUNCE_SCOPE || + autorp->announce_holdtime != DEFAULT_AUTORP_ANNOUNCE_HOLDTIME) { + vty_out(vty, " autorp announce"); + if (autorp->announce_interval != DEFAULT_AUTORP_ANNOUNCE_INTERVAL) + vty_out(vty, " interval %u", autorp->announce_interval); + if (autorp->announce_scope != DEFAULT_AUTORP_ANNOUNCE_SCOPE) + vty_out(vty, " scope %u", autorp->announce_scope); + if (autorp->announce_holdtime != DEFAULT_AUTORP_ANNOUNCE_HOLDTIME) + vty_out(vty, " holdtime %u", autorp->announce_holdtime); + vty_out(vty, "\n"); ++writes; } @@ -1084,83 +1567,371 @@ int pim_autorp_config_write(struct pim_instance *pim, struct vty *vty) (is_default_prefix(&(rp->grp)) && strlen(rp->grplist) == 0)) continue; - /* Don't make sure the prefix list has multicast groups, user may not have created it yet */ - + vty_out(vty, " autorp announce %pI4", &(rp->addr)); if (!is_default_prefix(&(rp->grp))) - snprintfrr(grp_str, sizeof(grp_str), "%pFX", &(rp->grp)); + vty_out(vty, " %pFX", &(rp->grp)); else - snprintfrr(grp_str, sizeof(grp_str), "group-list %s", - rp->grplist); - - vty_out(vty, " autorp announce %pI4 %s\n", &(rp->addr), grp_str); + vty_out(vty, " group-list %s", rp->grplist); + vty_out(vty, "\n"); ++writes; } + if (autorp->send_rp_discovery) { + if (autorp->mapping_agent_addrsel.cfg_enable) { + vty_out(vty, " autorp send-rp-discovery"); + switch (autorp->mapping_agent_addrsel.cfg_mode) { + case CAND_ADDR_LO: + break; + case CAND_ADDR_ANY: + vty_out(vty, " source any"); + break; + case CAND_ADDR_IFACE: + vty_out(vty, " source interface %s", + autorp->mapping_agent_addrsel.cfg_ifname); + break; + case CAND_ADDR_EXPLICIT: + vty_out(vty, " source address %pPA", + &autorp->mapping_agent_addrsel.cfg_addr); + break; + } + vty_out(vty, "\n"); + ++writes; + } + + if (autorp->discovery_interval != DEFAULT_AUTORP_DISCOVERY_INTERVAL || + autorp->discovery_scope != DEFAULT_AUTORP_DISCOVERY_SCOPE || + autorp->discovery_holdtime != DEFAULT_AUTORP_DISCOVERY_HOLDTIME) { + vty_out(vty, " autorp send-rp-discovery"); + if (autorp->discovery_interval != DEFAULT_AUTORP_DISCOVERY_INTERVAL) + vty_out(vty, " interval %u", autorp->discovery_interval); + if (autorp->discovery_scope != DEFAULT_AUTORP_DISCOVERY_SCOPE) + vty_out(vty, " scope %u", autorp->discovery_scope); + if (autorp->discovery_holdtime != DEFAULT_AUTORP_DISCOVERY_HOLDTIME) + vty_out(vty, " holdtime %u", 
autorp->discovery_holdtime); + vty_out(vty, "\n"); + ++writes; + } + } + return writes; } -void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim, - json_object *json) +static void pim_autorp_show_autorp_json(struct pim_autorp *autorp, const char *component, + json_object *json, struct ttable *cand_table) { struct pim_autorp_rp *rp; + + if (!component || strmatch(component, "discovery")) { + json_object *disc_obj; + + disc_obj = json_object_new_object(); + json_object_boolean_add(disc_obj, "enabled", autorp->do_discovery); + if (autorp->do_discovery) { + json_object *rplist_obj; + + rplist_obj = json_object_new_object(); + frr_each (pim_autorp_rp, &(autorp->discovery_rp_list), rp) { + json_object *rp_obj; + json_object *grp_arr; + + rp_obj = json_object_new_object(); + json_object_string_addf(rp_obj, "rpAddress", "%pI4", &rp->addr); + json_object_int_add(rp_obj, "holdtime", rp->holdtime); + grp_arr = json_object_new_array(); + + if (strlen(rp->grplist)) { + struct prefix_list *pl; + struct prefix_list_entry *ple; + + pl = prefix_list_lookup(AFI_IP, rp->grplist); + if (pl == NULL) + continue; + + for (ple = pl->head; ple != NULL; ple = ple->next) { + json_object *grp_obj; + + grp_obj = json_object_new_object(); + json_object_boolean_add(grp_obj, "negative", + ple->type == PREFIX_DENY); + json_object_string_addf(grp_obj, "prefix", "%pFX", + &ple->prefix); + json_object_array_add(grp_arr, grp_obj); + } + } else { + json_object *grp_obj; + + grp_obj = json_object_new_object(); + json_object_boolean_add(grp_obj, "negative", false); + json_object_string_addf(grp_obj, "prefix", "%pFX", &rp->grp); + json_object_array_add(grp_arr, grp_obj); + } + + json_object_object_add(rp_obj, "groupRanges", grp_arr); + json_object_object_addf(rplist_obj, rp_obj, "%pI4", &rp->addr); + } + json_object_object_add(disc_obj, "rpList", rplist_obj); + } + json_object_object_add(json, "discovery", disc_obj); + } + + if (!component || strmatch(component, "candidate")) { + json_object *announce_obj; + + announce_obj = json_object_new_object(); + json_object_boolean_add(announce_obj, "enabled", + pim_autorp_rp_count(&autorp->candidate_rp_list) > 0); + if (pim_autorp_rp_count(&autorp->candidate_rp_list) > 0) { + json_object_int_add(announce_obj, "scope", autorp->announce_scope); + json_object_int_add(announce_obj, "interval", autorp->announce_interval); + json_object_int_add(announce_obj, "holdtime", + (autorp->announce_holdtime == + DEFAULT_AUTORP_ANNOUNCE_HOLDTIME + ? 
(autorp->announce_interval * 3) + : autorp->announce_holdtime)); + json_object_object_add(announce_obj, "rpList", + ttable_json_with_json_text(cand_table, "sss", + "rpAddress|groupRange|prefixList")); + } + json_object_object_add(json, "announce", announce_obj); + } + + if (!component || strmatch(component, "mapping-agent")) { + json_object *adv_obj; + + adv_obj = json_object_new_object(); + json_object_boolean_add(adv_obj, "enabled", autorp->send_rp_discovery); + if (autorp->send_rp_discovery) { + json_object *rplist_obj; + + json_object_boolean_add(adv_obj, "active", autorp->mapping_agent_active); + json_object_int_add(adv_obj, "scope", autorp->discovery_scope); + json_object_int_add(adv_obj, "interval", autorp->discovery_interval); + json_object_int_add(adv_obj, "holdtime", autorp->discovery_holdtime); + switch (autorp->mapping_agent_addrsel.cfg_mode) { + case CAND_ADDR_LO: + json_object_string_add(adv_obj, "source", "loopback"); + break; + case CAND_ADDR_ANY: + json_object_string_add(adv_obj, "source", "any"); + break; + case CAND_ADDR_IFACE: + json_object_string_add(adv_obj, "source", "interface"); + json_object_string_add(adv_obj, "interface", + autorp->mapping_agent_addrsel.cfg_ifname); + break; + case CAND_ADDR_EXPLICIT: + json_object_string_add(adv_obj, "source", "address"); + break; + } + json_object_string_addf(adv_obj, "address", "%pPA", + &autorp->mapping_agent_addrsel.run_addr); + + rplist_obj = json_object_new_object(); + frr_each (pim_autorp_rp, &(autorp->advertised_rp_list), rp) { + json_object *rp_obj; + json_object *grp_arr; + struct pim_autorp_grppfix *grppfix; + + rp_obj = json_object_new_object(); + json_object_string_addf(rp_obj, "rpAddress", "%pI4", &rp->addr); + grp_arr = json_object_new_array(); + frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grppfix) { + json_object *grp_obj; + + grp_obj = json_object_new_object(); + json_object_boolean_add(grp_obj, "negative", + grppfix->negative); + json_object_string_addf(grp_obj, "prefix", "%pFX", + &grppfix->grp); + json_object_array_add(grp_arr, grp_obj); + } + json_object_object_add(rp_obj, "groupRanges", grp_arr); + json_object_object_addf(rplist_obj, rp_obj, "%pI4", &rp->addr); + } + json_object_object_add(adv_obj, "rpList", rplist_obj); + } + json_object_object_add(json, "mapping-agent", adv_obj); + } +} + +void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim, const char *component, + json_object *json) +{ struct pim_autorp *autorp = pim->autorp; - struct ttable *tt = NULL; - char *table = NULL; - char grp_str[64] = { 0 }; - char plist_str[64] = { 0 }; - json_object *annouce_jobj; - - /* Prepare table. */ - tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); - ttable_add_row(tt, "RP address|group|prefix-list"); - tt->style.cell.rpad = 2; - tt->style.corner = '+'; - ttable_restyle(tt); + struct pim_autorp_rp *rp; + struct ttable *cand_table = NULL; + struct ttable *adv_table = NULL; + struct ttable *disc_table = NULL; + char *tmp; - frr_each_safe (pim_autorp_rp, &(autorp->candidate_rp_list), rp) { - if (!is_default_prefix(&(rp->grp))) - snprintfrr(grp_str, sizeof(grp_str), "%pFX", &(rp->grp)); - else - snprintfrr(plist_str, sizeof(plist_str), "%s", - rp->grplist); + if (autorp == NULL) + return; - ttable_add_row(tt, "%pI4|%s|%s", &(rp->addr), grp_str, - plist_str); + /* We may use the candidate table in the json output, so prepare it first. 
*/ + if (!component || strmatch(component, "candidate")) { + cand_table = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(cand_table, "RP address|Group Range|Prefix-List"); + cand_table->style.cell.rpad = 2; + cand_table->style.corner = '+'; + ttable_restyle(cand_table); + + frr_each (pim_autorp_rp, &(autorp->candidate_rp_list), rp) { + if (strlen(rp->grplist)) + ttable_add_row(cand_table, "%pI4|%s|%s", &(rp->addr), "-", + rp->grplist); + else + ttable_add_row(cand_table, "%pI4|%pFX|%s", &(rp->addr), &(rp->grp), + "-"); + } } if (json) { - json_object_boolean_add(json, "discoveryEnabled", - autorp->do_discovery); - - annouce_jobj = json_object_new_object(); - json_object_int_add(annouce_jobj, "scope", - autorp->announce_scope); - json_object_int_add(annouce_jobj, "interval", - autorp->announce_interval); - json_object_int_add(annouce_jobj, "holdtime", - autorp->announce_holdtime); - json_object_object_add(annouce_jobj, "rpList", - ttable_json_with_json_text( - tt, "sss", - "rpAddress|group|prefixList")); - - json_object_object_add(json, "announce", annouce_jobj); - } else { - vty_out(vty, "AutoRP Discovery is %sabled\n", - (autorp->do_discovery ? "en" : "dis")); - vty_out(vty, "AutoRP Candidate RPs\n"); - vty_out(vty, " interval %us, scope %u, holdtime %us\n", - autorp->announce_interval, autorp->announce_scope, - (autorp->announce_holdtime == DEFAULT_ANNOUNCE_HOLDTIME - ? (autorp->announce_interval * 3) - : autorp->announce_holdtime)); + pim_autorp_show_autorp_json(autorp, component, json, cand_table); + if (cand_table) + ttable_del(cand_table); + return; + } - vty_out(vty, "\n"); + /* Prepare discovered RP's table. */ + if (!component || strmatch(component, "discovery")) { + disc_table = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(disc_table, "RP address|Group Range"); + disc_table->style.cell.rpad = 2; + disc_table->style.corner = '+'; + ttable_restyle(disc_table); + + frr_each (pim_autorp_rp, &(autorp->discovery_rp_list), rp) { + if (strlen(rp->grplist)) { + struct prefix_list *pl; + struct prefix_list_entry *ple; + bool first = true; + + pl = prefix_list_lookup(AFI_IP, rp->grplist); + + if (pl == NULL) { + ttable_add_row(disc_table, + "%pI4|failed to find prefix list %s", + &(rp->addr), rp->grplist); + continue; + } - table = ttable_dump(tt, "\n"); - vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP_TTABLE, table); + for (ple = pl->head; ple != NULL; ple = ple->next) { + if (first) + ttable_add_row(disc_table, "%pI4|%s%pFX", + &(rp->addr), + (ple->type == PREFIX_DENY ? "!" + : " "), + &ple->prefix); + else + ttable_add_row(disc_table, "%s|%s%pFX", " ", + (ple->type == PREFIX_DENY ? "!" + : " "), + &ple->prefix); + first = false; + } + } else + ttable_add_row(disc_table, "%pI4| %pFX", &(rp->addr), &(rp->grp)); + } + } + + /* Prepare discovery RP's table (mapping-agent). */ + if (!component || strmatch(component, "mapping-agent")) { + adv_table = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(adv_table, "RP address|Group Range"); + adv_table->style.cell.rpad = 2; + adv_table->style.corner = '+'; + ttable_restyle(adv_table); + + frr_each (pim_autorp_rp, &(autorp->advertised_rp_list), rp) { + struct pim_autorp_grppfix *grppfix; + bool first = true; + + frr_each (pim_autorp_grppfix, &rp->grp_pfix_list, grppfix) { + if (first) + ttable_add_row(adv_table, "%pI4|%s%pFX", &rp->addr, + grppfix->negative ? "!" : " ", &grppfix->grp); + else + ttable_add_row(adv_table, "%s|%s%pFX", " ", + grppfix->negative ? "!" 
: " ", &grppfix->grp); + first = false; + } + } + } + + if (!component || strmatch(component, "discovery")) { + vty_out(vty, "AutoRP Discovery is %sabled\n", (autorp->do_discovery ? "en" : "dis")); + if (autorp->do_discovery) { + tmp = ttable_dump(disc_table, "\n"); + vty_out(vty, "\n"); + vty_out(vty, "Discovered RP's (count=%u)\n", + (uint32_t)pim_autorp_rp_count(&autorp->discovery_rp_list)); + vty_out(vty, "%s\n", tmp); + XFREE(MTYPE_TMP_TTABLE, tmp); + } else + vty_out(vty, "\n"); + } + + if (!component || strmatch(component, "candidate")) { + vty_out(vty, "AutoRP Announcement is %sabled\n", + (pim_autorp_rp_count(&autorp->candidate_rp_list) > 0 ? "en" : "dis")); + if (pim_autorp_rp_count(&autorp->candidate_rp_list) > 0) { + tmp = ttable_dump(cand_table, "\n"); + vty_out(vty, " interval %us scope %u holdtime %us\n", + autorp->announce_interval, autorp->announce_scope, + (autorp->announce_holdtime == DEFAULT_AUTORP_ANNOUNCE_HOLDTIME + ? (autorp->announce_interval * 3) + : autorp->announce_holdtime)); + vty_out(vty, "\n"); + vty_out(vty, "Candidate RP's (count=%u)\n", + (uint32_t)pim_autorp_rp_count(&autorp->candidate_rp_list)); + vty_out(vty, "%s\n", tmp); + XFREE(MTYPE_TMP_TTABLE, tmp); + } else + vty_out(vty, "\n"); + } + + if (!component || strmatch(component, "mapping-agent")) { + vty_out(vty, "AutoRP Mapping-Agent is %sabled\n", + (autorp->send_rp_discovery ? "en" : "dis")); + if (autorp->send_rp_discovery) { + vty_out(vty, " interval %us scope %u holdtime %us\n", + autorp->discovery_interval, autorp->discovery_scope, + autorp->discovery_holdtime); + vty_out(vty, " source %pPA", &autorp->mapping_agent_addrsel.run_addr); + switch (autorp->mapping_agent_addrsel.cfg_mode) { + case CAND_ADDR_LO: + vty_out(vty, " (loopback)"); + break; + case CAND_ADDR_ANY: + vty_out(vty, " (any)"); + break; + case CAND_ADDR_IFACE: + vty_out(vty, " (interface %s)", + autorp->mapping_agent_addrsel.cfg_ifname); + break; + case CAND_ADDR_EXPLICIT: + vty_out(vty, " (explicit address)"); + break; + } + vty_out(vty, "\n"); + + if (autorp->mapping_agent_active) { + tmp = ttable_dump(adv_table, "\n"); + vty_out(vty, "\n"); + vty_out(vty, "Advertised RP's (count=%u)\n", + (uint32_t)pim_autorp_rp_count(&autorp->advertised_rp_list)); + vty_out(vty, "%s\n", tmp); + XFREE(MTYPE_TMP_TTABLE, tmp); + } else + vty_out(vty, " Mapping agent is inactive\n"); + } else + vty_out(vty, "\n"); } - ttable_del(tt); + if (cand_table) + ttable_del(cand_table); + if (adv_table) + ttable_del(adv_table); + if (disc_table) + ttable_del(disc_table); } diff --git a/pimd/pim_autorp.h b/pimd/pim_autorp.h index a0b029d00ae6..e4c653010924 100644 --- a/pimd/pim_autorp.h +++ b/pimd/pim_autorp.h @@ -14,16 +14,21 @@ #define AUTORP_VERSION 1 #define AUTORP_ANNOUNCEMENT_TYPE 1 #define AUTORP_DISCOVERY_TYPE 2 -#define PIM_VUNKNOWN 0 -#define PIM_V1 1 -#define PIM_V2 2 -#define PIM_V1_2 3 +#define AUTORP_PIM_VUNKNOWN 0 +#define AUTORP_PIM_V1 1 +#define AUTORP_PIM_V2 2 +#define AUTORP_PIM_V1_2 3 -#define DEFAULT_ANNOUNCE_INTERVAL 60 -#define DEFAULT_ANNOUNCE_SCOPE 31 -#define DEFAULT_ANNOUNCE_HOLDTIME -1 +#define DEFAULT_AUTORP_ANNOUNCE_INTERVAL 60 +#define DEFAULT_AUTORP_ANNOUNCE_SCOPE 31 +#define DEFAULT_AUTORP_ANNOUNCE_HOLDTIME -1 + +#define DEFAULT_AUTORP_DISCOVERY_INTERVAL 60 +#define DEFAULT_AUTORP_DISCOVERY_SCOPE 31 +#define DEFAULT_AUTORP_DISCOVERY_HOLDTIME 180 PREDECL_SORTLIST_UNIQ(pim_autorp_rp); +PREDECL_SORTLIST_UNIQ(pim_autorp_grppfix); struct autorp_pkt_grp { #if __BYTE_ORDER == __LITTLE_ENDIAN @@ -79,7 +84,15 @@ struct pim_autorp_rp { 
struct event *hold_timer; struct prefix grp; char grplist[32]; - struct pim_autorp_rp_item list; + struct pim_autorp_grppfix_head grp_pfix_list; + struct pim_autorp_rp_item item; +}; + +struct pim_autorp_grppfix { + struct prefix grp; + struct in_addr rp; + bool negative; + struct pim_autorp_grppfix_item item; }; struct pim_autorp { @@ -96,13 +109,18 @@ struct pim_autorp { struct event *announce_timer; /* Event for sending discovery packets*/ - /* struct event *discovery_timer; */ + struct event *send_discovery_timer; /* Flag enabling reading discovery packets */ bool do_discovery; /* Flag enabling mapping agent (reading announcements and sending discovery)*/ - /* bool do_mapping; */ + bool send_rp_discovery; + + /* Flag indicating if we are sending discovery messages (true) or if a higher IP mapping + * agent preemptied our sending (false) + */ + bool mapping_agent_active; /* List of RP's in received discovery packets */ struct pim_autorp_rp_head discovery_rp_list; @@ -111,7 +129,12 @@ struct pim_autorp { struct pim_autorp_rp_head candidate_rp_list; /* List of announced RP's to send in discovery packets */ - /* struct pim_autorp_rp_head mapping_rp_list; */ + struct pim_autorp_rp_head mapping_rp_list; + + /* List of the last advertised RP's, via mapping agent discovery + * This is only filled if a discovery message was sent + */ + struct pim_autorp_rp_head advertised_rp_list; /* Packet parameters for sending announcement packets */ uint8_t announce_scope; @@ -119,32 +142,32 @@ struct pim_autorp { int32_t announce_holdtime; /* Pre-built announcement packet, only changes when configured RP's or packet parameters change */ - uint8_t *annouce_pkt; - uint16_t annouce_pkt_sz; - - /* TODO: Packet parameters for sending discovery packets - * int discovery_scope; - * int discovery_interval; - * int discovery_holdtime; - */ + uint8_t *announce_pkt; + uint16_t announce_pkt_sz; + + /* Packet parameters for sending discovery packets */ + uint8_t discovery_scope; + uint16_t discovery_interval; + uint16_t discovery_holdtime; + struct cand_addrsel mapping_agent_addrsel; }; #define AUTORP_GRPLEN 6 #define AUTORP_RPLEN 6 #define AUTORP_HDRLEN 8 +void pim_autorp_prefix_list_update(struct pim_instance *pim, struct prefix_list *plist); bool pim_autorp_rm_candidate_rp(struct pim_instance *pim, pim_addr rpaddr); -void pim_autorp_add_candidate_rp_group(struct pim_instance *pim, - pim_addr rpaddr, struct prefix group); +void pim_autorp_add_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr, + struct prefix group); bool pim_autorp_rm_candidate_rp_group(struct pim_instance *pim, pim_addr rpaddr, struct prefix group); -void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim, - pim_addr rpaddr, const char *plist); -bool pim_autorp_rm_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, - const char *plist); +void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, const char *plist); +bool pim_autorp_rm_candidate_rp_plist(struct pim_instance *pim, pim_addr rpaddr, const char *plist); void pim_autorp_announce_scope(struct pim_instance *pim, uint8_t scope); void pim_autorp_announce_interval(struct pim_instance *pim, uint16_t interval); void pim_autorp_announce_holdtime(struct pim_instance *pim, int32_t holdtime); +void pim_autorp_send_discovery_apply(struct pim_autorp *autorp); void pim_autorp_add_ifp(struct interface *ifp); void pim_autorp_rm_ifp(struct interface *ifp); void pim_autorp_start_discovery(struct pim_instance *pim); @@ -152,7 +175,7 @@ void 
pim_autorp_stop_discovery(struct pim_instance *pim); void pim_autorp_init(struct pim_instance *pim); void pim_autorp_finish(struct pim_instance *pim); int pim_autorp_config_write(struct pim_instance *pim, struct vty *vty); -void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim, +void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim, const char *component, json_object *json); #endif diff --git a/pimd/pimd.c b/pimd/pimd.c index db619748000e..a390378a5abf 100644 --- a/pimd/pimd.c +++ b/pimd/pimd.c @@ -35,6 +35,7 @@ #include "pim_zlookup.h" #include "pim_zebra.h" #include "pim_mlag.h" +#include "pim_autorp.h" #if MAXVIFS > 256 CPP_NOTICE("Work needs to be done to make this work properly via the pim mroute socket\n"); @@ -70,6 +71,9 @@ void pim_prefix_list_update(struct prefix_list *plist) pim_rp_prefix_list_update(pim, plist); pim_ssm_prefix_list_update(pim, plist); pim_upstream_spt_prefix_list_update(pim, plist); +#if PIM_IPV == 4 + pim_autorp_prefix_list_update(pim, plist); +#endif } } From 13c0722b5ccc53b00d07388f7a2bddd44f05b17b Mon Sep 17 00:00:00 2001 From: Nathan Bahr Date: Fri, 1 Nov 2024 19:15:52 +0000 Subject: [PATCH 12/69] tests: PIM AutoRP tests expanded Now with a full AutoRP implementation, we can test AutoRP in a full network setup beginning with candidate RP announcements all the way through discovery and active RP selection. Signed-off-by: Nathan Bahr --- tests/topotests/lib/pim.py | 40 +- tests/topotests/pim_autorp/r1/frr.conf | 12 +- tests/topotests/pim_autorp/r2/frr.conf | 12 +- tests/topotests/pim_autorp/r3/frr.conf | 26 + tests/topotests/pim_autorp/r4/frr.conf | 26 + tests/topotests/pim_autorp/test_pim_autorp.py | 800 +++++++++++++----- 6 files changed, 696 insertions(+), 220 deletions(-) create mode 100644 tests/topotests/pim_autorp/r3/frr.conf create mode 100644 tests/topotests/pim_autorp/r4/frr.conf diff --git a/tests/topotests/lib/pim.py b/tests/topotests/lib/pim.py index 369a794ebc7c..349b82aab436 100644 --- a/tests/topotests/lib/pim.py +++ b/tests/topotests/lib/pim.py @@ -1720,26 +1720,26 @@ def verify_pim_rp_info( ) return errormsg - if not iamRP: - if rp_json["iAmRP"] == False: - logger.info( - "[DUT %s]: Verifying group " - "and iAmNotRP [PASSED]!!" - " Found Expected: (%s, %s:%s)", - dut, - grp_addr, - "iAmRP", - rp_json["iAmRP"], - ) - else: - errormsg = ( - "[DUT %s]: Verifying group" - "%s and iAmRP [FAILED]!! " - "Expected: (iAmRP: %s)," - " Found: (iAmRP: %s)" - % (dut, grp_addr, "false", rp_json["iAmRP"]) - ) - return errormsg + if not iamRP: + if rp_json["iAmRP"] == False: + logger.info( + "[DUT %s]: Verifying group " + "and iAmNotRP [PASSED]!!" + " Found Expected: (%s, %s:%s)", + dut, + grp_addr, + "iAmRP", + rp_json["iAmRP"], + ) + else: + errormsg = ( + "[DUT %s]: Verifying group" + "%s and iAmRP [FAILED]!! " + "Expected: (iAmRP: %s)," + " Found: (iAmRP: %s)" + % (dut, grp_addr, "false", rp_json["iAmRP"]) + ) + return errormsg logger.debug("Exiting lib API: {}".format(sys._getframe().f_code.co_name)) return True diff --git a/tests/topotests/pim_autorp/r1/frr.conf b/tests/topotests/pim_autorp/r1/frr.conf index 2fddbc3ae29e..92b9b3b41b22 100644 --- a/tests/topotests/pim_autorp/r1/frr.conf +++ b/tests/topotests/pim_autorp/r1/frr.conf @@ -2,15 +2,25 @@ hostname r1 password zebra log file /tmp/r1-frr.log +! debug pim autorp ! interface r1-eth0 - ip address 10.10.76.1/24 + ip address 10.0.0.1/24 + ip igmp + ip pim +! +interface r1-eth1 + ip address 10.0.1.1/24 ip igmp ip pim ! ip forwarding ! 
+ip route 10.0.2.0/24 10.0.0.2 50 +ip route 10.0.3.0/24 10.0.0.2 50 +! router pim autorp discovery + rp 10.0.3.4 224.0.1.0/24 ! \ No newline at end of file diff --git a/tests/topotests/pim_autorp/r2/frr.conf b/tests/topotests/pim_autorp/r2/frr.conf index fd3c0cad3990..d67dade6f95f 100644 --- a/tests/topotests/pim_autorp/r2/frr.conf +++ b/tests/topotests/pim_autorp/r2/frr.conf @@ -2,15 +2,25 @@ hostname r2 password zebra log file /tmp/r2-frr.log +! debug pim autorp ! interface r2-eth0 - ip address 10.10.76.2/24 + ip address 10.0.0.2/24 + ip igmp + ip pim +! +interface r2-eth1 + ip address 10.0.2.2/24 ip igmp ip pim ! ip forwarding ! +ip route 10.0.1.0/24 10.0.0.1 50 +ip route 10.0.3.0/24 10.0.2.4 50 +! router pim autorp discovery + rp 10.0.3.4 224.0.1.0/24 ! \ No newline at end of file diff --git a/tests/topotests/pim_autorp/r3/frr.conf b/tests/topotests/pim_autorp/r3/frr.conf new file mode 100644 index 000000000000..4e93d4ba211d --- /dev/null +++ b/tests/topotests/pim_autorp/r3/frr.conf @@ -0,0 +1,26 @@ +! +hostname r3 +password zebra +log file /tmp/r3-frr.log +! +debug pim autorp +! +interface r3-eth0 + ip address 10.0.1.3/24 + ip igmp + ip pim +! +interface r3-eth1 + ip address 10.0.3.3/24 + ip igmp + ip pim +! +ip forwarding +! +ip route 10.0.0.0/24 10.0.1.1 50 +ip route 10.0.2.0/24 10.0.3.4 50 +! +router pim + autorp discovery + rp 10.0.3.4 224.0.1.0/24 +! \ No newline at end of file diff --git a/tests/topotests/pim_autorp/r4/frr.conf b/tests/topotests/pim_autorp/r4/frr.conf new file mode 100644 index 000000000000..382999b11943 --- /dev/null +++ b/tests/topotests/pim_autorp/r4/frr.conf @@ -0,0 +1,26 @@ +! +hostname r4 +password zebra +log file /tmp/r4-frr.log +! +debug pim autorp +! +interface r4-eth0 + ip address 10.0.2.4/24 + ip igmp + ip pim +! +interface r4-eth1 + ip address 10.0.3.4/24 + ip igmp + ip pim +! +ip forwarding +! +ip route 10.0.0.0/24 10.0.2.2 50 +ip route 10.0.1.0/24 10.0.2.2 50 +! +router pim + autorp discovery + rp 10.0.3.4 224.0.1.0/24 +! 
\ No newline at end of file diff --git a/tests/topotests/pim_autorp/test_pim_autorp.py b/tests/topotests/pim_autorp/test_pim_autorp.py index ad618af29e3d..61cf8ebbc5ed 100644 --- a/tests/topotests/pim_autorp/test_pim_autorp.py +++ b/tests/topotests/pim_autorp/test_pim_autorp.py @@ -11,18 +11,14 @@ import os import sys import pytest +import json from functools import partial # pylint: disable=C0413 # Import topogen and topotest helpers -from lib import topotest -from lib.topogen import Topogen, get_topogen +from lib.topogen import Topogen, topotest, get_topogen from lib.topolog import logger -from lib.pim import ( - scapy_send_autorp_raw_packet, - verify_pim_rp_info, - verify_pim_rp_info_is_empty, -) +from lib.pim import verify_pim_rp_info from lib.common_config import step, write_test_header from time import sleep @@ -32,13 +28,26 @@ """ TOPOLOGY = """ - Basic AutoRP functionality - - +---+---+ +---+---+ - | | 10.10.76.0/24 | | - + R1 + <------------------> + R2 | - | | .1 .2 | | - +---+---+ +---+---+ + Test PIM AutoRP functionality: + AutoRP candidate RP announcements + Mapping agent announcement receive and send discovery + AutoRP discovery to active RP info + + +---+---+ +---+---+ + | | 10.0.0.0/24 | | + + R1 +----------------------+ R2 | + | | .1 .2 | | + +---+---+ r1-eth0 r2-eth0 +---+---+ + .1 | r1-eth1 r2-eth1 | .2 + | | + 10.0.1.0/24 | | 10.0.2.0/24 + | | + .3 | r3-eth0 r4-eth0 | .4 + +---+---+ r3-eth1 r4-eth1 +---+---+ + | | .3 .4 | | + + R3 +----------------------+ R4 | + | | 10.0.3.0/24 | | + +---+---+ +---+---+ """ # Save the Current Working Directory to find configuration files. @@ -55,11 +64,14 @@ def build_topo(tgen): # Create routers tgen.add_router("r1") tgen.add_router("r2") + tgen.add_router("r3") + tgen.add_router("r4") - # Create link between router 1 and 2 - switch = tgen.add_switch("s1-2") - switch.add_link(tgen.gears["r1"]) - switch.add_link(tgen.gears["r2"]) + # Create topology links + tgen.add_link(tgen.gears["r1"], tgen.gears["r2"], "r1-eth0", "r2-eth0") + tgen.add_link(tgen.gears["r1"], tgen.gears["r3"], "r1-eth1", "r3-eth0") + tgen.add_link(tgen.gears["r2"], tgen.gears["r4"], "r2-eth1", "r4-eth0") + tgen.add_link(tgen.gears["r3"], tgen.gears["r4"], "r3-eth1", "r4-eth1") def setup_module(mod): @@ -68,15 +80,6 @@ def setup_module(mod): tgen = Topogen(build_topo, mod.__name__) tgen.start_topology() - # Router 1 will be the router configured with "fake" autorp configuration, so give it a default route - # to router 2 so that routing to the RP address is not an issue - # r1_defrt_setup_cmds = [ - # "ip route add default via 10.10.76.1 dev r1-eth0", - # ] - # for cmd in r1_defrt_setup_cmds: - # tgen.net["r1"].cmd(cmd) - - logger.info("Testing PIM AutoRP support") router_list = tgen.routers() for rname, router in router_list.items(): logger.info("Loading router %s" % rname) @@ -95,8 +98,8 @@ def teardown_module(mod): tgen.stop_topology() -def test_pim_autorp_discovery_single_rp(request): - "Test PIM AutoRP Discovery with single RP" +def test_pim_autorp_init(request): + "Test PIM AutoRP startup with only the static RP" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -104,84 +107,253 @@ def test_pim_autorp_discovery_single_rp(request): if tgen.routers_have_failure(): pytest.skip(tgen.errors) - step("Start with no RP configuration") - result = verify_pim_rp_info_is_empty(tgen, "r1") - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - - step("Send AutoRP packet from r1 to r2") - # 1 RP(s), hold time 5 secs, 
10.10.76.1, group(s) 224.0.0.0/4 - data = "01005e00012800127f55cfb1080045c00030700c000008110abe0a0a4c01e000012801f001f0001c798b12010005000000000a0a4c0103010004e0000000" - scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data) - - step("Verify rp-info from AutoRP packet") - result = verify_pim_rp_info( - tgen, - None, - "r2", - "224.0.0.0/4", - "r2-eth0", - "10.10.76.1", - "AutoRP", - False, - "ipv4", - True, + step("Verify start-up with no extra RP configuration") + expected = json.loads( + """ + { + "10.0.3.4":[ + { + "rpAddress":"10.0.3.4", + "group":"224.0.1.0/24", + "source":"Static" + } + ] + }""" + ) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim rp-info json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct rp-info".format(rtr) + + step("Verify start-up with AutoRP only discovery enabled") + expected = json.loads( + """ + { + "discovery":{ + "enabled": true + }, + "announce": { + "enabled":false + }, + "mapping-agent": { + "enabled":false + } + }""" ) - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim autorp json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct autorp configuration".format( + rtr + ) + + +def test_pim_autorp_no_mapping_agent_rp(request): + "Test PIM AutoRP candidate with no mapping agent" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) - step("Verify AutoRP configuration times out") - result = verify_pim_rp_info_is_empty(tgen, "r2") - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + # Start a candidate RP on r2 + tgen.routers()["r2"].vtysh_cmd( + """ + conf + router pim + autorp announce 10.0.0.2 224.0.0.0/4 + autorp announce scope 31 interval 1 holdtime 5 + """ + ) -def test_pim_autorp_discovery_multiple_rp(request): - "Test PIM AutoRP Discovery with multiple RP's" + # Without a mapping agent, we should still have no RP + step("Verify no RP without mapping agent") + expected = json.loads( + """ + { + "10.0.3.4":[ + { + "rpAddress":"10.0.3.4", + "group":"224.0.1.0/24", + "source":"Static" + } + ] + }""" + ) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim rp-info json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct rp-info".format(rtr) + + step("Verify candidate RP in AutoRP on R2") + expected = json.loads( + """ + { + "discovery":{ + "enabled": true + }, + "announce": { + "enabled":true, + "scope":31, + "interval":1, + "holdtime":5, + "rpList":[ + { + "rpAddress":"10.0.0.2", + "groupRange":"224.0.0.0/4", + "prefixList":"-" + } + ] + }, + "mapping-agent": { + "enabled":false + } + }""" + ) + test_func = partial( + topotest.router_json_cmp, tgen.gears["r2"], "show ip pim autorp json", expected + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct autorp configuration".format("r2") + + +def test_pim_autorp_discovery_rp(request): + "Test PIM AutoRP candidate advertised by mapping agent" tgen = get_topogen() tc_name = request.node.name 
write_test_header(tc_name) if tgen.routers_have_failure(): - pytest.skip("skipped because of router(s) failure") + pytest.skip(tgen.errors) - step("Start with no RP configuration") - result = verify_pim_rp_info_is_empty(tgen, "r2") - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - - step("Send AutoRP packet from r1 to r2") - # 2 RP(s), hold time 5 secs, 10.10.76.1, group(s) 224.0.0.0/8, 10.10.76.3, group(s) 225.0.0.0/8 - data = "01005e00012800127f55cfb1080045c0003c700c000008110ab20a0a4c01e000012801f001f000283f5712020005000000000a0a4c0103010008e00000000a0a4c0303010008e1000000" - scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data) - - step("Verify rp-info from AutoRP packet") - result = verify_pim_rp_info( - tgen, - None, - "r2", - "224.0.0.0/8", - "r2-eth0", - "10.10.76.1", - "AutoRP", - False, - "ipv4", - True, - ) - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - result = verify_pim_rp_info( - tgen, - None, - "r2", - "225.0.0.0/8", - "r2-eth0", - "10.10.76.3", - "AutoRP", - False, - "ipv4", - True, + # Start the mapping agent on R1 + tgen.routers()["r1"].vtysh_cmd( + """ + conf + router pim + autorp send-rp-discovery source interface r1-eth0 + autorp send-rp-discovery scope 31 interval 1 holdtime 5 + """ ) - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + step("Verify rp-info of the only candidate RP") + expected = json.loads( + """ + { + "10.0.3.4":[ + { + "rpAddress":"10.0.3.4", + "group":"224.0.1.0/24", + "source":"Static" + } + ], + "10.0.0.2":[ + { + "rpAddress":"10.0.0.2", + "group":"224.0.0.0/4", + "source":"AutoRP" + } + ] + }""" + ) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim rp-info json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct rp-info".format(rtr) + + step("Verify mapping-agent in AutoRP on R1") + expected = json.loads( + """ + { + "announce": { + "enabled":false + }, + "mapping-agent": { + "enabled":true, + "active":true, + "scope":31, + "interval":1, + "holdtime":5, + "source":"interface", + "interface":"r1-eth0", + "address":"10.0.0.1", + "rpList":{ + "10.0.0.2":{ + "rpAddress":"10.0.0.2", + "groupRanges":[ + { + "negative":false, + "prefix":"224.0.0.0/4" + } + ] + } + } + } + }""" + ) + test_func = partial( + topotest.router_json_cmp, tgen.gears["r1"], "show ip pim autorp json", expected + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct autorp configuration".format("r1") -def test_pim_autorp_discovery_static(request): - "Test PIM AutoRP Discovery with Static RP" + step("Verify AutoRP discovery RP's") + expected = json.loads( + """ + { + "discovery":{ + "enabled": true, + "rpList":{ + "10.0.0.2":{ + "rpAddress":"10.0.0.2", + "holdtime":5, + "groupRanges":[ + { + "negative":false, + "prefix":"224.0.0.0/4" + } + ] + } + } + } + }""" + ) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim autorp json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct autorp configuration".format( + rtr + ) + + +def test_pim_autorp_discovery_multiple_rp_same(request): + "Test PIM AutoRP Discovery with multiple RP's for same group prefix" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -189,52 
+361,94 @@ def test_pim_autorp_discovery_static(request): if tgen.routers_have_failure(): pytest.skip("skipped because of router(s) failure") - step("Start with no RP configuration") - result = verify_pim_rp_info_is_empty(tgen, "r2") - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - - step("Add static RP configuration to r2") - rnode = tgen.routers()["r2"] - rnode.cmd("vtysh -c 'conf t' -c 'router pim' -c 'rp 10.10.76.3 224.0.0.0/4'") - - step("Verify static rp-info from r2") - result = verify_pim_rp_info( - tgen, - None, - "r2", - "224.0.0.0/4", - "r2-eth0", - "10.10.76.3", - "Static", - False, - "ipv4", - True, - ) - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - - step("Send AutoRP packet from r1 to r2") - # 1 RP(s), hold time 5 secs, 10.10.76.1, group(s) 224.0.0.0/4 - data = "01005e00012800127f55cfb1080045c00030700c000008110abe0a0a4c01e000012801f001f0001c798b12010005000000000a0a4c0103010004e0000000" - scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data) - - step("Verify rp-info from AutoRP packet") - result = verify_pim_rp_info( - tgen, - None, - "r2", - "224.0.0.0/4", - "r2-eth0", - "10.10.76.1", - "AutoRP", - False, - "ipv4", - True, + # Start a candidate RP on r3 + tgen.routers()["r3"].vtysh_cmd( + """ + conf + router pim + autorp announce 10.0.1.3 224.0.0.0/4 + autorp announce scope 31 interval 1 holdtime 5 + """ ) - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - -def test_pim_autorp_announce_cli(request): - "Test PIM AutoRP Announcement CLI commands" + # The new candidate RP has the same group range but a higher IP, they should all + # switch to this RP + step("Verify rp-info of the candidate RP with the higher IP") + expected = json.loads( + """ + { + "10.0.3.4":[ + { + "rpAddress":"10.0.3.4", + "group":"224.0.1.0/24", + "source":"Static" + } + ], + "10.0.1.3":[ + { + "rpAddress":"10.0.1.3", + "group":"224.0.0.0/4", + "source":"AutoRP" + } + ] + }""" + ) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim rp-info json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct rp-info".format(rtr) + + step("Verify AutoRP discovery RP's") + expected = json.loads( + """ + { + "discovery":{ + "enabled": true, + "rpList":{ + "10.0.0.2":{ + "rpAddress":"10.0.0.2", + "holdtime":5, + "groupRanges":[ + { + "negative":false, + "prefix":"224.0.0.0/4" + } + ] + }, + "10.0.1.3":{ + "rpAddress":"10.0.1.3", + "holdtime":5, + "groupRanges":[ + { + "negative":false, + "prefix":"224.0.0.0/4" + } + ] + } + } + } + }""" + ) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim autorp json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct autorp configuration".format( + rtr + ) + + +def test_pim_autorp_discovery_multiple_rp_different(request): + "Test PIM AutoRP Discovery with multiple RP's for different group prefixes" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -242,65 +456,218 @@ def test_pim_autorp_announce_cli(request): if tgen.routers_have_failure(): pytest.skip("skipped because of router(s) failure") - step("Add AutoRP announcement configuration to r1") - r1 = tgen.routers()["r1"] - r1.vtysh_cmd( + # Switch R3 candidate to prefix list with different groups + step("Change 
R3 candidate to a prefix list") + tgen.routers()["r3"].vtysh_cmd( """ conf + ip prefix-list MYLIST permit 225.0.0.0/8 + ip prefix-list MYLIST permit 226.0.0.0/8 router pim - autorp announce holdtime 90 - autorp announce interval 120 - autorp announce scope 5 - autorp announce 10.2.3.4 225.0.0.0/24 -""" + autorp announce 10.0.1.3 group-list MYLIST + """ ) - expected = { - "discoveryEnabled": True, - "announce": { - "scope": 5, - "interval": 120, - "holdtime": 90, - "rpList": [ - {"rpAddress": "10.2.3.4", "group": "225.0.0.0/24", "prefixList": ""} - ], - }, - } - - test_func = partial( - topotest.router_json_cmp, r1, "show ip pim autorp json", expected + # Now that R3 doesn't conflict, we should see both RP's + step("Verify rp-info of both candidate RP's") + expected = json.loads( + """ + { + "10.0.3.4":[ + { + "rpAddress":"10.0.3.4", + "group":"224.0.1.0/24", + "source":"Static" + } + ], + "10.0.0.2":[ + { + "rpAddress":"10.0.0.2", + "group":"224.0.0.0/4", + "source":"AutoRP" + } + ], + "10.0.1.3":[ + { + "rpAddress":"10.0.1.3", + "prefixList":"__AUTORP_10.0.1.3__", + "source":"AutoRP" + } + ] + }""" + ) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim rp-info json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct rp-info".format(rtr) + + step("Verify AutoRP discovery RP's") + expected = json.loads( + """ + { + "discovery":{ + "enabled": true, + "rpList":{ + "10.0.0.2":{ + "rpAddress":"10.0.0.2", + "holdtime":5, + "groupRanges":[ + { + "negative":false, + "prefix":"224.0.0.0/4" + } + ] + }, + "10.0.1.3":{ + "rpAddress":"10.0.1.3", + "holdtime":5, + "groupRanges":[ + { + "negative":false, + "prefix":"225.0.0.0/8" + }, + { + "negative":false, + "prefix":"226.0.0.0/8" + } + ] + } + } + } + }""" ) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assertmsg = '"{}" JSON output mismatches'.format(r1.name) - assert result is None, assertmsg + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim autorp json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct autorp configuration".format( + rtr + ) + + +def test_pim_autorp_discovery_neg_prefixes(request): + "Test PIM AutoRP Discovery with negative prefixes" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) - r1.vtysh_cmd( + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + # Add negative prefixes to the R3 candidate prefix list + step("Change R3 candidate prefix list to include overlapping negative prefixes") + tgen.routers()["r3"].vtysh_cmd( """ conf - router pim - autorp announce 10.2.3.4 group-list ListA -""" + ip prefix-list MYLIST deny 225.1.0.0/16 + ip prefix-list MYLIST deny 226.1.0.0/16 + """ ) - expected = { - "discoveryEnabled": True, - "announce": { - "scope": 5, - "interval": 120, - "holdtime": 90, - "rpList": [{"rpAddress": "10.2.3.4", "group": "", "prefixList": "ListA"}], - }, - } - test_func = partial( - topotest.router_json_cmp, r1, "show ip pim autorp json", expected + step("Verify rp-info stays the same") + expected = json.loads( + """ + { + "10.0.3.4":[ + { + "rpAddress":"10.0.3.4", + "group":"224.0.1.0/24", + "source":"Static" + } + ], + "10.0.0.2":[ + { + "rpAddress":"10.0.0.2", + "group":"224.0.0.0/4", + "source":"AutoRP" + } + ], + "10.0.1.3":[ + 
{ + "rpAddress":"10.0.1.3", + "prefixList":"__AUTORP_10.0.1.3__", + "source":"AutoRP" + } + ] + }""" ) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) - assertmsg = '"{}" JSON output mismatches'.format(r1.name) - assert result is None, assertmsg + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim rp-info json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct rp-info".format(rtr) + + step("Verify AutoRP discovery RP's") + expected = json.loads( + """ + { + "discovery":{ + "enabled": true, + "rpList":{ + "10.0.0.2":{ + "rpAddress":"10.0.0.2", + "holdtime":5, + "groupRanges":[ + { + "negative":false, + "prefix":"224.0.0.0/4" + } + ] + }, + "10.0.1.3":{ + "rpAddress":"10.0.1.3", + "holdtime":5, + "groupRanges":[ + { + "negative":false, + "prefix":"225.0.0.0/8" + }, + { + "negative":false, + "prefix":"226.0.0.0/8" + }, + { + "negative":true, + "prefix":"225.1.0.0/16" + }, + { + "negative":true, + "prefix":"226.1.0.0/16" + } + ] + } + } + } + }""" + ) + for rtr in ["r1", "r2", "r3", "r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim autorp json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct autorp configuration".format( + rtr + ) -def test_pim_autorp_announce_group(request): - "Test PIM AutoRP Announcement with a single group" +def test_pim_autorp_discovery_static(request): + "Test PIM AutoRP Discovery with Static RP" tgen = get_topogen() tc_name = request.node.name write_test_header(tc_name) @@ -308,23 +675,60 @@ def test_pim_autorp_announce_group(request): if tgen.routers_have_failure(): pytest.skip("skipped because of router(s) failure") - step("Add candidate RP configuration to r1") - rnode = tgen.routers()["r1"] - rnode.cmd( - "vtysh -c 'conf t' -c 'router pim' -c 'send-rp-announce 10.10.76.1 224.0.0.0/4'" + # Add in a static RP with a specific range and make sure both are used + step("Add static RP configuration to r4") + tgen.routers()["r4"].vtysh_cmd( + """ + conf t + router pim + rp 10.0.2.2 239.0.0.0/24 + """ ) - step("Verify Announcement sent data") - # TODO: Verify AutoRP mapping agent receives candidate RP announcement - # Mapping agent is not yet implemented - # sleep(10) - step("Change AutoRP Announcement packet parameters") - rnode.cmd( - "vtysh -c 'conf t' -c 'router pim' -c 'send-rp-announce scope 8 interval 10 holdtime 60'" + + step("Verify static rp-info from r4") + expected = json.loads( + """ + { + "10.0.3.4":[ + { + "rpAddress":"10.0.3.4", + "group":"224.0.1.0/24", + "source":"Static" + } + ], + "10.0.0.2":[ + { + "rpAddress":"10.0.0.2", + "group":"224.0.0.0/4", + "source":"AutoRP" + } + ], + "10.0.1.3":[ + { + "rpAddress":"10.0.1.3", + "prefixList":"__AUTORP_10.0.1.3__", + "source":"AutoRP" + } + ], + "10.0.2.2":[ + { + "rpAddress":"10.0.2.2", + "group":"239.0.0.0/24", + "source":"Static" + } + ] + }""" ) - step("Verify Announcement sent data") - # TODO: Verify AutoRP mapping agent receives updated candidate RP announcement - # Mapping agent is not yet implemented - # sleep(10) + + for rtr in ["r4"]: + test_func = partial( + topotest.router_json_cmp, + tgen.gears[rtr], + "show ip pim rp-info json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None) + assert result is None, "{} does not have correct rp-info".format(rtr) def test_memory_leak(): From 
8f829b2495f3dbfe33ffada7f6f7aaa6289dd3ce Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Fri, 27 May 2022 17:15:46 -0300 Subject: [PATCH 13/69] pimd: remove temporary variables from MSDP peer Remove from MSDP peer data structure two temporary variables that should only be used when calling library functions. Signed-off-by: Rafael Zalamena --- pimd/pim_msdp.c | 12 ------------ pimd/pim_msdp.h | 3 --- pimd/pim_msdp_socket.c | 29 +++++++++++++++++++++-------- 3 files changed, 21 insertions(+), 23 deletions(-) diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index 215cc3c5029c..bea74ff7d477 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -1012,16 +1012,6 @@ void pim_msdp_peer_pkt_txed(struct pim_msdp_peer *mp) } } -static void pim_msdp_addr2su(union sockunion *su, struct in_addr addr) -{ - sockunion_init(su); - su->sin.sin_addr = addr; - su->sin.sin_family = AF_INET; -#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN - su->sin.sin_len = sizeof(struct sockaddr_in); -#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */ -} - /* 11.2.A1: create a new peer and transition state to listen or connecting */ struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim, const struct in_addr *peer, @@ -1037,11 +1027,9 @@ struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim, mp->pim = pim; mp->peer = *peer; pim_inet4_dump("", mp->peer, mp->key_str, sizeof(mp->key_str)); - pim_msdp_addr2su(&mp->su_peer, mp->peer); mp->local = *local; /* XXX: originator_id setting needs to move to the mesh group */ pim->msdp.originator_id = *local; - pim_msdp_addr2su(&mp->su_local, mp->local); if (mesh_group_name) mp->mesh_group_name = XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name); diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h index f77b0e1a3afd..f01f575e0794 100644 --- a/pimd/pim_msdp.h +++ b/pimd/pim_msdp.h @@ -114,9 +114,6 @@ struct pim_msdp_peer { enum pim_msdp_peer_state state; enum pim_msdp_peer_flags flags; - /* TCP socket info */ - union sockunion su_local; - union sockunion su_peer; int fd; /* protocol timers */ diff --git a/pimd/pim_msdp_socket.c b/pimd/pim_msdp_socket.c index 2fb0bb87c789..b29993304db5 100644 --- a/pimd/pim_msdp_socket.c +++ b/pimd/pim_msdp_socket.c @@ -49,6 +49,16 @@ static void pim_msdp_update_sock_send_buffer_size(int fd) } } +static void pim_msdp_addr2su(union sockunion *su, struct in_addr addr) +{ + sockunion_init(su); + su->sin.sin_addr = addr; + su->sin.sin_family = AF_INET; +#ifdef HAVE_STRUCT_SOCKADDR_IN_SIN_LEN + su->sin.sin_len = sizeof(struct sockaddr_in); +#endif /* HAVE_STRUCT_SOCKADDR_IN_SIN_LEN */ +} + /** * Helper function to reduce code duplication. * @@ -64,7 +74,6 @@ static int _pim_msdp_sock_listen(const struct vrf *vrf, int rv; socklen_t socklen; struct sockaddr_in sin = {}; - union sockunion su_peer = {}; sock = socket(AF_INET, SOCK_STREAM, 0); if (sock == -1) { @@ -117,7 +126,9 @@ static int _pim_msdp_sock_listen(const struct vrf *vrf, /* Set MD5 authentication. 
*/ if (mp && mp->auth_key) { - su_peer = mp->su_peer; + union sockunion su_peer = {}; + + pim_msdp_addr2su(&su_peer, mp->peer); frr_with_privs (&pimd_privs) { sockopt_tcp_signature(sock, &su_peer, mp->auth_key); } @@ -349,6 +360,7 @@ int pim_msdp_sock_listen(struct pim_instance *pim) int pim_msdp_sock_connect(struct pim_msdp_peer *mp) { int rc; + union sockunion su_peer = {}, su_local = {}; if (PIM_DEBUG_MSDP_INTERNAL) { zlog_debug("MSDP peer %s attempt connect%s", mp->key_str, @@ -366,8 +378,11 @@ int pim_msdp_sock_connect(struct pim_msdp_peer *mp) pim_msdp_peer_stop_tcp_conn(mp, false /* chg_state */); } + pim_msdp_addr2su(&su_peer, mp->peer); + pim_msdp_addr2su(&su_local, mp->local); + /* Make socket for the peer. */ - mp->fd = sockunion_socket(&mp->su_peer); + mp->fd = sockunion_socket(&su_peer); if (mp->fd < 0) { flog_err_sys(EC_LIB_SOCKET, "pim_msdp_socket socket failure: %s", @@ -402,7 +417,7 @@ int pim_msdp_sock_connect(struct pim_msdp_peer *mp) sockopt_reuseport(mp->fd); /* source bind */ - rc = sockunion_bind(mp->fd, &mp->su_local, 0, &mp->su_local); + rc = sockunion_bind(mp->fd, &su_local, 0, &su_local); if (rc < 0) { flog_err_sys(EC_LIB_SOCKET, "pim_msdp_socket connect bind failure: %s", @@ -421,12 +436,10 @@ int pim_msdp_sock_connect(struct pim_msdp_peer *mp) /* Set authentication (if configured). */ if (mp->auth_key) { frr_with_privs (&pimd_privs) { - sockopt_tcp_signature(mp->fd, &mp->su_peer, - mp->auth_key); + sockopt_tcp_signature(mp->fd, &su_peer, mp->auth_key); } } /* Connect to the remote mp. */ - return (sockunion_connect(mp->fd, &mp->su_peer, - htons(PIM_MSDP_TCP_PORT), 0)); + return (sockunion_connect(mp->fd, &su_peer, htons(PIM_MSDP_TCP_PORT), 0)); } From 1e0629fd04b1d8ac7c64a70677cfd7d5fd38d473 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Fri, 8 Jul 2022 16:09:10 -0300 Subject: [PATCH 14/69] pimd: constify peer lookup function Make parameter `const` to document that the variable will not change. 
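For illustration only, the const qualifier lets read-only code hand its const-qualified
instance pointer straight to the lookup without a cast; the wrapper below is a
hypothetical sketch, not part of this change:

> /* hypothetical helper, shown only to illustrate the const-correct call */
> static bool msdp_peer_is_configured(const struct pim_instance *pim,
>                                     struct in_addr addr)
> {
>         /* lookup only; never modifies the instance */
>         return pim_msdp_peer_find(pim, addr) != NULL;
> }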
Signed-off-by: Rafael Zalamena --- pimd/pim_msdp.c | 3 +-- pimd/pim_msdp.h | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index bea74ff7d477..0c87b3f48548 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -1066,8 +1066,7 @@ struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim, return mp; } -struct pim_msdp_peer *pim_msdp_peer_find(struct pim_instance *pim, - struct in_addr peer_addr) +struct pim_msdp_peer *pim_msdp_peer_find(const struct pim_instance *pim, struct in_addr peer_addr) { struct pim_msdp_peer lookup; diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h index f01f575e0794..cd1f6647f69f 100644 --- a/pimd/pim_msdp.h +++ b/pimd/pim_msdp.h @@ -235,8 +235,7 @@ void pim_msdp_init(struct pim_instance *pim, struct event_loop *master); void pim_msdp_exit(struct pim_instance *pim); char *pim_msdp_state_dump(enum pim_msdp_peer_state state, char *buf, int buf_size); -struct pim_msdp_peer *pim_msdp_peer_find(struct pim_instance *pim, - struct in_addr peer_addr); +struct pim_msdp_peer *pim_msdp_peer_find(const struct pim_instance *pim, struct in_addr peer_addr); void pim_msdp_peer_established(struct pim_msdp_peer *mp); void pim_msdp_peer_pkt_rxed(struct pim_msdp_peer *mp); void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state); From 2a94de8af2678407859473cdb700d0fe2eb908cc Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 20 Nov 2024 09:18:39 -0500 Subject: [PATCH 15/69] bgpd: bgp_connect should return an `enum connect_result` This function when it is run by bgp_start is expected to return a `enum connect_result`. But instead the function returns a variety of values that are not really being checked for. Consolidate to a correct choice. Signed-off-by: Donald Sharp --- bgpd/bgp_fsm.c | 2 +- bgpd/bgp_network.c | 10 +++++----- bgpd/bgp_network.h | 2 +- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 8c9050185b32..4ac8201f749c 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -1826,7 +1826,7 @@ static void bgp_connect_in_progress_update_connection(struct peer *peer) static enum bgp_fsm_state_progress bgp_start(struct peer_connection *connection) { struct peer *peer = connection->peer; - int status; + enum connect_result status; bgp_peer_conf_if_to_su_update(connection); diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c index de57d91806e1..844f6b9af2f1 100644 --- a/bgpd/bgp_network.c +++ b/bgpd/bgp_network.c @@ -762,7 +762,7 @@ static int bgp_update_source(struct peer_connection *connection) } /* BGP try to connect to the peer. */ -int bgp_connect(struct peer_connection *connection) +enum connect_result bgp_connect(struct peer_connection *connection) { struct peer *peer = connection->peer; @@ -773,7 +773,7 @@ int bgp_connect(struct peer_connection *connection) if (peer->conf_if && BGP_CONNECTION_SU_UNSPEC(connection)) { if (bgp_debug_neighbor_events(peer)) zlog_debug("Peer address not learnt: Returning from connect"); - return 0; + return connect_error; } frr_with_privs(&bgpd_privs) { /* Make socket for the peer. 
*/ @@ -787,7 +787,7 @@ int bgp_connect(struct peer_connection *connection) zlog_debug("%s: Failure to create socket for connection to %s, error received: %s(%d)", __func__, peer->host, safe_strerror(errno), errno); - return -1; + return connect_error; } set_nonblocking(connection->fd); @@ -808,7 +808,7 @@ int bgp_connect(struct peer_connection *connection) __func__, peer->host, safe_strerror(errno), errno); - return -1; + return connect_error; } sockopt_reuseaddr(connection->fd); @@ -844,7 +844,7 @@ int bgp_connect(struct peer_connection *connection) /* If the peer is passive mode, force to move to Active mode. */ if (CHECK_FLAG(peer->flags, PEER_FLAG_PASSIVE)) { BGP_EVENT_ADD(connection, TCP_connection_open_failed); - return BGP_FSM_SUCCESS; + return connect_error; } if (peer->conf_if || peer->ifname) diff --git a/bgpd/bgp_network.h b/bgpd/bgp_network.h index ceb6b6f002e3..61ca19a34da5 100644 --- a/bgpd/bgp_network.h +++ b/bgpd/bgp_network.h @@ -21,7 +21,7 @@ extern int bgp_socket(struct bgp *bgp, unsigned short port, const char *address); extern void bgp_close_vrf_socket(struct bgp *bgp); extern void bgp_close(void); -extern int bgp_connect(struct peer_connection *connection); +extern enum connect_result bgp_connect(struct peer_connection *connection); extern int bgp_getsockname(struct peer *peer); extern void bgp_updatesockname(struct peer *peer); From 6a945b41049d36dc3cd41c6ff3d1c03a34d1d49f Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 20 Nov 2024 09:22:46 -0500 Subject: [PATCH 16/69] tests: zebra_fec_nexthop_resolution improve a) timers are really large preventing convergence in 30 seconds b) The same configuration does not need to be initiated 60 times when things are not working properly. Once is enough. Signed-off-by: Donald Sharp --- .../topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf | 2 ++ .../topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf | 1 + .../topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf | 1 + .../topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf | 1 + .../topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf | 1 + .../topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf | 2 ++ .../test_zebra_fec_nexthop_resolution.py | 9 +++++---- 7 files changed, 13 insertions(+), 4 deletions(-) diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf index 9d28957d99eb..ccfec19e9b78 100644 --- a/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf +++ b/tests/topotests/zebra_fec_nexthop_resolution/r1/bgpd.conf @@ -1,5 +1,6 @@ ! router bgp 65500 + timers bgp 3 9 bgp router-id 192.0.2.1 neighbor 192.0.2.3 remote-as 65500 neighbor 192.0.2.3 update-source lo @@ -7,6 +8,7 @@ router bgp 65500 neighbor 192.0.2.7 ttl-security hops 10 neighbor 192.0.2.7 disable-connected-check neighbor 192.0.2.7 update-source lo + neighbor 192.0.2.7 timers connect 5 ! 
address-family ipv4 unicast network 192.0.2.1/32 diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf index 46d2c9a01d67..e02e7a4b29ac 100644 --- a/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf +++ b/tests/topotests/zebra_fec_nexthop_resolution/r2/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65500 + timers bgp 3 9 bgp router-id 192.0.2.2 neighbor 192.0.2.1 remote-as 65500 neighbor 192.0.2.1 update-source lo diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf index 060777e7fe33..f2b22d7b3801 100644 --- a/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf +++ b/tests/topotests/zebra_fec_nexthop_resolution/r3/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65500 + timers bgp 3 9 bgp router-id 192.0.2.3 neighbor 192.0.2.1 remote-as 65500 neighbor 192.0.2.1 update-source lo diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf index dc052da86347..d0f2f468bf29 100644 --- a/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf +++ b/tests/topotests/zebra_fec_nexthop_resolution/r4/bgpd.conf @@ -1,5 +1,6 @@ ! router bgp 65500 + timers bgp 3 9 bgp router-id 192.0.2.4 neighbor 192.0.2.1 remote-as 65500 neighbor 192.0.2.1 ttl-security hops 10 diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf index 1c73154e27bf..e2401eb1f902 100644 --- a/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf +++ b/tests/topotests/zebra_fec_nexthop_resolution/r5/bgpd.conf @@ -1,4 +1,5 @@ router bgp 65500 + timers bgp 3 9 bgp router-id 192.0.2.5 neighbor 192.0.2.3 remote-as 65500 neighbor 192.0.2.3 update-source lo diff --git a/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf b/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf index eeda9d9cfa9d..325124e9f89d 100644 --- a/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf +++ b/tests/topotests/zebra_fec_nexthop_resolution/r7/bgpd.conf @@ -1,10 +1,12 @@ ! router bgp 65500 + timers bgp 3 9 bgp router-id 192.0.2.7 neighbor 192.0.2.1 remote-as 65500 neighbor 192.0.2.1 ttl-security hops 10 neighbor 192.0.2.1 disable-connected-check neighbor 192.0.2.1 update-source lo + neighbor 192.0.2.1 timers connect 5 neighbor 192.0.2.5 remote-as 65500 neighbor 192.0.2.5 update-source lo ! 
diff --git a/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py b/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py index 984ff3c18526..e42070b4d6ed 100644 --- a/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py +++ b/tests/topotests/zebra_fec_nexthop_resolution/test_zebra_fec_nexthop_resolution.py @@ -156,16 +156,17 @@ def test_zebra_fec_nexthop_resolution_bgp(): def _check_bgp_session(): r1 = tgen.gears["r1"] - tgen.gears["r3"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end") - tgen.gears["r3"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end") - tgen.gears["r5"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end") - tgen.gears["r5"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end") output = json.loads(r1.vtysh_cmd("show bgp summary json")) if output["ipv4Unicast"]["peers"]["192.0.2.7"]["state"] == "Established": return None return False + tgen.gears["r3"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end") + tgen.gears["r3"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end") + tgen.gears["r5"].vtysh_cmd("config \n no mpls fec nexthop-resolution \n end") + tgen.gears["r5"].vtysh_cmd("config \n mpls fec nexthop-resolution \n end") + test_func1 = functools.partial(_check_bgp_session) _, result1 = topotest.run_and_expect(test_func1, None, count=60, wait=0.5) assert result1 is None, "Failed to verify the fec_nexthop_resolution: bgp session" From c1c6298dd3d7fbcc5b19c1e3692aac9eb2e6cade Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 20 Nov 2024 16:07:34 -0500 Subject: [PATCH 17/69] bgpd: Allow bfd to work if peer known but interface address not yet If bgp is coming up and bgp has not received the interface address yet but bgp has knowledge about a bfd peering, allow it to set the peering data appropriately. Signed-off-by: Donald Sharp --- bgpd/bgp_bfd.c | 6 +++++- bgpd/bgp_nexthop.c | 28 +++++++++++++++++----------- 2 files changed, 22 insertions(+), 12 deletions(-) diff --git a/bgpd/bgp_bfd.c b/bgpd/bgp_bfd.c index a331585d3291..50b00d21b195 100644 --- a/bgpd/bgp_bfd.c +++ b/bgpd/bgp_bfd.c @@ -151,7 +151,7 @@ void bgp_peer_config_apply(struct peer *p, struct peer_group *pg) void bgp_peer_bfd_update_source(struct peer *p) { - struct bfd_session_params *session = p->bfd_config->session; + struct bfd_session_params *session; const union sockunion *source = NULL; bool changed = false; int family; @@ -162,6 +162,10 @@ void bgp_peer_bfd_update_source(struct peer *p) struct interface *ifp; union sockunion addr; + if (!p->bfd_config) + return; + + session = p->bfd_config->session; /* Nothing to do for groups. 
*/ if (CHECK_FLAG(p->sflags, PEER_STATUS_GROUP)) return; diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c index 357d5292dabb..bf0f3b15cfde 100644 --- a/bgpd/bgp_nexthop.c +++ b/bgpd/bgp_nexthop.c @@ -32,6 +32,7 @@ #include "bgpd/bgp_vty.h" #include "bgpd/bgp_rd.h" #include "bgpd/bgp_mplsvpn.h" +#include "bgpd/bgp_bfd.h" DEFINE_MTYPE_STATIC(BGPD, MARTIAN_STRING, "BGP Martian Addr Intf String"); @@ -409,17 +410,6 @@ void bgp_connected_add(struct bgp *bgp, struct connected *ifc) bgp_dest_set_bgp_connected_ref_info(dest, bc); } - for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { - if (peer->conf_if && - (strcmp(peer->conf_if, ifc->ifp->name) == 0) && - !peer_established(peer->connection) && - !CHECK_FLAG(peer->flags, PEER_FLAG_IFPEER_V6ONLY)) { - connection = peer->connection; - if (peer_active(peer)) - BGP_EVENT_ADD(connection, BGP_Stop); - BGP_EVENT_ADD(connection, BGP_Start); - } - } } else if (addr->family == AF_INET6) { apply_mask_ipv6((struct prefix_ipv6 *)&p); @@ -443,6 +433,22 @@ void bgp_connected_add(struct bgp *bgp, struct connected *ifc) bgp_dest_set_bgp_connected_ref_info(dest, bc); } } + + /* + * Iterate over all the peers and attempt to set the bfd session + * data and if it's a bgp unnumbered get her flowing if necessary + */ + for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { + bgp_peer_bfd_update_source(peer); + if (peer->conf_if && (strcmp(peer->conf_if, ifc->ifp->name) == 0) && + !peer_established(peer->connection) && + !CHECK_FLAG(peer->flags, PEER_FLAG_IFPEER_V6ONLY)) { + connection = peer->connection; + if (peer_active(peer)) + BGP_EVENT_ADD(connection, BGP_Stop); + BGP_EVENT_ADD(connection, BGP_Start); + } + } } void bgp_connected_delete(struct bgp *bgp, struct connected *ifc) From 82a540dcd4913cb296afccdef69441248a0a5a68 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Tue, 5 Nov 2024 17:13:38 +0100 Subject: [PATCH 18/69] tests: add bgp_vpnv4_route_leak_basic bgp_vrf_route_leak_basic uses "import/export vrf" commands to perform route leaks between VRF on the r1 router. The same result can be achieved by using the "route-target import / export" commands. Copy bgp_vrf_route_leak_basic to bgp_vpnv4_route_leak_basic. Change BGP configuration to handle the route leaks with "route-target import / export". Change to retry timers. No other changes. Signed-off-by: Louis Scalbert --- .../bgp_vpnv4_route_leak_basic/r1/frr.conf | 75 +++ .../bgp_vpnv4_route_leak_basic/setup_vrfs | 17 + .../test_bgp_vpnv4_route_leak_basic.py | 517 ++++++++++++++++++ 3 files changed, 609 insertions(+) create mode 100644 tests/topotests/bgp_vpnv4_route_leak_basic/r1/frr.conf create mode 100644 tests/topotests/bgp_vpnv4_route_leak_basic/setup_vrfs create mode 100644 tests/topotests/bgp_vpnv4_route_leak_basic/test_bgp_vpnv4_route_leak_basic.py diff --git a/tests/topotests/bgp_vpnv4_route_leak_basic/r1/frr.conf b/tests/topotests/bgp_vpnv4_route_leak_basic/r1/frr.conf new file mode 100644 index 000000000000..d3ababde3a81 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_route_leak_basic/r1/frr.conf @@ -0,0 +1,75 @@ +int dummy0 + ip address 10.0.4.1/24 + no shut +! +int dummy1 + ip address 10.0.0.1/24 + no shut +! +int dummy2 + ip address 10.0.1.1/24 + no shut +! +int dummy3 + ip address 10.0.2.1/24 + no shut +! +int dummy4 + ip address 10.0.3.1/24 + no shut +! +int EVA + no shut +! +int DONNA + no shut +! +ip router-id 10.0.4.1 +! 
+router bgp 99 + no bgp ebgp-requires-policy + address-family ipv4 unicast + redistribute connected + rd vpn export 10.0.4.1:1 + rt vpn export 10.0.4.1:1 + rt vpn import 10.0.4.1:101 + export vpn + import vpn + ! +! +router bgp 99 vrf DONNA + no bgp ebgp-requires-policy + address-family ipv4 unicast + redistribute connected + label vpn export 101 + rd vpn export 10.0.4.1:1 + rt vpn export 10.0.4.1:101 + rt vpn import 10.0.4.1:1 10.0.4.1:102 10.0.4.1:103 + export vpn + import vpn + ! +! +router bgp 99 vrf EVA + no bgp ebgp-requires-policy + address-family ipv4 unicast + redistribute connected + label vpn export 102 + rd vpn export 10.0.4.1:1 + rt vpn export 10.0.4.1:102 + rt vpn import 10.0.4.1:101 10.0.4.1:103 + export vpn + import vpn + ! +! +router bgp 99 vrf ZITA + no bgp ebgp-requires-policy + no bgp network import-check + address-family ipv4 unicast + network 172.16.101.0/24 + label vpn export 103 + rd vpn export 10.0.4.1:1 + rt vpn export 10.0.4.1:103 + export vpn + import vpn + ! +! diff --git a/tests/topotests/bgp_vpnv4_route_leak_basic/setup_vrfs b/tests/topotests/bgp_vpnv4_route_leak_basic/setup_vrfs new file mode 100644 index 000000000000..f62c5cd21115 --- /dev/null +++ b/tests/topotests/bgp_vpnv4_route_leak_basic/setup_vrfs @@ -0,0 +1,17 @@ +#!/bin/bash + +ip link add DONNA type vrf table 1001 +ip link add EVA type vrf table 1002 + +ip link add dummy0 type dummy # vrf default +ip link add dummy1 type dummy +ip link add dummy2 type dummy +ip link add dummy3 type dummy +ip link add dummy4 type dummy + +ip link set dummy1 master DONNA +ip link set dummy2 master EVA +ip link set dummy3 master DONNA +ip link set dummy4 master EVA + + diff --git a/tests/topotests/bgp_vpnv4_route_leak_basic/test_bgp_vpnv4_route_leak_basic.py b/tests/topotests/bgp_vpnv4_route_leak_basic/test_bgp_vpnv4_route_leak_basic.py new file mode 100644 index 000000000000..a44f07b5600e --- /dev/null +++ b/tests/topotests/bgp_vpnv4_route_leak_basic/test_bgp_vpnv4_route_leak_basic.py @@ -0,0 +1,517 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_bgp_vpnv4_route_leak_basic.py +# +# Copyright (c) 2018 Cumulus Networks, Inc. +# Donald Sharp +# Copyright (c) 2024 6WIND SAS +# + +""" +Test basic VPNv4 route leaking +""" + +import os +import sys +from functools import partial +import pytest + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from lib.checkping import check_ping + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + "Build function" + + for routern in range(1, 2): + tgen.add_router("r{}".format(routern)) + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + # For all registered routers, load the unified configuration file + for rname, router in tgen.routers().items(): + router.run("/bin/bash {}/setup_vrfs".format(CWD)) + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + # After loading the configurations, this function loads configured daemons. + tgen.start_router() + # tgen.mininet_cli() + + +def teardown_module(mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. 
+ tgen.stop_topology() + + +def test_vrf_route_leak_donna(): + logger.info("Ensure that routes are leaked back and forth") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + # Test DONNA VRF. + expect = { + "10.0.0.0/24": [ + { + "protocol": "connected", + } + ], + "10.0.1.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "EVA", + "vrf": "EVA", + "active": True, + }, + ], + }, + ], + "10.0.2.0/24": [{"protocol": "connected"}], + "10.0.3.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "EVA", + "vrf": "EVA", + "active": True, + }, + ], + }, + ], + "10.0.4.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "dummy0", + "vrf": "default", + "active": True, + }, + ], + }, + ], + "172.16.101.0/24": [ + { + "protocol": "bgp", + "selected": None, + "nexthops": [ + { + "fib": None, + "interfaceName": "unknown", + "vrf": "Unknown", + "active": None, + }, + ], + }, + ], + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect + ) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF DONNA check failed:\n{}".format(diff) + + +def test_vrf_route_leak_eva(): + logger.info("Ensure that routes are leaked back and forth") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + # Test EVA VRF. + expect = { + "10.0.0.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "DONNA", + "vrf": "DONNA", + "active": True, + }, + ], + }, + ], + "10.0.1.0/24": [ + { + "protocol": "connected", + } + ], + "10.0.2.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "DONNA", + "vrf": "DONNA", + "active": True, + }, + ], + }, + ], + "10.0.3.0/24": [ + { + "protocol": "connected", + } + ], + "172.16.101.0/24": [ + { + "protocol": "bgp", + "selected": None, + "nexthops": [ + { + "fib": None, + "interfaceName": "unknown", + "vrf": "Unknown", + "active": None, + }, + ], + }, + ], + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip route vrf EVA json", expect + ) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF EVA check failed:\n{}".format(diff) + + +def test_vrf_route_leak_default(): + logger.info("Ensure that routes are leaked back and forth") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + # Test default VRF. 
+ expect = { + "10.0.0.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "DONNA", + "vrf": "DONNA", + "active": True, + }, + ], + }, + ], + "10.0.2.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "DONNA", + "vrf": "DONNA", + "active": True, + }, + ], + }, + ], + "10.0.4.0/24": [ + { + "protocol": "connected", + } + ], + } + + test_func = partial(topotest.router_json_cmp, r1, "show ip route json", expect) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF default check failed:\n{}".format(diff) + + +def test_ping(): + "Simple ping tests" + + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + logger.info("Ping from default to DONNA") + check_ping("r1", "10.0.0.1", True, 10, 0.5, source_addr="10.0.4.1") + + +def test_vrf_route_leak_donna_after_eva_down(): + logger.info("Ensure that route states change after EVA interface goes down") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r1.vtysh_cmd( + """ +configure +interface EVA + shutdown +""" + ) + + # Test DONNA VRF. + expect = { + "10.0.1.0/24": [ + { + "protocol": "bgp", + "selected": None, + "nexthops": [ + { + "fib": None, + "interfaceName": "EVA", + "vrf": "EVA", + "active": None, + }, + ], + }, + ], + "10.0.3.0/24": [ + { + "protocol": "bgp", + "selected": None, + "nexthops": [ + { + "fib": None, + "interfaceName": "EVA", + "vrf": "EVA", + "active": None, + }, + ], + }, + ], + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect + ) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF DONNA check failed:\n{}".format(diff) + + """ + Check that "show ip route vrf DONNA json" and the JSON at key "DONNA" of + "show ip route vrf all json" gives the same result. + """ + + def check_vrf_table(router, vrf, expect): + output = router.vtysh_cmd("show ip route vrf all json", isjson=True) + vrf_table = output.get(vrf, {}) + + return topotest.json_cmp(vrf_table, expect) + + test_func = partial(check_vrf_table, r1, "DONNA", expect) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF DONNA check failed:\n{}".format(diff) + + +def test_vrf_route_leak_donna_after_eva_up(): + logger.info("Ensure that route states change after EVA interface goes up") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r1.vtysh_cmd( + """ +configure +interface EVA + no shutdown +""" + ) + + # Test DONNA VRF. 
+ expect = { + "10.0.1.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "EVA", + "vrf": "EVA", + "active": True, + }, + ], + }, + ], + "10.0.3.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "EVA", + "vrf": "EVA", + "active": True, + }, + ], + }, + ], + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect + ) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF DONNA check failed:\n{}".format(diff) + + +def test_vrf_route_leak_donna_add_vrf_zita(): + logger.info("Add VRF ZITA and ensure that the route from VRF ZITA is updated") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r1.cmd("ip link add ZITA type vrf table 1003") + + # Test DONNA VRF. + expect = { + "172.16.101.0/24": [ + { + "protocol": "bgp", + "selected": None, + "nexthops": [ + { + "fib": None, + "interfaceName": "ZITA", + "vrf": "ZITA", + "active": None, + }, + ], + }, + ], + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect + ) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF DONNA check failed:\n{}".format(diff) + + +def test_vrf_route_leak_donna_set_zita_up(): + logger.info("Set VRF ZITA up and ensure that the route from VRF ZITA is updated") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r1.vtysh_cmd( + """ +configure +interface ZITA + no shutdown +""" + ) + + # Test DONNA VRF. + expect = { + "172.16.101.0/24": [ + { + "protocol": "bgp", + "selected": True, + "nexthops": [ + { + "fib": True, + "interfaceName": "ZITA", + "vrf": "ZITA", + "active": True, + }, + ], + }, + ], + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect + ) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF DONNA check failed:\n{}".format(diff) + + +def test_vrf_route_leak_donna_delete_vrf_zita(): + logger.info("Delete VRF ZITA and ensure that the route from VRF ZITA is deleted") + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r1.cmd("ip link delete ZITA") + + # Test DONNA VRF. + expect = { + "172.16.101.0/24": None, + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip route vrf DONNA json", expect + ) + result, diff = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result, "BGP VRF DONNA check failed:\n{}".format(diff) + + +def test_memory_leak(): + "Run the memory leak test and report results." + tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) From 21d1e85db584a5707e8323307df7044017356c10 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Thu, 26 May 2022 09:54:27 -0300 Subject: [PATCH 19/69] pimd,yang: log MSDP neighbor events Move MSDP neighbor events global debug to per PIM instance. 
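With this change the knob is configured under the per-VRF PIM router node instead
of being enabled globally through the debug command; a minimal sketch (default VRF):

> router pim
>  msdp log neighbor-events

Peer creation/deletion, mesh-group changes and peer state transitions for that
instance are then reported through zlog_info, in the form
"MSDP peer <address> state changed to <state>".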
Signed-off-by: Rafael Zalamena --- pimd/pim_cmd.c | 16 +++++++ pimd/pim_instance.c | 5 ++ pimd/pim_instance.h | 7 +++ pimd/pim_msdp.c | 106 ++++++++++++++++++------------------------- pimd/pim_msdp.h | 1 + pimd/pim_nb.c | 6 +++ pimd/pim_nb.h | 1 + pimd/pim_nb_config.c | 30 ++++++++++++ yang/frr-pim.yang | 7 +++ 9 files changed, 117 insertions(+), 62 deletions(-) diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index 934da2d53e67..e23492ca3b4f 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -8267,6 +8267,21 @@ DEFUN (show_ip_msdp_sa_sg_vrf_all, return CMD_SUCCESS; } +DEFPY(msdp_log_neighbor_changes, msdp_log_neighbor_changes_cmd, + "[no] msdp log neighbor-events", + NO_STR + MSDP_STR + "MSDP log messages\n" + "MSDP log neighbor event messages\n") +{ + char xpath_value[XPATH_MAXLEN + 32]; + + snprintf(xpath_value, sizeof(xpath_value), "%s/msdp/log-neighbor-events", VTY_CURR_XPATH); + nb_cli_enqueue_change(vty, xpath_value, no ? NB_OP_DESTROY : NB_OP_MODIFY, "true"); + + return nb_cli_apply_changes(vty, NULL); +} + struct pim_sg_cache_walk_data { struct vty *vty; json_object *json; @@ -8898,6 +8913,7 @@ void pim_cmd_init(void) install_element(PIM_NODE, &pim_msdp_mesh_group_source_cmd); install_element(PIM_NODE, &no_pim_msdp_mesh_group_source_cmd); install_element(PIM_NODE, &no_pim_msdp_mesh_group_cmd); + install_element(PIM_NODE, &msdp_log_neighbor_changes_cmd); install_element(PIM_NODE, &pim_bsr_candidate_rp_cmd); install_element(PIM_NODE, &pim_bsr_candidate_rp_group_cmd); diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c index e18ae97d6fcc..bec6e49e1896 100644 --- a/pimd/pim_instance.c +++ b/pimd/pim_instance.c @@ -272,3 +272,8 @@ void pim_vrf_terminate(void) vrf_terminate(); } + +bool pim_msdp_log_neighbor_events(const struct pim_instance *pim) +{ + return (pim->log_flags & PIM_MSDP_LOG_NEIGHBOR_EVENTS); +} diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h index 1b7815d86c51..a4e550c9d00b 100644 --- a/pimd/pim_instance.h +++ b/pimd/pim_instance.h @@ -192,6 +192,11 @@ struct pim_instance { uint64_t gm_rx_drop_sys; + /** Log information flags. */ + uint32_t log_flags; +/** Log neighbor event messages. 
*/ +#define PIM_MSDP_LOG_NEIGHBOR_EVENTS 0x01 + bool stopping; #if PIM_IPV == 6 @@ -218,4 +223,6 @@ extern struct pim_router *router; struct pim_instance *pim_get_pim_instance(vrf_id_t vrf_id); +extern bool pim_msdp_log_neighbor_events(const struct pim_instance *pim); + #endif diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index 0c87b3f48548..fc07d9239d1e 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -744,7 +744,7 @@ static void pim_msdp_peer_state_chg_log(struct pim_msdp_peer *mp) char state_str[PIM_MSDP_STATE_STRLEN]; pim_msdp_state_dump(mp->state, state_str, sizeof(state_str)); - zlog_debug("MSDP peer %s state chg to %s", mp->key_str, state_str); + zlog_info("MSDP peer %s state changed to %s", mp->key_str, state_str); } /* MSDP Connection State Machine actions (defined in RFC-3618:Sec-11.2) */ @@ -753,9 +753,8 @@ static void pim_msdp_peer_state_chg_log(struct pim_msdp_peer *mp) static void pim_msdp_peer_connect(struct pim_msdp_peer *mp) { mp->state = PIM_MSDP_CONNECTING; - if (PIM_DEBUG_MSDP_EVENTS) { + if (pim_msdp_log_neighbor_events(mp->pim)) pim_msdp_peer_state_chg_log(mp); - } pim_msdp_peer_cr_timer_setup(mp, true /* start */); } @@ -764,9 +763,8 @@ static void pim_msdp_peer_connect(struct pim_msdp_peer *mp) static void pim_msdp_peer_listen(struct pim_msdp_peer *mp) { mp->state = PIM_MSDP_LISTEN; - if (PIM_DEBUG_MSDP_EVENTS) { + if (pim_msdp_log_neighbor_events(mp->pim)) pim_msdp_peer_state_chg_log(mp); - } /* this is interntionally asymmetric i.e. we set up listen-socket when * the @@ -790,9 +788,8 @@ void pim_msdp_peer_established(struct pim_msdp_peer *mp) mp->state = PIM_MSDP_ESTABLISHED; mp->uptime = pim_time_monotonic_sec(); - if (PIM_DEBUG_MSDP_EVENTS) { + if (pim_msdp_log_neighbor_events(mp->pim)) pim_msdp_peer_state_chg_log(mp); - } /* stop retry timer on active peers */ pim_msdp_peer_cr_timer_setup(mp, false /* start */); @@ -816,9 +813,9 @@ void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state) ++mp->est_flaps; } mp->state = PIM_MSDP_INACTIVE; - if (PIM_DEBUG_MSDP_EVENTS) { + + if (pim_msdp_log_neighbor_events(mp->pim)) pim_msdp_peer_state_chg_log(mp); - } } if (PIM_DEBUG_MSDP_INTERNAL) { @@ -851,10 +848,10 @@ void pim_msdp_peer_stop_tcp_conn(struct pim_msdp_peer *mp, bool chg_state) /* RFC-3618:Sec-5.6 - stop the peer tcp connection and startover */ void pim_msdp_peer_reset_tcp_conn(struct pim_msdp_peer *mp, const char *rc_str) { - if (PIM_DEBUG_EVENTS) { - zlog_debug("MSDP peer %s tcp reset %s", mp->key_str, rc_str); - snprintf(mp->last_reset, sizeof(mp->last_reset), "%s", rc_str); - } + if (pim_msdp_log_neighbor_events(mp->pim)) + zlog_info("MSDP peer %s tcp reset %s", mp->key_str, rc_str); + + snprintf(mp->last_reset, sizeof(mp->last_reset), "%s", rc_str); /* close the connection and transition to listening or connecting */ pim_msdp_peer_stop_tcp_conn(mp, true /* chg_state */); @@ -865,12 +862,6 @@ void pim_msdp_peer_reset_tcp_conn(struct pim_msdp_peer *mp, const char *rc_str) } } -static void pim_msdp_peer_timer_expiry_log(struct pim_msdp_peer *mp, - const char *timer_str) -{ - zlog_debug("MSDP peer %s %s timer expired", mp->key_str, timer_str); -} - /* RFC-3618:Sec-5.4 - peer hold timer */ static void pim_msdp_peer_hold_timer_cb(struct event *t) { @@ -878,17 +869,16 @@ static void pim_msdp_peer_hold_timer_cb(struct event *t) mp = EVENT_ARG(t); - if (PIM_DEBUG_MSDP_EVENTS) { - pim_msdp_peer_timer_expiry_log(mp, "hold"); - } + if (pim_msdp_log_neighbor_events(mp->pim)) + zlog_info("MSDP peer %s hold timer expired", mp->key_str); if 
(mp->state != PIM_MSDP_ESTABLISHED) { return; } - if (PIM_DEBUG_MSDP_EVENTS) { + if (pim_msdp_log_neighbor_events(mp->pim)) pim_msdp_peer_state_chg_log(mp); - } + pim_msdp_peer_reset_tcp_conn(mp, "ht-expired"); } @@ -910,9 +900,8 @@ static void pim_msdp_peer_ka_timer_cb(struct event *t) mp = EVENT_ARG(t); - if (PIM_DEBUG_MSDP_EVENTS) { - pim_msdp_peer_timer_expiry_log(mp, "ka"); - } + if (pim_msdp_log_neighbor_events(mp->pim)) + zlog_info("MSDP peer %s keep alive timer expired", mp->key_str); pim_msdp_pkt_ka_tx(mp); pim_msdp_peer_ka_timer_setup(mp, true /* start */); @@ -970,9 +959,8 @@ static void pim_msdp_peer_cr_timer_cb(struct event *t) mp = EVENT_ARG(t); - if (PIM_DEBUG_MSDP_EVENTS) { - pim_msdp_peer_timer_expiry_log(mp, "connect-retry"); - } + if (pim_msdp_log_neighbor_events(mp->pim)) + zlog_info("MSDP peer %s connection retry timer expired", mp->key_str); if (mp->state != PIM_MSDP_CONNECTING || PIM_MSDP_PEER_IS_LISTENER(mp)) { return; @@ -1051,8 +1039,8 @@ struct pim_msdp_peer *pim_msdp_peer_add(struct pim_instance *pim, mp = hash_get(pim->msdp.peer_hash, mp, hash_alloc_intern); listnode_add_sort(pim->msdp.peer_list, mp); - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP peer %s created", mp->key_str); + if (pim_msdp_log_neighbor_events(pim)) { + zlog_info("MSDP peer %s created", mp->key_str); pim_msdp_peer_state_chg_log(mp); } @@ -1116,9 +1104,8 @@ void pim_msdp_peer_del(struct pim_msdp_peer **mp) listnode_delete((*mp)->pim->msdp.peer_list, *mp); hash_release((*mp)->pim->msdp.peer_hash, *mp); - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP peer %s deleted", (*mp)->key_str); - } + if (pim_msdp_log_neighbor_events((*mp)->pim)) + zlog_info("MSDP peer %s deleted", (*mp)->key_str); /* free up any associated memory */ pim_msdp_peer_free(*mp); @@ -1193,10 +1180,8 @@ void pim_msdp_mg_free(struct pim_instance *pim, struct pim_msdp_mg **mgp) for (ALL_LIST_ELEMENTS((*mgp)->mbr_list, n, nn, mbr)) pim_msdp_mg_mbr_del((*mgp), mbr); - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP mesh-group %s deleted", - (*mgp)->mesh_group_name); - } + if (pim_msdp_log_neighbor_events(pim)) + zlog_info("MSDP mesh-group %s deleted", (*mgp)->mesh_group_name); XFREE(MTYPE_PIM_MSDP_MG_NAME, (*mgp)->mesh_group_name); @@ -1213,15 +1198,14 @@ struct pim_msdp_mg *pim_msdp_mg_new(struct pim_instance *pim, struct pim_msdp_mg *mg; mg = XCALLOC(MTYPE_PIM_MSDP_MG, sizeof(*mg)); - + mg->pim = pim; mg->mesh_group_name = XSTRDUP(MTYPE_PIM_MSDP_MG_NAME, mesh_group_name); mg->mbr_list = list_new(); mg->mbr_list->del = (void (*)(void *))pim_msdp_mg_mbr_free; mg->mbr_list->cmp = (int (*)(void *, void *))pim_msdp_mg_mbr_comp; - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP mesh-group %s created", mg->mesh_group_name); - } + if (pim_msdp_log_neighbor_events(pim)) + zlog_info("MSDP mesh-group %s created", mg->mesh_group_name); SLIST_INSERT_HEAD(&pim->msdp.mglist, mg, mg_entry); @@ -1255,12 +1239,10 @@ void pim_msdp_mg_mbr_del(struct pim_msdp_mg *mg, struct pim_msdp_mg_mbr *mbr) } listnode_delete(mg->mbr_list, mbr); - if (PIM_DEBUG_MSDP_EVENTS) { - char ip_str[INET_ADDRSTRLEN]; - pim_inet4_dump("", mbr->mbr_ip, ip_str, sizeof(ip_str)); - zlog_debug("MSDP mesh-group %s mbr %s deleted", - mg->mesh_group_name, ip_str); - } + if (pim_msdp_log_neighbor_events(mg->pim)) + zlog_info("MSDP mesh-group %s neighbor %pI4 deleted", mg->mesh_group_name, + &mbr->mbr_ip); + pim_msdp_mg_mbr_free(mbr); if (mg->mbr_cnt) { --mg->mbr_cnt; @@ -1277,10 +1259,9 @@ static void pim_msdp_src_del(struct pim_msdp_mg *mg) if (mbr->mp) 
pim_msdp_peer_del(&mbr->mp); } - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP mesh-group %s src cleared", - mg->mesh_group_name); - } + + if (pim_msdp_log_neighbor_events(mg->pim)) + zlog_info("MSDP mesh-group %s source cleared", mg->mesh_group_name); } /*********************** MSDP feature APIs *********************************/ @@ -1292,6 +1273,9 @@ int pim_msdp_config_write(struct pim_instance *pim, struct vty *vty) char src_str[INET_ADDRSTRLEN]; int count = 0; + if (pim_msdp_log_neighbor_events(pim)) + vty_out(vty, " msdp log neighbor-events\n"); + if (SLIST_EMPTY(&pim->msdp.mglist)) return count; @@ -1426,9 +1410,8 @@ void pim_msdp_mg_src_add(struct pim_instance *pim, struct pim_msdp_mg *mg, /* No new address, disable everyone. */ if (ai->s_addr == INADDR_ANY) { - if (PIM_DEBUG_MSDP_EVENTS) - zlog_debug("MSDP mesh-group %s src unset", - mg->mesh_group_name); + if (pim_msdp_log_neighbor_events(pim)) + zlog_info("MSDP mesh-group %s source unset", mg->mesh_group_name); return; } @@ -1437,9 +1420,8 @@ void pim_msdp_mg_src_add(struct pim_instance *pim, struct pim_msdp_mg *mg, mbr->mp = pim_msdp_peer_add(pim, &mbr->mbr_ip, &mg->src_ip, mg->mesh_group_name); - if (PIM_DEBUG_MSDP_EVENTS) - zlog_debug("MSDP mesh-group %s src %pI4 set", - mg->mesh_group_name, &mg->src_ip); + if (pim_msdp_log_neighbor_events(pim)) + zlog_info("MSDP mesh-group %s source %pI4 set", mg->mesh_group_name, &mg->src_ip); } struct pim_msdp_mg_mbr *pim_msdp_mg_mbr_add(struct pim_instance *pim, @@ -1457,9 +1439,9 @@ struct pim_msdp_mg_mbr *pim_msdp_mg_mbr_add(struct pim_instance *pim, mbr->mp = pim_msdp_peer_add(pim, &mbr->mbr_ip, &mg->src_ip, mg->mesh_group_name); - if (PIM_DEBUG_MSDP_EVENTS) - zlog_debug("MSDP mesh-group %s mbr %pI4 created", - mg->mesh_group_name, &mbr->mbr_ip); + if (pim_msdp_log_neighbor_events(pim)) + zlog_info("MSDP mesh-group %s neighbor %pI4 created", mg->mesh_group_name, + &mbr->mbr_ip); ++mg->mbr_cnt; diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h index cd1f6647f69f..0a7c74e438e7 100644 --- a/pimd/pim_msdp.h +++ b/pimd/pim_msdp.h @@ -165,6 +165,7 @@ struct pim_msdp_mg { struct in_addr src_ip; uint32_t mbr_cnt; struct list *mbr_list; + struct pim_instance *pim; /** Belongs to PIM instance list. 
*/ SLIST_ENTRY(pim_msdp_mg) mg_entry; diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c index 1dc66be82d7d..29e8f9a782ce 100644 --- a/pimd/pim_nb.c +++ b/pimd/pim_nb.c @@ -129,6 +129,12 @@ const struct frr_yang_module_info frr_pim_info = { .modify = pim_msdp_connection_retry_modify, } }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-neighbor-events", + .cbs = { + .modify = pim_msdp_log_neighbor_events_modify, + } + }, { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups", .cbs = { diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h index b45af3d589da..48dc6574a8ec 100644 --- a/pimd/pim_nb.h +++ b/pimd/pim_nb.h @@ -54,6 +54,7 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss int pim_msdp_hold_time_modify(struct nb_cb_modify_args *args); int pim_msdp_keep_alive_modify(struct nb_cb_modify_args *args); int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args); +int pim_msdp_log_neighbor_events_modify(struct nb_cb_modify_args *args); int pim_msdp_mesh_group_create(struct nb_cb_create_args *args); int pim_msdp_mesh_group_destroy(struct nb_cb_destroy_args *args); int pim_msdp_mesh_group_members_create(struct nb_cb_create_args *args); diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index 87338f37c0ea..fa1c744cfcd3 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -1064,8 +1064,38 @@ pim6_msdp_err(routing_control_plane_protocols_control_plane_protocol_pim_address pim6_msdp_err(pim_msdp_peer_authentication_type_modify, nb_cb_modify_args); pim6_msdp_err(pim_msdp_peer_authentication_key_modify, nb_cb_modify_args); pim6_msdp_err(pim_msdp_peer_authentication_key_destroy, nb_cb_destroy_args); +pim6_msdp_err(pim_msdp_log_neighbor_events_modify, nb_cb_modify_args); #if PIM_IPV != 6 +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-neighbor-events + */ +int pim_msdp_log_neighbor_events_modify(struct nb_cb_modify_args *args) +{ + struct pim_instance *pim; + struct vrf *vrf; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + /* NOTHING */ + break; + + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (yang_dnode_get_bool(args->dnode, NULL)) + SET_FLAG(pim->log_flags, PIM_MSDP_LOG_NEIGHBOR_EVENTS); + else + UNSET_FLAG(pim->log_flags, PIM_MSDP_LOG_NEIGHBOR_EVENTS); + break; + } + + return NB_OK; +} + /* * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang index 6a6c52185ddb..d1e95cef2e8b 100644 --- a/yang/frr-pim.yang +++ b/yang/frr-pim.yang @@ -250,6 +250,13 @@ module frr-pim { container msdp { description "Global MSDP configuration."; uses msdp-timers; + + leaf log-neighbor-events { + type boolean; + default false; + description + "Log all MSDP neighbor related events."; + } } list msdp-mesh-groups { From fc39bfaa0a2f3556d3986a01828e0647cb47d747 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Thu, 26 May 2022 11:22:27 -0300 Subject: [PATCH 20/69] pimd,yang: log MSDP SA events Add new command to log all SA events. 
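For context, both logging toggles (this patch's SA events and the previous patch's neighbor events) are plain per-VRF knobs under the PIM node; a minimal configuration sketch, mirroring the topotest configuration added later in this series:

```
router pim
 msdp log neighbor-events
 msdp log sa-events
!
```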
Signed-off-by: Rafael Zalamena --- pimd/pim_cmd.c | 17 +++++++++ pimd/pim_instance.c | 5 +++ pimd/pim_instance.h | 3 ++ pimd/pim_msdp.c | 80 +++++++++++++++++------------------------- pimd/pim_msdp_packet.c | 27 +++++++------- pimd/pim_nb.c | 6 ++++ pimd/pim_nb.h | 1 + pimd/pim_nb_config.c | 30 ++++++++++++++++ yang/frr-pim.yang | 7 ++++ 9 files changed, 114 insertions(+), 62 deletions(-) diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index e23492ca3b4f..bd79ddadf442 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -8282,6 +8282,22 @@ DEFPY(msdp_log_neighbor_changes, msdp_log_neighbor_changes_cmd, return nb_cli_apply_changes(vty, NULL); } +DEFPY(msdp_log_sa_changes, msdp_log_sa_changes_cmd, + "[no] msdp log sa-events", + NO_STR + MSDP_STR + "MSDP log messages\n" + "MSDP log SA event messages\n") +{ + char xpath_value[XPATH_MAXLEN + 32]; + + snprintf(xpath_value, sizeof(xpath_value), "%s/msdp/log-sa-events", VTY_CURR_XPATH); + nb_cli_enqueue_change(vty, xpath_value, no ? NB_OP_DESTROY : NB_OP_MODIFY, "true"); + + return nb_cli_apply_changes(vty, NULL); +} + + struct pim_sg_cache_walk_data { struct vty *vty; json_object *json; @@ -8914,6 +8930,7 @@ void pim_cmd_init(void) install_element(PIM_NODE, &no_pim_msdp_mesh_group_source_cmd); install_element(PIM_NODE, &no_pim_msdp_mesh_group_cmd); install_element(PIM_NODE, &msdp_log_neighbor_changes_cmd); + install_element(PIM_NODE, &msdp_log_sa_changes_cmd); install_element(PIM_NODE, &pim_bsr_candidate_rp_cmd); install_element(PIM_NODE, &pim_bsr_candidate_rp_group_cmd); diff --git a/pimd/pim_instance.c b/pimd/pim_instance.c index bec6e49e1896..4e4e5a6ce8f9 100644 --- a/pimd/pim_instance.c +++ b/pimd/pim_instance.c @@ -277,3 +277,8 @@ bool pim_msdp_log_neighbor_events(const struct pim_instance *pim) { return (pim->log_flags & PIM_MSDP_LOG_NEIGHBOR_EVENTS); } + +bool pim_msdp_log_sa_events(const struct pim_instance *pim) +{ + return (pim->log_flags & PIM_MSDP_LOG_SA_EVENTS); +} diff --git a/pimd/pim_instance.h b/pimd/pim_instance.h index a4e550c9d00b..dab7ed269828 100644 --- a/pimd/pim_instance.h +++ b/pimd/pim_instance.h @@ -196,6 +196,8 @@ struct pim_instance { uint32_t log_flags; /** Log neighbor event messages. */ #define PIM_MSDP_LOG_NEIGHBOR_EVENTS 0x01 +/** Log SA event messages. 
*/ +#define PIM_MSDP_LOG_SA_EVENTS 0x02 bool stopping; @@ -224,5 +226,6 @@ extern struct pim_router *router; struct pim_instance *pim_get_pim_instance(vrf_id_t vrf_id); extern bool pim_msdp_log_neighbor_events(const struct pim_instance *pim); +extern bool pim_msdp_log_sa_events(const struct pim_instance *pim); #endif diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index fc07d9239d1e..a536ab6fe031 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -47,20 +47,13 @@ static int pim_msdp_mg_mbr_comp(const void *p1, const void *p2); static void pim_msdp_mg_mbr_free(struct pim_msdp_mg_mbr *mbr); /************************ SA cache management ******************************/ -static void pim_msdp_sa_timer_expiry_log(struct pim_msdp_sa *sa, - const char *timer_str) -{ - zlog_debug("MSDP SA %s %s timer expired", sa->sg_str, timer_str); -} - /* RFC-3618:Sec-5.1 - global active source advertisement timer */ static void pim_msdp_sa_adv_timer_cb(struct event *t) { struct pim_instance *pim = EVENT_ARG(t); - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA advertisement timer expired"); - } + if (pim_msdp_log_sa_events(pim)) + zlog_info("MSDP SA advertisement timer expired"); pim_msdp_sa_adv_timer_setup(pim, true /* start */); pim_msdp_pkt_sa_tx(pim); @@ -83,9 +76,8 @@ static void pim_msdp_sa_state_timer_cb(struct event *t) sa = EVENT_ARG(t); - if (PIM_DEBUG_MSDP_EVENTS) { - pim_msdp_sa_timer_expiry_log(sa, "state"); - } + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP SA %s state timer expired", sa->sg_str); pim_msdp_sa_deref(sa, PIM_MSDP_SAF_PEER); } @@ -120,9 +112,8 @@ static void pim_msdp_sa_upstream_del(struct pim_msdp_sa *sa) sa->flags &= ~PIM_MSDP_SAF_UP_DEL_IN_PROG; } - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s de-referenced SPT", sa->sg_str); - } + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP SA %s de-referenced SPT", sa->sg_str); } static bool pim_msdp_sa_upstream_add_ok(struct pim_msdp_sa *sa, @@ -185,10 +176,8 @@ static void pim_msdp_sa_upstream_update(struct pim_msdp_sa *sa, if (up && (PIM_UPSTREAM_FLAG_TEST_SRC_MSDP(up->flags))) { /* somehow we lost track of the upstream ptr? best log it */ sa->up = up; - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s SPT reference missing", - sa->sg_str); - } + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP SA %s SPT reference missing", sa->sg_str); return; } @@ -204,14 +193,11 @@ static void pim_msdp_sa_upstream_update(struct pim_msdp_sa *sa, /* should we also start the kat in parallel? 
we will need it * when the * SA ages out */ - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s referenced SPT", sa->sg_str); - } + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP SA %s referenced SPT", sa->sg_str); } else { - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s SPT reference failed", - sa->sg_str); - } + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP SA %s SPT reference failed", sa->sg_str); } } @@ -240,9 +226,8 @@ static struct pim_msdp_sa *pim_msdp_sa_new(struct pim_instance *pim, sa = hash_get(pim->msdp.sa_hash, sa, hash_alloc_intern); listnode_add_sort(pim->msdp.sa_list, sa); - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s created", sa->sg_str); - } + if (pim_msdp_log_sa_events(pim)) + zlog_info("MSDP SA %s created", sa->sg_str); return sa; } @@ -282,9 +267,8 @@ static void pim_msdp_sa_del(struct pim_msdp_sa *sa) listnode_delete(sa->pim->msdp.sa_list, sa); hash_release(sa->pim->msdp.sa_hash, sa); - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s deleted", sa->sg_str); - } + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP SA %s deleted", sa->sg_str); /* free up any associated memory */ pim_msdp_sa_free(sa); @@ -333,10 +317,9 @@ static void pim_msdp_sa_deref(struct pim_msdp_sa *sa, if ((sa->flags & PIM_MSDP_SAF_LOCAL)) { if (flags & PIM_MSDP_SAF_LOCAL) { - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s local reference removed", - sa->sg_str); - } + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP SA %s local reference removed", sa->sg_str); + if (sa->pim->msdp.local_cnt) --sa->pim->msdp.local_cnt; } @@ -346,10 +329,9 @@ static void pim_msdp_sa_deref(struct pim_msdp_sa *sa, if (flags & PIM_MSDP_SAF_PEER) { struct in_addr rp; - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s peer reference removed", - sa->sg_str); - } + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP SA %s peer reference removed", sa->sg_str); + pim_msdp_sa_state_timer_setup(sa, false /* start */); rp.s_addr = INADDR_ANY; pim_msdp_sa_peer_ip_set(sa, NULL /* mp */, rp); @@ -386,10 +368,8 @@ void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp, if (mp) { if (!(sa->flags & PIM_MSDP_SAF_PEER)) { sa->flags |= PIM_MSDP_SAF_PEER; - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s added by peer", - sa->sg_str); - } + if (pim_msdp_log_sa_events(pim)) + zlog_info("MSDP SA %s added by peer", sa->sg_str); } pim_msdp_sa_peer_ip_set(sa, mp, rp); /* start/re-start the state timer to prevent cache expiry */ @@ -403,10 +383,9 @@ void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp, if (!(sa->flags & PIM_MSDP_SAF_LOCAL)) { sa->flags |= PIM_MSDP_SAF_LOCAL; ++sa->pim->msdp.local_cnt; - if (PIM_DEBUG_MSDP_EVENTS) { - zlog_debug("MSDP SA %s added locally", - sa->sg_str); - } + if (pim_msdp_log_sa_events(pim)) + zlog_info("MSDP SA %s added locally", sa->sg_str); + /* send an immediate SA update to peers */ pim_addr_to_prefix(&grp, sa->sg.grp); rp_info = pim_rp_find_match_group(pim, &grp); @@ -710,6 +689,9 @@ bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp) return true; } + if (pim_msdp_log_sa_events(mp->pim)) + zlog_info("MSDP peer %pI4 RPF failure for %pI4", &mp->peer, &rp); + return false; } @@ -1275,6 +1257,8 @@ int pim_msdp_config_write(struct pim_instance *pim, struct vty *vty) if (pim_msdp_log_neighbor_events(pim)) vty_out(vty, " msdp log neighbor-events\n"); + if (pim_msdp_log_sa_events(pim)) + vty_out(vty, " msdp log sa-events\n"); if (SLIST_EMPTY(&pim->msdp.mglist)) return count; diff 
--git a/pimd/pim_msdp_packet.c b/pimd/pim_msdp_packet.c index 27f4966a1cc3..f66a941ee312 100644 --- a/pimd/pim_msdp_packet.c +++ b/pimd/pim_msdp_packet.c @@ -487,9 +487,8 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim, } if (msdp_peer_sa_filter(mp, sa)) { - if (PIM_DEBUG_MSDP_EVENTS) - zlog_debug("MSDP peer %pI4 filter SA out %s", - &mp->peer, sa->sg_str); + if (pim_msdp_log_sa_events(pim)) + zlog_info("MSDP peer %pI4 filter SA out %s", &mp->peer, sa->sg_str); continue; } @@ -551,9 +550,9 @@ void pim_msdp_pkt_sa_tx_one(struct pim_msdp_sa *sa) pim_msdp_pkt_sa_fill_one(sa); for (ALL_LIST_ELEMENTS_RO(sa->pim->msdp.peer_list, node, mp)) { if (msdp_peer_sa_filter(mp, sa)) { - if (PIM_DEBUG_MSDP_EVENTS) - zlog_debug("MSDP peer %pI4 filter SA out %s", - &mp->peer, sa->sg_str); + if (pim_msdp_log_sa_events(sa->pim)) + zlog_info("MSDP peer %pI4 filter SA out %s", &mp->peer, sa->sg_str); + continue; } @@ -583,9 +582,10 @@ void pim_msdp_pkt_sa_tx_one_to_one_peer(struct pim_msdp_peer *mp, /* Don't push it if filtered. */ if (msdp_peer_sa_filter(mp, &sa)) { - if (PIM_DEBUG_MSDP_EVENTS) - zlog_debug("MSDP peer %pI4 filter SA out (%pI4, %pI4)", - &mp->peer, &sa.sg.src, &sa.sg.grp); + if (pim_msdp_log_sa_events(mp->pim)) + zlog_info("MSDP peer %pI4 filter SA out (%pI4, %pI4)", &mp->peer, + &sa.sg.src, &sa.sg.grp); + return; } @@ -641,11 +641,10 @@ static void pim_msdp_pkt_sa_rx_one(struct pim_msdp_peer *mp, struct in_addr rp) /* Filter incoming SA with configured access list. */ if (mp->acl_in) { acl = access_list_lookup(AFI_IP, mp->acl_in); - if (msdp_access_list_apply(acl, &sg.src, &sg.grp) == - FILTER_DENY) { - if (PIM_DEBUG_MSDP_EVENTS) - zlog_debug("MSDP peer %pI4 filter SA in (%pI4, %pI4)", - &mp->peer, &sg.src, &sg.grp); + if (msdp_access_list_apply(acl, &sg.src, &sg.grp) == FILTER_DENY) { + if (pim_msdp_log_sa_events(mp->pim)) + zlog_info("MSDP peer %pI4 filter SA in (%pI4, %pI4)", &mp->peer, + &sg.src, &sg.grp); return; } } diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c index 29e8f9a782ce..e9221125243e 100644 --- a/pimd/pim_nb.c +++ b/pimd/pim_nb.c @@ -135,6 +135,12 @@ const struct frr_yang_module_info frr_pim_info = { .modify = pim_msdp_log_neighbor_events_modify, } }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-sa-events", + .cbs = { + .modify = pim_msdp_log_sa_events_modify, + } + }, { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups", .cbs = { diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h index 48dc6574a8ec..61cc9c162811 100644 --- a/pimd/pim_nb.h +++ b/pimd/pim_nb.h @@ -55,6 +55,7 @@ int pim_msdp_hold_time_modify(struct nb_cb_modify_args *args); int pim_msdp_keep_alive_modify(struct nb_cb_modify_args *args); int pim_msdp_connection_retry_modify(struct nb_cb_modify_args *args); int pim_msdp_log_neighbor_events_modify(struct nb_cb_modify_args *args); +int pim_msdp_log_sa_events_modify(struct nb_cb_modify_args *args); int pim_msdp_mesh_group_create(struct nb_cb_create_args *args); int pim_msdp_mesh_group_destroy(struct nb_cb_destroy_args *args); int pim_msdp_mesh_group_members_create(struct nb_cb_create_args *args); diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index fa1c744cfcd3..659b110307ed 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -1065,6 +1065,7 @@ pim6_msdp_err(pim_msdp_peer_authentication_type_modify, nb_cb_modify_args); pim6_msdp_err(pim_msdp_peer_authentication_key_modify, nb_cb_modify_args); 
pim6_msdp_err(pim_msdp_peer_authentication_key_destroy, nb_cb_destroy_args); pim6_msdp_err(pim_msdp_log_neighbor_events_modify, nb_cb_modify_args); +pim6_msdp_err(pim_msdp_log_sa_events_modify, nb_cb_modify_args); #if PIM_IPV != 6 /* @@ -1096,6 +1097,35 @@ int pim_msdp_log_neighbor_events_modify(struct nb_cb_modify_args *args) return NB_OK; } +/* + * XPath: + * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp/log-sa-events + */ +int pim_msdp_log_sa_events_modify(struct nb_cb_modify_args *args) +{ + struct pim_instance *pim; + struct vrf *vrf; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + /* NOTHING */ + break; + + case NB_EV_APPLY: + vrf = nb_running_get_entry(args->dnode, NULL, true); + pim = vrf->info; + if (yang_dnode_get_bool(args->dnode, NULL)) + SET_FLAG(pim->log_flags, PIM_MSDP_LOG_SA_EVENTS); + else + UNSET_FLAG(pim->log_flags, PIM_MSDP_LOG_SA_EVENTS); + break; + } + + return NB_OK; +} + /* * XPath: * /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-mesh-groups diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang index d1e95cef2e8b..c3c953b76b42 100644 --- a/yang/frr-pim.yang +++ b/yang/frr-pim.yang @@ -257,6 +257,13 @@ module frr-pim { description "Log all MSDP neighbor related events."; } + + leaf log-sa-events { + type boolean; + default false; + description + "Log all MSDP SA related events."; + } } list msdp-mesh-groups { From 0b0648f15c0ced171dc29961ea374f11dd6a66cc Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Thu, 21 Nov 2024 10:23:37 -0300 Subject: [PATCH 21/69] topotests: test new log toggle Test MSDP new log toggle: the MSDP peer connection state and SA events should be logged. Signed-off-by: Rafael Zalamena --- tests/topotests/msdp_topo1/r1/pimd.conf | 2 ++ tests/topotests/msdp_topo1/test_msdp_topo1.py | 20 +++++++++++++++++++ 2 files changed, 22 insertions(+) diff --git a/tests/topotests/msdp_topo1/r1/pimd.conf b/tests/topotests/msdp_topo1/r1/pimd.conf index 3c116a003bea..1548aed05e18 100644 --- a/tests/topotests/msdp_topo1/r1/pimd.conf +++ b/tests/topotests/msdp_topo1/r1/pimd.conf @@ -22,5 +22,7 @@ ip pim rp 10.254.254.1 ip pim join-prune-interval 5 ! router pim + msdp log neighbor-events + msdp log sa-events msdp peer 192.168.0.2 password 1234 ! diff --git a/tests/topotests/msdp_topo1/test_msdp_topo1.py b/tests/topotests/msdp_topo1/test_msdp_topo1.py index ff80052d2665..1c97f7cb1e47 100755 --- a/tests/topotests/msdp_topo1/test_msdp_topo1.py +++ b/tests/topotests/msdp_topo1/test_msdp_topo1.py @@ -17,6 +17,7 @@ import sys import json from functools import partial +import re import pytest # Save the Current Working Directory to find configuration files. @@ -510,6 +511,25 @@ def test_msdp_sa_filter(): assert val is None, "multicast route convergence failure" +def test_msdp_log_events(): + "Test that the enabled logs are working as expected." + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1_log = tgen.gears["r1"].net.getLog("log", "pimd") + + # Look up for informational messages that should have been enabled. + match = re.search( + "MSDP peer 192.168.1.2 state changed to established", r1_log) + assert match is not None + + match = re.search( + r"MSDP SA \(192.168.10.100\,229.1.2.3\) created", r1_log) + assert match is not None + + def test_memory_leak(): "Run the memory leak test and report results." 
tgen = get_topogen() From b1fbcb6d13df3cbb76f98a3554bc2dceae057ec1 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 21 Nov 2024 09:13:24 -0500 Subject: [PATCH 22/69] tools: Add bmp command to support bundle generation Add a bit of bmp data gathering for bgp support bundle generation Signed-off-by: Donald Sharp --- tools/etc/frr/support_bundle_commands.conf | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf index b3889e8784e1..4b3e0eac5973 100644 --- a/tools/etc/frr/support_bundle_commands.conf +++ b/tools/etc/frr/support_bundle_commands.conf @@ -56,6 +56,7 @@ show evpn next-hops vni all show evpn rmac vni all show evpn vni detail +show bmp CMD_LIST_END # Zebra Support Bundle Command List From 910b3840947db15251231a4da95fe8d3f9334e3d Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 21 Nov 2024 09:14:19 -0500 Subject: [PATCH 23/69] tools: Add isis command generation to support bundle When a support bundle is created no isis commands were issued to attempt to gather data about what is going on in isis. Let's rectify that Signed-off-by: Donald Sharp --- tools/etc/frr/support_bundle_commands.conf | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf index 4b3e0eac5973..0083399b132d 100644 --- a/tools/etc/frr/support_bundle_commands.conf +++ b/tools/etc/frr/support_bundle_commands.conf @@ -135,9 +135,18 @@ CMD_LIST_END # CMD_LIST_END # ISIS Support Bundle Command List -# PROC_NAME:isis -# CMD_LIST_START -# CMD_LIST_END +PROC_NAME:isis +CMD_LIST_START +show isis database detail +show isis interface detail +show isis route +show isis mpls ldp-sync +show isis mpls-te database detail +show isis mpls-te interface +show isis mpls-te router +show isis neighbor detail +show isis topology +CMD_LIST_END # BFD Support Bundle Command List # PROC_NAME:bfd From 274156f4d353841e795730b85e3643552b46b470 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 21 Nov 2024 09:15:22 -0500 Subject: [PATCH 24/69] tools: Add zebra label show commands to support bundle There were no zebra show commands related to mpls labels being generated for support bundles. Let's rectify that situation. Signed-off-by: Donald Sharp --- tools/etc/frr/support_bundle_commands.conf | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf index 0083399b132d..cd1a0a48f6ed 100644 --- a/tools/etc/frr/support_bundle_commands.conf +++ b/tools/etc/frr/support_bundle_commands.conf @@ -75,6 +75,10 @@ show ip fib show ipv6 fib show nexthop-group rib show route-map +show mpls table +show mpls fec +show mpls ldp +show mpls pseudowires show memory show interface vrf all show vrf From dca9915059254007dbd7853eb85f86889dc679e6 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 21 Nov 2024 09:16:14 -0500 Subject: [PATCH 25/69] tools: Add pim msdp show commands to support bundle The support bundle was not gathering any msdp data for pim at all. 
Let's add a bit to allow us to have more data here when a suppport bundle is generated Signed-off-by: Donald Sharp --- tools/etc/frr/support_bundle_commands.conf | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf index cd1a0a48f6ed..6a21ae3fb584 100644 --- a/tools/etc/frr/support_bundle_commands.conf +++ b/tools/etc/frr/support_bundle_commands.conf @@ -185,6 +185,9 @@ show ip pim join show ip pim state show ip pim statistics show ip pim rpf +show ip msdp vrf all mesh-group +show ip msdp vrf all peer +show ip msdp vrf all sa detail CMD_LIST_END # OSPFv3 Support Bundle Command List From 058faf605db592c3403c4ca9d409650522fbf12a Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 21 Nov 2024 09:26:52 -0500 Subject: [PATCH 26/69] tests: Ensure connected routes are installed before continuing Upon high load the ospf_instance_redistribute test can attempt to install routes with sharpd before the connected routes have fully baked themselves into zebra. Since sharpd intentionally has no retry mechanism we need to ensure that the test is waiting a small bit. Signed-off-by: Donald Sharp --- .../test_ospf_instance_redistribute.py | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py b/tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py index 6f9a58b19547..5e30cbd01371 100644 --- a/tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py +++ b/tests/topotests/ospf_instance_redistribute/test_ospf_instance_redistribute.py @@ -82,6 +82,37 @@ def test_install_sharp_instance_routes(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) + r1 = tgen.gears["r1"] + logger.info("Ensure that connected routes are actually installed") + expected = { + "192.168.100.0/24": [ + { + "prefix": "192.168.100.0/24", + "prefixLen": 24, + "protocol": "connected", + "vrfName": "default", + "selected": True, + "destSelected": True, + "installed": True, + "nexthops": [ + { + "fib": True, + "directlyConnected": True, + "interfaceName": "lo", + "active": True, + "weight": 1, + } + ], + } + ] + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip route connected json", expected + ) + + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + logger.info("Installing sharp routes") r1 = tgen.gears["r1"] r1.vtysh_cmd("sharp install route 4.5.6.7 nexthop 192.168.100.2 1") From ef650aca2752303fa3a99adc05edfefeabca24ee Mon Sep 17 00:00:00 2001 From: Corey Siltala Date: Thu, 21 Nov 2024 10:44:23 -0600 Subject: [PATCH 27/69] tools: Add pim show commands to support bundle Add more data gathering for pim and pim6 support bundles Signed-off-by: Corey Siltala --- tools/etc/frr/support_bundle_commands.conf | 101 +++++++++++++-------- 1 file changed, 64 insertions(+), 37 deletions(-) diff --git a/tools/etc/frr/support_bundle_commands.conf b/tools/etc/frr/support_bundle_commands.conf index 6a21ae3fb584..a248a1a30424 100644 --- a/tools/etc/frr/support_bundle_commands.conf +++ b/tools/etc/frr/support_bundle_commands.conf @@ -165,29 +165,51 @@ CMD_LIST_END # PIM Support Bundle Command List PROC_NAME:pim CMD_LIST_START -show ip multicast -show ip pim interface -show ip pim interface traffic -show ip pim nexthop -show ip pim neighbor -show ip pim bsr -show ip pim bsrp-info -show ip pim bsm-database -show ip pim rp-info -show ip igmp groups -show ip igmp interface -show ip igmp join 
+show ip igmp vrf all groups detail +show ip igmp vrf all interface detail +show ip igmp vrf all join +show ip igmp vrf all join-group +show ip igmp vrf all proxy show ip igmp sources +show ip igmp vrf all static-group show ip igmp statistics -show ip pim upstream -show ip mroute -show ip pim join -show ip pim state -show ip pim statistics -show ip pim rpf + +show ip mroute vrf all +show ip multicast vrf all + show ip msdp vrf all mesh-group show ip msdp vrf all peer show ip msdp vrf all sa detail + +show ip pim vrf all autorp +show ip pim bsm-database +show ip pim bsr +show ip pim bsr candidate-bsr +show ip pim bsr candidate-rp +show ip pim bsr candidate-rp-database +show ip pim bsr groups +show ip pim bsr rp-info +show ip pim channel +show ip pim group-type +show ip pim vrf all interface detail +show ip pim interface traffic +show ip pim vrf all join +show ip pim jp-agg +show ip pim local-membership +show ip pim mlag summary +show ip pim mlag vrf all interface +show ip pim vrf all mlag upstream +show ip pim vrf all neighbor detail +show ip pim nexthop +show ip pim vrf all rp-info +show ip pim vrf all rpf +show ip pim secondary +show ip pim vrf all state +show ip pim statistics +show ip pim vrf all upstream +show ip pim upstream-join-desired +show ip pim upstream-rpf +show ip pim vxlan-groups CMD_LIST_END # OSPFv3 Support Bundle Command List @@ -233,32 +255,37 @@ CMD_LIST_END #PIMv6 Support Bundle Command List PROC_NAME:pim6 CMD_LIST_START +show ipv6 mld vrf all interface detail +show ipv6 mld vrf all statistics +show ipv6 mld vrf all joins detail +show ipv6 mld vrf all groups + +show ipv6 mroute vrf all +show ipv6 multicast vrf all + +show ipv6 pim bsm-database +show ipv6 pim bsr +show ipv6 pim bsr candidate-bsr +show ipv6 pim bsr candidate-rp +show ipv6 pim bsr candidate-rp-database +show ipv6 pim bsr groups +show ipv6 pim bsr rp-info show ipv6 pim channel -show ipv6 pim interface +show ipv6 pim vrf all interface detail show ipv6 pim interface traffic -show ipv6 pim join -show ipv6 jp-agg -show ipv6 pim nexthop -show ipv6 pim nexthop-lookup -show ipv6 pim neighbor +show ipv6 pim vrf all join +show ipv6 pim jp-agg show ipv6 pim local-membership -show ipv6 pim rp-info -show ipv6 pim rpf +show ipv6 pim nexthop +show ipv6 pim vrf all neighbor detail +show ipv6 pim vrf all rp-info +show ipv6 pim vrf all rpf show ipv6 pim secondary -show ipv6 pim state +show ipv6 pim vrf all state show ipv6 pim statistics -show ipv6 pim upstream +show ipv6 pim vrf all upstream show ipv6 pim upstream-join-desired show ipv6 pim upstream-rpf -show ipv6 mld interface -show ipv6 mld statistics -show ipv6 mld joins -show ipv6 mld groups -show ipv6 multicast -show ipv6 mroute -show ipv6 pim bsr -show ipv6 pim bsrp-info -show ipv6 pim bsm-databases CMD_LIST_END #MGMT Support Bundle Command List From 7fb4c03f5b1c9cd4e8acb85b4cf953ac85aa0bbf Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Fri, 22 Nov 2024 10:30:37 +0200 Subject: [PATCH 28/69] bgpd: Do not reset peers on suppress-fib toggling If the desired state is the same - do nothing instead of resetting once again. 
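The change is a minimal idempotency guard: if the requested value matches what is already configured, return before any of the session-reset logic runs. A small self-contained sketch of the pattern, using illustrative names rather than the real bgpd symbols:

```c
#include <stdbool.h>
#include <stdio.h>

#define FLAG_SUPPRESS 0x1

/* Illustrative stand-in for bgp->flags. */
static unsigned int flags;

static void set_suppress(bool set)
{
	/* Already in the desired state: do nothing instead of resetting. */
	if (set == !!(flags & FLAG_SUPPRESS))
		return;

	if (set)
		flags |= FLAG_SUPPRESS;
	else
		flags &= ~FLAG_SUPPRESS;

	printf("state changed, peers would be reset here\n");
}

int main(void)
{
	set_suppress(true); /* state changes, reset path runs */
	set_suppress(true); /* same value again, now a no-op  */
	return 0;
}
```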
Fixes: bdb5ae8bce94432eb5e581f04f48dc4aa5db7ca4 ("bgpd: Make suppress-fib-pending clear peering") Signed-off-by: Donatas Abraitis --- bgpd/bgpd.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index aa2bd5c3719c..d9d20563f917 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -463,6 +463,10 @@ void bgp_suppress_fib_pending_set(struct bgp *bgp, bool set) if (bgp->inst_type == BGP_INSTANCE_TYPE_VIEW) return; + /* Do nothing if already in a desired state */ + if (set == !!CHECK_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_FIB_PENDING)) + return; + if (set) { SET_FLAG(bgp->flags, BGP_FLAG_SUPPRESS_FIB_PENDING); /* Send msg to zebra for the first instance of bgp enabled From cb6f7b153e6d76aea1636d7a63bce418195ecf35 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Fri, 22 Nov 2024 11:02:15 -0500 Subject: [PATCH 29/69] lib, zebra: Do not have duplicate memory type problems In zebra_mpls.c it has a usage of MTYPE_NH_LABEL which is defined in both lib/nexthop.c and zebra/zebra_mpls.c. The usage in zebra_mpls.c is a realloc. This leads to a crash: (gdb) bt 0 __pthread_kill_implementation (no_tid=0, signo=6, threadid=126487246404032) at ./nptl/pthread_kill.c:44 1 __pthread_kill_internal (signo=6, threadid=126487246404032) at ./nptl/pthread_kill.c:78 2 __GI___pthread_kill (threadid=126487246404032, signo=signo@entry=6) at ./nptl/pthread_kill.c:89 3 0x0000730a1b442476 in __GI_raise (sig=6) at ../sysdeps/posix/raise.c:26 4 0x0000730a1b94fb18 in core_handler (signo=6, siginfo=0x7ffeed1e07b0, context=0x7ffeed1e0680) at lib/sigevent.c:268 5 6 __pthread_kill_implementation (no_tid=0, signo=6, threadid=126487246404032) at ./nptl/pthread_kill.c:44 7 __pthread_kill_internal (signo=6, threadid=126487246404032) at ./nptl/pthread_kill.c:78 8 __GI___pthread_kill (threadid=126487246404032, signo=signo@entry=6) at ./nptl/pthread_kill.c:89 9 0x0000730a1b442476 in __GI_raise (sig=sig@entry=6) at ../sysdeps/posix/raise.c:26 10 0x0000730a1b4287f3 in __GI_abort () at ./stdlib/abort.c:79 11 0x0000730a1b9984f5 in _zlog_assert_failed (xref=0x730a1ba59480 <_xref.16>, extra=0x0) at lib/zlog.c:789 12 0x0000730a1b8f8908 in mt_count_free (mt=0x576e0edda520 , ptr=0x576e36617b80) at lib/memory.c:74 13 0x0000730a1b8f8a59 in qrealloc (mt=0x576e0edda520 , ptr=0x576e36617b80, size=16) at lib/memory.c:112 14 0x0000576e0ec85e2e in nhlfe_out_label_update (nhlfe=0x576e368895f0, nh_label=0x576e3660e9b0) at zebra/zebra_mpls.c:1462 15 0x0000576e0ec833ff in lsp_install (zvrf=0x576e3655fb50, label=17, rn=0x576e366197c0, re=0x576e3660a590) at zebra/zebra_mpls.c:224 16 0x0000576e0ec87c34 in zebra_mpls_lsp_install (zvrf=0x576e3655fb50, rn=0x576e366197c0, re=0x576e3660a590) at zebra/zebra_mpls.c:2215 17 0x0000576e0ecbb427 in rib_process_update_fib (zvrf=0x576e3655fb50, rn=0x576e366197c0, old=0x576e36619660, new=0x576e3660a590) at zebra/zebra_rib.c:1084 18 0x0000576e0ecbc230 in rib_process (rn=0x576e366197c0) at zebra/zebra_rib.c:1480 19 0x0000576e0ecbee04 in process_subq_route (lnode=0x576e368e0270, qindex=8 '\b') at zebra/zebra_rib.c:2661 20 0x0000576e0ecc0711 in process_subq (subq=0x576e3653fc80, qindex=META_QUEUE_BGP) at zebra/zebra_rib.c:3226 21 0x0000576e0ecc07f9 in meta_queue_process (dummy=0x576e3653fae0, data=0x576e3653fb80) at zebra/zebra_rib.c:3265 22 0x0000730a1b97d2a9 in work_queue_run (thread=0x7ffeed1e3f30) at lib/workqueue.c:282 23 0x0000730a1b96b039 in event_call (thread=0x7ffeed1e3f30) at lib/event.c:1996 24 0x0000730a1b8e4d2d in frr_run (master=0x576e36277e10) at lib/libfrr.c:1232 25 0x0000576e0ec35ca9 in main 
(argc=7, argv=0x7ffeed1e4208) at zebra/main.c:536 Clearly replacing a label stack is an operation that should be owned by lib/nexthop.c. So lets move this function into there and have zebra_mpls.c just call the function to replace the label stack. Signed-off-by: Donald Sharp --- lib/nexthop.c | 26 ++++++++++++++++++++++++++ lib/nexthop.h | 2 ++ zebra/zebra_mpls.c | 27 +-------------------------- 3 files changed, 29 insertions(+), 26 deletions(-) diff --git a/lib/nexthop.c b/lib/nexthop.c index 98b05295b996..332581fbd8f4 100644 --- a/lib/nexthop.c +++ b/lib/nexthop.c @@ -581,6 +581,32 @@ void nexthop_del_labels(struct nexthop *nexthop) nexthop->nh_label_type = ZEBRA_LSP_NONE; } +void nexthop_change_labels(struct nexthop *nexthop, struct mpls_label_stack *new_stack) +{ + struct mpls_label_stack *nh_label_tmp; + uint32_t i; + + /* Enforce limit on label stack size */ + if (new_stack->num_labels > MPLS_MAX_LABELS) + new_stack->num_labels = MPLS_MAX_LABELS; + + /* Resize the array to accommodate the new label stack */ + if (new_stack->num_labels > nexthop->nh_label->num_labels) { + nh_label_tmp = XREALLOC(MTYPE_NH_LABEL, nexthop->nh_label, + sizeof(struct mpls_label_stack) + + new_stack->num_labels * sizeof(mpls_label_t)); + if (nh_label_tmp) { + nexthop->nh_label = nh_label_tmp; + nexthop->nh_label->num_labels = new_stack->num_labels; + } else + new_stack->num_labels = nexthop->nh_label->num_labels; + } + + /* Copy the label stack into the array */ + for (i = 0; i < new_stack->num_labels; i++) + nexthop->nh_label->label[i] = new_stack->label[i]; +} + void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action, const struct seg6local_context *ctx) { diff --git a/lib/nexthop.h b/lib/nexthop.h index 02ea4d96f2df..5dfb58d84661 100644 --- a/lib/nexthop.h +++ b/lib/nexthop.h @@ -154,6 +154,8 @@ void nexthops_free(struct nexthop *nexthop); void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype, uint8_t num_labels, const mpls_label_t *labels); void nexthop_del_labels(struct nexthop *); +void nexthop_change_labels(struct nexthop *nexthop, struct mpls_label_stack *new_stack); + void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action, const struct seg6local_context *ctx); void nexthop_del_srv6_seg6local(struct nexthop *nexthop); diff --git a/zebra/zebra_mpls.c b/zebra/zebra_mpls.c index 9549af5f14bc..0d3fd2a7268f 100644 --- a/zebra/zebra_mpls.c +++ b/zebra/zebra_mpls.c @@ -37,7 +37,6 @@ DEFINE_MTYPE_STATIC(ZEBRA, LSP, "MPLS LSP object"); DEFINE_MTYPE_STATIC(ZEBRA, FEC, "MPLS FEC object"); DEFINE_MTYPE_STATIC(ZEBRA, NHLFE, "MPLS nexthop object"); -DEFINE_MTYPE_STATIC(ZEBRA, NH_LABEL, "Nexthop label"); bool mpls_enabled; bool mpls_pw_reach_strict; /* Strict reachability checking */ @@ -1453,31 +1452,7 @@ static int nhlfe_del(struct zebra_nhlfe *nhlfe) static void nhlfe_out_label_update(struct zebra_nhlfe *nhlfe, struct mpls_label_stack *nh_label) { - struct mpls_label_stack *nh_label_tmp; - int i; - - /* Enforce limit on label stack size */ - if (nh_label->num_labels > MPLS_MAX_LABELS) - nh_label->num_labels = MPLS_MAX_LABELS; - - /* Resize the array to accommodate the new label stack */ - if (nh_label->num_labels > nhlfe->nexthop->nh_label->num_labels) { - nh_label_tmp = XREALLOC(MTYPE_NH_LABEL, nhlfe->nexthop->nh_label, - sizeof(struct mpls_label_stack) + - nh_label->num_labels * - sizeof(mpls_label_t)); - if (nh_label_tmp) { - nhlfe->nexthop->nh_label = nh_label_tmp; - nhlfe->nexthop->nh_label->num_labels = - nh_label->num_labels; - } else - 
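For reference, the configuration shape that frr-reload has to model for this fix; a hedged sketch assuming an IS-IS instance named CORE, with the node-msd sub-commands taken from the error output shown in the next patch:

```
router isis CORE
 segment-routing srv6
  node-msd
   max-segs-left 3
   max-end-pop 3
   max-h-encaps 2
   max-end-d 5
!
```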
nh_label->num_labels = - nhlfe->nexthop->nh_label->num_labels; - } - - /* Copy the label stack into the array */ - for (i = 0; i < nh_label->num_labels; i++) - nhlfe->nexthop->nh_label->label[i] = nh_label->label[i]; + nexthop_change_labels(nhlfe->nexthop, nh_label); } static int mpls_lsp_uninstall_all(struct hash *lsp_table, struct zebra_lsp *lsp, From b4babae93b81be46c105e59c74b1708f00b68704 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sat, 23 Nov 2024 08:46:00 +0100 Subject: [PATCH 30/69] tools: Add missing keyword `segment-routing srv6` in frr-reload Fix the error below: ``` [58564|mgmtd] sending configuration [58565|zebra] sending configuration, line 10: % Unknown command[52]: node-msd [58573|isisd] sending configuration [58565|zebra] Configuration file[/etc/frr/frr.conf] processing failure: 2 ``` Signed-off-by: Carmine Scarpitta --- tools/frr-reload.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 08a1f1e07eac..baa6285f458c 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -255,7 +255,9 @@ def get_normalized_interface_vrf(line): }, "router rip": {}, "router ripng": {}, - "router isis ": {}, + "router isis ": { + "segment-routing srv6": {}, + }, "router openfabric ": {}, "router ospf": {}, "router ospf6": {}, From 458e8a851608b8308b564e6cbfe95314629dccda Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sat, 23 Nov 2024 08:46:50 +0100 Subject: [PATCH 31/69] tools: Add missing keyword `node-msd` in frr-reload Fix the error below: ``` [58564|mgmtd] sending configuration [58565|zebra] sending configuration, line 14: % Unknown command[52]: max-segs-left 3 line 18: % Unknown command[52]: max-end-pop 3 line 22: % Unknown command[52]: max-h-encaps 2 line 26: % Unknown command[52]: max-end-d 5 [58573|isisd] sending configuration [58565|zebra] Configuration file[/etc/frr/frr.conf] processing failure: 2 ``` Signed-off-by: Carmine Scarpitta --- tools/frr-reload.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/frr-reload.py b/tools/frr-reload.py index baa6285f458c..33d6cf04444b 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -256,7 +256,9 @@ def get_normalized_interface_vrf(line): "router rip": {}, "router ripng": {}, "router isis ": { - "segment-routing srv6": {}, + "segment-routing srv6": { + "node-msd": {}, + }, }, "router openfabric ": {}, "router ospf": {}, From 1bcccb87cdc5de9dfe0573c0b2faf57d7043ac17 Mon Sep 17 00:00:00 2001 From: Olivier Dugeon Date: Sat, 23 Nov 2024 18:50:21 +0100 Subject: [PATCH 32/69] ospfd: Correct invalid SR-MPLS output label When OSPFd starts, there is 2 possible scenarios for Segment Routing: 1/ Routes associated to Prefixes are not yet available i.e. Segment Routing LSA are received before LSA Type 1. In this case, the function ospf_sr_nhlfe_update() is triggered when a new SPF is launch. Thus, neighbors and output label are always synchronise with the routing table. 2/ Routes are already available i.e. LSA Type 1 are received before Segment Routing LSA, in particular the Router Information which contains the SRGB. During nhlfe computation, perfixes are leave with incomplete configuration, in particular, the SR nexthop is set to NULL. If this scenario is handle through the function update_out_nhlfe (triggered when SRGB is received or modified from a neighbor node), the output label is not correctly configured as the nexthop SR node associated to the prefix has been leave to NULL. 
This patch correct this problem by calling the function compute_nhlfe() when the nexthop SR Node associated to the prefix is NULL within the update_out_nhlfe() function. Thus, we guarantee that the SR prefix is always correctly configuration indpedently of the scenario i.e. arrival of the different LSA. Signed-off-by: Olivier Dugeon --- ospfd/ospf_sr.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/ospfd/ospf_sr.c b/ospfd/ospf_sr.c index 97dc5786795c..89db97922f87 100644 --- a/ospfd/ospf_sr.c +++ b/ospfd/ospf_sr.c @@ -1334,6 +1334,12 @@ static void update_out_nhlfe(struct hash_bucket *bucket, void *args) continue; for (ALL_LIST_ELEMENTS_RO(srp->route->paths, pnode, path)) { + /* Compute NHFLE if path has not been initialized */ + if (!path->srni.nexthop) { + compute_prefix_nhlfe(srp); + continue; + } + /* Skip path that has not next SR-Node as nexthop */ if (path->srni.nexthop != srnext) continue; From c8e83c3bfd94fa6a5a2ea85a8d374aae3a48980a Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sun, 24 Nov 2024 09:37:22 +0100 Subject: [PATCH 33/69] tools: Add missing keyword `encapsulation` in frr-reload Consider the following scenario. You start from the configuration below: ``` ! segment-routing srv6 encapsulation source-address fc00:0:1::1 ! ! ! ``` Then you change the source address: ``` r1# configure r1(config)# segment-routing r1(config-sr)# srv6 r1(config-srv6)# encapsulation r1(config-srv6-encap) source-address 1::1 ``` And finally, reload the configuration `python3 frr-reload.py --reload /etc/frr/frr.conf` frr-reload returns the error below: ``` Failed to execute segment-routing srv6 no source-address 1::1 exit exit "segment-routing -- srv6 -- no source-address 1::1 -- exit -- exit" we failed to remove this command % Unknown command: no source-address 1::1 [79975|mgmtd] sending configuration line 3: % Unknown command[76]: source-address fc00:0:1::1 [79975|mgmtd] Configuration file[/etc/frr/frr.conf] processing failure: 2 ``` The reason is that the keyword `encapsulation` is missing in frr-reload. This patch adds the missing keyword `encapsulation`. Signed-off-by: Carmine Scarpitta --- tools/frr-reload.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 33d6cf04444b..2bb364f32b00 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -279,7 +279,7 @@ def get_normalized_interface_vrf(line): "policy ": {"candidate-path ": {}}, "pcep": {"pcc": {}, "pce ": {}, "pce-config ": {}}, }, - "srv6": {"locators": {"locator ": {}}}, + "srv6": {"locators": {"locator ": {}}, "encapsulation": {}}, }, "nexthop-group ": {}, "route-map ": {}, From 0a85b1ba04f6463e300aa6d5e064a5e5d79bec53 Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Sun, 24 Nov 2024 21:57:19 +0200 Subject: [PATCH 34/69] bgpd: Fix graceful-restart for peer-groups Slipped somehow that peer-groups with GR is just completely broken, but it was working before. Strikes again, that we MUST have more and more topotests. 
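For reference, the kind of per-peer-group graceful-restart configuration this fix targets, as used in the topotest configuration added later in this series:

```
router bgp 65001
 neighbor PG1 peer-group
 neighbor PG1 remote-as external
 neighbor PG1 timers 3 20
 neighbor PG1 graceful-restart-disable
 neighbor 192.168.251.2 peer-group PG1
!
```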
Fixes: 15403f521a12b668e87ef8961c78e0ed97c6ff92 ("bgpd: Streamline GR config, act on change immediately") Signed-off-by: Donatas Abraitis --- bgpd/bgp_fsm.c | 96 ++++++++++++++++++++++++++++++++++---------------- bgpd/bgp_vty.c | 30 ---------------- bgpd/bgpd.c | 1 + 3 files changed, 67 insertions(+), 60 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 4ac8201f749c..490451f193db 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -2726,33 +2726,55 @@ static void bgp_gr_update_mode_of_all_peers(struct bgp *bgp, struct listnode *node = {0}; struct listnode *nnode = {0}; enum peer_mode peer_old_state = PEER_INVALID; - - /* TODO: Need to handle peer-groups. */ + struct peer_group *group; + struct peer *member; for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { - peer_old_state = bgp_peer_gr_mode_get(peer); - if (peer_old_state != PEER_GLOBAL_INHERIT) - continue; + if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { + peer_old_state = bgp_peer_gr_mode_get(peer); + if (peer_old_state != PEER_GLOBAL_INHERIT) + continue; - bgp_peer_inherit_global_gr_mode(peer, global_new_state); - bgp_peer_gr_flags_update(peer); + bgp_peer_inherit_global_gr_mode(peer, global_new_state); + bgp_peer_gr_flags_update(peer); - if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART)) - zlog_debug("%pBP: Inherited Global GR mode, GR flags 0x%x peer flags 0x%" PRIx64 - "...resetting session", - peer, peer->peer_gr_new_status_flag, - peer->flags); + if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART)) + zlog_debug("%pBP: Inherited Global GR mode, GR flags 0x%x peer flags 0x%" PRIx64 + "...resetting session", + peer, peer->peer_gr_new_status_flag, peer->flags); - peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; + peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; - /* Reset session to match with behavior for other peer - * configs that require the session to be re-setup. 
- */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); - else - bgp_session_reset_safe(peer, &nnode); + if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) + bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, + BGP_NOTIFY_CEASE_CONFIG_CHANGE); + else + bgp_session_reset_safe(peer, &nnode); + } else { + group = peer->group; + for (ALL_LIST_ELEMENTS(group->peer, node, nnode, member)) { + peer_old_state = bgp_peer_gr_mode_get(member); + if (peer_old_state != PEER_GLOBAL_INHERIT) + continue; + + bgp_peer_inherit_global_gr_mode(member, global_new_state); + bgp_peer_gr_flags_update(member); + + if (BGP_DEBUG(graceful_restart, GRACEFUL_RESTART)) + zlog_debug("%pBP: Inherited Global GR mode, GR flags 0x%x peer flags 0x%" PRIx64 + "...resetting session", + member, member->peer_gr_new_status_flag, + member->flags); + + member->last_reset = PEER_DOWN_CAPABILITY_CHANGE; + + if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) + bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, + BGP_NOTIFY_CEASE_CONFIG_CHANGE); + else + bgp_session_reset(member); + } + } } } @@ -2911,6 +2933,9 @@ unsigned int bgp_peer_gr_action(struct peer *peer, enum peer_mode old_state, { enum global_mode global_gr_mode; bool session_reset = true; + struct peer_group *group; + struct peer *member; + struct listnode *node, *nnode; if (old_state == new_state) return BGP_GR_NO_OPERATION; @@ -2945,16 +2970,27 @@ unsigned int bgp_peer_gr_action(struct peer *peer, enum peer_mode old_state, bgp_peer_move_to_gr_mode(peer, new_state); if (session_reset) { - peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; + if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { + peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; - /* Reset session to match with behavior for other peer - * configs that require the session to be re-setup. 
- */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); - else - bgp_session_reset(peer); + if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) + bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, + BGP_NOTIFY_CEASE_CONFIG_CHANGE); + else + bgp_session_reset(peer); + } else { + group = peer->group; + for (ALL_LIST_ELEMENTS(group->peer, node, nnode, member)) { + member->last_reset = PEER_DOWN_CAPABILITY_CHANGE; + bgp_peer_move_to_gr_mode(member, new_state); + + if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) + bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, + BGP_NOTIFY_CEASE_CONFIG_CHANGE); + else + bgp_session_reset(member); + } + } } return BGP_GR_SUCCESS; diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index cba1cdaf1a91..26b840a86ba1 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -3519,11 +3519,6 @@ DEFUN (bgp_neighbor_graceful_restart_set, peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (!peer) return CMD_WARNING_CONFIG_FAILED; - if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { - vty_out(vty, - "Per peer-group graceful-restart configuration is not yet supported\n"); - return CMD_WARNING_CONFIG_FAILED; - } result = bgp_neighbor_graceful_restart(peer, PEER_GR_CMD); if (result == BGP_GR_SUCCESS) { @@ -3554,11 +3549,6 @@ DEFUN (no_bgp_neighbor_graceful_restart, peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (!peer) return CMD_WARNING_CONFIG_FAILED; - if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { - vty_out(vty, - "Per peer-group graceful-restart configuration is not yet supported\n"); - return CMD_WARNING_CONFIG_FAILED; - } result = bgp_neighbor_graceful_restart(peer, NO_PEER_GR_CMD); if (ret == BGP_GR_SUCCESS) { @@ -3588,11 +3578,6 @@ DEFUN (bgp_neighbor_graceful_restart_helper_set, peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (!peer) return CMD_WARNING_CONFIG_FAILED; - if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { - vty_out(vty, - "Per peer-group graceful-restart configuration is not yet supported\n"); - return CMD_WARNING_CONFIG_FAILED; - } ret = bgp_neighbor_graceful_restart(peer, PEER_HELPER_CMD); if (ret == BGP_GR_SUCCESS) { @@ -3623,11 +3608,6 @@ DEFUN (no_bgp_neighbor_graceful_restart_helper, peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (!peer) return CMD_WARNING_CONFIG_FAILED; - if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { - vty_out(vty, - "Per peer-group graceful-restart configuration is not yet supported\n"); - return CMD_WARNING_CONFIG_FAILED; - } ret = bgp_neighbor_graceful_restart(peer, NO_PEER_HELPER_CMD); if (ret == BGP_GR_SUCCESS) { @@ -3657,11 +3637,6 @@ DEFUN (bgp_neighbor_graceful_restart_disable_set, peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (!peer) return CMD_WARNING_CONFIG_FAILED; - if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { - vty_out(vty, - "Per peer-group graceful-restart configuration is not yet supported\n"); - return CMD_WARNING_CONFIG_FAILED; - } ret = bgp_neighbor_graceful_restart(peer, PEER_DISABLE_CMD); if (ret == BGP_GR_SUCCESS) { @@ -3693,11 +3668,6 @@ DEFUN (no_bgp_neighbor_graceful_restart_disable, peer = peer_and_group_lookup_vty(vty, argv[idx_peer]->arg); if (!peer) return CMD_WARNING_CONFIG_FAILED; - if (CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { - vty_out(vty, - "Per peer-group graceful-restart configuration is not yet supported\n"); - return CMD_WARNING_CONFIG_FAILED; - } ret = 
bgp_neighbor_graceful_restart(peer, NO_PEER_DISABLE_CMD); if (ret == BGP_GR_SUCCESS) { diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index a8431bee978a..258fc87f96db 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -3022,6 +3022,7 @@ static void peer_group2peer_config_copy(struct peer_group *group, PEER_ATTR_INHERIT(peer, group, local_role); /* Update GR flags for the peer. */ + PEER_ATTR_INHERIT(peer, group, peer_gr_new_status_flag); bgp_peer_gr_flags_update(peer); /* Apply BFD settings from group to peer if it exists. */ From ee5893ff598a72e3a9bf4d2f6a9bd43f833defa6 Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Mon, 25 Nov 2024 10:42:16 +0200 Subject: [PATCH 35/69] bgpd: Use gracefulRestart JSON field Deprecate gracefulRestartCapability which is inconsistent with an existing format if advertised and received are printed. Signed-off-by: Donatas Abraitis --- bgpd/bgp_vty.c | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index 26b840a86ba1..d1238bc8de57 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -14897,22 +14897,31 @@ static void bgp_show_peer(struct vty *vty, struct peer *p, bool use_json, if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV) || CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV)) { if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV) && - CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV)) + CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV)) { json_object_string_add( json_cap, "gracefulRestart", "advertisedAndReceived"); - else if (CHECK_FLAG(p->cap, - PEER_CAP_RESTART_ADV)) + } else if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_ADV)) { + json_object_string_add(json_cap, "gracefulRestart", + "advertised"); +#if CONFDATE > 20250525 +CPP_NOTICE("Remove `gracefulRestartCapability` JSON field") +#endif json_object_string_add( json_cap, "gracefulRestartCapability", "advertised"); - else if (CHECK_FLAG(p->cap, - PEER_CAP_RESTART_RCV)) + } else if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV)) { + json_object_string_add(json_cap, "gracefulRestart", + "received"); +#if CONFDATE > 20250525 +CPP_NOTICE("Remove `gracefulRestartCapability` JSON field") +#endif json_object_string_add( json_cap, "gracefulRestartCapability", "received"); + } if (CHECK_FLAG(p->cap, PEER_CAP_RESTART_RCV)) { int restart_af_count = 0; From a4bfa8c574e8ea6f1b8c6035b071a35e8d3d3f08 Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Mon, 25 Nov 2024 10:40:28 +0200 Subject: [PATCH 36/69] tests: Check if GR settings are inherited for peer-groups Convert to unified config also. 
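As a usage note tied to the previous patch, scripts should read the `gracefulRestart` field rather than the deprecated `gracefulRestartCapability` one; a hedged shell one-liner matching the JSON structure asserted in this test (for the 192.168.251.2 peer it is expected to print "received"):

```
vtysh -c 'show ip bgp neighbor json' \
  | jq '."192.168.251.2".neighborCapabilities.gracefulRestart'
```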
Signed-off-by: Donatas Abraitis --- tests/topotests/bgp_peer_group/r1/bgpd.conf | 12 ------ tests/topotests/bgp_peer_group/r1/frr.conf | 21 ++++++++++ tests/topotests/bgp_peer_group/r1/zebra.conf | 9 ---- tests/topotests/bgp_peer_group/r2/bgpd.conf | 11 ----- tests/topotests/bgp_peer_group/r2/frr.conf | 19 +++++++++ tests/topotests/bgp_peer_group/r2/zebra.conf | 9 ---- .../bgp_peer_group/r3/{bgpd.conf => frr.conf} | 5 +++ tests/topotests/bgp_peer_group/r3/zebra.conf | 6 --- .../bgp_peer_group/test_bgp_peer-group.py | 41 +++++++++++-------- 9 files changed, 70 insertions(+), 63 deletions(-) delete mode 100644 tests/topotests/bgp_peer_group/r1/bgpd.conf create mode 100644 tests/topotests/bgp_peer_group/r1/frr.conf delete mode 100644 tests/topotests/bgp_peer_group/r1/zebra.conf delete mode 100644 tests/topotests/bgp_peer_group/r2/bgpd.conf create mode 100644 tests/topotests/bgp_peer_group/r2/frr.conf delete mode 100644 tests/topotests/bgp_peer_group/r2/zebra.conf rename tests/topotests/bgp_peer_group/r3/{bgpd.conf => frr.conf} (78%) delete mode 100644 tests/topotests/bgp_peer_group/r3/zebra.conf diff --git a/tests/topotests/bgp_peer_group/r1/bgpd.conf b/tests/topotests/bgp_peer_group/r1/bgpd.conf deleted file mode 100644 index 68d8e61a5981..000000000000 --- a/tests/topotests/bgp_peer_group/r1/bgpd.conf +++ /dev/null @@ -1,12 +0,0 @@ -! -router bgp 65001 - neighbor PG peer-group - neighbor PG remote-as external - neighbor PG timers 3 10 - neighbor 192.168.255.3 peer-group PG - neighbor r1-eth0 interface peer-group PG - neighbor PG1 peer-group - neighbor PG1 remote-as external - neighbor PG1 timers 3 20 - neighbor 192.168.251.2 peer-group PG1 -! diff --git a/tests/topotests/bgp_peer_group/r1/frr.conf b/tests/topotests/bgp_peer_group/r1/frr.conf new file mode 100644 index 000000000000..035c8e4cf16e --- /dev/null +++ b/tests/topotests/bgp_peer_group/r1/frr.conf @@ -0,0 +1,21 @@ +! +interface r1-eth0 + ip address 192.168.255.1/24 +! +interface r1-eth1 + ip address 192.168.251.1/30 +! +ip forwarding +! +router bgp 65001 + neighbor PG peer-group + neighbor PG remote-as external + neighbor PG timers 3 10 + neighbor 192.168.255.3 peer-group PG + neighbor r1-eth0 interface peer-group PG + neighbor PG1 peer-group + neighbor PG1 remote-as external + neighbor PG1 timers 3 20 + neighbor PG1 graceful-restart-disable + neighbor 192.168.251.2 peer-group PG1 +! diff --git a/tests/topotests/bgp_peer_group/r1/zebra.conf b/tests/topotests/bgp_peer_group/r1/zebra.conf deleted file mode 100644 index 16fd8c538c4f..000000000000 --- a/tests/topotests/bgp_peer_group/r1/zebra.conf +++ /dev/null @@ -1,9 +0,0 @@ -! -interface r1-eth0 - ip address 192.168.255.1/24 -! -interface r1-eth1 - ip address 192.168.251.1/30 -! -ip forwarding -! diff --git a/tests/topotests/bgp_peer_group/r2/bgpd.conf b/tests/topotests/bgp_peer_group/r2/bgpd.conf deleted file mode 100644 index d0e8f017d1d6..000000000000 --- a/tests/topotests/bgp_peer_group/r2/bgpd.conf +++ /dev/null @@ -1,11 +0,0 @@ -! -router bgp 65002 - neighbor PG peer-group - neighbor PG remote-as external - neighbor PG timers 3 10 - neighbor r2-eth0 interface peer-group PG - neighbor PG1 peer-group - neighbor PG1 remote-as external - neighbor PG1 timers 3 20 - neighbor 192.168.251.1 peer-group PG1 -! diff --git a/tests/topotests/bgp_peer_group/r2/frr.conf b/tests/topotests/bgp_peer_group/r2/frr.conf new file mode 100644 index 000000000000..4713789f15b7 --- /dev/null +++ b/tests/topotests/bgp_peer_group/r2/frr.conf @@ -0,0 +1,19 @@ +! 
+interface r2-eth0 + ip address 192.168.255.2/24 +! +interface r2-eth1 + ip address 192.168.251.2/30 +! +ip forwarding +! +router bgp 65002 + neighbor PG peer-group + neighbor PG remote-as external + neighbor PG timers 3 10 + neighbor r2-eth0 interface peer-group PG + neighbor PG1 peer-group + neighbor PG1 remote-as external + neighbor PG1 timers 3 20 + neighbor 192.168.251.1 peer-group PG1 +! diff --git a/tests/topotests/bgp_peer_group/r2/zebra.conf b/tests/topotests/bgp_peer_group/r2/zebra.conf deleted file mode 100644 index c2ad956c9c64..000000000000 --- a/tests/topotests/bgp_peer_group/r2/zebra.conf +++ /dev/null @@ -1,9 +0,0 @@ -! -interface r2-eth0 - ip address 192.168.255.2/24 -! -interface r2-eth1 - ip address 192.168.251.2/30 -! -ip forwarding -! diff --git a/tests/topotests/bgp_peer_group/r3/bgpd.conf b/tests/topotests/bgp_peer_group/r3/frr.conf similarity index 78% rename from tests/topotests/bgp_peer_group/r3/bgpd.conf rename to tests/topotests/bgp_peer_group/r3/frr.conf index 5a1340fb0b88..e8bffaab51cd 100644 --- a/tests/topotests/bgp_peer_group/r3/bgpd.conf +++ b/tests/topotests/bgp_peer_group/r3/frr.conf @@ -1,4 +1,9 @@ ! +interface r3-eth0 + ip address 192.168.255.3/24 +! +ip forwarding +! router bgp 65003 no bgp ebgp-requires-policy neighbor PG peer-group diff --git a/tests/topotests/bgp_peer_group/r3/zebra.conf b/tests/topotests/bgp_peer_group/r3/zebra.conf deleted file mode 100644 index e9fdfb70c5ab..000000000000 --- a/tests/topotests/bgp_peer_group/r3/zebra.conf +++ /dev/null @@ -1,6 +0,0 @@ -! -interface r3-eth0 - ip address 192.168.255.3/24 -! -ip forwarding -! diff --git a/tests/topotests/bgp_peer_group/test_bgp_peer-group.py b/tests/topotests/bgp_peer_group/test_bgp_peer-group.py index 7d476b053852..45f713b8a240 100644 --- a/tests/topotests/bgp_peer_group/test_bgp_peer-group.py +++ b/tests/topotests/bgp_peer_group/test_bgp_peer-group.py @@ -2,12 +2,14 @@ # SPDX-License-Identifier: ISC # -# Copyright (c) 2021 by +# Copyright (c) 2021-2024 by # Donatas Abraitis +# Donatas Abraitis # """ -Test if peer-group works for numbered and unnumbered configurations. +Test if various random settings with peer-group works for +numbered and unnumbered configurations. 
""" import os @@ -21,7 +23,7 @@ # pylint: disable=C0413 from lib import topotest -from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topogen import Topogen, get_topogen from lib.topolog import logger pytestmark = [pytest.mark.bgpd] @@ -48,12 +50,7 @@ def setup_module(mod): router_list = tgen.routers() for _, (rname, router) in enumerate(router_list.items(), 1): - router.load_config( - TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) - ) - router.load_config( - TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) - ) + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) tgen.start_router() @@ -72,14 +69,26 @@ def test_bgp_peer_group(): def _bgp_peer_group_configured(): output = json.loads(tgen.gears["r1"].vtysh_cmd("show ip bgp neighbor json")) expected = { - "r1-eth0": {"peerGroup": "PG", "bgpState": "Established"}, - "192.168.255.3": {"peerGroup": "PG", "bgpState": "Established"}, - "192.168.251.2": {"peerGroup": "PG1", "bgpState": "Established"}, + "r1-eth0": { + "peerGroup": "PG", + "bgpState": "Established", + "neighborCapabilities": {"gracefulRestart": "advertisedAndReceived"}, + }, + "192.168.255.3": { + "peerGroup": "PG", + "bgpState": "Established", + "neighborCapabilities": {"gracefulRestart": "advertisedAndReceived"}, + }, + "192.168.251.2": { + "peerGroup": "PG1", + "bgpState": "Established", + "neighborCapabilities": {"gracefulRestart": "received"}, + }, } return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_peer_group_configured) - _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assert result is None, "Failed bgp convergence in r1" def _bgp_peer_group_check_advertised_routes(): @@ -97,7 +106,7 @@ def _bgp_peer_group_check_advertised_routes(): return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_peer_group_check_advertised_routes) - _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assert result is None, "Failed checking advertised routes from r3" @@ -122,7 +131,7 @@ def _bgp_peer_group_remoteas_del(): return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_peer_group_remoteas_del) - _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assert result is None, "Failed bgp convergence in r1" logger.info("Re-add bgp peer-group PG1 remote-as neighbor should be established") @@ -139,7 +148,7 @@ def _bgp_peer_group_remoteas_add(): return topotest.json_cmp(output, expected) test_func = functools.partial(_bgp_peer_group_remoteas_add) - _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) assert result is None, "Failed bgp convergence in r1" From adae8192d184beafdf84b15b24b66c7ccd0c92e0 Mon Sep 17 00:00:00 2001 From: Chirag Shah Date: Tue, 19 Nov 2024 12:24:30 -0800 Subject: [PATCH 37/69] zebra: EVPN check vxlan oper up in vlan mapping When VLAN-VNI mapping is updated, do not set the L2VNI up event if the associated VXLAN device is not up. This may result in bgp synced remote routes to skip installing in Zebra and onwards (Kernel). 
Ticket: #4139506 Signed-off-by: Chirag Shah --- zebra/zebra_vxlan_if.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/zebra/zebra_vxlan_if.c b/zebra/zebra_vxlan_if.c index 17ab05c1f3f5..2658c9f01c44 100644 --- a/zebra/zebra_vxlan_if.c +++ b/zebra/zebra_vxlan_if.c @@ -1032,7 +1032,13 @@ int zebra_vxlan_if_vni_up(struct interface *ifp, struct zebra_vxlan_vni *vnip) /* If part of a bridge, inform BGP about this VNI. */ /* Also, read and populate local MACs and neighbors. */ if (zif->brslave_info.br_if) { - zebra_evpn_send_add_to_client(zevpn); + if (if_is_operative(zevpn->vxlan_if)) { + zebra_evpn_send_add_to_client(zevpn); + } else { + if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_VXLAN) + zlog_debug("%s VNI %u vxlan_if %s oper down skipping vni up to client", + __func__, zevpn->vni, zevpn->vxlan_if->name); + } zebra_evpn_read_mac_neigh(zevpn, ifp); } } From 97538158ba0b073febebea1439dceae2947a7e0c Mon Sep 17 00:00:00 2001 From: Chirag Shah Date: Thu, 14 Nov 2024 15:17:07 -0800 Subject: [PATCH 38/69] zebra: EVPN add debug trace for HREP entry Ticket: #4139506 Signed-off-by: Chirag Shah --- zebra/rt_netlink.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index ab07ef8d2124..492fe528894e 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -3683,6 +3683,13 @@ netlink_vxlan_flood_update_ctx(const struct zebra_dplane_ctx *ctx, int cmd, if (dplane_ctx_get_type(ctx) != 0) proto = zebra2proto(dplane_ctx_get_type(ctx)); + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug("Tx %s family %s IF %s(%u) VNI %u MAC %pEA VTEP %pIA vid %u", + nl_msg_type_to_str(cmd), nl_family_to_str(PF_BRIDGE), + dplane_ctx_get_ifname(ctx), dplane_ctx_get_ifindex(ctx), + dplane_ctx_neigh_get_vni(ctx), &dst_mac, + dplane_ctx_neigh_get_ipaddr(ctx), dplane_ctx_mac_get_vlan(ctx)); + return netlink_neigh_update_msg_encode( ctx, cmd, (const void *)&dst_mac, ETH_ALEN, dplane_ctx_neigh_get_ipaddr(ctx), false, PF_BRIDGE, 0, NTF_SELF, From 866148ef1b1931f5385c6452344540fc61366a6c Mon Sep 17 00:00:00 2001 From: Chirag Shah Date: Wed, 20 Nov 2024 09:15:24 -0800 Subject: [PATCH 39/69] zebra: add debug in remote vtep install ifp not up Ticket: #4139506 Signed-off-by: Chirag Shah --- zebra/zebra_vxlan.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c index 9e2c138e21f1..0658f996e6b6 100644 --- a/zebra/zebra_vxlan.c +++ b/zebra/zebra_vxlan.c @@ -4755,8 +4755,14 @@ void zebra_vxlan_remote_vtep_add(vrf_id_t vrf_id, vni_t vni, zif = ifp->info; /* If down or not mapped to a bridge, we're done. 
*/ - if (!if_is_operative(ifp) || !zif->brslave_info.br_if) + if (!if_is_operative(ifp) || !zif->brslave_info.br_if) { + if (IS_ZEBRA_DEBUG_KERNEL) + zlog_debug( + "%s VNI %u VTEP %pI4 ifp %s oper %u br_if %u skipping update", + __func__, zevpn->vni, &vtep_ip, ifp->name, + if_is_operative(ifp), !zif->brslave_info.br_if); return; + } zvtep = zebra_evpn_vtep_find(zevpn, &vtep_ip); if (zvtep) { From 91119dab9b2e1ac911685cf7232739d3e7dab453 Mon Sep 17 00:00:00 2001 From: Chirag Shah Date: Tue, 19 Nov 2024 18:11:10 -0800 Subject: [PATCH 40/69] tools: add helper function to print family Ticket: #4127810 Testing: 2024-11-20T01:52:10.073 frr_zebra:netlink_neigh_update_msg_encode {'mac': '00:00:00:00:00:00', 'ip': '27.0.0.4', 'nhg': 0, 'flags': 2, 'state': 192, 'family': 'bridge', 'type': 0, 'op': 23} Signed-off-by: Chirag Shah --- tools/frr_babeltrace.py | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/tools/frr_babeltrace.py b/tools/frr_babeltrace.py index 9832568b3736..c3a22cd1e809 100755 --- a/tools/frr_babeltrace.py +++ b/tools/frr_babeltrace.py @@ -79,6 +79,28 @@ def parse_event(event, field_parsers): else: field_info[field] = event.get(field) print(event.name, field_info) + + +def print_family_str(field_val): + """ + pretty print kernel family to string + """ + if field_val == socket.AF_INET: + cmd_str = "ipv4" + elif field_val == socket.AF_INET6: + cmd_str = "ipv6" + elif field_val == socket.AF_BRIDGE: + cmd_str = "bridge" + elif field_val == 128: # RTNL_FAMILY_IPMR: + cmd_str = "ipv4MR" + elif field_val == 129: # RTNL_FAMILY_IP6MR: + cmd_str = "ipv6MR" + else: + cmd_str = "Invalid family" + + return cmd_str + + ############################ common parsers - end ############################# ############################ evpn parsers - start ############################# From 814a89f6e51cdb27f695212774e711704b44f8cc Mon Sep 17 00:00:00 2001 From: Chirag Shah Date: Mon, 25 Nov 2024 08:58:25 -0800 Subject: [PATCH 41/69] tools: fix style check babletrace file python3 -m black tools/frr_babeltrace.py Signed-off-by: Chirag Shah --- tools/frr_babeltrace.py | 54 ++++++++++++++++++++++++++++++++--------- 1 file changed, 42 insertions(+), 12 deletions(-) diff --git a/tools/frr_babeltrace.py b/tools/frr_babeltrace.py index c3a22cd1e809..7ace5d64b68b 100755 --- a/tools/frr_babeltrace.py +++ b/tools/frr_babeltrace.py @@ -18,6 +18,7 @@ import babeltrace + ########################### common parsers - start ############################ def print_ip_addr(field_val): """ @@ -48,24 +49,28 @@ def print_mac(field_val): """ return ":".join("%02x" % fb for fb in field_val) + def print_net_ipv4_addr(field_val): """ pretty print ctf_integer_network ipv4 """ return str(ipaddress.IPv4Address(field_val)) + def print_esi(field_val): """ pretty print ethernet segment id, esi_t """ return ":".join("%02x" % fb for fb in field_val) + def get_field_list(event): """ only fetch fields added via the TP, skip metadata etc. 
""" return event.field_list_with_scope(babeltrace.CTFScope.EVENT_FIELDS) + def parse_event(event, field_parsers): """ Wild card event parser; doesn't make things any prettier @@ -103,6 +108,7 @@ def print_family_str(field_val): ############################ common parsers - end ############################# + ############################ evpn parsers - start ############################# def parse_frr_bgp_evpn_mac_ip_zsend(event): """ @@ -114,13 +120,16 @@ def parse_frr_bgp_evpn_mac_ip_zsend(event): ctf_integer_network_hex(unsigned int, vtep, vtep.s_addr) ctf_array(unsigned char, esi, esi, sizeof(esi_t)) """ - field_parsers = {"ip": print_ip_addr, - "mac": print_mac, - "esi": print_esi, - "vtep": print_net_ipv4_addr} + field_parsers = { + "ip": print_ip_addr, + "mac": print_mac, + "esi": print_esi, + "vtep": print_net_ipv4_addr, + } parse_event(event, field_parsers) + def parse_frr_bgp_evpn_bum_vtep_zsend(event): """ bgp evpn bum-vtep parser; raw format - @@ -132,6 +141,7 @@ def parse_frr_bgp_evpn_bum_vtep_zsend(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_nh_rmac_send(event): """ bgp evpn nh-rmac parser; raw format - @@ -141,17 +151,18 @@ def parse_frr_bgp_evpn_mh_nh_rmac_send(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_local_es_add_zrecv(event): """ bgp evpn local-es parser; raw format - ctf_array(unsigned char, esi, esi, sizeof(esi_t)) ctf_integer_network_hex(unsigned int, vtep, vtep.s_addr) """ - field_parsers = {"esi": print_esi, - "vtep": print_net_ipv4_addr} + field_parsers = {"esi": print_esi, "vtep": print_net_ipv4_addr} parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_local_es_del_zrecv(event): """ bgp evpn local-es parser; raw format - @@ -161,6 +172,7 @@ def parse_frr_bgp_evpn_mh_local_es_del_zrecv(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_local_es_evi_add_zrecv(event): """ bgp evpn local-es-evi parser; raw format - @@ -170,6 +182,7 @@ def parse_frr_bgp_evpn_mh_local_es_evi_add_zrecv(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_local_es_evi_del_zrecv(event): """ bgp evpn local-es-evi parser; raw format - @@ -179,6 +192,7 @@ def parse_frr_bgp_evpn_mh_local_es_evi_del_zrecv(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_es_evi_vtep_add(event): """ bgp evpn remote ead evi remote vtep add; raw format - @@ -189,6 +203,7 @@ def parse_frr_bgp_evpn_mh_es_evi_vtep_add(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_es_evi_vtep_del(event): """ bgp evpn remote ead evi remote vtep del; raw format - @@ -199,6 +214,7 @@ def parse_frr_bgp_evpn_mh_es_evi_vtep_del(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_local_ead_es_evi_route_upd(event): """ bgp evpn local ead evi vtep; raw format - @@ -209,6 +225,7 @@ def parse_frr_bgp_evpn_mh_local_ead_es_evi_route_upd(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_mh_local_ead_es_evi_route_del(event): """ bgp evpn local ead evi vtep del; raw format - @@ -219,6 +236,7 @@ def parse_frr_bgp_evpn_mh_local_ead_es_evi_route_del(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_local_vni_add_zrecv(event): """ bgp evpn local-vni parser; raw format - @@ -230,6 +248,7 @@ def parse_frr_bgp_evpn_local_vni_add_zrecv(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_local_l3vni_add_zrecv(event): """ bgp evpn local-l3vni parser; raw format - @@ -237,12 +256,15 @@ def parse_frr_bgp_evpn_local_l3vni_add_zrecv(event): 
ctf_array(unsigned char, svi_rmac, svi_rmac, sizeof(struct ethaddr)) ctf_array(unsigned char, vrr_rmac, vrr_rmac, sizeof(struct ethaddr)) """ - field_parsers = {"vtep": print_net_ipv4_addr, - "svi_rmac": print_mac, - "vrr_rmac": print_mac} + field_parsers = { + "vtep": print_net_ipv4_addr, + "svi_rmac": print_mac, + "vrr_rmac": print_mac, + } parse_event(event, field_parsers) + def parse_frr_bgp_evpn_local_macip_add_zrecv(event): """ bgp evpn local-mac-ip parser; raw format - @@ -256,6 +278,7 @@ def parse_frr_bgp_evpn_local_macip_add_zrecv(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_local_macip_del_zrecv(event): """ bgp evpn local-mac-ip del parser; raw format - @@ -267,16 +290,20 @@ def parse_frr_bgp_evpn_local_macip_del_zrecv(event): parse_event(event, field_parsers) + def parse_frr_bgp_evpn_advertise_type5(event): """ local originated type-5 route """ - field_parsers = {"ip": print_ip_addr, - "rmac": print_mac, - "vtep": print_net_ipv4_addr} + field_parsers = { + "ip": print_ip_addr, + "rmac": print_mac, + "vtep": print_net_ipv4_addr, + } parse_event(event, field_parsers) + def parse_frr_bgp_evpn_withdraw_type5(event): """ local originated type-5 route withdraw @@ -285,8 +312,10 @@ def parse_frr_bgp_evpn_withdraw_type5(event): parse_event(event, field_parsers) + ############################ evpn parsers - end *############################# + def main(): """ FRR lttng trace output parser; babel trace plugin @@ -341,5 +370,6 @@ def main(): else: parse_event(event, {}) + if __name__ == "__main__": main() From 069dff269e8516aec24d0ae4408ac5344c5fd2e0 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 19 Sep 2024 15:36:50 -0400 Subject: [PATCH 42/69] zebra: Add ability to know if some config is set For interface config: shutdown mpls multicast These states were never being shown in output, let's show it. Signed-off-by: Donald Sharp --- zebra/interface.c | 34 ++++++++++++++++++++++++++++------ 1 file changed, 28 insertions(+), 6 deletions(-) diff --git a/zebra/interface.c b/zebra/interface.c index 86de5dbae68e..4090e81d68fa 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -47,6 +47,20 @@ DEFINE_MTYPE_STATIC(ZEBRA, ZIF_DESC, "Intf desc"); static void if_down_del_nbr_connected(struct interface *ifp); +static const char *if_zebra_data_state(uint8_t state) +{ + switch (state) { + case IF_ZEBRA_DATA_UNSPEC: + return "Not specified by CLI"; + case IF_ZEBRA_DATA_ON: + return "Enabled by CLI"; + case IF_ZEBRA_DATA_OFF: + return "Disabled by CLI"; + } + + return "STATE IS WRONG DEV ESCAPE"; +} + static void if_zebra_speed_update(struct event *thread) { struct interface *ifp = EVENT_ARG(thread); @@ -2627,8 +2641,8 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp) vty_out(vty, "mtu6 %d ", ifp->mtu6); vty_out(vty, "\n flags: %s\n", if_flag_dump(ifp->flags)); - if (zebra_if->mpls) - vty_out(vty, " MPLS enabled\n"); + vty_out(vty, " MPLS %s %s\n", zebra_if->mpls ? "enabled" : "", + if_zebra_data_state(zebra_if->multicast)); if (zebra_if->linkdown) vty_out(vty, " Ignore all v4 routes with linkdown\n"); @@ -2640,6 +2654,10 @@ static void if_dump_vty(struct vty *vty, struct interface *ifp) if (zebra_if->v6mcast_on) vty_out(vty, " v6 Multicast forwarding is on\n"); + vty_out(vty, " Multicast config is %s\n", if_zebra_data_state(zebra_if->multicast)); + + vty_out(vty, " Shutdown config is %s\n", if_zebra_data_state(zebra_if->shutdown)); + /* Hardware address. 
*/ vty_out(vty, " Type: %s\n", if_link_type_str(ifp->ll_type)); if (ifp->hw_addr_len != 0) { @@ -2988,10 +3006,14 @@ static void if_dump_vty_json(struct vty *vty, struct interface *ifp, json_object_boolean_add(json_if, "mplsEnabled", zebra_if->mpls); json_object_boolean_add(json_if, "linkDown", zebra_if->linkdown); json_object_boolean_add(json_if, "linkDownV6", zebra_if->linkdownv6); - json_object_boolean_add(json_if, "mcForwardingV4", - zebra_if->v4mcast_on); - json_object_boolean_add(json_if, "mcForwardingV6", - zebra_if->v6mcast_on); + json_object_boolean_add(json_if, "mcForwardingV4", zebra_if->v4mcast_on); + json_object_boolean_add(json_if, "mcForwardingV6", zebra_if->v6mcast_on); + + json_object_string_add(json_if, "multicastConfig", if_zebra_data_state(zebra_if->multicast)); + + json_object_string_add(json_if, "shutdownConfig", if_zebra_data_state(zebra_if->shutdown)); + + json_object_string_add(json_if, "mplsConfig", if_zebra_data_state(zebra_if->mpls_config)); if (ifp->ifindex == IFINDEX_INTERNAL) { json_object_boolean_add(json_if, "pseudoInterface", true); From c23f505f45bd0cb670386149a50ae744f5c5bda5 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 19 Sep 2024 21:06:40 -0400 Subject: [PATCH 43/69] tests: Add some test cases for missed coverage I noticed that there was some missed code coverage in zebra. multicast [enable|disable] and show interface description vrf all Add a bit to get it covered. Signed-off-by: Donald Sharp --- .../test_all_protocol_startup.py | 72 +++++++++++++++++++ 1 file changed, 72 insertions(+) diff --git a/tests/topotests/all_protocol_startup/test_all_protocol_startup.py b/tests/topotests/all_protocol_startup/test_all_protocol_startup.py index 0ffd7627340a..06a350c8e96c 100644 --- a/tests/topotests/all_protocol_startup/test_all_protocol_startup.py +++ b/tests/topotests/all_protocol_startup/test_all_protocol_startup.py @@ -20,6 +20,7 @@ import pytest import glob from time import sleep +from lib.topolog import logger pytestmark = [ pytest.mark.babeld, @@ -1715,6 +1716,77 @@ def _show_func(): net["r1"].cmd('vtysh -c "conf" -c "no nexthop-group resilience"') +def test_interface_stuff(): + global fatal_error + net = get_topogen().net + + # Skip if previous fatal error condition is raised + if fatal_error != "": + pytest.skip(fatal_error) + + print("\n\n** Verifying some interface code") + print("************************************\n") + + net["r1"].cmd('vtysh -c "conf" -c "interface r1-eth0" -c "multicast enable"') + + def _test_interface_multicast_on(): + output = json.loads(net["r1"].cmd('vtysh -c "show int r1-eth0 json"')) + expected = { + "r1-eth0": { + "flags": "", + "multicastConfig": "Enabled by CLI", + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_test_interface_multicast_on) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Multicast bit was not set on r1-eth0" + + net["r1"].cmd('vtysh -c "conf" -c "interface r1-eth0" -c "multicast disable"') + + def _test_interface_multicast_off(): + output = json.loads( + net["r1"].cmd('vtysh -c "show int r1-eth0 vrf default json"') + ) + expected = { + "r1-eth0": { + "flags": "", + "multicastConfig": "Disabled by CLI", + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_test_interface_multicast_off) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Multicast bit was not turned off on r1-eth0" + + net["r1"].cmd('vtysh -c "conf" -c 
"interface r1-eth0" -c "no multicast disable"') + + def _test_interface_multicast_disable(): + output = json.loads(net["r1"].cmd('vtysh -c "show int r1-eth0 json"')) + expected = { + "r1-eth0": { + "flags": "", + "multicastConfig": "Not specified by CLI", + } + } + return topotest.json_cmp(output, expected) + + test_func = functools.partial(_test_interface_multicast_disable) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Multicast bit was set on r1-eth0" + + logger.info("Ensure that these commands are still nominally working") + rc, o, e = net["r1"].cmd_status('vtysh -c "show interface description vrf all"') + logger.info(o) + assert rc == 0 + + rc, o, e = net["r1"].cmd_status('vtysh -c "show interface description vrf default"') + logger.info(o) + assert rc == 0 + + def test_shutdown_check_stderr(): global fatal_error net = get_topogen().net From a92854047b6a250d8fd0dcccc1aab51a0318c7c5 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 19 Sep 2024 21:39:50 -0400 Subject: [PATCH 44/69] zebra: Remove some unused functions on linux build The functions: if_get_flags if_flags_update if_flags_mangle are never invoked from a linux netlink build. Put a #ifdef around those functions so that they are not included on the linux build as that they are not needed there. Signed-off-by: Donald Sharp --- zebra/interface.c | 2 ++ zebra/ioctl.c | 2 ++ 2 files changed, 4 insertions(+) diff --git a/zebra/interface.c b/zebra/interface.c index 4090e81d68fa..f7fd112cd4ce 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -380,6 +380,7 @@ int if_subnet_delete(struct interface *ifp, struct connected *ifc) return 0; } +#ifndef HAVE_NETLINK /* if_flags_mangle: A place for hacks that require mangling * or tweaking the interface flags. * @@ -431,6 +432,7 @@ void if_flags_update(struct interface *ifp, uint64_t newflags) if_up(ifp, true); } } +#endif /* Wake up configured address if it is not in current kernel address. */ diff --git a/zebra/ioctl.c b/zebra/ioctl.c index a35784cd36e4..47ce7c943df8 100644 --- a/zebra/ioctl.c +++ b/zebra/ioctl.c @@ -390,6 +390,7 @@ int if_unset_prefix_ctx(const struct zebra_dplane_ctx *ctx) #endif /* HAVE_STRUCT_IFALIASREQ */ #endif /* HAVE_NETLINK */ +#ifndef HAVE_NETLINK /* get interface flags */ void if_get_flags(struct interface *ifp) { @@ -485,6 +486,7 @@ void if_get_flags(struct interface *ifp) out: if_flags_update(ifp, (ifreqflags.ifr_flags & 0x0000ffff)); } +#endif /* Set interface flags */ int if_set_flags(struct interface *ifp, uint64_t flags) From 04361b30599c4525c5f39c73000006230f28f5d7 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Tue, 24 Sep 2024 10:46:11 -0400 Subject: [PATCH 45/69] tests: Add some test cases for snmp Noticed that we were not really attempting to even test large swaths of our snmp infrastructure. Let's load up some very simple configs for those daemons that FRR supports and ensure that SNMP is working to some extent. 
Signed-off-by: Donald Sharp --- tests/topotests/lib/snmptest.py | 6 ++- .../topotests/simple_snmp_test/r1/isisd.conf | 2 + .../topotests/simple_snmp_test/r1/ospf6d.conf | 12 +++++ .../topotests/simple_snmp_test/r1/ospfd.conf | 11 ++++ tests/topotests/simple_snmp_test/r1/ripd.conf | 8 +++ .../topotests/simple_snmp_test/r1/zebra.conf | 3 +- .../simple_snmp_test/test_simple_snmp.py | 52 +++++++++++++++++-- 7 files changed, 89 insertions(+), 5 deletions(-) create mode 100644 tests/topotests/simple_snmp_test/r1/ospf6d.conf create mode 100644 tests/topotests/simple_snmp_test/r1/ospfd.conf create mode 100644 tests/topotests/simple_snmp_test/r1/ripd.conf diff --git a/tests/topotests/lib/snmptest.py b/tests/topotests/lib/snmptest.py index 8e2e76d154b4..6d586cee5074 100644 --- a/tests/topotests/lib/snmptest.py +++ b/tests/topotests/lib/snmptest.py @@ -104,12 +104,16 @@ def get_next(self, oid): return None return self._get_snmp_value(result) - def walk(self, oid): + def walk(self, oid, raw=False): cmd = "snmpwalk {0} {1} 2>&1 | grep -v SNMPv2-PDU".format( self._snmp_config(), oid ) result = self.router.cmd(cmd) + + if raw: + return result + return self._parse_multiline(result) def parse_notif_ipv4(self, notif): diff --git a/tests/topotests/simple_snmp_test/r1/isisd.conf b/tests/topotests/simple_snmp_test/r1/isisd.conf index 435abde330df..c53d2509e2d5 100644 --- a/tests/topotests/simple_snmp_test/r1/isisd.conf +++ b/tests/topotests/simple_snmp_test/r1/isisd.conf @@ -3,6 +3,8 @@ log stdout debugging ! debug isis route-events ! debug isis events ! +agentx +! interface r1-eth0 ip router isis ISIS1 ipv6 router isis ISIS1 diff --git a/tests/topotests/simple_snmp_test/r1/ospf6d.conf b/tests/topotests/simple_snmp_test/r1/ospf6d.conf new file mode 100644 index 000000000000..e81151710ba6 --- /dev/null +++ b/tests/topotests/simple_snmp_test/r1/ospf6d.conf @@ -0,0 +1,12 @@ +agentx + +int r1-eth0 + ipv6 ospf6 area 0.0.0.0 + +int r1-eth1 + ipv6 ospf6 area 0.0.0.0 +int r1-eth2 + ipv6 ospf6 area 0.0.0.0 + +router ospf6 + redistribute local \ No newline at end of file diff --git a/tests/topotests/simple_snmp_test/r1/ospfd.conf b/tests/topotests/simple_snmp_test/r1/ospfd.conf new file mode 100644 index 000000000000..cc0d9e52c2df --- /dev/null +++ b/tests/topotests/simple_snmp_test/r1/ospfd.conf @@ -0,0 +1,11 @@ +agentx + +int r1-eth0 + ip ospf area 0.0.0.0 +int r1-eth1 + ip ospf area 0.0.0.0 +int r1-eth2 + ip ospf area 0.0.0.0 + +router ospf + redistribute local \ No newline at end of file diff --git a/tests/topotests/simple_snmp_test/r1/ripd.conf b/tests/topotests/simple_snmp_test/r1/ripd.conf new file mode 100644 index 000000000000..71cdb058cf79 --- /dev/null +++ b/tests/topotests/simple_snmp_test/r1/ripd.conf @@ -0,0 +1,8 @@ +! +! +router rip + network 0.0.0.0/0 + redistribute local +! +agentx +! \ No newline at end of file diff --git a/tests/topotests/simple_snmp_test/r1/zebra.conf b/tests/topotests/simple_snmp_test/r1/zebra.conf index 5281d0055d9b..6483a661ceea 100644 --- a/tests/topotests/simple_snmp_test/r1/zebra.conf +++ b/tests/topotests/simple_snmp_test/r1/zebra.conf @@ -1,5 +1,7 @@ log file zebra.log ! +agentx +! interface r1-eth0 ip address 192.168.12.12/24 ipv6 address 2000:1:1:12::12/64 @@ -18,5 +20,4 @@ interface lo ipv6 address 2000:1:1:1::1/128 ! ! -! 
line vty diff --git a/tests/topotests/simple_snmp_test/test_simple_snmp.py b/tests/topotests/simple_snmp_test/test_simple_snmp.py index 0387e2927405..c74ffcc2db5c 100755 --- a/tests/topotests/simple_snmp_test/test_simple_snmp.py +++ b/tests/topotests/simple_snmp_test/test_simple_snmp.py @@ -24,7 +24,8 @@ # Import topogen and topotest helpers from lib.topogen import Topogen, TopoRouter, get_topogen from lib.snmptest import SnmpTester - +from time import sleep +from lib.topolog import logger pytestmark = [pytest.mark.bgpd, pytest.mark.isisd, pytest.mark.snmp] @@ -59,16 +60,35 @@ def setup_module(mod): # For all registered routers, load the zebra configuration file for rname, router in router_list.items(): router.load_config( - TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + TopoRouter.RD_ZEBRA, + os.path.join(CWD, "{}/zebra.conf".format(rname)), + "-M snmp", ) router.load_config( - TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) + TopoRouter.RD_ISIS, + os.path.join(CWD, "{}/isisd.conf".format(rname)), + "-M snmp", ) router.load_config( TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)), "-M snmp", ) + router.load_config( + TopoRouter.RD_RIP, + os.path.join(CWD, "{}/ripd.conf".format(rname)), + "-M snmp", + ) + router.load_config( + TopoRouter.RD_OSPF, + os.path.join(CWD, "{}/ospfd.conf".format(rname)), + "-M snmp", + ) + router.load_config( + TopoRouter.RD_OSPF6, + os.path.join(CWD, "{}/ospf6d.conf".format(rname)), + "-M snmp", + ) router.load_config( TopoRouter.RD_SNMP, os.path.join(CWD, "{}/snmpd.conf".format(rname)), @@ -77,6 +97,16 @@ def setup_module(mod): # After loading the configurations, this function loads configured daemons. tgen.start_router() + # Why this sleep? If you are using zebra w/ snmp we have a chicken + # and egg problem with the snmpd. snmpd is being started up with + # ip addresses, and as such snmpd may not be ready to listen yet + # (see startup stuff in topotest.py ) with the 2 second delay + # on starting snmpd after zebra. As such if we want to test + # anything in zebra we need to sleep a bit to allow the connection + # to happen. I have no good way to test to see if zebra is up + # and running with snmp at this point in time. So this will have + # to do. + sleep(17) def teardown_module(): @@ -103,6 +133,22 @@ def test_r1_bgp_version(): assert r1_snmp.test_oid_walk("bgpVersion", ["10"]) assert r1_snmp.test_oid_walk("bgpVersion", ["10"], ["0"]) + assert r1_snmp.test_oid( + "IP-FORWARD-MIB::ipForwardDest.192.168.12.0", "192.168.12.0" + ) + + assert r1_snmp.test_oid("ISIS-MIB::isisSysVersion", "one(1)") + # rip is not auto-loading agentx from mgmtd + # assert r1_snmp.test_oid("RIPv2-MIB::rip2GlobalQueries", "0") + + assert r1_snmp.test_oid("OSPF-MIB::ospfVersionNumber", "version2(2)") + assert r1_snmp.test_oid("OSPFV3-MIB::ospfv3VersionNumber", "version3(3)") + + # Let's just dump everything and make sure we get some additional test + # coverage + logger.info("Let's walk everything") + logger.info(r1_snmp.walk(".1", raw=True)) + def test_memory_leak(): "Run the memory leak test and report results." 
From 735809756f8389b54098f4abcb42c233bb1f1dcd Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Tue, 24 Sep 2024 13:59:48 -0400 Subject: [PATCH 46/69] ripd: Use route_node_match instead of route_node_match_ipv4 Signed-off-by: Donald Sharp --- ripd/ripd.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/ripd/ripd.c b/ripd/ripd.c index 8768819fe26c..2d038507ab3a 100644 --- a/ripd/ripd.c +++ b/ripd/ripd.c @@ -1263,9 +1263,13 @@ static void rip_response_process(struct rip_packet *packet, int size, rip->vrf->vrf_id)) { struct route_node *rn; struct rip_info *rinfo; + struct prefix p = { 0 }; - rn = route_node_match_ipv4(rip->table, - &rte->nexthop); + p.family = AF_INET; + p.prefixlen = IPV4_MAX_BITLEN; + p.u.prefix4 = rte->nexthop; + + rn = route_node_match(rip->table, &p); if (rn) { rinfo = rn->info; From 67526c4b8c6a4f4757d617ea9fd54d7f95a540d0 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 25 Sep 2024 08:02:42 -0400 Subject: [PATCH 47/69] lib: Remove route_node_match_ipv[4|6] not being used These functions are not being used. Let's just remove them from our code base. Signed-off-by: Donald Sharp --- lib/table.c | 26 -------------------------- lib/table.h | 4 ---- 2 files changed, 30 deletions(-) diff --git a/lib/table.c b/lib/table.c index 3bf93894ec0b..cf185de22617 100644 --- a/lib/table.c +++ b/lib/table.c @@ -208,32 +208,6 @@ struct route_node *route_node_match(struct route_table *table, return NULL; } -struct route_node *route_node_match_ipv4(struct route_table *table, - const struct in_addr *addr) -{ - struct prefix_ipv4 p; - - memset(&p, 0, sizeof(p)); - p.family = AF_INET; - p.prefixlen = IPV4_MAX_BITLEN; - p.prefix = *addr; - - return route_node_match(table, (struct prefix *)&p); -} - -struct route_node *route_node_match_ipv6(struct route_table *table, - const struct in6_addr *addr) -{ - struct prefix_ipv6 p; - - memset(&p, 0, sizeof(p)); - p.family = AF_INET6; - p.prefixlen = IPV6_MAX_BITLEN; - p.prefix = *addr; - - return route_node_match(table, &p); -} - /* Lookup same prefix node. Return NULL when we can't find route. */ struct route_node *route_node_lookup(struct route_table *table, union prefixconstptr pu) diff --git a/lib/table.h b/lib/table.h index acfc87615429..c31be2b6889d 100644 --- a/lib/table.h +++ b/lib/table.h @@ -195,10 +195,6 @@ extern struct route_node *route_node_lookup_maynull(struct route_table *table, union prefixconstptr pu); extern struct route_node *route_node_match(struct route_table *table, union prefixconstptr pu); -extern struct route_node *route_node_match_ipv4(struct route_table *table, - const struct in_addr *addr); -extern struct route_node *route_node_match_ipv6(struct route_table *table, - const struct in6_addr *addr); extern unsigned long route_table_count(struct route_table *table); From 4f032a44a43345f42ccb5557a7b48805c99ad17e Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 7 Sep 2023 07:57:26 -0400 Subject: [PATCH 48/69] tests: Fix ospfapi client to clear ospf process Test is failing locally: 2023-09-06 18:39:56,865 DEBUG: r1: vtysh result: Hello, this is FRRouting (version 9.1-dev). Copyright 1996-2005 Kunihiro Ishiguro, et al. 
r1# conf t r1(config)# router ospf r1(config-router)# ospf router-id 1.1.1.1 For this router-id change to take effect, use "clear ip ospf process" command r1(config-router)# 2023-09-06 18:39:56,865 DEBUG: root: GOT LINE: 'SUCCESS: 1.0.0.0' 2023-09-06 18:39:56,866 DEBUG: root: GOT LINE: '2023-09-06 18:39:55,982 INFO: TESTER: root: Waiting for 1.1.1.1' 2023-09-06 18:39:56,867 DEBUG: root: GOT LINE: '2023-09-06 18:39:55,982 DEBUG: TESTER: root: expected '1.1.1.1' != '1.0.0.0'' 2023-09-06 18:39:56,867 DEBUG: root: GOT LINE: 'waiting on notify' Sure looks like the router-id is not allowed to be changed because neighbors have already been formed. If we are changing the router-id then let's clear the process to allow it to correctly change. Signed-off-by: Donald Sharp --- tests/topotests/ospfapi/test_ospf_clientapi.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/topotests/ospfapi/test_ospf_clientapi.py b/tests/topotests/ospfapi/test_ospf_clientapi.py index 89a34ff9b58d..9e00fcf11fc1 100644 --- a/tests/topotests/ospfapi/test_ospf_clientapi.py +++ b/tests/topotests/ospfapi/test_ospf_clientapi.py @@ -218,10 +218,12 @@ def _test_router_id(tgen, testbin): step("router id: check for modified router id") r1.vtysh_multicmd("conf t\nrouter ospf\nospf router-id 1.1.1.1") + r1.vtysh_multicmd("clear ip ospf process") _wait_output(p, "SUCCESS: {}".format(waitlist[1])) step("router id: check for restored router id") r1.vtysh_multicmd("conf t\nrouter ospf\nospf router-id 1.0.0.0") + r1.vtysh_multicmd("clear ip ospf process") _wait_output(p, "SUCCESS: {}".format(waitlist[2])) except Exception as error: logging.error("ERROR: %s", error) From bf05536741872d5df361eeb521f45ca7f9d9b1e5 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 7 Sep 2023 08:06:28 -0400 Subject: [PATCH 49/69] tests: fix max med on startup The test is failing because on r2 we are looking for a metric of 777 on startup, but the start of looking for this happens to be after the 5 second delay that is setup in the config. On r1: 2023/09/06 17:05:14.999407 BGP: [G822R-SBMNH] config-from-file# router bgp 65001 2023/09/06 17:05:15.003060 BGP: [G822R-SBMNH] config-from-file# bgp max-med on-startup 5 777 2023/09/06 17:05:15.003342 BGP: [G822R-SBMNH] config-from-file# no bgp ebgp-requires-policy 2023/09/06 17:05:15.003453 BGP: [G822R-SBMNH] config-from-file# neighbor 192.168.255.2 remote-as 65001 2023/09/06 17:05:15.004029 BGP: [G822R-SBMNH] config-from-file# neighbor 192.168.255.2 timers 3 10 2023/09/06 17:05:15.004242 BGP: [G822R-SBMNH] config-from-file# address-family ipv4 unicast 2023/09/06 17:05:15.004329 BGP: [G822R-SBMNH] config-from-file# redistribute connected 2023/09/06 17:05:15.005023 BGP: [G822R-SBMNH] config-from-file# exit-address-family 2023/09/06 17:05:15.005140 BGP: [G822R-SBMNH] config-from-file# ! 2023/09/06 17:05:15.005162 BGP: [G822R-SBMNH] config-from-file# ! 2023/09/06 17:05:17.538112 BGP: [M7Q4P-46WDR] vty[25]@> enable 2023/09/06 17:05:17.546700 BGP: [M7Q4P-46WDR] vty[25]@# clear log cmdline-targets 2023/09/06 17:05:17.570635 BGP: [M7Q4P-46WDR] vty[25]@(config)# log commands 2023/09/06 17:05:17.572518 BGP: [M7Q4P-46WDR] vty[25]@(config)# log timestamp precision 6 2023/09/06 17:05:24.982647 BGP: [YNGC8-65JDM] Begin maxmed onstartup mode - timer 5 seconds 2023/09/06 17:05:26.033134 BGP: [M59KS-A3ZXZ] bgp_update_receive: rcvd End-of-RIB for IPv4 Unicast from 192.168.255.2 in vrf default 2023/09/06 17:05:29.982960 BGP: [N1747-51Y51] Max med on startup ended - timer expired. 
on r2: 2023/09/06 17:05:23.976029 BGP: [G822R-SBMNH] config-from-file# ! 2023/09/06 17:05:26.084086 BGP: [M59KS-A3ZXZ] bgp_update_receive: rcvd End-of-RIB for IPv4 Unicast from 192.168.255.1 in vrf default 2023/09/06 17:05:27.280103 BGP: [M7Q4P-46WDR] vty[25]@> enable 2023/09/06 17:05:27.290204 BGP: [M7Q4P-46WDR] vty[25]@# clear log cmdline-targets 2023/09/06 17:05:27.328798 BGP: [M7Q4P-46WDR] vty[25]@(config)# log commands 2023/09/06 17:05:27.335032 BGP: [M7Q4P-46WDR] vty[25]@(config)# log timestamp precision 6 2023/09/06 17:05:31.558216 BGP: [M7Q4P-46WDR] vty[5]@> enable 2023/09/06 17:05:31.562482 BGP: [M7Q4P-46WDR] vty[5]@# do show logging 2023/09/06 17:05:32.942204 BGP: [M7Q4P-46WDR] vty[5]@> enable 2023/09/06 17:05:32.946745 BGP: [M7Q4P-46WDR] vty[5]@# show ip bgp neighbor 192.168.255.1 json 2023/09/06 17:05:34.173879 BGP: [M7Q4P-46WDR] vty[5]@> enable 2023/09/06 17:05:34.178448 BGP: [M7Q4P-46WDR] vty[5]@# show ip bgp neighbor 192.168.255.1 routes json 2023/09/06 17:05:36.459365 BGP: [M7Q4P-46WDR] vty[5]@> enable 2023/09/06 17:05:36.472019 BGP: [M7Q4P-46WDR] vty[5]@# show ip bgp neighbor 192.168.255.1 routes json 2023/09/06 17:05:38.557840 BGP: [M7Q4P-46WDR] vty[5]@> enable 2023/09/06 17:05:38.558948 BGP: [M7Q4P-46WDR] vty[5]@# show ip bgp neighbor 192.168.255.1 routes json 2023/09/06 17:05:40.198563 BGP: [M7Q4P-46WDR] vty[5]@> enable Notice that the 5 second delay for the max med expires at 29 seconds but the show routes on r2 does not even begin until 34 seconds, long after the max med has expired and the test has moved on. Let's relax the max-med timer to 30 seconds and modify the test to wait a bit longer for both finding it and expiring timer. Signed-off-by: Donald Sharp --- tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf | 2 +- .../bgp_max_med_on_startup/test_bgp_max_med_on_startup.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf b/tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf index 41bf96344ad0..14f90b859db0 100644 --- a/tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf +++ b/tests/topotests/bgp_max_med_on_startup/r1/bgpd.conf @@ -1,6 +1,6 @@ ! 
router bgp 65001 - bgp max-med on-startup 5 777 + bgp max-med on-startup 30 777 no bgp ebgp-requires-policy neighbor 192.168.255.2 remote-as 65001 neighbor 192.168.255.2 timers 3 10 diff --git a/tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py b/tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py index 545d7bd245a9..12ec88249a96 100644 --- a/tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py +++ b/tests/topotests/bgp_max_med_on_startup/test_bgp_max_med_on_startup.py @@ -82,12 +82,12 @@ def _bgp_has_routes(router, metric): # Check session is established test_func = functools.partial(_bgp_converge, router2) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1.0) assert result is None, "Failed bgp convergence on r2" # Check metric has value of max-med test_func = functools.partial(_bgp_has_routes, router2, 777) - _, result = topotest.run_and_expect(test_func, None, count=30, wait=0.5) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=1.0) assert result is None, "r2 does not receive routes with metric 777" # Check that when the max-med timer expires, metric is updated From 277784fe34d3036c9f6f37bc7deff7fb235012ee Mon Sep 17 00:00:00 2001 From: Mark Stapp Date: Mon, 25 Nov 2024 15:37:39 -0500 Subject: [PATCH 50/69] zebra: avoid a race during FPM dplane plugin shutdown During zebra shutdown, the main pthread and the FPM pthread can deadlock if the FPM pthread is in fpm_reconnect(). Each pthread tries to use event_cancel_async() to cancel tasks that may be scheduled for the other pthread - this leads to a deadlock as neither thread can progress. This adds an atomic boolean that's managed as each pthread enters and leaves the cleanup code in question, preventing the two threads from running into the deadlock. Signed-off-by: Mark Stapp --- zebra/dplane_fpm_nl.c | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c index e6b4af367429..3ec1c9d65723 100644 --- a/zebra/dplane_fpm_nl.c +++ b/zebra/dplane_fpm_nl.c @@ -68,6 +68,8 @@ static const char *prov_name = "dplane_fpm_nl"; +static atomic_bool fpm_cleaning_up; + struct fpm_nl_ctx { /* data plane connection. */ int socket; @@ -524,6 +526,16 @@ static void fpm_connect(struct event *t); static void fpm_reconnect(struct fpm_nl_ctx *fnc) { + bool cleaning_p = false; + + /* This is being called in the FPM pthread: ensure we don't deadlock + * with similar code that may be run in the main pthread. + */ + if (!atomic_compare_exchange_strong_explicit( + &fpm_cleaning_up, &cleaning_p, true, memory_order_seq_cst, + memory_order_seq_cst)) + return; + /* Cancel all zebra threads first. */ event_cancel_async(zrouter.master, &fnc->t_lspreset, NULL); event_cancel_async(zrouter.master, &fnc->t_lspwalk, NULL); @@ -551,6 +563,12 @@ static void fpm_reconnect(struct fpm_nl_ctx *fnc) EVENT_OFF(fnc->t_read); EVENT_OFF(fnc->t_write); + /* Reset the barrier value */ + cleaning_p = true; + atomic_compare_exchange_strong_explicit( + &fpm_cleaning_up, &cleaning_p, false, memory_order_seq_cst, + memory_order_seq_cst); + /* FPM is disabled, don't attempt to connect. 
*/ if (fnc->disabled) return; @@ -1624,6 +1642,16 @@ static int fpm_nl_start(struct zebra_dplane_provider *prov) static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc) { + bool cleaning_p = false; + + /* This is being called in the main pthread: ensure we don't deadlock + * with similar code that may be run in the FPM pthread. + */ + if (!atomic_compare_exchange_strong_explicit( + &fpm_cleaning_up, &cleaning_p, true, memory_order_seq_cst, + memory_order_seq_cst)) + return 0; + /* Disable all events and close socket. */ EVENT_OFF(fnc->t_lspreset); EVENT_OFF(fnc->t_lspwalk); @@ -1644,6 +1672,12 @@ static int fpm_nl_finish_early(struct fpm_nl_ctx *fnc) fnc->socket = -1; } + /* Reset the barrier value */ + cleaning_p = true; + atomic_compare_exchange_strong_explicit( + &fpm_cleaning_up, &cleaning_p, false, memory_order_seq_cst, + memory_order_seq_cst); + return 0; } From 2484793bc50d6e83c35bc6e8b1c59d82ab20e3a1 Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Tue, 26 Nov 2024 11:26:55 +0200 Subject: [PATCH 51/69] tests: Fix docker build for topotests Signed-off-by: Donatas Abraitis --- tests/topotests/Dockerfile | 138 +++++++++++++------- tests/topotests/docker/inner/compile_frr.sh | 5 +- 2 files changed, 90 insertions(+), 53 deletions(-) diff --git a/tests/topotests/Dockerfile b/tests/topotests/Dockerfile index 1503e67d31dc..d55827fe6c57 100644 --- a/tests/topotests/Dockerfile +++ b/tests/topotests/Dockerfile @@ -1,60 +1,98 @@ -FROM ubuntu:18.04 +FROM ubuntu:22.04 -RUN export DEBIAN_FRONTEND=noninteractive \ - && apt-get update \ - && apt-get install -y \ - autoconf \ - binutils \ - bison \ - ca-certificates \ - flex \ +ARG DEBIAN_FRONTEND=noninteractive +ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=DontWarn + +RUN apt update -y && apt upgrade -y && \ + # Basic build requirements from documentation + apt-get install -y \ + autoconf \ + automake \ + bison \ + build-essential \ + flex \ + git \ + install-info \ + libc-ares-dev \ + libcap-dev \ + libelf-dev \ + libjson-c-dev \ + libpam0g-dev \ + libreadline-dev \ + libsnmp-dev \ + libsqlite3-dev \ + lsb-release \ + libtool \ + lcov \ + make \ + perl \ + pkg-config \ + python3-dev \ + python3-sphinx \ + screen \ + texinfo \ + tmux \ + && \ + # Protobuf build requirements + apt-get install -y \ + libprotobuf-c-dev \ + protobuf-c-compiler \ + && \ + # Libyang2 extra build requirements + apt-get install -y \ + cmake \ + libpcre2-dev \ + && \ + # GRPC extra build requirements + apt-get install -y \ + libgrpc-dev \ + libgrpc++-dev \ + protobuf-compiler-grpc \ + && \ + # Runtime/triage/testing requirements + apt-get install -y \ + rsync \ + curl \ gdb \ - git \ - gpg \ - install-info \ - iputils-ping \ + kmod \ iproute2 \ - less \ - libtool \ - libjson-c-dev \ - libpcre3-dev \ - libpython-dev \ - libpython3-dev \ - libreadline-dev \ - libc-ares-dev \ - libcap-dev \ - libelf-dev \ - man \ - mininet \ - pkg-config \ - python-pip \ + iputils-ping \ + liblua5.3-dev \ + libssl-dev \ + lua5.3 \ + net-tools \ python3 \ - python3-dev \ - python3-sphinx \ - python3-pytest \ - rsync \ + python3-pip \ + snmp \ + snmp-mibs-downloader \ + snmpd \ + sudo \ + time \ + tshark \ + valgrind \ + yodl \ strace \ tcpdump \ - texinfo \ - tmux \ - valgrind \ - vim \ - wget \ - x11-xserver-utils \ - xterm \ - && pip install \ - exabgp==3.4.17 \ - "scapy>=2.4.2" \ - ipaddr \ - pytest \ - && rm -rf /var/lib/apt/lists/* + && \ + download-mibs && \ + wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/iana/IANA-IPPM-METRICS-REGISTRY-MIB -O 
/usr/share/snmp/mibs/iana/IANA-IPPM-METRICS-REGISTRY-MIB && \ + wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/ietf/SNMPv2-PDU -O /usr/share/snmp/mibs/ietf/SNMPv2-PDU && \ + wget https://raw.githubusercontent.com/FRRouting/frr-mibs/main/ietf/IPATM-IPMC-MIB -O /usr/share/snmp/mibs/ietf/IPATM-IPMC-MIB && \ + python3 -m pip install wheel && \ + python3 -m pip install 'protobuf<4' grpcio grpcio-tools && \ + python3 -m pip install 'pytest>=6.2.4' 'pytest-xdist>=2.3.0' && \ + python3 -m pip install 'scapy>=2.4.5' && \ + python3 -m pip install xmltodict && \ + python3 -m pip install git+https://github.com/Exa-Networks/exabgp@0659057837cd6c6351579e9f0fa47e9fb7de7311 + +# Install FRR built packages +RUN mkdir -p /etc/apt/keyrings && \ + curl -s -o /etc/apt/keyrings/frrouting.gpg https://deb.frrouting.org/frr/keys.gpg && \ + echo deb '[signed-by=/etc/apt/keyrings/frrouting.gpg]' https://deb.frrouting.org/frr \ + $(lsb_release -s -c) "frr-stable" > /etc/apt/sources.list.d/frr.list && \ + apt-get update && apt-get install -y librtr-dev libyang2-dev libyang2-tools -RUN export DEBIAN_FRONTEND=noninteractive \ - && wget -qO- https://deb.frrouting.org/frr/keys.asc | apt-key add - \ - && echo "deb https://deb.frrouting.org/frr bionic frr-stable" > /etc/apt/sources.list.d/frr.list \ - && apt-get update \ - && apt-get install -y libyang-dev \ - && rm -rf /var/lib/apt/lists/* +RUN apt install -y openvswitch-switch RUN groupadd -r -g 92 frr \ && groupadd -r -g 85 frrvty \ diff --git a/tests/topotests/docker/inner/compile_frr.sh b/tests/topotests/docker/inner/compile_frr.sh index 4a88dc677f3c..e943c385c730 100755 --- a/tests/topotests/docker/inner/compile_frr.sh +++ b/tests/topotests/docker/inner/compile_frr.sh @@ -58,9 +58,6 @@ if [ ! -e Makefile ]; then fi bash configure >&3 \ - --enable-static-bin \ - --enable-static \ - --enable-shared \ --enable-dev-build \ --with-moduledir=/usr/lib/frr/modules \ --prefix=/usr \ @@ -69,6 +66,8 @@ if [ ! -e Makefile ]; then --sbindir=/usr/lib/frr \ --enable-multipath=0 \ --enable-fpm \ + --enable-grpc \ + --enable-scripting \ --enable-sharpd \ $EXTRA_CONFIGURE \ --with-pkg-extra-version=-topotests \ From 6eb3a602893d21cc70047f20597adf594d6a45ea Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Tue, 26 Nov 2024 11:34:12 +0200 Subject: [PATCH 52/69] tests: Set vm.mmap_rnd_bits for topotests docker foobar Signed-off-by: Donatas Abraitis --- tests/topotests/docker/inner/entrypoint.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/topotests/docker/inner/entrypoint.sh b/tests/topotests/docker/inner/entrypoint.sh index 44e16db4b9ff..b92217440b1d 100755 --- a/tests/topotests/docker/inner/entrypoint.sh +++ b/tests/topotests/docker/inner/entrypoint.sh @@ -20,6 +20,11 @@ cd "${FRR_BUILD_DIR}/tests/topotests" log_info "Setting permissions on /tmp so we can generate logs" chmod 1777 /tmp +# This is a MUST, otherwise we have: +# AddressSanitizer:DEADLYSIGNAL +# Segmentation fault +sysctl -w vm.mmap_rnd_bits=28 + if [ $# -eq 0 ] || ([[ "$1" != /* ]] && [[ "$1" != ./* ]]); then export TOPOTESTS_CHECK_MEMLEAK=/tmp/memleak_ export TOPOTESTS_CHECK_STDERR=Yes From f3189d797d5edccb7af78d1db0b6d81a2512027c Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Tue, 26 Nov 2024 11:42:49 +0200 Subject: [PATCH 53/69] tests: Deprecate TOPOTEST_PULL We do not maintain docker.com/frrouting anymore and not building custom images for topotests. Use local images for topotests instead. 
Just use: ``` mak topotests-build make topotests ``` Signed-off-by: Donatas Abraitis --- doc/developer/topotests.rst | 2 +- tests/topotests/docker/README.md | 2 +- tests/topotests/docker/frr-topotests.sh | 7 ------- 3 files changed, 2 insertions(+), 9 deletions(-) diff --git a/doc/developer/topotests.rst b/doc/developer/topotests.rst index 5077745a1563..9e05a99474f6 100644 --- a/doc/developer/topotests.rst +++ b/doc/developer/topotests.rst @@ -881,7 +881,7 @@ commands: .. code:: console make topotests-build - TOPOTEST_PULL=0 make topotests + make topotests .. _topotests-guidelines: diff --git a/tests/topotests/docker/README.md b/tests/topotests/docker/README.md index 2b40994cf61f..2bd58a15b8a3 100644 --- a/tests/topotests/docker/README.md +++ b/tests/topotests/docker/README.md @@ -68,5 +68,5 @@ without pulling from the registry using the following commands: ```console make topotests-build -TOPOTEST_PULL=0 make topotests +make topotests ``` diff --git a/tests/topotests/docker/frr-topotests.sh b/tests/topotests/docker/frr-topotests.sh index ce373d9bd082..bd37055147a7 100755 --- a/tests/topotests/docker/frr-topotests.sh +++ b/tests/topotests/docker/frr-topotests.sh @@ -45,9 +45,6 @@ if [[ "$1" = "-h" ]] || [[ "$1" = "--help" ]]; then TOPOTEST_OPTIONS These options are appended to the docker-run command for starting the tests. - TOPOTEST_PULL If set to 0, don't try to pull the most recent - version of the docker image from dockerhub. - TOPOTEST_SANITIZER Controls whether to use the address sanitizer. Enabled by default, set to 0 to disable. @@ -122,10 +119,6 @@ if [ -z "$TOPOTEST_BUILDCACHE" ]; then || docker volume create "${TOPOTEST_BUILDCACHE}" fi -if [ "${TOPOTEST_PULL:-1}" = "1" ]; then - docker pull frrouting/topotests:latest -fi - if [[ -n "$TMUX" ]]; then TMUX_OPTIONS="-v $(dirname $TMUX):$(dirname $TMUX) -e TMUX=$TMUX -e TMUX_PANE=$TMUX_PANE" fi From c5d7815cccb92c192ca6b752843b8b827a607b53 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Mon, 25 Nov 2024 22:47:21 +0100 Subject: [PATCH 54/69] bgpd: fix version attribute is an int, not a string The json display of the version attribute is originally an integer. It has changed, most probably mistakenly. > { > "vrfId": 7, > "vrfName": "vrf1", > "tableVersion": 3, > "routerId": "192.0.2.1", > "defaultLocPrf": 100, > "localAS": 65500, > "routes": { > "172.31.0.1/32": { > "prefix": "172.31.0.1/32", > "version": "1", <--- int or string ?? Let us fix it, by using the integer display instead. Fixes: f9f2d188e398 ("bgpd: fix 'json detail' output structure") Signed-off-by: Philippe Guibert --- bgpd/bgp_route.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 72e798a7e2c9..5feda7183739 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -12318,8 +12318,7 @@ void route_vty_out_detail_header(struct vty *vty, struct bgp *bgp, } else { if (incremental_print) { vty_out(vty, "\"prefix\": \"%pFX\",\n", p); - vty_out(vty, "\"version\": \"%" PRIu64 "\",", - dest->version); + vty_out(vty, "\"version\": %" PRIu64 ",", dest->version); } else { json_object_string_addf(json, "prefix", "%pFX", p); From 561debab5430ac85e8e42e839312d213479a767a Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Tue, 26 Nov 2024 14:19:34 +0100 Subject: [PATCH 55/69] bgpd: fix use single whitespace when displaying flowspec entries There is an extra space in the 'Displayed' line of show bgp command, that should not be present. Fix this by being consistent with the output of the other address families. 
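To make the one-character difference visible (it is easy to miss in the rendered hunk below, where whitespace may be normalized), the summary line changes roughly as follows; the entry count is an arbitrary illustrative value:

> Displayed  2 flowspec entries    <-- before: two spaces after "Displayed"
> Displayed 2 flowspec entries     <-- after: single space, matching other address families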
Fixes: ("a1baf9e84f71") bgpd: Use single whitespace when displaying show bgp summary Signed-off-by: Philippe Guibert --- bgpd/bgp_flowspec_vty.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bgpd/bgp_flowspec_vty.c b/bgpd/bgp_flowspec_vty.c index d4ccca84bb20..3d2dda4ee4d0 100644 --- a/bgpd/bgp_flowspec_vty.c +++ b/bgpd/bgp_flowspec_vty.c @@ -441,7 +441,7 @@ int bgp_show_table_flowspec(struct vty *vty, struct bgp *bgp, afi_t afi, } if (total_count && !use_json) vty_out(vty, - "\nDisplayed %ld flowspec entries\n", + "\nDisplayed %ld flowspec entries\n", total_count); return CMD_SUCCESS; } From 2e5b4e32c4d402bd547ba06ac2b797236e7eaea6 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Tue, 5 Nov 2024 15:47:08 -0500 Subject: [PATCH 56/69] bgpd: peer_notify_unconfig should be connection based Convert this function to being connection based. Signed-off-by: Donald Sharp --- bgpd/bgp_vty.c | 10 +++++----- bgpd/bgpd.c | 17 ++++++----------- bgpd/bgpd.h | 2 +- 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index d1238bc8de57..d1f20b3bbacb 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -5264,7 +5264,7 @@ DEFUN (no_neighbor, * interface. */ if (peer->ifp) bgp_zebra_terminate_radv(peer->bgp, peer); - peer_notify_unconfig(peer); + peer_notify_unconfig(peer->connection); peer_delete(peer); return CMD_SUCCESS; } @@ -5300,10 +5300,10 @@ DEFUN (no_neighbor, if (CHECK_FLAG(peer->flags, PEER_FLAG_CAPABILITY_ENHE)) bgp_zebra_terminate_radv(peer->bgp, peer); - peer_notify_unconfig(peer); + peer_notify_unconfig(peer->connection); peer_delete(peer); if (other && other->connection->status != Deleted) { - peer_notify_unconfig(other); + peer_notify_unconfig(other->connection); peer_delete(other); } } @@ -5338,7 +5338,7 @@ DEFUN (no_neighbor_interface_config, /* Request zebra to terminate IPv6 RAs on this interface. 
*/ if (peer->ifp) bgp_zebra_terminate_radv(peer->bgp, peer); - peer_notify_unconfig(peer); + peer_notify_unconfig(peer->connection); peer_delete(peer); } else { vty_out(vty, "%% Create the bgp interface first\n"); @@ -5746,7 +5746,7 @@ DEFUN (no_neighbor_set_peer_group, if (CHECK_FLAG(peer->flags, PEER_FLAG_CAPABILITY_ENHE)) bgp_zebra_terminate_radv(peer->bgp, peer); - peer_notify_unconfig(peer); + peer_notify_unconfig(peer->connection); ret = peer_delete(peer); return bgp_vty_return(vty, ret); diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 258fc87f96db..947251e01492 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -3076,11 +3076,10 @@ int peer_group_remote_as(struct bgp *bgp, const char *group_name, as_t *as, return 0; } -void peer_notify_unconfig(struct peer *peer) +void peer_notify_unconfig(struct peer_connection *connection) { - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_PEER_UNCONFIG); + if (BGP_IS_VALID_STATE_FOR_NOTIF(connection->status)) + bgp_notify_send(connection, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_PEER_UNCONFIG); } static void peer_notify_shutdown(struct peer *peer) @@ -3107,9 +3106,9 @@ void peer_group_notify_unconfig(struct peer_group *group) other = peer->doppelganger; if (other && other->connection->status != Deleted) { other->group = NULL; - peer_notify_unconfig(other); + peer_notify_unconfig(other->connection); } else - peer_notify_unconfig(peer); + peer_notify_unconfig(peer->connection); } } @@ -8841,11 +8840,7 @@ void bgp_terminate(void) peer); continue; } - if (BGP_IS_VALID_STATE_FOR_NOTIF( - peer->connection->status)) - bgp_notify_send(peer->connection, - BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_PEER_UNCONFIG); + peer_notify_unconfig(peer->connection); } } diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index f123188ae8c4..e5252b78b4f5 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -2385,7 +2385,7 @@ extern int peer_remote_as(struct bgp *bgp, union sockunion *su, extern int peer_group_remote_as(struct bgp *bgp, const char *peer_str, as_t *as, enum peer_asn_type as_type, const char *as_str); extern int peer_delete(struct peer *peer); -extern void peer_notify_unconfig(struct peer *peer); +extern void peer_notify_unconfig(struct peer_connection *connection); extern int peer_group_delete(struct peer_group *); extern int peer_group_remote_as_delete(struct peer_group *); extern int peer_group_listen_range_add(struct peer_group *, struct prefix *); From ba0edb9545038a8026813d5997a958cc6ed88765 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 6 Nov 2024 08:15:06 -0500 Subject: [PATCH 57/69] bgpd: Add `peer_notify_config_change()` function We have about a bajillion tests of if we can notify the peer and then we send a config change notification. Let's just make a function that does this. 
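The helper's definition is not part of the hunks quoted here; as a rough sketch only (an assumption modeled on the peer_notify_unconfig() shown above and on how the converted call sites use it), it presumably folds the repeated validity check and the CEASE/CONFIG_CHANGE notification into one place:

```c
/* Sketch, not the committed definition: send a config-change CEASE
 * notification if the connection is in a state where one may be sent. */
void peer_notify_config_change(struct peer_connection *connection)
{
	if (BGP_IS_VALID_STATE_FOR_NOTIF(connection->status))
		bgp_notify_send(connection, BGP_NOTIFY_CEASE,
				BGP_NOTIFY_CEASE_CONFIG_CHANGE);
}
```

Callers that previously fell back to bgp_session_reset() when no notification could be sent keep that check at the call site, as the bgp_fsm.c hunks below show.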
Signed-off-by: Donald Sharp --- bgpd/bgp_fsm.c | 12 ++--- bgpd/bgp_vty.c | 11 ++--- bgpd/bgpd.c | 131 +++++++++++++++++-------------------------------- bgpd/bgpd.h | 1 + 4 files changed, 52 insertions(+), 103 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 490451f193db..2cdbadc63c58 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -2746,8 +2746,7 @@ static void bgp_gr_update_mode_of_all_peers(struct bgp *bgp, peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset_safe(peer, &nnode); } else { @@ -2769,8 +2768,7 @@ static void bgp_gr_update_mode_of_all_peers(struct bgp *bgp, member->last_reset = PEER_DOWN_CAPABILITY_CHANGE; if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); } @@ -2974,8 +2972,7 @@ unsigned int bgp_peer_gr_action(struct peer *peer, enum peer_mode old_state, peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); } else { @@ -2985,8 +2982,7 @@ unsigned int bgp_peer_gr_action(struct peer *peer, enum peer_mode old_state, bgp_peer_move_to_gr_mode(member, new_state); if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); } diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index d1f20b3bbacb..651ec71b1a90 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -2940,9 +2940,7 @@ DEFUN(bgp_reject_as_sets, bgp_reject_as_sets_cmd, */ for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { peer->last_reset = PEER_DOWN_AS_SETS_REJECT; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } return CMD_SUCCESS; @@ -2965,9 +2963,7 @@ DEFUN(no_bgp_reject_as_sets, no_bgp_reject_as_sets_cmd, */ for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { peer->last_reset = PEER_DOWN_AS_SETS_REJECT; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } return CMD_SUCCESS; @@ -5101,8 +5097,7 @@ static int peer_conf_interface_get(struct vty *vty, const char *conf_if, /* v6only flag changed. 
Reset bgp seesion */ if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); } diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 947251e01492..5940b20de8c8 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -309,9 +309,7 @@ static int bgp_router_id_set(struct bgp *bgp, const struct in_addr *id, peer->last_reset = PEER_DOWN_RID_CHANGE; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } /* EVPN uses router id in RD, update them */ @@ -447,8 +445,7 @@ void bm_wait_for_fib_set(bool set) peer->connection->status)) continue; - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } } } @@ -507,8 +504,7 @@ void bgp_suppress_fib_pending_set(struct bgp *bgp, bool set) if (!BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) continue; - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } } @@ -532,9 +528,7 @@ void bgp_cluster_id_set(struct bgp *bgp, struct in_addr *cluster_id) peer->last_reset = PEER_DOWN_CLID_CHANGE; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } } @@ -556,9 +550,7 @@ void bgp_cluster_id_unset(struct bgp *bgp) peer->last_reset = PEER_DOWN_CLID_CHANGE; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } } @@ -641,9 +633,7 @@ void bgp_confederation_id_set(struct bgp *bgp, as_t as, const char *as_str) peer->connection->status)) { peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE; - bgp_notify_send(peer->connection, - BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } else bgp_session_reset_safe(peer, &nnode); } @@ -659,9 +649,7 @@ void bgp_confederation_id_set(struct bgp *bgp, as_t as, const char *as_str) peer->connection->status)) { peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE; - bgp_notify_send(peer->connection, - BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } else bgp_session_reset_safe(peer, &nnode); } @@ -686,9 +674,7 @@ void bgp_confederation_id_unset(struct bgp *bgp) peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE; if (BGP_IS_VALID_STATE_FOR_NOTIF( peer->connection->status)) - bgp_notify_send(peer->connection, - BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset_safe(peer, &nnode); } @@ -740,9 +726,7 @@ void bgp_confederation_peers_add(struct bgp *bgp, as_t as, const char *as_str) peer->connection->status)) { peer->last_reset = PEER_DOWN_CONFED_PEER_CHANGE; - bgp_notify_send(peer->connection, - BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } else bgp_session_reset_safe(peer, &nnode); } @@ -797,9 +781,7 @@ void bgp_confederation_peers_remove(struct bgp *bgp, as_t as) peer->connection->status)) { peer->last_reset = PEER_DOWN_CONFED_PEER_CHANGE; - bgp_notify_send(peer->connection, - 
BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } else bgp_session_reset_safe(peer, &nnode); } @@ -2100,8 +2082,7 @@ void peer_as_change(struct peer *peer, as_t as, enum peer_asn_type as_type, if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer->last_reset = PEER_DOWN_REMOTE_AS_CHANGE; if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); } @@ -2467,15 +2448,11 @@ static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi) false); } } else { - bgp_notify_send(peer->connection, - BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } } - if (peer->connection->status == OpenSent || - peer->connection->status == OpenConfirm) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); + /* * If we are turning on a AFI/SAFI locally and we've * started bringing a peer up, we need to tell @@ -2488,8 +2465,7 @@ static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi) other = peer->doppelganger; if (other && (other->connection->status == OpenSent || other->connection->status == OpenConfirm)) - bgp_notify_send(other->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(other->connection); } return 0; @@ -2596,14 +2572,10 @@ static bool non_peergroup_deactivate_af(struct peer *peer, afi_t afi, bgp_clear_route(peer, afi, safi); peer->pcount[afi][safi] = 0; } else { - bgp_notify_send(peer->connection, - BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } - } else { - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); - } + } else + peer_notify_config_change(peer->connection); } return false; @@ -3076,6 +3048,12 @@ int peer_group_remote_as(struct bgp *bgp, const char *group_name, as_t *as, return 0; } +void peer_notify_config_change(struct peer_connection *connection) +{ + if (BGP_IS_VALID_STATE_FOR_NOTIF(connection->status)) + bgp_notify_send(connection, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_CONFIG_CHANGE); +} + void peer_notify_unconfig(struct peer_connection *connection) { if (BGP_IS_VALID_STATE_FOR_NOTIF(connection->status)) @@ -3356,8 +3334,7 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer, peer->last_reset = PEER_DOWN_RMAP_BIND; if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); } @@ -4725,8 +4702,7 @@ void peer_change_action(struct peer *peer, afi_t afi, safi_t safi, PEER_FLAG_CONFIG_NODE))) peer_delete(peer->doppelganger); - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } else if (type == peer_change_reset_in) { if (CHECK_FLAG(peer->cap, PEER_CAP_REFRESH_RCV)) bgp_route_refresh_send(peer, afi, safi, 0, 0, 0, @@ -4738,8 +4714,7 @@ void peer_change_action(struct peer *peer, afi_t afi, safi_t safi, PEER_FLAG_CONFIG_NODE))) peer_delete(peer->doppelganger); - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } } else if (type == 
peer_change_reset_out) { paf = peer_af_find(peer, afi, safi); @@ -4939,8 +4914,7 @@ static void peer_flag_modify_action(struct peer *peer, uint64_t flag) BGP_EVENT_ADD(peer->connection, BGP_Stop); } } else if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) { - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); } else bgp_session_reset(peer); } @@ -5427,9 +5401,7 @@ int peer_ebgp_multihop_set(struct peer *peer, int ttl) if (peer->sort != BGP_PEER_IBGP) { if (BGP_IS_VALID_STATE_FOR_NOTIF( peer->connection->status)) - bgp_notify_send(peer->connection, - BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); @@ -5446,8 +5418,7 @@ int peer_ebgp_multihop_set(struct peer *peer, int ttl) member->ttl = group->conf->ttl; if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); @@ -5484,8 +5455,7 @@ int peer_ebgp_multihop_unset(struct peer *peer) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); @@ -5502,8 +5472,7 @@ int peer_ebgp_multihop_unset(struct peer *peer) if (member->connection->fd >= 0) { if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); } @@ -5657,8 +5626,7 @@ int peer_update_source_if_set(struct peer *peer, const char *ifname) peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); @@ -5695,8 +5663,7 @@ int peer_update_source_if_set(struct peer *peer, const char *ifname) /* Send notification or reset peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); @@ -5728,8 +5695,7 @@ void peer_update_source_addr_set(struct peer *peer, const union sockunion *su) peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); @@ -5765,8 +5731,7 @@ void peer_update_source_addr_set(struct peer *peer, const union sockunion *su) /* Send notification or reset peer depending on state. 
*/ if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(member); @@ -5816,8 +5781,7 @@ void peer_update_source_unset(struct peer *peer) peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); @@ -5852,8 +5816,7 @@ void peer_update_source_unset(struct peer *peer) /* Send notification or reset peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); @@ -6885,8 +6848,7 @@ int peer_local_as_unset(struct peer *peer) peer->last_reset = PEER_DOWN_LOCAL_AS_CHANGE; /* Send notification or stop peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else BGP_EVENT_ADD(peer->connection, BGP_Stop); @@ -6914,8 +6876,7 @@ int peer_local_as_unset(struct peer *peer) /* Send notification or stop peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); } @@ -6946,8 +6907,7 @@ int peer_password_set(struct peer *peer, const char *password) peer->last_reset = PEER_DOWN_PASSWORD_CHANGE; /* Send notification or reset peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); @@ -6984,8 +6944,7 @@ int peer_password_set(struct peer *peer, const char *password) member->last_reset = PEER_DOWN_PASSWORD_CHANGE; /* Send notification or reset peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); @@ -7030,8 +6989,7 @@ int peer_password_unset(struct peer *peer) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { /* Send notification or reset peer depending on state. */ if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - bgp_notify_send(peer->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(peer->connection); else bgp_session_reset(peer); @@ -7057,8 +7015,7 @@ int peer_password_unset(struct peer *peer) /* Send notification or reset peer depending on state. 
*/ if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - bgp_notify_send(member->connection, BGP_NOTIFY_CEASE, - BGP_NOTIFY_CEASE_CONFIG_CHANGE); + peer_notify_config_change(member->connection); else bgp_session_reset(member); diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index e5252b78b4f5..5af2d387ae53 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -2386,6 +2386,7 @@ extern int peer_group_remote_as(struct bgp *bgp, const char *peer_str, as_t *as, enum peer_asn_type as_type, const char *as_str); extern int peer_delete(struct peer *peer); extern void peer_notify_unconfig(struct peer_connection *connection); +extern void peer_notify_config_change(struct peer_connection *connection); extern int peer_group_delete(struct peer_group *); extern int peer_group_remote_as_delete(struct peer_group *); extern int peer_group_listen_range_add(struct peer_group *, struct prefix *); From eacf923b00c019e9a877c9716e5d6506594d532e Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 6 Nov 2024 08:24:28 -0500 Subject: [PATCH 58/69] bgpd: Fix pattern of usage in bgp_notify_config_change if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) peer_notify_config_change(peer->connection); else bgp_session_reset_safe(peer, &nnode); Let's add a bool return to peer_notify_config_change of whether or not it should call the peer session reset. This simplifies the code a bunch. Signed-off-by: Donald Sharp --- bgpd/bgp_fsm.c | 16 ++----- bgpd/bgp_vty.c | 4 +- bgpd/bgpd.c | 117 +++++++++++++++---------------------------------- bgpd/bgpd.h | 2 +- 4 files changed, 41 insertions(+), 98 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 2cdbadc63c58..6734c5e8dca6 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -2745,9 +2745,7 @@ static void bgp_gr_update_mode_of_all_peers(struct bgp *bgp, peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset_safe(peer, &nnode); } else { group = peer->group; @@ -2767,9 +2765,7 @@ static void bgp_gr_update_mode_of_all_peers(struct bgp *bgp, member->last_reset = PEER_DOWN_CAPABILITY_CHANGE; - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); } } @@ -2971,9 +2967,7 @@ unsigned int bgp_peer_gr_action(struct peer *peer, enum peer_mode old_state, if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer->last_reset = PEER_DOWN_CAPABILITY_CHANGE; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); } else { group = peer->group; @@ -2981,9 +2975,7 @@ unsigned int bgp_peer_gr_action(struct peer *peer, enum peer_mode old_state, member->last_reset = PEER_DOWN_CAPABILITY_CHANGE; bgp_peer_move_to_gr_mode(member, new_state); - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); } } diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index 651ec71b1a90..6ff94129dcf5 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -5096,9 +5096,7 @@ static int peer_conf_interface_get(struct vty *vty, const char *conf_if, peer->last_reset = PEER_DOWN_V6ONLY_CHANGE; /* v6only flag changed. 
Reset bgp seesion */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); } diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 5940b20de8c8..9d0f579c2862 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -629,12 +629,10 @@ void bgp_confederation_id_set(struct bgp *bgp, as_t as, const char *as_str) if (already_confed) { if (ptype == BGP_PEER_EBGP) { peer->local_as = as; - if (BGP_IS_VALID_STATE_FOR_NOTIF( - peer->connection->status)) { + if (peer_notify_config_change(peer->connection)) peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE; - peer_notify_config_change(peer->connection); - } else + else bgp_session_reset_safe(peer, &nnode); } } else { @@ -645,12 +643,10 @@ void bgp_confederation_id_set(struct bgp *bgp, as_t as, const char *as_str) /* Reset the local_as to be our EBGP one */ if (ptype == BGP_PEER_EBGP) peer->local_as = as; - if (BGP_IS_VALID_STATE_FOR_NOTIF( - peer->connection->status)) { + if (peer_notify_config_change(peer->connection)) peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE; - peer_notify_config_change(peer->connection); - } else + else bgp_session_reset_safe(peer, &nnode); } } @@ -672,10 +668,7 @@ void bgp_confederation_id_unset(struct bgp *bgp) if (peer_sort(peer) != BGP_PEER_IBGP) { peer->local_as = bgp->as; peer->last_reset = PEER_DOWN_CONFED_ID_CHANGE; - if (BGP_IS_VALID_STATE_FOR_NOTIF( - peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset_safe(peer, &nnode); } } @@ -722,12 +715,10 @@ void bgp_confederation_peers_add(struct bgp *bgp, as_t as, const char *as_str) if (peer->as == as) { peer->local_as = bgp->as; (void)peer_sort(peer); - if (BGP_IS_VALID_STATE_FOR_NOTIF( - peer->connection->status)) { + if (peer_notify_config_change(peer->connection)) peer->last_reset = PEER_DOWN_CONFED_PEER_CHANGE; - peer_notify_config_change(peer->connection); - } else + else bgp_session_reset_safe(peer, &nnode); } } @@ -777,12 +768,10 @@ void bgp_confederation_peers_remove(struct bgp *bgp, as_t as) if (peer->as == as) { peer->local_as = bgp->confed_id; (void)peer_sort(peer); - if (BGP_IS_VALID_STATE_FOR_NOTIF( - peer->connection->status)) { + if (peer_notify_config_change(peer->connection)) peer->last_reset = PEER_DOWN_CONFED_PEER_CHANGE; - peer_notify_config_change(peer->connection); - } else + else bgp_session_reset_safe(peer, &nnode); } } @@ -2081,9 +2070,7 @@ void peer_as_change(struct peer *peer, as_t as, enum peer_asn_type as_type, /* Stop peer. */ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer->last_reset = PEER_DOWN_REMOTE_AS_CHANGE; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); } origtype = peer_sort_lookup(peer); @@ -2463,8 +2450,7 @@ static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi) * activation. 
*/ other = peer->doppelganger; - if (other && (other->connection->status == OpenSent || - other->connection->status == OpenConfirm)) + if (other) peer_notify_config_change(other->connection); } @@ -3048,10 +3034,14 @@ int peer_group_remote_as(struct bgp *bgp, const char *group_name, as_t *as, return 0; } -void peer_notify_config_change(struct peer_connection *connection) +bool peer_notify_config_change(struct peer_connection *connection) { - if (BGP_IS_VALID_STATE_FOR_NOTIF(connection->status)) + if (BGP_IS_VALID_STATE_FOR_NOTIF(connection->status)) { bgp_notify_send(connection, BGP_NOTIFY_CEASE, BGP_NOTIFY_CEASE_CONFIG_CHANGE); + return true; + } + + return false; } void peer_notify_unconfig(struct peer_connection *connection) @@ -3333,9 +3323,7 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer, peer->last_reset = PEER_DOWN_RMAP_BIND; - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); } @@ -4913,9 +4901,7 @@ static void peer_flag_modify_action(struct peer *peer, uint64_t flag) peer->v_start = BGP_INIT_START_TIMER; BGP_EVENT_ADD(peer->connection, BGP_Stop); } - } else if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) { - peer_notify_config_change(peer->connection); - } else + } else if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); } @@ -5399,10 +5385,7 @@ int peer_ebgp_multihop_set(struct peer *peer, int ttl) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { if (peer->sort != BGP_PEER_IBGP) { - if (BGP_IS_VALID_STATE_FOR_NOTIF( - peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); /* Reconfigure BFD peer with new TTL. */ @@ -5417,9 +5400,7 @@ int peer_ebgp_multihop_set(struct peer *peer, int ttl) member->ttl = group->conf->ttl; - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); /* Reconfigure BFD peer with new TTL. */ @@ -5454,9 +5435,7 @@ int peer_ebgp_multihop_unset(struct peer *peer) peer->ttl = ttl; if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); /* Reconfigure BFD peer with new TTL. */ @@ -5471,9 +5450,7 @@ int peer_ebgp_multihop_unset(struct peer *peer) member->ttl = BGP_DEFAULT_TTL; if (member->connection->fd >= 0) { - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); } @@ -5625,9 +5602,7 @@ int peer_update_source_if_set(struct peer *peer, const char *ifname) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); /* Apply new source configuration to BFD session. 
*/ @@ -5662,9 +5637,7 @@ int peer_update_source_if_set(struct peer *peer, const char *ifname) member->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); /* Apply new source configuration to BFD session. */ @@ -5694,9 +5667,7 @@ void peer_update_source_addr_set(struct peer *peer, const union sockunion *su) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); /* Apply new source configuration to BFD session. */ @@ -5730,9 +5701,7 @@ void peer_update_source_addr_set(struct peer *peer, const union sockunion *su) member->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(member); /* Apply new source configuration to BFD session. */ @@ -5780,9 +5749,7 @@ void peer_update_source_unset(struct peer *peer) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); /* Apply new source configuration to BFD session. */ @@ -5815,9 +5782,7 @@ void peer_update_source_unset(struct peer *peer) member->last_reset = PEER_DOWN_UPDATE_SOURCE_CHANGE; /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); /* Apply new source configuration to BFD session. */ @@ -6847,9 +6812,7 @@ int peer_local_as_unset(struct peer *peer) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer->last_reset = PEER_DOWN_LOCAL_AS_CHANGE; /* Send notification or stop peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) BGP_EVENT_ADD(peer->connection, BGP_Stop); /* Skip peer-group mechanics for regular peers. */ @@ -6875,9 +6838,7 @@ int peer_local_as_unset(struct peer *peer) member->last_reset = PEER_DOWN_LOCAL_AS_CHANGE; /* Send notification or stop peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); } @@ -6906,9 +6867,7 @@ int peer_password_set(struct peer *peer, const char *password) if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { peer->last_reset = PEER_DOWN_PASSWORD_CHANGE; /* Send notification or reset peer depending on state. 
*/ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); /* @@ -6943,9 +6902,7 @@ int peer_password_set(struct peer *peer, const char *password) member->last_reset = PEER_DOWN_PASSWORD_CHANGE; /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); /* Attempt to install password on socket. */ @@ -6988,9 +6945,7 @@ int peer_password_unset(struct peer *peer) /* Check if handling a regular peer. */ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(peer->connection->status)) - peer_notify_config_change(peer->connection); - else + if (!peer_notify_config_change(peer->connection)) bgp_session_reset(peer); /* Attempt to uninstall password on socket. */ @@ -7014,9 +6969,7 @@ int peer_password_unset(struct peer *peer) XFREE(MTYPE_PEER_PASSWORD, member->password); /* Send notification or reset peer depending on state. */ - if (BGP_IS_VALID_STATE_FOR_NOTIF(member->connection->status)) - peer_notify_config_change(member->connection); - else + if (!peer_notify_config_change(member->connection)) bgp_session_reset(member); /* Attempt to uninstall password on socket. */ diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 5af2d387ae53..2b6921b69503 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -2386,7 +2386,7 @@ extern int peer_group_remote_as(struct bgp *bgp, const char *peer_str, as_t *as, enum peer_asn_type as_type, const char *as_str); extern int peer_delete(struct peer *peer); extern void peer_notify_unconfig(struct peer_connection *connection); -extern void peer_notify_config_change(struct peer_connection *connection); +extern bool peer_notify_config_change(struct peer_connection *connection); extern int peer_group_delete(struct peer_group *); extern int peer_group_remote_as_delete(struct peer_group *); extern int peer_group_listen_range_add(struct peer_group *, struct prefix *); From 2771431938d3cdde4a210bbf4c600ef88c985642 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 6 Nov 2024 11:55:43 -0500 Subject: [PATCH 59/69] bgpd: Modify bgp_udpatesockname to pass in a connection Signed-off-by: Donald Sharp --- bgpd/bgp_fsm.c | 2 +- bgpd/bgp_network.c | 8 ++++---- bgpd/bgp_network.h | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 6734c5e8dca6..cd63480f20a2 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -1809,7 +1809,7 @@ bgp_connect_fail(struct peer_connection *connection) */ static void bgp_connect_in_progress_update_connection(struct peer *peer) { - bgp_updatesockname(peer); + bgp_updatesockname(peer, peer->connection); if (!peer->su_remote && !BGP_CONNECTION_SU_UNSPEC(peer->connection)) { /* if connect initiated, then dest port and dest addresses are well known */ peer->su_remote = sockunion_dup(&peer->connection->su); diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c index 844f6b9af2f1..89c71060a22a 100644 --- a/bgpd/bgp_network.c +++ b/bgpd/bgp_network.c @@ -861,7 +861,7 @@ enum connect_result bgp_connect(struct peer_connection *connection) htons(peer->port), ifindex); } -void bgp_updatesockname(struct peer *peer) +void bgp_updatesockname(struct peer *peer, struct peer_connection *connection) { if 
(peer->su_local) { sockunion_free(peer->su_local); @@ -873,14 +873,14 @@ void bgp_updatesockname(struct peer *peer) peer->su_remote = NULL; } - peer->su_local = sockunion_getsockname(peer->connection->fd); - peer->su_remote = sockunion_getpeername(peer->connection->fd); + peer->su_local = sockunion_getsockname(connection->fd); + peer->su_remote = sockunion_getpeername(connection->fd); } /* After TCP connection is established. Get local address and port. */ int bgp_getsockname(struct peer *peer) { - bgp_updatesockname(peer); + bgp_updatesockname(peer, peer->connection); if (!bgp_zebra_nexthop_set(peer->su_local, peer->su_remote, &peer->nexthop, peer)) { diff --git a/bgpd/bgp_network.h b/bgpd/bgp_network.h index 61ca19a34da5..481661825d21 100644 --- a/bgpd/bgp_network.h +++ b/bgpd/bgp_network.h @@ -23,7 +23,7 @@ extern void bgp_close_vrf_socket(struct bgp *bgp); extern void bgp_close(void); extern enum connect_result bgp_connect(struct peer_connection *connection); extern int bgp_getsockname(struct peer *peer); -extern void bgp_updatesockname(struct peer *peer); +extern void bgp_updatesockname(struct peer *peer, struct peer_connection *connection); extern int bgp_md5_set_prefix(struct bgp *bgp, struct prefix *p, const char *password); From 72f716ef2880408a3a6e71691f5c8aae24b059cd Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 6 Nov 2024 14:25:20 -0500 Subject: [PATCH 60/69] bgpd: Modify bgp_connect_in_progress_update_connection to use connection Signed-off-by: Donald Sharp --- bgpd/bgp_fsm.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index cd63480f20a2..240ec59481bc 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -1807,12 +1807,14 @@ bgp_connect_fail(struct peer_connection *connection) /* after connect is called(), getpeername is able to return * port and address on non established streams */ -static void bgp_connect_in_progress_update_connection(struct peer *peer) +static void bgp_connect_in_progress_update_connection(struct peer_connection *connection) { - bgp_updatesockname(peer, peer->connection); + struct peer *peer = connection->peer; + + bgp_updatesockname(peer, connection); if (!peer->su_remote && !BGP_CONNECTION_SU_UNSPEC(peer->connection)) { /* if connect initiated, then dest port and dest addresses are well known */ - peer->su_remote = sockunion_dup(&peer->connection->su); + peer->su_remote = sockunion_dup(&connection->su); if (sockunion_family(peer->su_remote) == AF_INET) peer->su_remote->sin.sin_port = htons(peer->port); else if (sockunion_family(peer->su_remote) == AF_INET6) @@ -1916,7 +1918,7 @@ static enum bgp_fsm_state_progress bgp_start(struct peer_connection *connection) __func__, peer->connection->fd); return BGP_FSM_FAILURE; } - bgp_connect_in_progress_update_connection(peer); + bgp_connect_in_progress_update_connection(connection); /* * - when the socket becomes ready, poll() will signify POLLOUT From 1baeb81632d46c20f7f75e619cfea73784d66c01 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 6 Nov 2024 14:31:19 -0500 Subject: [PATCH 61/69] bgpd: bgp_getsockname should use connection Let's use the connection associated with the peer instead. 
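As with the earlier conversions in this series, the entry point now takes the connection and derives the peer from it. A minimal sketch of the new shape (taken from the hunk below, with the nexthop handling trimmed):

    /* Callers pass the connection; the peer is recovered from connection->peer */
    int bgp_getsockname(struct peer_connection *connection)
    {
            struct peer *peer = connection->peer;

            bgp_updatesockname(peer, peer->connection);

            /* ... bgp_zebra_nexthop_set() handling is unchanged ... */
            return 0;
    }

Callers such as peer_xfer_conn() and bgp_connect_success() then pass the specific connection they are operating on rather than always reaching through peer->connection.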
Signed-off-by: Donald Sharp --- bgpd/bgp_fsm.c | 12 ++++++------ bgpd/bgp_network.c | 4 +++- bgpd/bgp_network.h | 2 +- bgpd/bgp_packet.c | 2 +- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 240ec59481bc..463296f0258a 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -265,7 +265,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer) from_peer->addpath_paths_limit[afi][safi]; } - if (bgp_getsockname(peer) < 0) { + if (bgp_getsockname(keeper) < 0) { flog_err(EC_LIB_SOCKET, "%%bgp_getsockname() failed for %s peer %s fd %d (from_peer fd %d)", (CHECK_FLAG(peer->sflags, PEER_STATUS_ACCEPT_PEER) @@ -277,7 +277,7 @@ static struct peer *peer_xfer_conn(struct peer *from_peer) return NULL; } if (going_away->status > Active) { - if (bgp_getsockname(from_peer) < 0) { + if (bgp_getsockname(going_away) < 0) { flog_err(EC_LIB_SOCKET, "%%bgp_getsockname() failed for %s from_peer %s fd %d (peer fd %d)", @@ -1694,11 +1694,11 @@ bgp_connect_success(struct peer_connection *connection) return bgp_stop(connection); } - if (bgp_getsockname(peer) < 0) { + if (bgp_getsockname(connection) < 0) { flog_err_sys(EC_LIB_SOCKET, "%s: bgp_getsockname(): failed for peer %s, fd %d", __func__, peer->host, connection->fd); - bgp_notify_send(peer->connection, BGP_NOTIFY_FSM_ERR, + bgp_notify_send(connection, BGP_NOTIFY_FSM_ERR, bgp_fsm_error_subcode(connection->status)); bgp_writes_on(connection); return BGP_FSM_FAILURE; @@ -1740,11 +1740,11 @@ bgp_connect_success_w_delayopen(struct peer_connection *connection) return bgp_stop(connection); } - if (bgp_getsockname(peer) < 0) { + if (bgp_getsockname(connection) < 0) { flog_err_sys(EC_LIB_SOCKET, "%s: bgp_getsockname(): failed for peer %s, fd %d", __func__, peer->host, connection->fd); - bgp_notify_send(peer->connection, BGP_NOTIFY_FSM_ERR, + bgp_notify_send(connection, BGP_NOTIFY_FSM_ERR, bgp_fsm_error_subcode(connection->status)); bgp_writes_on(connection); return BGP_FSM_FAILURE; diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c index 89c71060a22a..e6117a5ce021 100644 --- a/bgpd/bgp_network.c +++ b/bgpd/bgp_network.c @@ -878,8 +878,10 @@ void bgp_updatesockname(struct peer *peer, struct peer_connection *connection) } /* After TCP connection is established. Get local address and port. */ -int bgp_getsockname(struct peer *peer) +int bgp_getsockname(struct peer_connection *connection) { + struct peer *peer = connection->peer; + bgp_updatesockname(peer, peer->connection); if (!bgp_zebra_nexthop_set(peer->su_local, peer->su_remote, diff --git a/bgpd/bgp_network.h b/bgpd/bgp_network.h index 481661825d21..ed1a72ec8900 100644 --- a/bgpd/bgp_network.h +++ b/bgpd/bgp_network.h @@ -22,7 +22,7 @@ extern int bgp_socket(struct bgp *bgp, unsigned short port, extern void bgp_close_vrf_socket(struct bgp *bgp); extern void bgp_close(void); extern enum connect_result bgp_connect(struct peer_connection *connection); -extern int bgp_getsockname(struct peer *peer); +extern int bgp_getsockname(struct peer_connection *connection); extern void bgp_updatesockname(struct peer *peer, struct peer_connection *connection); extern int bgp_md5_set_prefix(struct bgp *bgp, struct prefix *p, diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index a76a300c11bc..e9cc52449b42 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -2054,7 +2054,7 @@ static int bgp_open_receive(struct peer_connection *connection, return BGP_Stop; /* Get sockname. 
*/ - if (bgp_getsockname(peer) < 0) { + if (bgp_getsockname(connection) < 0) { flog_err_sys(EC_LIB_SOCKET, "%s: bgp_getsockname() failed for peer: %s", __func__, peer->host); From 7bf3f53e44b8e54cdc92450ae6a68b7f17d36684 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 6 Nov 2024 15:30:32 -0500 Subject: [PATCH 62/69] bgpd: peer_active is connection oriented, make it so Signed-off-by: Donald Sharp --- bgpd/bgp_fsm.c | 4 ++-- bgpd/bgp_network.c | 6 +++--- bgpd/bgp_nexthop.c | 2 +- bgpd/bgp_zebra.c | 2 +- bgpd/bgpd.c | 27 ++++++++++++++------------- bgpd/bgpd.h | 2 +- 6 files changed, 22 insertions(+), 21 deletions(-) diff --git a/bgpd/bgp_fsm.c b/bgpd/bgp_fsm.c index 463296f0258a..cadef3997423 100644 --- a/bgpd/bgp_fsm.c +++ b/bgpd/bgp_fsm.c @@ -325,8 +325,8 @@ void bgp_timer_set(struct peer_connection *connection) /* First entry point of peer's finite state machine. In Idle status start timer is on unless peer is shutdown or peer is inactive. All other timer must be turned off */ - if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(peer) - || peer->bgp->vrf_id == VRF_UNKNOWN) { + if (BGP_PEER_START_SUPPRESSED(peer) || !peer_active(connection) || + peer->bgp->vrf_id == VRF_UNKNOWN) { EVENT_OFF(connection->t_start); } else { BGP_TIMER_ON(connection->t_start, bgp_start_timer, diff --git a/bgpd/bgp_network.c b/bgpd/bgp_network.c index e6117a5ce021..f1bea1c189ff 100644 --- a/bgpd/bgp_network.c +++ b/bgpd/bgp_network.c @@ -504,7 +504,7 @@ static void bgp_accept(struct event *thread) bgp_fsm_change_status(connection1, Active); EVENT_OFF(connection1->t_start); - if (peer_active(peer1)) { + if (peer_active(peer1->connection)) { if (CHECK_FLAG(peer1->flags, PEER_FLAG_TIMER_DELAYOPEN)) BGP_EVENT_ADD(connection1, @@ -557,7 +557,7 @@ static void bgp_accept(struct event *thread) } /* Check that at least one AF is activated for the peer. 
*/ - if (!peer_active(peer1)) { + if (!peer_active(connection1)) { if (bgp_debug_neighbor_events(peer1)) zlog_debug( "%s - incoming conn rejected - no AF activated for peer", @@ -658,7 +658,7 @@ static void bgp_accept(struct event *thread) bgp_event_update(connection1, TCP_connection_closed); } - if (peer_active(peer)) { + if (peer_active(peer->connection)) { if (CHECK_FLAG(peer->flags, PEER_FLAG_TIMER_DELAYOPEN)) BGP_EVENT_ADD(connection, TCP_connection_open_w_delay); else diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c index bf0f3b15cfde..1ef90a8e382a 100644 --- a/bgpd/bgp_nexthop.c +++ b/bgpd/bgp_nexthop.c @@ -444,7 +444,7 @@ void bgp_connected_add(struct bgp *bgp, struct connected *ifc) !peer_established(peer->connection) && !CHECK_FLAG(peer->flags, PEER_FLAG_IFPEER_V6ONLY)) { connection = peer->connection; - if (peer_active(peer)) + if (peer_active(connection)) BGP_EVENT_ADD(connection, BGP_Stop); BGP_EVENT_ADD(connection, BGP_Start); } diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 16f4a0d2df19..688dfacaa0b6 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -137,7 +137,7 @@ static void bgp_start_interface_nbrs(struct bgp *bgp, struct interface *ifp) for (ALL_LIST_ELEMENTS(bgp->peer, node, nnode, peer)) { if (peer->conf_if && (strcmp(peer->conf_if, ifp->name) == 0) && !peer_established(peer->connection)) { - if (peer_active(peer)) + if (peer_active(peer->connection)) BGP_EVENT_ADD(peer->connection, BGP_Stop); BGP_EVENT_ADD(peer->connection, BGP_Start); } diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index 9d0f579c2862..f92ae969f8c4 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -1986,7 +1986,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if, bgp->coalesce_time = MIN(BGP_MAX_SUBGROUP_COALESCE_TIME, ct); } - active = peer_active(peer); + active = peer_active(peer->connection); if (!active) { if (peer->connection->su.sa.sa_family == AF_UNSPEC) peer->last_reset = PEER_DOWN_NBR_ADDR; @@ -2019,7 +2019,7 @@ struct peer *peer_create(union sockunion *su, const char *conf_if, if (bgp->autoshutdown) peer_flag_set(peer, PEER_FLAG_SHUTDOWN); /* Set up peer's events and timers. */ - else if (!active && peer_active(peer)) + else if (!active && peer_active(peer->connection)) bgp_timer_set(peer->connection); bgp_peer_gr_flags_update(peer); @@ -2412,13 +2412,13 @@ static int peer_activate_af(struct peer *peer, afi_t afi, safi_t safi) if (peer_af_create(peer, afi, safi) == NULL) return 1; - active = peer_active(peer); + active = peer_active(peer->connection); peer->afc[afi][safi] = 1; if (peer->group) peer_group2peer_config_copy_af(peer->group, peer, afi, safi); - if (!active && peer_active(peer)) { + if (!active && peer_active(peer->connection)) { bgp_timer_set(peer->connection); } else { peer->last_reset = PEER_DOWN_AF_ACTIVATE; @@ -3358,7 +3358,7 @@ int peer_group_bind(struct bgp *bgp, union sockunion *su, struct peer *peer, } /* Set up peer's events and timers. */ - if (peer_active(peer)) + if (peer_active(peer->connection)) bgp_timer_set(peer->connection); } @@ -4599,9 +4599,11 @@ bool bgp_path_attribute_treat_as_withdraw(struct peer *peer, char *buf, } /* If peer is configured at least one address family return 1. 
*/ -bool peer_active(struct peer *peer) +bool peer_active(struct peer_connection *connection) { - if (BGP_CONNECTION_SU_UNSPEC(peer->connection)) + struct peer *peer = connection->peer; + + if (BGP_CONNECTION_SU_UNSPEC(connection)) return false; if (peer->bfd_config) { @@ -6296,7 +6298,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect) /* Skip peer-group mechanics for regular peers. */ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { if (!peer_established(peer->connection)) { - if (peer_active(peer)) + if (peer_active(peer->connection)) BGP_EVENT_ADD(peer->connection, BGP_Stop); BGP_EVENT_ADD(peer->connection, BGP_Start); } @@ -6317,7 +6319,7 @@ int peer_timers_connect_set(struct peer *peer, uint32_t connect) member->v_connect = connect; if (!peer_established(member->connection)) { - if (peer_active(member)) + if (peer_active(member->connection)) BGP_EVENT_ADD(member->connection, BGP_Stop); BGP_EVENT_ADD(member->connection, BGP_Start); } @@ -6350,7 +6352,7 @@ int peer_timers_connect_unset(struct peer *peer) /* Skip peer-group mechanics for regular peers. */ if (!CHECK_FLAG(peer->sflags, PEER_STATUS_GROUP)) { if (!peer_established(peer->connection)) { - if (peer_active(peer)) + if (peer_active(peer->connection)) BGP_EVENT_ADD(peer->connection, BGP_Stop); BGP_EVENT_ADD(peer->connection, BGP_Start); } @@ -6371,7 +6373,7 @@ int peer_timers_connect_unset(struct peer *peer) member->v_connect = peer->bgp->default_connect_retry; if (!peer_established(member->connection)) { - if (peer_active(member)) + if (peer_active(member->connection)) BGP_EVENT_ADD(member->connection, BGP_Stop); BGP_EVENT_ADD(member->connection, BGP_Start); } @@ -8646,8 +8648,7 @@ static int peer_unshut_after_cfg(struct bgp *bgp) peer->host); peer->shut_during_cfg = false; - if (peer_active(peer) && - peer->connection->status != Established) { + if (peer_active(peer->connection) && peer->connection->status != Established) { if (peer->connection->status != Idle) BGP_EVENT_ADD(peer->connection, BGP_Stop); BGP_EVENT_ADD(peer->connection, BGP_Start); diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 2b6921b69503..df55d879e71d 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -2295,7 +2295,7 @@ extern struct peer *peer_unlock_with_caller(const char *, struct peer *); extern enum bgp_peer_sort peer_sort(struct peer *peer); extern enum bgp_peer_sort peer_sort_lookup(struct peer *peer); -extern bool peer_active(struct peer *); +extern bool peer_active(struct peer_connection *connection); extern bool peer_active_nego(struct peer *); extern bool peer_afc_received(struct peer *peer); extern bool peer_afc_advertised(struct peer *peer); From 887a0840f647878e68969ce73ad94c867b72019d Mon Sep 17 00:00:00 2001 From: Chirag Shah Date: Tue, 26 Nov 2024 08:56:08 -0800 Subject: [PATCH 63/69] zebra: EVPN fix code style in vlan vni map debugs Fix up couple of style issues missed in PR 17483 Signed-off-by: Chirag Shah --- zebra/zebra_vxlan.c | 7 +++---- zebra/zebra_vxlan_if.c | 2 +- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/zebra/zebra_vxlan.c b/zebra/zebra_vxlan.c index 0658f996e6b6..ad112a4ab1f4 100644 --- a/zebra/zebra_vxlan.c +++ b/zebra/zebra_vxlan.c @@ -4757,10 +4757,9 @@ void zebra_vxlan_remote_vtep_add(vrf_id_t vrf_id, vni_t vni, /* If down or not mapped to a bridge, we're done. 
*/ if (!if_is_operative(ifp) || !zif->brslave_info.br_if) { if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "%s VNI %u VTEP %pI4 ifp %s oper %u br_if %u skipping update", - __func__, zevpn->vni, &vtep_ip, ifp->name, - if_is_operative(ifp), !zif->brslave_info.br_if); + zlog_debug("%s VNI %u VTEP %pI4 ifp %s oper %u br_if %u skipping update", + __func__, zevpn->vni, &vtep_ip, ifp->name, if_is_operative(ifp), + !zif->brslave_info.br_if); return; } diff --git a/zebra/zebra_vxlan_if.c b/zebra/zebra_vxlan_if.c index 2658c9f01c44..ea0be2f6447e 100644 --- a/zebra/zebra_vxlan_if.c +++ b/zebra/zebra_vxlan_if.c @@ -1037,7 +1037,7 @@ int zebra_vxlan_if_vni_up(struct interface *ifp, struct zebra_vxlan_vni *vnip) } else { if (IS_ZEBRA_DEBUG_KERNEL || IS_ZEBRA_DEBUG_VXLAN) zlog_debug("%s VNI %u vxlan_if %s oper down skipping vni up to client", - __func__, zevpn->vni, zevpn->vxlan_if->name); + __func__, zevpn->vni, zevpn->vxlan_if->name); } zebra_evpn_read_mac_neigh(zevpn, ifp); } From d6431905009c71f6e607ae719ae288574053369b Mon Sep 17 00:00:00 2001 From: Mike RE Mallin Date: Tue, 26 Nov 2024 12:13:21 -0500 Subject: [PATCH 64/69] Docker: Add the ability to override FRR UID Signed-off-by: Mike RE Mallin Signed-off-by: Mike RE Mallin --- docker/ubuntu-ci/Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/ubuntu-ci/Dockerfile b/docker/ubuntu-ci/Dockerfile index 5c4649dc325c..aaad3bc172a9 100644 --- a/docker/ubuntu-ci/Dockerfile +++ b/docker/ubuntu-ci/Dockerfile @@ -84,10 +84,11 @@ RUN apt update && apt upgrade -y && \ python3 -m pip install xmltodict && \ python3 -m pip install git+https://github.com/Exa-Networks/exabgp@0659057837cd6c6351579e9f0fa47e9fb7de7311 +ARG UID=1000 RUN groupadd -r -g 92 frr && \ groupadd -r -g 85 frrvty && \ adduser --system --ingroup frr --home /home/frr \ - --gecos "FRR suite" --shell /bin/bash frr && \ + --gecos "FRR suite" -u $UID --shell /bin/bash frr && \ usermod -a -G frrvty frr && \ useradd -d /var/run/exabgp/ -s /bin/false exabgp && \ echo 'frr ALL = NOPASSWD: ALL' | tee /etc/sudoers.d/frr && \ From 8a51814085f12b46b963967afd6ffc326477996b Mon Sep 17 00:00:00 2001 From: "guozhongfeng.gzf" Date: Wed, 27 Nov 2024 10:05:03 +0800 Subject: [PATCH 65/69] doc:Fix bgp doc warning Signed-off-by: guozhongfeng.gzf --- doc/user/bgp.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index d7c624ee463f..fa4d7d55eadd 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -2931,6 +2931,7 @@ BGP Extended Communities in Route Map match on to for the purpose of determining what type of SR-TE Policy Tunnel a BGP route can resolve over, and it also shows the order for resolving the BGP route if there are different tunnels. + - ``00`` Can match on a specific endpoint only which should be the nexthop of the route(Default Setting). - ``01`` Can match on a specific endpoint or a null endpoint. From fe272d35084c0c9507d36c5e0413dd4504b96e3b Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Fri, 22 Nov 2024 13:28:10 +0100 Subject: [PATCH 66/69] topotests: bgp_evpn_rt5, rework test Rework the test file by adding test methods. Add a convergence test that checks for the incoming BGP prefixes. 
Signed-off-by: Philippe Guibert --- .../r1/bgp_l2vpn_evpn_routes.json | 131 +++++++++++ .../r2/bgp_l2vpn_evpn_routes.json | 131 +++++++++++ tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py | 210 ++++++++++++------ 3 files changed, 407 insertions(+), 65 deletions(-) create mode 100644 tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json create mode 100644 tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json diff --git a/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json new file mode 100644 index 000000000000..7532ce933167 --- /dev/null +++ b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes.json @@ -0,0 +1,131 @@ +{ + "bgpLocalRouterId":"192.168.100.21", + "defaultLocPrf":100, + "localAS":65000, + "192.168.101.41:2":{ + "rd":"192.168.101.41:2", + "[5]:[0]:[32]:[192.168.101.41]":{ + "prefix":"[5]:[0]:[32]:[192.168.101.41]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"internal", + "routeType":5, + "ethTag":0, + "ipLen":32, + "ip":"192.168.101.41", + "metric":0, + "locPrf":100, + "weight":0, + "peerId":"192.168.100.41", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.41", + "hostname":"r2", + "afi":"ipv4", + "used":true + } + ] + } + ] + }, + "[5]:[0]:[128]:[fd00::2]":{ + "prefix":"[5]:[0]:[128]:[fd00::2]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"internal", + "routeType":5, + "ethTag":0, + "ipLen":128, + "ip":"fd00::2", + "metric":0, + "locPrf":100, + "weight":0, + "peerId":"192.168.100.41", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.41", + "hostname":"r2", + "afi":"ipv4", + "used":true + } + ] + } + ] + } + }, + "192.168.102.21:2":{ + "rd":"192.168.102.21:2", + "[5]:[0]:[32]:[192.168.102.21]":{ + "prefix":"[5]:[0]:[32]:[192.168.102.21]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"external", + "routeType":5, + "ethTag":0, + "ipLen":32, + "ip":"192.168.102.21", + "metric":0, + "weight":32768, + "peerId":"(unspec)", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.21", + "hostname":"r1", + "afi":"ipv4", + "used":true + } + ] + } + ] + }, + "[5]:[0]:[128]:[fd00::1]":{ + "prefix":"[5]:[0]:[128]:[fd00::1]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"external", + "routeType":5, + "ethTag":0, + "ipLen":128, + "ip":"fd00::1", + "metric":0, + "weight":32768, + "peerId":"(unspec)", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.21", + "hostname":"r1", + "afi":"ipv4", + "used":true + } + ] + } + ] + } + }, + "numPrefix":4, + "totalPrefix":4 +} diff --git a/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json b/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json new file mode 100644 index 000000000000..597bca5fd3f1 --- /dev/null +++ b/tests/topotests/bgp_evpn_rt5/r2/bgp_l2vpn_evpn_routes.json @@ -0,0 +1,131 @@ +{ + "bgpLocalRouterId":"192.168.100.41", + "defaultLocPrf":100, + "localAS":65000, + "192.168.101.41:2":{ + "rd":"192.168.101.41:2", + "[5]:[0]:[32]:[192.168.101.41]":{ + "prefix":"[5]:[0]:[32]:[192.168.101.41]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"external", + "routeType":5, + "ethTag":0, + 
"ipLen":32, + "ip":"192.168.101.41", + "metric":0, + "weight":32768, + "peerId":"(unspec)", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.41", + "hostname":"r2", + "afi":"ipv4", + "used":true + } + ] + } + ] + }, + "[5]:[0]:[128]:[fd00::2]":{ + "prefix":"[5]:[0]:[128]:[fd00::2]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"external", + "routeType":5, + "ethTag":0, + "ipLen":128, + "ip":"fd00::2", + "metric":0, + "weight":32768, + "peerId":"(unspec)", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.41", + "hostname":"r2", + "afi":"ipv4", + "used":true + } + ] + } + ] + } + }, + "192.168.102.21:2":{ + "rd":"192.168.102.21:2", + "[5]:[0]:[32]:[192.168.102.21]":{ + "prefix":"[5]:[0]:[32]:[192.168.102.21]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"internal", + "routeType":5, + "ethTag":0, + "ipLen":32, + "ip":"192.168.102.21", + "metric":0, + "locPrf":100, + "weight":0, + "peerId":"192.168.100.21", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.21", + "hostname":"r1", + "afi":"ipv4", + "used":true + } + ] + } + ] + }, + "[5]:[0]:[128]:[fd00::1]":{ + "prefix":"[5]:[0]:[128]:[fd00::1]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"internal", + "routeType":5, + "ethTag":0, + "ipLen":128, + "ip":"fd00::1", + "metric":0, + "locPrf":100, + "weight":0, + "peerId":"192.168.100.21", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.21", + "hostname":"r1", + "afi":"ipv4", + "used":true + } + ] + } + ] + } + }, + "numPrefix":4, + "totalPrefix":4 +} diff --git a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py index 9dfb7fc4d90d..85d825bce303 100644 --- a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py +++ b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py @@ -13,6 +13,8 @@ with route advertisements on a separate netns. 
""" +import json +from functools import partial import os import sys import pytest @@ -160,6 +162,36 @@ def teardown_module(_mod): tgen.stop_topology() +def _test_evpn_ping_router(pingrouter, ipv4_only=False): + """ + internal function to check ping between r1 and r2 + """ + # Check IPv4 and IPv6 connectivity between r1 and r2 ( routing vxlan evpn) + logger.info( + "Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)" + ) + output = pingrouter.run("ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000") + logger.info(output) + if "1000 packets transmitted, 1000 received" not in output: + assertmsg = ( + "expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok" + ) + assert 0, assertmsg + else: + logger.info("Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK") + + if ipv4_only: + return + + logger.info("Check Ping IPv6 from R1(r1-vrf-101) to R2(r2-vrf-101 = fd00::2)") + output = pingrouter.run("ip netns exec r1-vrf-101 ping fd00::2 -f -c 1000") + logger.info(output) + if "1000 packets transmitted, 1000 received" not in output: + assert 0, "expected ping IPv6 from R1(r1-vrf-101) to R2(fd00::2) should be ok" + else: + logger.info("Check Ping IPv6 from R1(r1-vrf-101) to R2(fd00::2) OK") + + def test_protocols_convergence(): """ Assert that all protocols have converged @@ -168,7 +200,34 @@ def test_protocols_convergence(): tgen = get_topogen() if tgen.routers_have_failure(): pytest.skip(tgen.errors) - topotest.sleep(4, "waiting 4 seconds for bgp convergence") + # Check BGP IPv4 routing tables on r1 + logger.info("Checking BGP L2VPN EVPN routes for convergence on r1") + + for rname in ("r1", "r2"): + router = tgen.gears[rname] + json_file = "{}/{}/bgp_l2vpn_evpn_routes.json".format(CWD, router.name) + if not os.path.isfile(json_file): + assert 0, "bgp_l2vpn_evpn_routes.json file not found" + + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp l2vpn evpn json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + +def test_protocols_dump_info(): + """ + Dump EVPN information + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) # Check IPv4/IPv6 routing tables. 
output = tgen.gears["r1"].vtysh_cmd("show bgp l2vpn evpn", isjson=False) logger.info("==== result from show bgp l2vpn evpn") @@ -203,6 +262,15 @@ def test_protocols_convergence(): logger.info("==== result from show evpn rmac vni all") logger.info(output) + +def test_router_check_ip(): + """ + Check routes are correctly installed + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + expected = { "fd00::2/128": [ { @@ -221,50 +289,71 @@ def test_protocols_convergence(): ) assert result is None, "ipv6 route check failed" - expected = { - "101": { - "numNextHops": 2, - "192.168.100.41": { - "nexthopIp": "192.168.100.41", - }, - "::ffff:192.168.100.41": { - "nexthopIp": "::ffff:192.168.100.41", - }, + +def _test_router_check_evpn_contexts(router, ipv4_only=False): + """ + Check EVPN nexthops and RMAC number are correctly configured + """ + if ipv4_only: + expected = { + "101": { + "numNextHops": 1, + "192.168.100.41": { + "nexthopIp": "192.168.100.41", + }, + } + } + else: + expected = { + "101": { + "numNextHops": 2, + "192.168.100.41": { + "nexthopIp": "192.168.100.41", + }, + "::ffff:192.168.100.41": { + "nexthopIp": "::ffff:192.168.100.41", + }, + } } - } result = topotest.router_json_cmp( - tgen.gears["r1"], "show evpn next-hops vni all json", expected + router, "show evpn next-hops vni all json", expected ) assert result is None, "evpn next-hops check failed" expected = {"101": {"numRmacs": 1}} - result = topotest.router_json_cmp( - tgen.gears["r1"], "show evpn rmac vni all json", expected - ) + result = topotest.router_json_cmp(router, "show evpn rmac vni all json", expected) assert result is None, "evpn rmac number check failed" - # Check IPv4 and IPv6 connectivity between r1 and r2 ( routing vxlan evpn) - pingrouter = tgen.gears["r1"] - logger.info( - "Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)" - ) - output = pingrouter.run("ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000") - logger.info(output) - if "1000 packets transmitted, 1000 received" not in output: - assertmsg = ( - "expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok" - ) - assert 0, assertmsg - else: - logger.info("Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK") - logger.info("Check Ping IPv6 from R1(r1-vrf-101) to R2(r2-vrf-101 = fd00::2)") - output = pingrouter.run("ip netns exec r1-vrf-101 ping fd00::2 -f -c 1000") - logger.info(output) - if "1000 packets transmitted, 1000 received" not in output: - assert 0, "expected ping IPv6 from R1(r1-vrf-101) to R2(fd00::2) should be ok" - else: - logger.info("Check Ping IPv6 from R1(r1-vrf-101) to R2(fd00::2) OK") +def test_router_check_evpn_contexts(): + """ + Check EVPN nexthops and RMAC number are correctly configured + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + _test_router_check_evpn_contexts(tgen.gears["r1"]) + + +def test_evpn_ping(): + """ + Check ping between R1 and R2 is ok + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + _test_evpn_ping_router(tgen.gears["r1"]) + + +def test_evpn_remove_ip(): + """ + Check the removal of an EVPN route is correctly handled + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) config_no_ipv6 = { "r2": { @@ -293,6 +382,7 @@ def test_protocols_convergence(): } result = verify_bgp_rib(tgen, "ipv6", "r1", ipv6_routes, expected=False) assert result is not True, "expect IPv6 route fd00::2/128 withdrawn" + 
output = tgen.gears["r1"].vtysh_cmd("show evpn next-hops vni all", isjson=False) logger.info("==== result from show evpn next-hops vni all") logger.info(output) @@ -300,37 +390,27 @@ def test_protocols_convergence(): logger.info("==== result from show evpn next-hops vni all") logger.info(output) - expected = { - "101": { - "numNextHops": 1, - "192.168.100.41": { - "nexthopIp": "192.168.100.41", - }, - } - } - result = topotest.router_json_cmp( - tgen.gears["r1"], "show evpn next-hops vni all json", expected - ) - assert result is None, "evpn next-hops check failed" - expected = {"101": {"numRmacs": 1}} - result = topotest.router_json_cmp( - tgen.gears["r1"], "show evpn rmac vni all json", expected - ) - assert result is None, "evpn rmac number check failed" +def test_router_check_evpn_contexts_again(): + """ + Check EVPN nexthops and RMAC number are correctly configured + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) - logger.info( - "Check Ping IPv4 from R1(r1-vrf-101) to R2(r2-vrf-101 = 192.168.101.41)" - ) - output = pingrouter.run("ip netns exec r1-vrf-101 ping 192.168.101.41 -f -c 1000") - logger.info(output) - if "1000 packets transmitted, 1000 received" not in output: - assertmsg = ( - "expected ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) should be ok" - ) - assert 0, assertmsg - else: - logger.info("Check Ping IPv4 from R1(r1-vrf-101) to R2(192.168.101.41) OK") + _test_router_check_evpn_contexts(tgen.gears["r1"], ipv4_only=True) + + +def test_evpn_ping_again(): + """ + Check ping between R1 and R2 is ok + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + _test_evpn_ping_router(tgen.gears["r1"], ipv4_only=True) def test_memory_leak(): From b8b7a615ac2fb6e5d49a0f07afabcd76614734df Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Fri, 22 Nov 2024 15:57:25 +0100 Subject: [PATCH 67/69] topotests: bgp_evpn_rt5, add test for advertise route-map service Use the advertise route-map command, and check that it filters out correctly the undesirable prefixes. Reversely, check that undoing that route-map recovers all prefixes. 
Signed-off-by: Philippe Guibert --- .../r1/bgp_l2vpn_evpn_routes_all.json | 191 ++++++++++++++++++ tests/topotests/bgp_evpn_rt5/r2/bgpd.conf | 22 +- tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py | 35 ++++ 3 files changed, 246 insertions(+), 2 deletions(-) create mode 100644 tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json diff --git a/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json new file mode 100644 index 000000000000..a14ba1291e7b --- /dev/null +++ b/tests/topotests/bgp_evpn_rt5/r1/bgp_l2vpn_evpn_routes_all.json @@ -0,0 +1,191 @@ +{ + "bgpLocalRouterId":"192.168.100.21", + "defaultLocPrf":100, + "localAS":65000, + "192.168.101.41:2":{ + "rd":"192.168.101.41:2", + "[5]:[0]:[32]:[192.168.101.41]":{ + "prefix":"[5]:[0]:[32]:[192.168.101.41]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"internal", + "routeType":5, + "ethTag":0, + "ipLen":32, + "ip":"192.168.101.41", + "metric":0, + "locPrf":100, + "weight":0, + "peerId":"192.168.100.41", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.41", + "hostname":"r2", + "afi":"ipv4", + "used":true + } + ] + } + ] + }, + "[5]:[0]:[32]:[192.168.102.41]":{ + "prefix":"[5]:[0]:[32]:[192.168.102.41]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"internal", + "routeType":5, + "ethTag":0, + "ipLen":32, + "ip":"192.168.102.41", + "metric":0, + "locPrf":100, + "weight":0, + "peerId":"192.168.100.41", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.41", + "hostname":"r2", + "afi":"ipv4", + "used":true + } + ] + } + ] + }, + "[5]:[0]:[128]:[fd00::2]":{ + "prefix":"[5]:[0]:[128]:[fd00::2]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"internal", + "routeType":5, + "ethTag":0, + "ipLen":128, + "ip":"fd00::2", + "metric":0, + "locPrf":100, + "weight":0, + "peerId":"192.168.100.41", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.41", + "hostname":"r2", + "afi":"ipv4", + "used":true + } + ] + } + ] + }, + "[5]:[0]:[128]:[fd00::3]":{ + "prefix":"[5]:[0]:[128]:[fd00::3]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"internal", + "routeType":5, + "ethTag":0, + "ipLen":128, + "ip":"fd00::3", + "metric":0, + "locPrf":100, + "weight":0, + "peerId":"192.168.100.41", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.41", + "hostname":"r2", + "afi":"ipv4", + "used":true + } + ] + } + ] + } + }, + "192.168.102.21:2":{ + "rd":"192.168.102.21:2", + "[5]:[0]:[32]:[192.168.102.21]":{ + "prefix":"[5]:[0]:[32]:[192.168.102.21]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"external", + "routeType":5, + "ethTag":0, + "ipLen":32, + "ip":"192.168.102.21", + "metric":0, + "weight":32768, + "peerId":"(unspec)", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.21", + "hostname":"r1", + "afi":"ipv4", + "used":true + } + ] + } + ] + }, + "[5]:[0]:[128]:[fd00::1]":{ + "prefix":"[5]:[0]:[128]:[fd00::1]", + "prefixLen":352, + "paths":[ + { + "valid":true, + "bestpath":true, + "selectionReason":"First path received", + "pathFrom":"external", + "routeType":5, + "ethTag":0, + "ipLen":128, + 
"ip":"fd00::1", + "metric":0, + "weight":32768, + "peerId":"(unspec)", + "path":"", + "origin":"IGP", + "nexthops":[ + { + "ip":"192.168.100.21", + "hostname":"r1", + "afi":"ipv4", + "used":true + } + ] + } + ] + } + }, + "numPrefix":6, + "totalPrefix":6 +} diff --git a/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf b/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf index de5a0efc445f..4f1d8e4a3788 100644 --- a/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf +++ b/tests/topotests/bgp_evpn_rt5/r2/bgpd.conf @@ -20,12 +20,30 @@ router bgp 65000 vrf r2-vrf-101 no bgp network import-check address-family ipv4 unicast network 192.168.101.41/32 + network 192.168.102.41/32 exit-address-family address-family ipv6 unicast network fd00::2/128 + network fd00::3/128 exit-address-family address-family l2vpn evpn - advertise ipv4 unicast - advertise ipv6 unicast + advertise ipv4 unicast route-map rmap4 + advertise ipv6 unicast route-map rmap6 exit-address-family ! +access-list acl4_1 seq 10 permit 192.168.101.41/32 +access-list acl4_2 seq 10 permit 192.168.102.41/32 +ipv6 access-list acl6_1 seq 10 permit fd00::2/128 +ipv6 access-list acl6_2 seq 10 permit fd00::3/128 +route-map rmap4 permit 1 + match ip address acl4_1 +exit +route-map rmap4 deny 2 + match ip address acl4_2 +exit +route-map rmap6 permit 1 + match ipv6 address acl6_1 +exit +route-map rmap6 deny 2 + match ipv6 address acl6_2 +exit diff --git a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py index 85d825bce303..a9636a92f4b2 100644 --- a/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py +++ b/tests/topotests/bgp_evpn_rt5/test_bgp_evpn.py @@ -347,6 +347,40 @@ def test_evpn_ping(): _test_evpn_ping_router(tgen.gears["r1"]) +def test_evpn_disable_routemap(): + """ + Check the removal of a route-map on R2. More EVPN Prefixes are expected + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r2"].vtysh_cmd( + """ + configure terminal\n + router bgp 65000 vrf r2-vrf-101\n + address-family l2vpn evpn\n + advertise ipv4 unicast\n + advertise ipv6 unicast\n + """ + ) + router = tgen.gears["r1"] + json_file = "{}/{}/bgp_l2vpn_evpn_routes_all.json".format(CWD, router.name) + if not os.path.isfile(json_file): + assert 0, "bgp_l2vpn_evpn_routes.json file not found" + + expected = json.loads(open(json_file).read()) + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp l2vpn evpn json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=20, wait=1) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + def test_evpn_remove_ip(): """ Check the removal of an EVPN route is correctly handled @@ -360,6 +394,7 @@ def test_evpn_remove_ip(): "raw_config": [ "router bgp 65000 vrf r2-vrf-101", "address-family ipv6 unicast", + "no network fd00::3/128", "no network fd00::2/128", ] } From 8b3b152a1a8e871002812b28d3378f76b1c5ee11 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Mon, 11 Mar 2024 11:51:55 +0100 Subject: [PATCH 68/69] bgpd: fix use real SID in BGP nexthop tracking When receiving an SRv6 BGP update, the nexthop tracking is used to find out the reachability of the BGP update. > # show bgp ipv6 vpn fd00:200::/64 > Paths: (1 available, best #1) > [..] 
> 4:4::4:4 from 4:4::4:4 (4.4.4.4) > Origin incomplete, metric 0, localpref 100, valid, internal, best (First path received) > Extended Community: RT:52:100 > Remote label: 16 > Remote SID: 2001:db8:f4:: > Last update: Mon Mar 11 11:50:04 2024 The IPv6 address used is the "Remote SID". However, this value alone is incomplete: the Remote SID only carries the attribute value received in BGP, while the label value holds the remaining bits of the SRv6 SID. BGP allows this transposition technique, and in the above case the incoming BGP update used a non-zero transposition length. When the SID attribute was received with transposition, rebuild and use the real SID address, so that nexthop tracking operates on the reconstructed address (see the illustrative sketch after the last patch in this series). > # show bgp nexthop > Current BGP nexthop cache: > 4:4::4:4 valid [IGP metric 30], #paths 0, peer 4:4::4:4 > gate fe80::dced:1ff:fed6:878c, if ntfp3 > Last update: Mon Mar 11 11:50:02 2024 > 2001:db8:f4:1:: valid [IGP metric 0], #paths 2 > gate fe80::dced:1ff:fed6:878c, if ntfp3 Fixes: 26c747ed6c0b ("bgpd: extend make_prefix to form srv6-based prefix") Signed-off-by: Philippe Guibert --- bgpd/bgp_nht.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index 9b633b7139b2..ed83757ea315 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -1066,9 +1066,16 @@ static int make_prefix(int afi, struct bgp_path_info *pi, struct prefix *p) case AFI_IP6: p->family = AF_INET6; if (pi->attr->srv6_l3vpn) { - IPV6_ADDR_COPY(&(p->u.prefix6), - &(pi->attr->srv6_l3vpn->sid)); p->prefixlen = IPV6_MAX_BITLEN; + if (pi->attr->srv6_l3vpn->transposition_len != 0 && + BGP_PATH_INFO_NUM_LABELS(pi)) { + IPV6_ADDR_COPY(&p->u.prefix6, &pi->attr->srv6_l3vpn->sid); + transpose_sid(&p->u.prefix6, + decode_label(&pi->extra->labels->label[0]), + pi->attr->srv6_l3vpn->transposition_offset, + pi->attr->srv6_l3vpn->transposition_len); + } else + IPV6_ADDR_COPY(&(p->u.prefix6), &(pi->attr->srv6_l3vpn->sid)); } else if (is_bgp_static) { p->u.prefix6 = p_orig->u.prefix6; p->prefixlen = p_orig->prefixlen; From d87ec38dfdfdf8f4ac4c3f6843b58b1170137630 Mon Sep 17 00:00:00 2001 From: famfo Date: Tue, 26 Nov 2024 19:42:03 +0100 Subject: [PATCH 69/69] topotests: Allow running under both docker and podman Signed-off-by: famfo --- tests/topotests/docker/build.sh | 4 ++-- tests/topotests/docker/frr-topotests.sh | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/tests/topotests/docker/build.sh b/tests/topotests/docker/build.sh index aec20587ba39..20d08e4979f2 100755 --- a/tests/topotests/docker/build.sh +++ b/tests/topotests/docker/build.sh @@ -1,11 +1,11 @@ -#!/bin/bash +#!/usr/bin/env bash # SPDX-License-Identifier: MIT # # Copyright 2018 Network Device Education Foundation, Inc.
("NetDEF") @@ -113,10 +113,12 @@ if [ -z "$TOPOTEST_FRR" ]; then git -C "$TOPOTEST_FRR" ls-files -z > "${TOPOTEST_LOGS}/git-ls-files" fi +cmd="$(command -v docker || command -v podman)" + if [ -z "$TOPOTEST_BUILDCACHE" ]; then TOPOTEST_BUILDCACHE=topotest-buildcache - docker volume inspect "${TOPOTEST_BUILDCACHE}" &> /dev/null \ - || docker volume create "${TOPOTEST_BUILDCACHE}" + "${cmd}" volume inspect "${TOPOTEST_BUILDCACHE}" &> /dev/null \ + || "${cmd}" volume create "${TOPOTEST_BUILDCACHE}" fi if [[ -n "$TMUX" ]]; then @@ -145,4 +147,4 @@ if [ -t 0 ]; then set -- -t "$@" fi -exec docker run "$@" +exec "${cmd}" run "$@"