From 5a589217f873efaa7684a994c0c2b4ffebe28409 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Fri, 6 Sep 2024 14:29:45 -0400 Subject: [PATCH 01/73] tests: When finding nexthops ensure that they are active Do not accept a nexthop as valid unless it is marked as being active. Signed-off-by: Donald Sharp --- .../bgp_default_originate/test_default_orginate_vrf.py | 8 +------- tests/topotests/lib/common_config.py | 2 +- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py b/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py index 1506b02e5d40..905c3e2b663a 100644 --- a/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py +++ b/tests/topotests/bgp_default_originate/test_default_orginate_vrf.py @@ -546,13 +546,7 @@ def test_verify_default_originate_route_with_non_default_VRF_p1(request): tc_name, result ) - result = verify_rib( - tgen, - addr_type, - "r2", - static_routes_input, - next_hop=DEFAULT_ROUTE_NXT_HOP_R1[addr_type], - ) + result = verify_rib(tgen, addr_type, "r2", static_routes_input) assert result is True, "Testcase {} : Failed \n Error: {}".format( tc_name, result ) diff --git a/tests/topotests/lib/common_config.py b/tests/topotests/lib/common_config.py index 540a627c6543..2cee5fdaedfa 100644 --- a/tests/topotests/lib/common_config.py +++ b/tests/topotests/lib/common_config.py @@ -3371,7 +3371,7 @@ def verify_rib( found_hops = [ rib_r["ip"] for rib_r in rib_routes_json[st_rt][0]["nexthops"] - if "ip" in rib_r + if "ip" in rib_r and "active" in rib_r ] # If somehow key "ip" is not found in nexthops JSON From 1bbbcf043b1896b21cc303c37c99732858b6e1ed Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 7 Feb 2024 14:56:15 -0500 Subject: [PATCH 02/73] zebra: Properly note that a nhg's nexthop has gone down Current code when a link is set down is to just mark the nexthop group as not properly setup. Leaving situations where when an interface goes down and show output is entered we see incorrect state. This is true for anything that would be checking those flags at that point in time. Modify the interface down nexthop group code to notice the nexthops appropriately ( and I mean set the appropriate flags ) and to allow a `show ip route` command to actually display what is going on with the nexthops. eva# show ip route 1.0.0.0 Routing entry for 1.0.0.0/32 Known via "sharp", distance 150, metric 0, best Last update 00:00:06 ago * 192.168.44.33, via dummy1, weight 1 * 192.168.45.33, via dummy2, weight 1 sharpd@eva:~/frr1$ sudo ip link set dummy2 down eva# show ip route 1.0.0.0 Routing entry for 1.0.0.0/32 Known via "sharp", distance 150, metric 0, best Last update 00:00:12 ago * 192.168.44.33, via dummy1, weight 1 192.168.45.33, via dummy2 inactive, weight 1 Notice now that the 1.0.0.0/32 route now correctly displays the route for the nexthop group entry. Signed-off-by: Donald Sharp --- zebra/zebra_nhg.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 637eabde8d2d..3dc4f2437499 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -1101,11 +1101,15 @@ void zebra_nhg_check_valid(struct nhg_hash_entry *nhe) bool valid = false; /* - * If I have other nhe's depending on me, then this is a - * singleton nhe so set this nexthops flag as appropriate. + * If I have other nhe's depending on me, or I have nothing + * I am depending on then this is a + * singleton nhe so set this nexthops flag as appropriate. 
*/ - if (nhg_connected_tree_count(&nhe->nhg_depends)) + if (nhg_connected_tree_count(&nhe->nhg_depends) || + nhg_connected_tree_count(&nhe->nhg_dependents) == 0) { + UNSET_FLAG(nhe->nhg.nexthop->flags, NEXTHOP_FLAG_FIB); UNSET_FLAG(nhe->nhg.nexthop->flags, NEXTHOP_FLAG_ACTIVE); + } /* If anthing else in the group is valid, the group is valid */ frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { From ce166ca78928d31200461ec841ac597ac97c52e9 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 11 Sep 2024 13:58:32 -0400 Subject: [PATCH 03/73] zebra: Expose _route_entry_dump_nh so it can be used. Expose this helper function so it can be used in zebra_nhg.c Signed-off-by: Donald Sharp --- zebra/rib.h | 4 ++++ zebra/zebra_rib.c | 9 ++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/zebra/rib.h b/zebra/rib.h index 4293b5f2407a..071cc7b3dee9 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -637,6 +637,10 @@ extern pid_t pid; extern uint32_t rt_table_main_id; +void route_entry_dump_nh(const struct route_entry *re, const char *straddr, + const struct vrf *re_vrf, + const struct nexthop *nexthop); + /* Name of hook calls */ #define ZEBRA_ON_RIB_PROCESS_HOOK_CALL "on_rib_process_dplane_results" diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index 402a3104b941..cba626490e99 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -4118,9 +4118,8 @@ void rib_delnode(struct route_node *rn, struct route_entry *re) /* * Helper that debugs a single nexthop within a route-entry */ -static void _route_entry_dump_nh(const struct route_entry *re, - const char *straddr, const struct vrf *re_vrf, - const struct nexthop *nexthop) +void route_entry_dump_nh(const struct route_entry *re, const char *straddr, + const struct vrf *re_vrf, const struct nexthop *nexthop) { char nhname[PREFIX_STRLEN]; char backup_str[50]; @@ -4243,7 +4242,7 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, /* Dump nexthops */ for (ALL_NEXTHOPS(re->nhe->nhg, nexthop)) - _route_entry_dump_nh(re, straddr, vrf, nexthop); + route_entry_dump_nh(re, straddr, vrf, nexthop); if (zebra_nhg_get_backup_nhg(re->nhe)) { zlog_debug("%s(%s): backup nexthops:", straddr, @@ -4251,7 +4250,7 @@ void _route_entry_dump(const char *func, union prefixconstptr pp, nhg = zebra_nhg_get_backup_nhg(re->nhe); for (ALL_NEXTHOPS_PTR(nhg, nexthop)) - _route_entry_dump_nh(re, straddr, vrf, nexthop); + route_entry_dump_nh(re, straddr, vrf, nexthop); } zlog_debug("%s(%s): dump complete", straddr, VRF_LOGNAME(vrf)); From 3be8b48e6bab845a57b9bee83158f5b5bdf10555 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 11 Sep 2024 14:19:51 -0400 Subject: [PATCH 04/73] zebra: Reinstall nexthop when interface comes back up If a interface down event caused a nexthop group to remove one of the entries in the kernel, have it be reinstalled when the interface comes back up. Mark the nexthop as usable. 
new behavior: eva# show nexthop-group rib 181818168 ID: 181818168 (sharp) RefCnt: 1 Uptime: 00:00:23 VRF: default(bad-value) Valid, Installed Depends: (35) (38) (44) (51) via 192.168.99.33, dummy1 (vrf default), weight 1 via 192.168.100.33, dummy2 (vrf default), weight 1 via 192.168.101.33, dummy3 (vrf default), weight 1 via 192.168.102.33, dummy4 (vrf default), weight 1 eva# conf eva(config)# int dummy3 eva(config-if)# shut eva(config-if)# do show nexthop-group rib 181818168 ID: 181818168 (sharp) RefCnt: 1 Uptime: 00:00:44 VRF: default(bad-value) Depends: (35) (38) (44) (51) via 192.168.99.33, dummy1 (vrf default), weight 1 via 192.168.100.33, dummy2 (vrf default), weight 1 via 192.168.101.33, dummy3 (vrf default) inactive, weight 1 via 192.168.102.33, dummy4 (vrf default), weight 1 eva(config-if)# no shut eva(config-if)# do show nexthop-group rib 181818168 ID: 181818168 (sharp) RefCnt: 1 Uptime: 00:00:53 VRF: default(bad-value) Valid, Installed Depends: (35) (38) (44) (51) via 192.168.99.33, dummy1 (vrf default), weight 1 via 192.168.100.33, dummy2 (vrf default), weight 1 via 192.168.101.33, dummy3 (vrf default), weight 1 via 192.168.102.33, dummy4 (vrf default), weight 1 eva(config-if)# exit eva(config)# exit eva# exit sharpd@eva ~/frr1 (master) [255]> ip nexthop show id 181818168 id 181818168 group 35/38/44/51 proto 194 sharpd@eva ~/frr1 (master)> Signed-off-by: Donald Sharp --- zebra/zebra_nhg.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 3dc4f2437499..6493ca6aff0e 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -3782,6 +3782,17 @@ void zebra_interface_nhg_reinstall(struct interface *ifp) frr_each_safe (nhg_connected_tree, &rb_node_dep->nhe->nhg_dependents, rb_node_dependent) { + struct nexthop *nhop_dependent = + rb_node_dependent->nhe->nhg.nexthop; + + while (nhop_dependent && + !nexthop_same(nhop_dependent, nh)) + nhop_dependent = nhop_dependent->next; + + if (nhop_dependent) + SET_FLAG(nhop_dependent->flags, + NEXTHOP_FLAG_ACTIVE); + if (IS_ZEBRA_DEBUG_NHG) zlog_debug("%s dependent nhe %pNG Setting Reinstall flag", __func__, From f02d76f0fd1085c522bc89f5c3aae8d03230ab6e Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 11 Sep 2024 14:24:27 -0400 Subject: [PATCH 05/73] zebra: Attempt to reuse NHG after interface up and route reinstall The previous commit modified zebra to reinstall the singleton nexthops for a nexthop group when a interface event comes up. Now let's modify zebra to attempt to reuse the nexthop group when this happens and the upper level protocol resends the route down with that. Only match if the protocol is the same as well as the instance and the nexthop groups would match. 
Here is the new behavior: eva(config)# do show ip route 9.9.9.9/32 Routing entry for 9.9.9.9/32 Known via "static", distance 1, metric 0, best Last update 00:00:08 ago * 192.168.99.33, via dummy1, weight 1 * 192.168.100.33, via dummy2, weight 1 * 192.168.101.33, via dummy3, weight 1 * 192.168.102.33, via dummy4, weight 1 eva(config)# do show ip route nexthop-group 9.9.9.9/32 % Unknown command: do show ip route nexthop-group 9.9.9.9/32 eva(config)# do show ip route 9.9.9.9/32 nexthop-group Routing entry for 9.9.9.9/32 Known via "static", distance 1, metric 0, best Last update 00:00:54 ago Nexthop Group ID: 57 * 192.168.99.33, via dummy1, weight 1 * 192.168.100.33, via dummy2, weight 1 * 192.168.101.33, via dummy3, weight 1 * 192.168.102.33, via dummy4, weight 1 eva(config)# exit eva# conf eva(config)# int dummy3 eva(config-if)# shut eva(config-if)# no shut eva(config-if)# do show ip route 9.9.9.9/32 nexthop-group Routing entry for 9.9.9.9/32 Known via "static", distance 1, metric 0, best Last update 00:00:08 ago Nexthop Group ID: 57 * 192.168.99.33, via dummy1, weight 1 * 192.168.100.33, via dummy2, weight 1 * 192.168.101.33, via dummy3, weight 1 * 192.168.102.33, via dummy4, weight 1 eva(config-if)# exit eva(config)# exit eva# exit sharpd@eva ~/frr1 (master) [255]> ip nexthop show id 57 id 57 group 37/43/50/58 proto zebra sharpd@eva ~/frr1 (master)> ip route show 9.9.9.9/32 9.9.9.9 nhid 57 proto 196 metric 20 nexthop via 192.168.99.33 dev dummy1 weight 1 nexthop via 192.168.100.33 dev dummy2 weight 1 nexthop via 192.168.101.33 dev dummy3 weight 1 nexthop via 192.168.102.33 dev dummy4 weight 1 sharpd@eva ~/frr1 (master)> Notice that we now no longer are creating a bunch of new nexthop groups. Signed-off-by: Donald Sharp --- zebra/zebra_nhg.c | 158 +++++++++++++++++++++++++++++++++++++++++++++- zebra/zebra_nhg.h | 3 +- zebra/zebra_rib.c | 2 +- 3 files changed, 159 insertions(+), 4 deletions(-) diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 6493ca6aff0e..b5b8e2255239 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -1103,7 +1103,7 @@ void zebra_nhg_check_valid(struct nhg_hash_entry *nhe) /* * If I have other nhe's depending on me, or I have nothing * I am depending on then this is a - * singleton nhe so set this nexthops flag as appropriate. + * singleton nhe so set this nexthops flag as appropriate. */ if (nhg_connected_tree_count(&nhe->nhg_depends) || nhg_connected_tree_count(&nhe->nhg_dependents) == 0) { @@ -2924,6 +2924,154 @@ static uint32_t proto_nhg_nexthop_active_update(struct nexthop_group *nhg) return curr_active; } +/* + * This function takes the start of two comparable nexthops from two different + * nexthop groups and walks them to see if they can be considered the same + * or not. 
This is being used to determine if zebra should reuse a nhg + * from the old_re to the new_re, when an interface goes down and the + * new nhg sent down from the upper level protocol would resolve to it + */ +static bool zebra_nhg_nexthop_compare(const struct nexthop *nhop, + const struct nexthop *old_nhop, + const struct route_node *rn) +{ + bool same = true; + + while (nhop && old_nhop) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: %pRN Comparing %pNHvv(%u) to old: %pNHvv(%u)", + __func__, rn, nhop, nhop->flags, old_nhop, + old_nhop->flags); + if (!CHECK_FLAG(old_nhop->flags, NEXTHOP_FLAG_ACTIVE)) { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: %pRN Old is not active going to the next one", + __func__, rn); + old_nhop = old_nhop->next; + continue; + } + + if (nexthop_same(nhop, old_nhop)) { + struct nexthop *new_recursive, *old_recursive; + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s: %pRN New and old are same, continuing search", + __func__, rn); + + new_recursive = nhop->resolved; + old_recursive = old_nhop->resolved; + + while (new_recursive && old_recursive) { + if (!nexthop_same(new_recursive, old_recursive)) { + same = false; + break; + } + + new_recursive = new_recursive->next; + old_recursive = old_recursive->next; + } + + if (new_recursive) + same = false; + else if (old_recursive) { + while (old_recursive) { + if (CHECK_FLAG(old_recursive->flags, + NEXTHOP_FLAG_ACTIVE)) + break; + old_recursive = old_recursive->next; + } + + if (old_recursive) + same = false; + } + + if (!same) + break; + + nhop = nhop->next; + old_nhop = old_nhop->next; + continue; + } else { + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s:%pRN They are not the same, stopping using new nexthop entry", + __func__, rn); + same = false; + break; + } + } + + if (nhop) + same = false; + else if (old_nhop) { + while (old_nhop) { + if (CHECK_FLAG(old_nhop->flags, NEXTHOP_FLAG_ACTIVE)) + break; + old_nhop = old_nhop->next; + } + + if (old_nhop) + same = false; + } + + return same; +} + +static struct nhg_hash_entry *zebra_nhg_rib_compare_old_nhe( + const struct route_node *rn, const struct route_entry *re, + struct nhg_hash_entry *new_nhe, struct nhg_hash_entry *old_nhe) +{ + struct nexthop *nhop, *old_nhop; + bool same = true; + struct vrf *vrf = vrf_lookup_by_id(re->vrf_id); + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) { + char straddr[PREFIX_STRLEN]; + + prefix2str(&rn->p, straddr, sizeof(straddr)); + zlog_debug("%s: %pRN new id: %u old id: %u", __func__, rn, + new_nhe->id, old_nhe->id); + zlog_debug("%s: %pRN NEW", __func__, rn); + for (ALL_NEXTHOPS(new_nhe->nhg, nhop)) + route_entry_dump_nh(re, straddr, vrf, nhop); + + zlog_debug("%s: %pRN OLD", __func__, rn); + for (ALL_NEXTHOPS(old_nhe->nhg, nhop)) + route_entry_dump_nh(re, straddr, vrf, nhop); + } + + nhop = new_nhe->nhg.nexthop; + old_nhop = old_nhe->nhg.nexthop; + + same = zebra_nhg_nexthop_compare(nhop, old_nhop, rn); + + if (same) { + struct nexthop_group *bnhg, *old_bnhg; + + bnhg = zebra_nhg_get_backup_nhg(new_nhe); + old_bnhg = zebra_nhg_get_backup_nhg(old_nhe); + + if (bnhg || old_bnhg) { + if (bnhg && !old_bnhg) + same = false; + else if (!bnhg && old_bnhg) + same = false; + else + same = zebra_nhg_nexthop_compare(bnhg->nexthop, + old_bnhg->nexthop, + rn); + } + } + + if (IS_ZEBRA_DEBUG_NHG_DETAIL) + zlog_debug("%s:%pRN They are %sthe same, using the %s nhg entry", + __func__, rn, same ? "" : "not ", + same ? 
"old" : "new"); + + if (same) + return old_nhe; + else + return new_nhe; +} + /* * Iterate over all nexthops of the given RIB entry and refresh their * ACTIVE flag. If any nexthop is found to toggle the ACTIVE flag, @@ -2931,7 +3079,8 @@ static uint32_t proto_nhg_nexthop_active_update(struct nexthop_group *nhg) * * Return value is the new number of active nexthops. */ -int nexthop_active_update(struct route_node *rn, struct route_entry *re) +int nexthop_active_update(struct route_node *rn, struct route_entry *re, + struct route_entry *old_re) { struct nhg_hash_entry *curr_nhe; uint32_t curr_active = 0, backup_active = 0; @@ -2987,6 +3136,11 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re) new_nhe = zebra_nhg_rib_find_nhe(curr_nhe, rt_afi); + if (old_re && old_re->type == re->type && + old_re->instance == re->instance) + new_nhe = zebra_nhg_rib_compare_old_nhe(rn, re, new_nhe, + old_re->nhe); + if (IS_ZEBRA_DEBUG_NHG_DETAIL) zlog_debug( "%s: re %p CHANGED: nhe %p (%pNG) => new_nhe %p (%pNG)", diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h index 712c1057a1a8..435ccb0d01f5 100644 --- a/zebra/zebra_nhg.h +++ b/zebra/zebra_nhg.h @@ -404,7 +404,8 @@ extern void zebra_nhg_mark_keep(void); /* Nexthop resolution processing */ struct route_entry; /* Forward ref to avoid circular includes */ -extern int nexthop_active_update(struct route_node *rn, struct route_entry *re); +extern int nexthop_active_update(struct route_node *rn, struct route_entry *re, + struct route_entry *old_re); #ifdef _FRR_ATTRIBUTE_PRINTFRR #pragma FRR printfrr_ext "%pNG" (const struct nhg_hash_entry *) diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index cba626490e99..721eca70a493 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -1313,7 +1313,7 @@ static void rib_process(struct route_node *rn) */ if (CHECK_FLAG(re->status, ROUTE_ENTRY_CHANGED)) { proto_re_changed = re; - if (!nexthop_active_update(rn, re)) { + if (!nexthop_active_update(rn, re, old_fib)) { const struct prefix *p; struct rib_table_info *info; From 5118fd10184b14e8d2e1f1e9f5a858a907a1bd78 Mon Sep 17 00:00:00 2001 From: Y Bharath Date: Tue, 7 May 2024 20:31:03 +0530 Subject: [PATCH 06/73] tests: catch exception during switch shutdown Signed-off-by: y-bharath14 --- tests/topotests/lib/topogen.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/topotests/lib/topogen.py b/tests/topotests/lib/topogen.py index 7941e5c1d233..14dd61b077eb 100644 --- a/tests/topotests/lib/topogen.py +++ b/tests/topotests/lib/topogen.py @@ -492,7 +492,16 @@ def stop_topology(self): "Errors found post shutdown - details follow: {}".format(errors) ) - self.net.stop() + try: + self.net.stop() + + except OSError as error: + # OSError exception is raised when mininet tries to stop switch + # though switch is stopped once but mininet tries to stop same + # switch again, where it ended up with exception + + logger.info(error) + logger.info("Exception ignored: switch is already stopped") def get_exabgp_cmd(self): if not self.exabgp_cmd: From 06b5601c62e65b05adbcd1b7876970d80be0b2fb Mon Sep 17 00:00:00 2001 From: b29332 Date: Fri, 27 Sep 2024 15:51:17 +0800 Subject: [PATCH 07/73] isisd: Fix the PQ space computation error in TI-LFA When there are pseudo-nodes on the device, during TI-LFA calculation of PQ space, even if this IS vertex is not originally a P/Q node, it might be calculated as a P/Q node due to the presence of pseudo-nodes, causing this IS vertex to become a P/Q node. 
Signed-off-by: baozhen-H3C --- isisd/isis_lfa.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/isisd/isis_lfa.c b/isisd/isis_lfa.c index 887f27eec5d9..e0b3a4dca10b 100644 --- a/isisd/isis_lfa.c +++ b/isisd/isis_lfa.c @@ -1064,7 +1064,7 @@ static void lfa_calc_reach_nodes(struct isis_spftree *spftree, for (ALL_QUEUE_ELEMENTS_RO(&spftree->paths, node, vertex)) { char buf[VID2STR_BUFFER]; - if (!VTYPE_IS(vertex->type)) + if (vertex->type != VTYPE_NONPSEUDO_IS && vertex->type != VTYPE_NONPSEUDO_TE_IS) continue; /* Skip root node. */ From 60016a8e8be2826fb8494091583d7075ad60aeb7 Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Fri, 27 Sep 2024 13:59:42 +0300 Subject: [PATCH 08/73] bgpd: Show unmodified version of received-routes per neighbor If we have soft inbound enabled, we should see how the route looks like before it was modified by a route-map/prefix-list. Signed-off-by: Donatas Abraitis --- bgpd/bgp_route.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index f28c9adda218..503bb09fd5be 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -14513,7 +14513,7 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table, struct bgp_adj_out *adj = NULL; struct bgp_dest *dest; struct bgp *bgp; - struct attr attr; + struct attr attr, attr_unchanged; int ret; struct update_subgroup *subgrp; struct peer_af *paf = NULL; @@ -14693,6 +14693,7 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table, } attr = *ain->attr; + attr_unchanged = *ain->attr; route_filtered = false; /* Filter prefix using distribute list, @@ -14748,9 +14749,8 @@ show_adj_route(struct vty *vty, struct peer *peer, struct bgp_table *table, json_ar, json_net, "%pFX", rn_p); } else - route_vty_out_tmp(vty, bgp, dest, rn_p, - &attr, safi, use_json, - json_ar, wide); + route_vty_out_tmp(vty, bgp, dest, rn_p, &attr_unchanged, + safi, use_json, json_ar, wide); bgp_attr_flush(&attr); (*output_count)++; } From f4c17673d142a9fbacf0c32a69a585449db93615 Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Fri, 27 Sep 2024 14:04:59 +0300 Subject: [PATCH 09/73] tests: Check if we can see unmodified received-routes with soft inbound Signed-off-by: Donatas Abraitis --- .../__init__.py | 0 .../r1/frr.conf | 17 +++ .../r2/frr.conf | 14 +++ ...t_bgp_received_routes_with_soft_inbound.py | 103 ++++++++++++++++++ 4 files changed, 134 insertions(+) create mode 100644 tests/topotests/bgp_received_routes_with_soft_inbound/__init__.py create mode 100644 tests/topotests/bgp_received_routes_with_soft_inbound/r1/frr.conf create mode 100644 tests/topotests/bgp_received_routes_with_soft_inbound/r2/frr.conf create mode 100644 tests/topotests/bgp_received_routes_with_soft_inbound/test_bgp_received_routes_with_soft_inbound.py diff --git a/tests/topotests/bgp_received_routes_with_soft_inbound/__init__.py b/tests/topotests/bgp_received_routes_with_soft_inbound/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/bgp_received_routes_with_soft_inbound/r1/frr.conf b/tests/topotests/bgp_received_routes_with_soft_inbound/r1/frr.conf new file mode 100644 index 000000000000..01dd4f3c5a23 --- /dev/null +++ b/tests/topotests/bgp_received_routes_with_soft_inbound/r1/frr.conf @@ -0,0 +1,17 @@ +! +int r1-eth0 + ip address 192.168.1.1/24 +! 
+router bgp 65001 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.1.2 remote-as external + address-family ipv4 unicast + neighbor 192.168.1.2 route-map r2 in + neighbor 192.168.1.2 soft-reconfiguration inbound + exit-address-family +! +route-map r2 permit 10 + set as-path prepend 65000 65000 65000 +exit +! diff --git a/tests/topotests/bgp_received_routes_with_soft_inbound/r2/frr.conf b/tests/topotests/bgp_received_routes_with_soft_inbound/r2/frr.conf new file mode 100644 index 000000000000..86dd8e338955 --- /dev/null +++ b/tests/topotests/bgp_received_routes_with_soft_inbound/r2/frr.conf @@ -0,0 +1,14 @@ +! +int r2-eth0 + ip address 192.168.1.2/24 +! +router bgp 65002 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.1.1 remote-as external + neighbor 192.168.1.1 timers 1 3 + neighbor 192.168.1.1 timers connect 1 + address-family ipv4 unicast + network 10.0.0.2/32 + exit-address-family +! diff --git a/tests/topotests/bgp_received_routes_with_soft_inbound/test_bgp_received_routes_with_soft_inbound.py b/tests/topotests/bgp_received_routes_with_soft_inbound/test_bgp_received_routes_with_soft_inbound.py new file mode 100644 index 000000000000..0b933add2f64 --- /dev/null +++ b/tests/topotests/bgp_received_routes_with_soft_inbound/test_bgp_received_routes_with_soft_inbound.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# Copyright (c) 2024 by +# Donatas Abraitis +# + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, get_topogen + +pytestmark = [pytest.mark.bgpd] + + +def setup_module(mod): + topodef = {"s1": ("r1", "r2")} + tgen = Topogen(topodef, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_received_routes_with_soft_inbound(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + + def _bgp_converge(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast json")) + expected = { + "routes": { + "10.0.0.2/32": [ + { + "valid": True, + "path": "65000 65000 65000 65002", + "nexthops": [ + { + "ip": "192.168.1.2", + } + ], + } + ] + } + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_converge, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't converge" + + def _bgp_check_receveived_routes(): + output = json.loads( + r1.vtysh_cmd( + "show bgp ipv4 unicast neighbors 192.168.1.2 received-routes json" + ) + ) + expected = { + "receivedRoutes": { + "10.0.0.2/32": { + "valid": True, + "path": "65002", + "nextHop": "192.168.1.2", + } + } + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_check_receveived_routes, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't converge" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) From ba4555c646f68cbcda8b075abb7fee74ca06a2d6 Mon Sep 17 00:00:00 2001 From: "Barry A. 
Trent" Date: Thu, 26 Sep 2024 14:49:19 -0700 Subject: [PATCH 10/73] pimd: fix autorp CLI bugs Signed-off-by: Barry A. Trent --- pimd/pim_autorp.c | 3 ++- pimd/pim_cmd.c | 2 +- pimd/pim_cmd_common.c | 8 ++++---- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/pimd/pim_autorp.c b/pimd/pim_autorp.c index 8f3b8de3cd95..1f4d0c65af67 100644 --- a/pimd/pim_autorp.c +++ b/pimd/pim_autorp.c @@ -851,6 +851,7 @@ void pim_autorp_add_candidate_rp_plist(struct pim_instance *pim, snprintf(rp->grplist, sizeof(rp->grplist), "%s", plist); /* A new group prefix list implies that any previous group prefix is now invalid */ memset(&(rp->grp), 0, sizeof(rp->grp)); + rp->grp.family = AF_INET; pim_autorp_new_announcement(pim); } @@ -1155,7 +1156,7 @@ void pim_autorp_show_autorp(struct vty *vty, struct pim_instance *pim, table = ttable_dump(tt, "\n"); vty_out(vty, "%s\n", table); - XFREE(MTYPE_TMP, table); + XFREE(MTYPE_TMP_TTABLE, table); } ttable_del(tt); diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index aa7fc0d81f6a..934da2d53e67 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -4609,7 +4609,7 @@ DEFPY (pim_autorp_announce_rp, "Prefix list\n" "List name\n") { - return pim_process_autorp_candidate_rp_cmd(vty, no, rpaddr_str, grp, + return pim_process_autorp_candidate_rp_cmd(vty, no, rpaddr_str, (grp_str ? grp : NULL), plist); } diff --git a/pimd/pim_cmd_common.c b/pimd/pim_cmd_common.c index be7460d0fbeb..02ddea8252d6 100644 --- a/pimd/pim_cmd_common.c +++ b/pimd/pim_cmd_common.c @@ -639,9 +639,9 @@ int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, char grpstr[64]; if (no) { - if (!is_default_prefix((const struct prefix *)grp) || plist) { + if ((grp && !is_default_prefix((const struct prefix *)grp)) || plist) { /* If any single values are set, only destroy those */ - if (!is_default_prefix((const struct prefix *)grp)) { + if (grp && !is_default_prefix((const struct prefix *)grp)) { snprintfrr(xpath, sizeof(xpath), "%s/candidate-rp-list[rp-address='%s']/group", FRR_PIM_AUTORP_XPATH, rpaddr_str); @@ -663,12 +663,12 @@ int pim_process_autorp_candidate_rp_cmd(struct vty *vty, bool no, nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); } } else { - if (!is_default_prefix((const struct prefix *)grp) || plist) { + if ((grp && !is_default_prefix((const struct prefix *)grp)) || plist) { snprintfrr(xpath, sizeof(xpath), "%s/candidate-rp-list[rp-address='%s']", FRR_PIM_AUTORP_XPATH, rpaddr_str); nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); - if (!is_default_prefix((const struct prefix *)grp)) { + if (grp && !is_default_prefix((const struct prefix *)grp)) { snprintfrr(xpath, sizeof(xpath), "%s/candidate-rp-list[rp-address='%s']/group", FRR_PIM_AUTORP_XPATH, rpaddr_str); From 41fa1541632660e8eab92c3b3b786007c5cbe67b Mon Sep 17 00:00:00 2001 From: "Barry A. Trent" Date: Fri, 27 Sep 2024 12:59:12 -0700 Subject: [PATCH 11/73] tests: enhance autorp topotest Signed-off-by: Barry A. 
Trent --- tests/topotests/pim_autorp/test_pim_autorp.py | 154 ++++++++++++++++-- 1 file changed, 144 insertions(+), 10 deletions(-) diff --git a/tests/topotests/pim_autorp/test_pim_autorp.py b/tests/topotests/pim_autorp/test_pim_autorp.py index 5aecce942e7b..ad618af29e3d 100644 --- a/tests/topotests/pim_autorp/test_pim_autorp.py +++ b/tests/topotests/pim_autorp/test_pim_autorp.py @@ -11,12 +11,18 @@ import os import sys import pytest +from functools import partial # pylint: disable=C0413 # Import topogen and topotest helpers +from lib import topotest from lib.topogen import Topogen, get_topogen from lib.topolog import logger -from lib.pim import scapy_send_autorp_raw_packet, verify_pim_rp_info, verify_pim_rp_info_is_empty +from lib.pim import ( + scapy_send_autorp_raw_packet, + verify_pim_rp_info, + verify_pim_rp_info_is_empty, +) from lib.common_config import step, write_test_header from time import sleep @@ -55,6 +61,7 @@ def build_topo(tgen): switch.add_link(tgen.gears["r1"]) switch.add_link(tgen.gears["r2"]) + def setup_module(mod): logger.info("PIM AutoRP basic functionality:\n {}".format(TOPOLOGY)) @@ -87,6 +94,7 @@ def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() + def test_pim_autorp_discovery_single_rp(request): "Test PIM AutoRP Discovery with single RP" tgen = get_topogen() @@ -106,13 +114,25 @@ def test_pim_autorp_discovery_single_rp(request): scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data) step("Verify rp-info from AutoRP packet") - result = verify_pim_rp_info(tgen, None, "r2", "224.0.0.0/4", "r2-eth0", "10.10.76.1", "AutoRP", False, "ipv4", True) + result = verify_pim_rp_info( + tgen, + None, + "r2", + "224.0.0.0/4", + "r2-eth0", + "10.10.76.1", + "AutoRP", + False, + "ipv4", + True, + ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Verify AutoRP configuration times out") result = verify_pim_rp_info_is_empty(tgen, "r2") assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) + def test_pim_autorp_discovery_multiple_rp(request): "Test PIM AutoRP Discovery with multiple RP's" tgen = get_topogen() @@ -132,9 +152,31 @@ def test_pim_autorp_discovery_multiple_rp(request): scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data) step("Verify rp-info from AutoRP packet") - result = verify_pim_rp_info(tgen, None, "r2", "224.0.0.0/8", "r2-eth0", "10.10.76.1", "AutoRP", False, "ipv4", True) + result = verify_pim_rp_info( + tgen, + None, + "r2", + "224.0.0.0/8", + "r2-eth0", + "10.10.76.1", + "AutoRP", + False, + "ipv4", + True, + ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - result = verify_pim_rp_info(tgen, None, "r2", "225.0.0.0/8", "r2-eth0", "10.10.76.3", "AutoRP", False, "ipv4", True) + result = verify_pim_rp_info( + tgen, + None, + "r2", + "225.0.0.0/8", + "r2-eth0", + "10.10.76.3", + "AutoRP", + False, + "ipv4", + True, + ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) @@ -156,7 +198,18 @@ def test_pim_autorp_discovery_static(request): rnode.cmd("vtysh -c 'conf t' -c 'router pim' -c 'rp 10.10.76.3 224.0.0.0/4'") step("Verify static rp-info from r2") - result = verify_pim_rp_info(tgen, None, "r2", "224.0.0.0/4", "r2-eth0", "10.10.76.3", "Static", False, "ipv4", True) + result = verify_pim_rp_info( + tgen, + None, + "r2", + "224.0.0.0/4", + "r2-eth0", + "10.10.76.3", + "Static", + False, + "ipv4", + True, + ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) step("Send AutoRP 
packet from r1 to r2") @@ -165,10 +218,87 @@ def test_pim_autorp_discovery_static(request): scapy_send_autorp_raw_packet(tgen, "r1", "r1-eth0", data) step("Verify rp-info from AutoRP packet") - result = verify_pim_rp_info(tgen, None, "r2", "224.0.0.0/4", "r2-eth0", "10.10.76.1", "AutoRP", False, "ipv4", True) + result = verify_pim_rp_info( + tgen, + None, + "r2", + "224.0.0.0/4", + "r2-eth0", + "10.10.76.1", + "AutoRP", + False, + "ipv4", + True, + ) assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) +def test_pim_autorp_announce_cli(request): + "Test PIM AutoRP Announcement CLI commands" + tgen = get_topogen() + tc_name = request.node.name + write_test_header(tc_name) + + if tgen.routers_have_failure(): + pytest.skip("skipped because of router(s) failure") + + step("Add AutoRP announcement configuration to r1") + r1 = tgen.routers()["r1"] + r1.vtysh_cmd( + """ + conf + router pim + autorp announce holdtime 90 + autorp announce interval 120 + autorp announce scope 5 + autorp announce 10.2.3.4 225.0.0.0/24 +""" + ) + + expected = { + "discoveryEnabled": True, + "announce": { + "scope": 5, + "interval": 120, + "holdtime": 90, + "rpList": [ + {"rpAddress": "10.2.3.4", "group": "225.0.0.0/24", "prefixList": ""} + ], + }, + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip pim autorp json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = '"{}" JSON output mismatches'.format(r1.name) + assert result is None, assertmsg + + r1.vtysh_cmd( + """ + conf + router pim + autorp announce 10.2.3.4 group-list ListA +""" + ) + expected = { + "discoveryEnabled": True, + "announce": { + "scope": 5, + "interval": 120, + "holdtime": 90, + "rpList": [{"rpAddress": "10.2.3.4", "group": "", "prefixList": "ListA"}], + }, + } + + test_func = partial( + topotest.router_json_cmp, r1, "show ip pim autorp json", expected + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = '"{}" JSON output mismatches'.format(r1.name) + assert result is None, assertmsg + + def test_pim_autorp_announce_group(request): "Test PIM AutoRP Announcement with a single group" tgen = get_topogen() @@ -180,17 +310,21 @@ def test_pim_autorp_announce_group(request): step("Add candidate RP configuration to r1") rnode = tgen.routers()["r1"] - rnode.cmd("vtysh -c 'conf t' -c 'router pim' -c 'send-rp-announce 10.10.76.1 224.0.0.0/4'") + rnode.cmd( + "vtysh -c 'conf t' -c 'router pim' -c 'send-rp-announce 10.10.76.1 224.0.0.0/4'" + ) step("Verify Announcement sent data") # TODO: Verify AutoRP mapping agent receives candidate RP announcement # Mapping agent is not yet implemented - #sleep(10) + # sleep(10) step("Change AutoRP Announcement packet parameters") - rnode.cmd("vtysh -c 'conf t' -c 'router pim' -c 'send-rp-announce scope 8 interval 10 holdtime 60'") + rnode.cmd( + "vtysh -c 'conf t' -c 'router pim' -c 'send-rp-announce scope 8 interval 10 holdtime 60'" + ) step("Verify Announcement sent data") # TODO: Verify AutoRP mapping agent receives updated candidate RP announcement # Mapping agent is not yet implemented - #sleep(10) + # sleep(10) def test_memory_leak(): From 1276eaaa90f0360687fab14a7c7f58399be6c0f3 Mon Sep 17 00:00:00 2001 From: anlan_cs Date: Sun, 29 Sep 2024 10:15:31 +0800 Subject: [PATCH 12/73] tools: fix missing check interfaces for reloading pim Without checking interfaces, the other interfaces' changes will be wrongly lost. Running config: ``` interface A ip pim ip pim use-source 11.0.0.1 exit ! 
interface B ip pim ip pim use-source 22.0.0.1 exit ! ``` Reload the new config: ``` interface A exit ! interface B ip pim exit ``` Before: ``` 2024-09-29 10:08:27,686 INFO: Executed "interface A no ip pim exit" ``` After: ``` 2024-09-29 10:05:01,356 INFO: Executed "interface A no ip pim exit" 2024-09-29 10:05:01,376 INFO: Executed "interface B no ip pim use-source 22.0.0.1 exit" ``` Signed-off-by: anlan_cs --- tools/frr-reload.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 9dae348b8e21..53bb6513e21a 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -1140,14 +1140,14 @@ def pim_delete_move_lines(lines_to_add, lines_to_del): # they are implicitly deleted by 'no ip pim'. # Remove all such depdendent options from delete # pending list. - pim_disable = False + pim_disable = [] lines_to_del_to_del = [] index = -1 for ctx_keys, line in lines_to_del: index = index + 1 if ctx_keys[0].startswith("interface") and line and line == "ip pim": - pim_disable = True + pim_disable.append(ctx_keys[0]) # no ip msdp peer <> does not accept source so strip it off. if line and line.startswith("ip msdp peer "): @@ -1158,14 +1158,14 @@ def pim_delete_move_lines(lines_to_add, lines_to_del): lines_to_del.remove((ctx_keys, line)) lines_to_del.insert(index, (ctx_keys, new_line)) - if pim_disable: - for ctx_keys, line in lines_to_del: - if ( - ctx_keys[0].startswith("interface") - and line - and (line.startswith("ip pim ") or line.startswith("ip multicast ")) - ): - lines_to_del_to_del.append((ctx_keys, line)) + for ctx_keys, line in lines_to_del: + if ( + ctx_keys[0] in pim_disable + and ctx_keys[0].startswith("interface") + and line + and (line.startswith("ip pim ") or line.startswith("ip multicast ")) + ): + lines_to_del_to_del.append((ctx_keys, line)) for ctx_keys, line in lines_to_del_to_del: lines_to_del.remove((ctx_keys, line)) From 5291228bf7bd6f1582a54369e469a6e3b769eb17 Mon Sep 17 00:00:00 2001 From: anlan_cs Date: Mon, 30 Sep 2024 13:06:26 +0800 Subject: [PATCH 13/73] pimd: fix missing IPV4 check In `pim_if_addr_add()`, some code inside `PIM_IPV == 4` ( the case of `igmp->mtrace_only` ) wrongly accepts ipv6 address. So, clearly add IPV4 check. 
Signed-off-by: anlan_cs --- pimd/pim_iface.c | 107 +++++++++++++++++++++-------------------------- 1 file changed, 48 insertions(+), 59 deletions(-) diff --git a/pimd/pim_iface.c b/pimd/pim_iface.c index 1dc9307e4f8e..20e3ba184ba8 100644 --- a/pimd/pim_iface.c +++ b/pimd/pim_iface.c @@ -527,77 +527,66 @@ void pim_if_addr_add(struct connected *ifc) detect_address_change(ifp, 0, __func__); - // if (ifc->address->family != AF_INET) - // return; - #if PIM_IPV == 4 - struct in_addr ifaddr = ifc->address->u.prefix4; + if (ifc->address->family == AF_INET) { + struct in_addr ifaddr = ifc->address->u.prefix4; - if (pim_ifp->gm_enable) { - struct gm_sock *igmp; + if (pim_ifp->gm_enable) { + struct gm_sock *igmp; - /* lookup IGMP socket */ - igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, - ifaddr); - if (!igmp) { - /* if addr new, add IGMP socket */ - if (ifc->address->family == AF_INET) + /* lookup IGMP socket */ + igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, ifaddr); + if (!igmp) { + /* if addr new, add IGMP socket */ pim_igmp_sock_add(pim_ifp->gm_socket_list, ifaddr, ifp, false); - } else if (igmp->mtrace_only) { - igmp_sock_delete(igmp); - pim_igmp_sock_add(pim_ifp->gm_socket_list, ifaddr, ifp, - false); - } + } else if (igmp->mtrace_only) { + igmp_sock_delete(igmp); + pim_igmp_sock_add(pim_ifp->gm_socket_list, ifaddr, ifp, false); + } - /* Replay Static IGMP groups */ - if (pim_ifp->gm_join_list) { - struct listnode *node; - struct listnode *nextnode; - struct gm_join *ij; - int join_fd; - - for (ALL_LIST_ELEMENTS(pim_ifp->gm_join_list, node, - nextnode, ij)) { - /* Close socket and reopen with Source and Group - */ - close(ij->sock_fd); - join_fd = gm_join_sock( - ifp->name, ifp->ifindex, ij->group_addr, - ij->source_addr, pim_ifp); - if (join_fd < 0) { - char group_str[INET_ADDRSTRLEN]; - char source_str[INET_ADDRSTRLEN]; - pim_inet4_dump("", ij->group_addr, - group_str, - sizeof(group_str)); - pim_inet4_dump( - "", ij->source_addr, - source_str, sizeof(source_str)); - zlog_warn( - "%s: gm_join_sock() failure for IGMP group %s source %s on interface %s", - __func__, group_str, source_str, - ifp->name); - /* warning only */ - } else - ij->sock_fd = join_fd; + /* Replay Static IGMP groups */ + if (pim_ifp->gm_join_list) { + struct listnode *node; + struct listnode *nextnode; + struct gm_join *ij; + int join_fd; + + for (ALL_LIST_ELEMENTS(pim_ifp->gm_join_list, node, nextnode, ij)) { + /* Close socket and reopen with Source and Group + */ + close(ij->sock_fd); + join_fd = gm_join_sock(ifp->name, ifp->ifindex, + ij->group_addr, ij->source_addr, + pim_ifp); + if (join_fd < 0) { + char group_str[INET_ADDRSTRLEN]; + char source_str[INET_ADDRSTRLEN]; + pim_inet4_dump("", ij->group_addr, group_str, + sizeof(group_str)); + pim_inet4_dump("", ij->source_addr, + source_str, sizeof(source_str)); + zlog_warn("%s: gm_join_sock() failure for IGMP group %s source %s on interface %s", + __func__, group_str, source_str, + ifp->name); + /* warning only */ + } else + ij->sock_fd = join_fd; + } } - } - } /* igmp */ - else { - struct gm_sock *igmp; - - /* lookup IGMP socket */ - igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, - ifaddr); - if (ifc->address->family == AF_INET) { + } /* igmp */ + else { + struct gm_sock *igmp; + + /* lookup IGMP socket */ + igmp = pim_igmp_sock_lookup_ifaddr(pim_ifp->gm_socket_list, ifaddr); if (igmp) igmp_sock_delete(igmp); /* if addr new, add IGMP socket */ pim_igmp_sock_add(pim_ifp->gm_socket_list, ifaddr, ifp, true); - } - } /* igmp mtrace 
only */ + } /* igmp mtrace only */ + } #endif if (pim_ifp->pim_enable) { From 30eb4b73ff0082f419c34e0a65636179aa02d9be Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Mon, 5 Aug 2024 14:36:18 -0300 Subject: [PATCH 14/73] pimd: remove unreachable code MLD code is IPv6 only so the define `PIM_IPV` will never be 4. Signed-off-by: Rafael Zalamena --- pimd/pim6_mld.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pimd/pim6_mld.c b/pimd/pim6_mld.c index 8ccf42d729ba..a871837701a4 100644 --- a/pimd/pim6_mld.c +++ b/pimd/pim6_mld.c @@ -62,7 +62,6 @@ static void gm_sg_timer_start(struct gm_if *gm_ifp, struct gm_sg *sg, sg->iface->ifp->name, &sg->sgaddr /* clang-format off */ -#if PIM_IPV == 6 static const pim_addr gm_all_hosts = { .s6_addr = { 0xff, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, @@ -84,13 +83,6 @@ static const pim_addr gm_dummy_untracked = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }, }; -#else -/* 224.0.0.1 */ -static const pim_addr gm_all_hosts = { .s_addr = htonl(0xe0000001), }; -/* 224.0.0.22 */ -static const pim_addr gm_all_routers = { .s_addr = htonl(0xe0000016), }; -static const pim_addr gm_dummy_untracked = { .s_addr = 0xffffffff, }; -#endif /* clang-format on */ #define IPV6_MULTICAST_SCOPE_LINK 2 From e1338f5ade1fd223e8eaca750123894cc5e7ca03 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Wed, 31 Jul 2024 22:16:54 -0300 Subject: [PATCH 15/73] pimd: fix northbound error message on delete `snprintf` doesn't know about `%pPAs` use `snprintfrr` instead. Signed-off-by: Rafael Zalamena --- pimd/pim_nb_config.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index f864ce8f8ee6..ea8b56fee395 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -934,10 +934,9 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss yang_dnode_get_pimaddr(&source_addr, args->dnode, NULL); result = pim_ssmpingd_start(pim, source_addr); if (result) { - snprintf( - args->errmsg, args->errmsg_len, - "%% Failure starting ssmpingd for source %pPA: %d", - &source_addr, result); + snprintfrr(args->errmsg, args->errmsg_len, + "%% Failure starting ssmpingd for source %pPA: %d", &source_addr, + result); return NB_ERR_INCONSISTENCY; } } @@ -964,10 +963,9 @@ int routing_control_plane_protocols_control_plane_protocol_pim_address_family_ss yang_dnode_get_pimaddr(&source_addr, args->dnode, NULL); result = pim_ssmpingd_stop(pim, source_addr); if (result) { - snprintf( - args->errmsg, args->errmsg_len, - "%% Failure stopping ssmpingd for source %pPA: %d", - &source_addr, result); + snprintfrr(args->errmsg, args->errmsg_len, + "%% Failure stopping ssmpingd for source %pPA: %d", &source_addr, + result); return NB_ERR_INCONSISTENCY; } @@ -3860,10 +3858,9 @@ int lib_interface_gmp_address_family_join_group_destroy( GM_JOIN_STATIC); if (result) { - snprintf(args->errmsg, args->errmsg_len, - "%% Failure leaving " GM - " group %pPAs %pPAs on interface %s: %d", - &source_addr, &group_addr, ifp->name, result); + snprintfrr(args->errmsg, args->errmsg_len, + "%% Failure leaving " GM " group %pPAs %pPAs on interface %s: %d", + &source_addr, &group_addr, ifp->name, result); return NB_ERR_INCONSISTENCY; } @@ -3952,9 +3949,9 @@ int lib_interface_gmp_address_family_static_group_destroy( result = pim_if_static_group_del(ifp, group_addr, source_addr); if (result) { - snprintf(args->errmsg, args->errmsg_len, - "%% Failure removing static group %pPAs %pPAs on interface %s: %d", - 
&source_addr, &group_addr, ifp->name, result); + snprintfrr(args->errmsg, args->errmsg_len, + "%% Failure removing static group %pPAs %pPAs on interface %s: %d", + &source_addr, &group_addr, ifp->name, result); return NB_ERR_INCONSISTENCY; } From 660146b57ad48f40ca4ea845cc8538500a8d4759 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Mon, 30 Sep 2024 11:31:56 -0300 Subject: [PATCH 16/73] lib: fix calloc warning on recent compiler Fix the following compiler warning: ``` lib/elf_py.c: In function _elffile_load_: lib/elf_py.c:1310:34: warning: _calloc_ sizes specified with _sizeof_ in the earlier argument and not in the later argument [-Wcalloc-transposed-args] 1310 | w->sects = calloc(sizeof(PyObject *), w->ehdr->e_shnum); | ^~~~~~~~ lib/elf_py.c:1310:34: note: earlier argument should specify number of elements, later size of each element ``` Signed-off-by: Rafael Zalamena --- lib/elf_py.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/elf_py.c b/lib/elf_py.c index 2b4fea373f03..6c63d1f89202 100644 --- a/lib/elf_py.c +++ b/lib/elf_py.c @@ -1307,7 +1307,7 @@ static PyObject *elffile_load(PyTypeObject *type, PyObject *args, } #endif - w->sects = calloc(sizeof(PyObject *), w->ehdr->e_shnum); + w->sects = calloc(w->ehdr->e_shnum, sizeof(PyObject *)); w->n_sect = w->ehdr->e_shnum; return (PyObject *)w; From 068aea10137950b7f80475a4846191ae8fe3ae3d Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 26 Sep 2024 10:40:30 -0400 Subject: [PATCH 17/73] bgpd: Use CHECK_FLAG to remain consistent for mp_flags Signed-off-by: Donald Sharp --- bgpd/bgp_mpath.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index eadd52b8e0f2..3421b00ba75b 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -472,10 +472,10 @@ bool bgp_path_info_mpath_chkwtd(struct bgp *bgp, struct bgp_path_info *path) */ if (bgp->lb_handling != BGP_LINK_BW_SKIP_MISSING && bgp->lb_handling != BGP_LINK_BW_DEFWT_4_MISSING) - return (path->mpath->mp_flags & BGP_MP_LB_ALL); + return CHECK_FLAG(path->mpath->mp_flags, BGP_MP_LB_ALL); /* At least one path should have bandwidth. */ - return (path->mpath->mp_flags & BGP_MP_LB_PRESENT); + return CHECK_FLAG(path->mpath->mp_flags, BGP_MP_LB_PRESENT); } /* From 6e0fe595f174e593133fbd52dcafdee511897db2 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 26 Sep 2024 10:46:23 -0400 Subject: [PATCH 18/73] bgpd: Ensure mpath data is only on bestpath The mpath data structure has data that is only relevant for the first mpath in the list. It is not being used anywhere else. Let's document that a bit more. 
Signed-off-by: Donald Sharp --- bgpd/bgp_mpath.c | 4 ++++ bgpd/bgp_mpath.h | 9 ++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index 3421b00ba75b..3b8b27455663 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -411,6 +411,10 @@ static void bgp_path_info_mpath_count_set(struct bgp_path_info *path, * bgp_path_info_mpath_lb_update * * Update cumulative info related to link-bandwidth + * + * This is only set on the first mpath of the list + * as such we should UNSET the flags when removing + * to ensure nothing accidently happens */ static void bgp_path_info_mpath_lb_update(struct bgp_path_info *path, bool set, bool all_paths_lb, uint64_t cum_bw) diff --git a/bgpd/bgp_mpath.h b/bgpd/bgp_mpath.h index 129682d1dc19..267d729e06d4 100644 --- a/bgpd/bgp_mpath.h +++ b/bgpd/bgp_mpath.h @@ -25,15 +25,18 @@ struct bgp_path_info_mpath { /* When attached to best path, the number of selected multipaths */ uint16_t mp_count; - /* Flags - relevant as noted. */ + /* Flags - relevant as noted, attached to bestpath. */ uint16_t mp_flags; #define BGP_MP_LB_PRESENT 0x1 /* Link-bandwidth present for >= 1 path */ #define BGP_MP_LB_ALL 0x2 /* Link-bandwidth present for all multipaths */ - /* Aggregated attribute for advertising multipath route */ + /* + * Aggregated attribute for advertising multipath route, + * attached to bestpath + */ struct attr *mp_attr; - /* Cumulative bandiwdth of all multipaths - attached to best path. */ + /* Cumulative bandiwdth of all multipaths - attached to bestpath. */ uint64_t cum_bw; }; From 6ff85fc7484ebf2749eea9a91ef1bf21232cb466 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Mon, 30 Sep 2024 14:57:45 -0400 Subject: [PATCH 19/73] tests: Clean up some logging in test_bgp_default_originate_2links.py Test was confusing. 
Add some useful data and clean up some debugs Signed-off-by: Donald Sharp --- .../test_bgp_default_originate_2links.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py index f4f874f94221..bcdd98788989 100644 --- a/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py +++ b/tests/topotests/bgp_default_originate/test_bgp_default_originate_2links.py @@ -266,21 +266,21 @@ def verify_the_uptime(time_stamp_before, time_stamp_after, incremented=None): if incremented == True: if uptime_before < uptime_after: logger.info( - " The Uptime [{}] is incremented than [{}].......PASSED ".format( + " The Uptime before [{}] is less than [{}].......PASSED ".format( time_stamp_before, time_stamp_after ) ) return True else: logger.error( - " The Uptime [{}] is expected to be incremented than [{}].......FAILED ".format( + " The Uptime before [{}] is greater than the uptime after [{}].......FAILED ".format( time_stamp_before, time_stamp_after ) ) return False else: logger.info( - " The Uptime [{}] is not incremented than [{}] ".format( + " The Uptime before [{}] the same as after [{}] ".format( time_stamp_before, time_stamp_after ) ) @@ -1027,7 +1027,7 @@ def test_verify_bgp_default_originate_with_default_static_route_p1(request): result = verify_the_uptime(uptime_before_ipv6, uptime_after_ipv6, incremented=False) assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) - step("Taking uptime snapshot before removing redisctribute static ") + step("Taking uptime snapshot before removing redistribute static") uptime_before_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict) uptime_before_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict) sleep(1) @@ -1074,6 +1074,7 @@ def test_verify_bgp_default_originate_with_default_static_route_p1(request): ) assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) + step("Now look that the route is not pointed at link2") result = verify_rib_default_route( tgen, topo, @@ -1093,7 +1094,7 @@ def test_verify_bgp_default_originate_with_default_static_route_p1(request): ) assert result is not True, "Testcase {} : Failed Error: {}".format(tc_name, result) - step("Taking uptime snapshot before removing redisctribute static ") + step("Taking uptime snapshot after removing redistribute static") uptime_after_ipv4 = get_rib_route_uptime(tgen, "ipv4", "r2", ipv4_uptime_dict) uptime_after_ipv6 = get_rib_route_uptime(tgen, "ipv6", "r2", ipv6_uptime_dict) From 421cf856ef86db250a86be01437d0a668b463dcc Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Mon, 30 Sep 2024 15:09:42 -0400 Subject: [PATCH 20/73] bgpd: Cleanup multipath figuring out in bgp Currently bgp multipath has these properties: a) mp_info may or may not be on a single path, based upon path perturbations in the past. b) mp_info->count started counting at 0( meaning 1 ). As that the bestpath path_info was never included in the count c) The first mp_info in the list held the multipath data associated with the multipath. As such if you were at any other node that data was not filled in. d) As such the mp_info's that are not first on the list basically were just pointers to the corresponding bgp_path_info that was in the multipath. e) On bestpath calculation, a linklist(struct linklist *) of bgp_path_info's was created. 
f) This linklist was passed in to a comparison function that took the old mpinfo list and compared it item by item to the linklist and doing magic to figure out how to create a new mp_info list. g) the old mp_info and the link list had to be memory managed and freed up. h) BGP_PATH_MULTIPATH is only set on non bestpath nodes in the multipath. This is really complicated. Let's change the algorithm to this: a) When running bestpath, mark a bgp_path_info node that could be in the ecmp path as BGP_PATH_MULTIPATH_NEW. b) When running multipath, just walk the list of bgp_path_info's and if it has BGP_PATH_MULTIPATH_NEW on it, decide if it is in BGP_MULTIPATH. If we run out of space to put in the ecmp, clear the flag on the rest. c) Clean up the counting of sometimes adding 1 to the mpath count. d) Only allocate a mpath_info node for the bestpath. Clean it up when done with it. e) remove the unneeded list management associated with the linklist and the mp_list. This greatly simplifies multipath computation for bgp and reduces memory load for large scale deployments. 2 full feeds in work_queue_run prior: 0 56367.471 1123 50193 493695 50362 493791 0 0 0 TE work_queue_run BGP multipath info : 1941844 48 110780992 1941844 110780992 2 full feeds in work_queue_run after change: 1 52924.931 1296 40837 465968 41025 487390 0 0 1 TE work_queue_run BGP multipath info : 970860 32 38836880 970866 38837120 Aproximately 4 seconds of saved cpu time for convergence and ~75 mb smaller run time. Signed-off-by: Donald Sharp --- bgpd/bgp_mpath.c | 427 +++++++++------------------------- bgpd/bgp_mpath.h | 18 +- bgpd/bgp_route.c | 21 +- bgpd/bgp_route.h | 14 ++ bgpd/bgp_routemap.c | 2 +- tests/bgpd/subdir.am | 11 - tests/bgpd/test_mpath.c | 482 --------------------------------------- tests/bgpd/test_mpath.py | 10 - 8 files changed, 135 insertions(+), 850 deletions(-) delete mode 100644 tests/bgpd/test_mpath.c delete mode 100644 tests/bgpd/test_mpath.py diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index 3b8b27455663..e27b78977708 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -2,8 +2,10 @@ /* * BGP Multipath * Copyright (C) 2010 Google Inc. + * 2024 Nvidia Corporation + * Donald Sharp * - * This file is part of Quagga + * This file is part of FRR */ #include @@ -190,78 +192,6 @@ int bgp_path_info_nexthop_cmp(struct bgp_path_info *bpi1, return compare; } -/* - * bgp_path_info_mpath_cmp - * - * This function determines our multipath list ordering. By ordering - * the list we can deterministically select which paths are included - * in the multipath set. The ordering also helps in detecting changes - * in the multipath selection so we can detect whether to send an - * update to zebra. - * - * The order of paths is determined first by received nexthop, and then - * by peer address if the nexthops are the same. 
- */ -static int bgp_path_info_mpath_cmp(void *val1, void *val2) -{ - struct bgp_path_info *bpi1, *bpi2; - int compare; - - bpi1 = val1; - bpi2 = val2; - - compare = bgp_path_info_nexthop_cmp(bpi1, bpi2); - - if (!compare) { - if (!bpi1->peer->su_remote && !bpi2->peer->su_remote) - compare = 0; - else if (!bpi1->peer->su_remote) - compare = 1; - else if (!bpi2->peer->su_remote) - compare = -1; - else - compare = sockunion_cmp(bpi1->peer->su_remote, - bpi2->peer->su_remote); - } - - return compare; -} - -/* - * bgp_mp_list_init - * - * Initialize the mp_list, which holds the list of multipaths - * selected by bgp_best_selection - */ -void bgp_mp_list_init(struct list *mp_list) -{ - assert(mp_list); - memset(mp_list, 0, sizeof(struct list)); - mp_list->cmp = bgp_path_info_mpath_cmp; -} - -/* - * bgp_mp_list_clear - * - * Clears all entries out of the mp_list - */ -void bgp_mp_list_clear(struct list *mp_list) -{ - assert(mp_list); - list_delete_all_node(mp_list); -} - -/* - * bgp_mp_list_add - * - * Adds a multipath entry to the mp_list - */ -void bgp_mp_list_add(struct list *mp_list, struct bgp_path_info *mpinfo) -{ - assert(mp_list && mpinfo); - listnode_add_sort(mp_list, mpinfo); -} - /* * bgp_path_info_mpath_new * @@ -274,6 +204,7 @@ static struct bgp_path_info_mpath *bgp_path_info_mpath_new(void) new_mpath = XCALLOC(MTYPE_BGP_MPATH_INFO, sizeof(struct bgp_path_info_mpath)); + new_mpath->mp_count = 1; return new_mpath; } @@ -287,6 +218,8 @@ void bgp_path_info_mpath_free(struct bgp_path_info_mpath **mpath) if (mpath && *mpath) { if ((*mpath)->mp_attr) bgp_attr_unintern(&(*mpath)->mp_attr); + (*mpath)->mp_attr = NULL; + XFREE(MTYPE_BGP_MPATH_INFO, *mpath); } } @@ -313,31 +246,6 @@ bgp_path_info_mpath_get(struct bgp_path_info *path) return path->mpath; } -/* - * bgp_path_info_mpath_enqueue - * - * Enqueue a path onto the multipath list given the previous multipath - * list entry - */ -static void bgp_path_info_mpath_enqueue(struct bgp_path_info *prev_info, - struct bgp_path_info *path) -{ - struct bgp_path_info_mpath *prev, *mpath; - - prev = bgp_path_info_mpath_get(prev_info); - mpath = bgp_path_info_mpath_get(path); - if (!prev || !mpath) - return; - - mpath->mp_next = prev->mp_next; - mpath->mp_prev = prev; - if (prev->mp_next) - prev->mp_next->mp_prev = mpath; - prev->mp_next = mpath; - - SET_FLAG(path->flags, BGP_PATH_MULTIPATH); -} - /* * bgp_path_info_mpath_dequeue * @@ -346,14 +254,9 @@ static void bgp_path_info_mpath_enqueue(struct bgp_path_info *prev_info, void bgp_path_info_mpath_dequeue(struct bgp_path_info *path) { struct bgp_path_info_mpath *mpath = path->mpath; + if (!mpath) return; - if (mpath->mp_prev) - mpath->mp_prev->mp_next = mpath->mp_next; - if (mpath->mp_next) - mpath->mp_next->mp_prev = mpath->mp_prev; - mpath->mp_next = mpath->mp_prev = NULL; - UNSET_FLAG(path->flags, BGP_PATH_MULTIPATH); } /* @@ -363,9 +266,16 @@ void bgp_path_info_mpath_dequeue(struct bgp_path_info *path) */ struct bgp_path_info *bgp_path_info_mpath_next(struct bgp_path_info *path) { - if (!path->mpath || !path->mpath->mp_next) - return NULL; - return path->mpath->mp_next->mp_info; + path = path->next; + + while (path) { + if (CHECK_FLAG(path->flags, BGP_PATH_MULTIPATH)) + return path; + + path = path->next; + } + + return NULL; } /* @@ -386,7 +296,8 @@ struct bgp_path_info *bgp_path_info_mpath_first(struct bgp_path_info *path) uint32_t bgp_path_info_mpath_count(struct bgp_path_info *path) { if (!path->mpath) - return 0; + return 1; + return path->mpath->mp_count; } @@ -515,58 +426,51 @@ static 
void bgp_path_info_mpath_attr_set(struct bgp_path_info *path, /* * bgp_path_info_mpath_update * - * Compare and sync up the multipath list with the mp_list generated by - * bgp_best_selection + * Compare and sync up the multipath flags with what was choosen + * in best selection */ void bgp_path_info_mpath_update(struct bgp *bgp, struct bgp_dest *dest, - struct bgp_path_info *new_best, - struct bgp_path_info *old_best, - struct list *mp_list, - struct bgp_maxpaths_cfg *mpath_cfg) + struct bgp_path_info *new_best, struct bgp_path_info *old_best, + uint32_t num_candidates, struct bgp_maxpaths_cfg *mpath_cfg) { uint16_t maxpaths, mpath_count, old_mpath_count; uint64_t bwval; uint64_t cum_bw, old_cum_bw; - struct listnode *mp_node, *mp_next_node; - struct bgp_path_info *cur_mpath, *new_mpath, *next_mpath, *prev_mpath; - int mpath_changed, debug; + struct bgp_path_info *cur_iterator = NULL; + bool mpath_changed, debug; bool all_paths_lb; char path_buf[PATH_ADDPATH_STR_BUFFER]; + bool old_mpath, new_mpath; - mpath_changed = 0; + mpath_changed = false; maxpaths = multipath_num; mpath_count = 0; - cur_mpath = NULL; old_mpath_count = 0; old_cum_bw = cum_bw = 0; - prev_mpath = new_best; - mp_node = listhead(mp_list); debug = bgp_debug_bestpath(dest); - if (new_best) { - mpath_count++; - if (new_best != old_best) - bgp_path_info_mpath_dequeue(new_best); - maxpaths = (new_best->peer->sort == BGP_PEER_IBGP) - ? mpath_cfg->maxpaths_ibgp - : mpath_cfg->maxpaths_ebgp; - } - if (old_best) { - cur_mpath = bgp_path_info_mpath_first(old_best); old_mpath_count = bgp_path_info_mpath_count(old_best); + if (old_mpath_count == 1) + SET_FLAG(old_best->flags, BGP_PATH_MULTIPATH); old_cum_bw = bgp_path_info_mpath_cumbw(old_best); bgp_path_info_mpath_count_set(old_best, 0); bgp_path_info_mpath_lb_update(old_best, false, false, 0); - bgp_path_info_mpath_dequeue(old_best); + bgp_path_info_mpath_free(&old_best->mpath); + old_best->mpath = NULL; + } + + if (new_best) { + maxpaths = (new_best->peer->sort == BGP_PEER_IBGP) ? mpath_cfg->maxpaths_ibgp + : mpath_cfg->maxpaths_ebgp; + cur_iterator = new_best; } if (debug) - zlog_debug("%pBD(%s): starting mpath update, newbest %s num candidates %d old-mpath-count %d old-cum-bw %" PRIu64, - dest, bgp->name_pretty, - new_best ? new_best->peer->host : "NONE", - mp_list ? listcount(mp_list) : 0, old_mpath_count, - old_cum_bw); + zlog_debug("%pBD(%s): starting mpath update, newbest %s num candidates %d old-mpath-count %d old-cum-bw %" PRIu64 + " maxpaths set %u", + dest, bgp->name_pretty, new_best ? new_best->peer->host : "NONE", + num_candidates, old_mpath_count, old_cum_bw, maxpaths); /* * We perform an ordered walk through both lists in parallel. @@ -580,210 +484,100 @@ void bgp_path_info_mpath_update(struct bgp *bgp, struct bgp_dest *dest, * to skip over it */ all_paths_lb = true; /* We'll reset if any path doesn't have LB. 
*/ - while (mp_node || cur_mpath) { - struct bgp_path_info *tmp_info; + while (cur_iterator) { + old_mpath = CHECK_FLAG(cur_iterator->flags, BGP_PATH_MULTIPATH); + new_mpath = CHECK_FLAG(cur_iterator->flags, BGP_PATH_MULTIPATH_NEW); + + UNSET_FLAG(cur_iterator->flags, BGP_PATH_MULTIPATH_NEW); /* - * We can bail out of this loop if all existing paths on the - * multipath list have been visited (for cleanup purposes) and - * the maxpath requirement is fulfulled + * If the current mpath count is equal to the number of + * maxpaths that can be used then we can bail, after + * we clean up the flags associated with the rest of the + * bestpaths */ - if (!cur_mpath && (mpath_count >= maxpaths)) + if (mpath_count >= maxpaths) { + while (cur_iterator) { + UNSET_FLAG(cur_iterator->flags, BGP_PATH_MULTIPATH); + UNSET_FLAG(cur_iterator->flags, BGP_PATH_MULTIPATH_NEW); + + cur_iterator = cur_iterator->next; + } break; - mp_next_node = mp_node ? listnextnode(mp_node) : NULL; - next_mpath = - cur_mpath ? bgp_path_info_mpath_next(cur_mpath) : NULL; - tmp_info = mp_node ? listgetdata(mp_node) : NULL; + if (debug) + zlog_debug("%pBD(%s): Mpath count %u is equal to maximum paths allowed, finished comparision for MPATHS", + dest, bgp->name_pretty, mpath_count); + } if (debug) - zlog_debug("%pBD(%s): comparing candidate %s with existing mpath %s", - dest, bgp->name_pretty, - tmp_info ? tmp_info->peer->host : "NONE", - cur_mpath ? cur_mpath->peer->host : "NONE"); - + zlog_debug("%pBD(%s): Candidate %s old_mpath: %u new_mpath: %u, Nexthop %pI4 current mpath count: %u", + dest, bgp->name_pretty, cur_iterator->peer->host, old_mpath, + new_mpath, &cur_iterator->attr->nexthop, mpath_count); /* - * If equal, the path was a multipath and is still a multipath. - * Insert onto new multipath list if maxpaths allows. 
+ * There is nothing to do if the cur_iterator is neither a old path + * or a new path */ - if (mp_node && (listgetdata(mp_node) == cur_mpath)) { - list_delete_node(mp_list, mp_node); - bgp_path_info_mpath_dequeue(cur_mpath); - if ((mpath_count < maxpaths) && prev_mpath) { - mpath_count++; - if (bgp_path_info_nexthop_cmp(prev_mpath, - cur_mpath)) { - if (ecommunity_linkbw_present( - bgp_attr_get_ecommunity( - cur_mpath->attr), - &bwval) || - ecommunity_linkbw_present( - bgp_attr_get_ipv6_ecommunity( - cur_mpath->attr), - &bwval)) - cum_bw += bwval; - else - all_paths_lb = false; - if (debug) { - bgp_path_info_path_with_addpath_rx_str( - cur_mpath, path_buf, - sizeof(path_buf)); - zlog_debug("%pBD: %s is still multipath, cur count %d", - dest, path_buf, - mpath_count); - } - } else { - if (debug) { - bgp_path_info_path_with_addpath_rx_str( - cur_mpath, path_buf, - sizeof(path_buf)); - zlog_debug("%pBD: nexthop equal, however add mpath %s nexthop %pI4, cur count %d", - dest, path_buf, - &cur_mpath->attr->nexthop, - mpath_count); - } - } - bgp_path_info_mpath_enqueue(prev_mpath, - cur_mpath); - prev_mpath = cur_mpath; - } else { - mpath_changed = 1; - if (debug) { - bgp_path_info_path_with_addpath_rx_str( - cur_mpath, path_buf, - sizeof(path_buf)); - zlog_debug("%pBD: remove mpath %s nexthop %pI4, cur count %d", - dest, path_buf, - &cur_mpath->attr->nexthop, - mpath_count); - } - } - mp_node = mp_next_node; - cur_mpath = next_mpath; + if (!old_mpath && !new_mpath) { + UNSET_FLAG(cur_iterator->flags, BGP_PATH_MULTIPATH); + cur_iterator = cur_iterator->next; continue; } - if (cur_mpath - && (!mp_node - || (bgp_path_info_mpath_cmp(cur_mpath, - listgetdata(mp_node)) - < 0))) { - /* - * If here, we have an old multipath and either the - * mp_list - * is finished or the next mp_node points to a later - * multipath, so we need to purge this path from the - * multipath list - */ - bgp_path_info_mpath_dequeue(cur_mpath); - mpath_changed = 1; + if (new_mpath) { + mpath_count++; + + if (cur_iterator != new_best) + SET_FLAG(cur_iterator->flags, BGP_PATH_MULTIPATH); + + if (!old_mpath) + mpath_changed = true; + + if (ecommunity_linkbw_present(bgp_attr_get_ecommunity(cur_iterator->attr), + &bwval) || + ecommunity_linkbw_present(bgp_attr_get_ipv6_ecommunity( + cur_iterator->attr), + &bwval)) + cum_bw += bwval; + else + all_paths_lb = false; + if (debug) { - bgp_path_info_path_with_addpath_rx_str( - cur_mpath, path_buf, sizeof(path_buf)); - zlog_debug("%pBD: remove mpath %s nexthop %pI4, cur count %d", - dest, path_buf, - &cur_mpath->attr->nexthop, - mpath_count); + bgp_path_info_path_with_addpath_rx_str(cur_iterator, path_buf, + sizeof(path_buf)); + zlog_debug("%pBD: add mpath %s nexthop %pI4, cur count %d cum_bw: %" PRIu64 + " all_paths_lb: %u", + dest, path_buf, &cur_iterator->attr->nexthop, + mpath_count, cum_bw, all_paths_lb); } - cur_mpath = next_mpath; } else { /* - * If here, we have a path on the mp_list that was not - * previously - * a multipath (due to non-equivalance or maxpaths - * exceeded), - * or the matching multipath is sorted later in the - * multipath - * list. 
Before we enqueue the path on the new multipath - * list, - * make sure its not on the old_best multipath list or - * referenced - * via next_mpath: - * - If next_mpath points to this new path, update - * next_mpath to - * point to the multipath after this one - * - Dequeue the path from the multipath list just to - * make sure + * We know that old_mpath is true and new_mpath is false in this path */ - new_mpath = listgetdata(mp_node); - list_delete_node(mp_list, mp_node); - assert(new_mpath); - assert(prev_mpath); - if ((mpath_count < maxpaths) && (new_mpath != new_best)) { - /* keep duplicate nexthop */ - bgp_path_info_mpath_dequeue(new_mpath); - - bgp_path_info_mpath_enqueue(prev_mpath, - new_mpath); - mpath_changed = 1; - mpath_count++; - if (bgp_path_info_nexthop_cmp(prev_mpath, - new_mpath)) { - if (ecommunity_linkbw_present( - bgp_attr_get_ecommunity( - new_mpath->attr), - &bwval) || - ecommunity_linkbw_present( - bgp_attr_get_ipv6_ecommunity( - new_mpath->attr), - &bwval)) - cum_bw += bwval; - else - all_paths_lb = false; - if (debug) { - bgp_path_info_path_with_addpath_rx_str( - new_mpath, path_buf, - sizeof(path_buf)); - zlog_debug("%pBD: add mpath %s nexthop %pI4, cur count %d", - dest, path_buf, - &new_mpath->attr - ->nexthop, - mpath_count); - } - } else { - if (debug) { - bgp_path_info_path_with_addpath_rx_str( - new_mpath, path_buf, - sizeof(path_buf)); - zlog_debug("%pBD: nexthop equal, however add mpath %s nexthop %pI4, cur count %d", - dest, path_buf, - &new_mpath->attr - ->nexthop, - mpath_count); - } - } - prev_mpath = new_mpath; - } - mp_node = mp_next_node; + mpath_changed = true; + UNSET_FLAG(cur_iterator->flags, BGP_PATH_MULTIPATH); } + + cur_iterator = cur_iterator->next; } if (new_best) { - bgp_path_info_mpath_count_set(new_best, mpath_count - 1); - if (mpath_count <= 1 || - (!ecommunity_linkbw_present(bgp_attr_get_ecommunity( - new_best->attr), - &bwval) && - !ecommunity_linkbw_present(bgp_attr_get_ipv6_ecommunity( - new_best->attr), - &bwval))) - all_paths_lb = false; - else - cum_bw += bwval; - bgp_path_info_mpath_lb_update(new_best, true, - all_paths_lb, cum_bw); - + if (mpath_count > 1 || new_best->mpath) { + bgp_path_info_mpath_count_set(new_best, mpath_count); + bgp_path_info_mpath_lb_update(new_best, true, all_paths_lb, cum_bw); + } if (debug) zlog_debug("%pBD(%s): New mpath count (incl newbest) %d mpath-change %s all_paths_lb %d cum_bw %" PRIu64, dest, bgp->name_pretty, mpath_count, mpath_changed ? 
"YES" : "NO", all_paths_lb, cum_bw); + if (mpath_count == 1) + UNSET_FLAG(new_best->flags, BGP_PATH_MULTIPATH); if (mpath_changed || (bgp_path_info_mpath_count(new_best) != old_mpath_count)) SET_FLAG(new_best->flags, BGP_PATH_MULTIPATH_CHG); - if ((mpath_count - 1) != old_mpath_count || - old_cum_bw != cum_bw) + if ((mpath_count) != old_mpath_count || old_cum_bw != cum_bw) SET_FLAG(new_best->flags, BGP_PATH_LINK_BW_CHG); } } @@ -796,20 +590,13 @@ void bgp_path_info_mpath_update(struct bgp *bgp, struct bgp_dest *dest, */ void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best) { - struct bgp_path_info *mpinfo, *mpnext; - if (!dmed_best) return; - for (mpinfo = bgp_path_info_mpath_first(dmed_best); mpinfo; - mpinfo = mpnext) { - mpnext = bgp_path_info_mpath_next(mpinfo); - bgp_path_info_mpath_dequeue(mpinfo); - } - bgp_path_info_mpath_count_set(dmed_best, 0); UNSET_FLAG(dmed_best->flags, BGP_PATH_MULTIPATH_CHG); UNSET_FLAG(dmed_best->flags, BGP_PATH_LINK_BW_CHG); + assert(bgp_path_info_mpath_first(dmed_best) == NULL); } @@ -847,7 +634,7 @@ void bgp_path_info_mpath_aggregate_update(struct bgp_path_info *new_best, if (!new_best) return; - if (!bgp_path_info_mpath_count(new_best)) { + if (bgp_path_info_mpath_count(new_best) == 1) { if ((new_attr = bgp_path_info_mpath_attr(new_best))) { bgp_attr_unintern(&new_attr); bgp_path_info_mpath_attr_set(new_best, NULL); diff --git a/bgpd/bgp_mpath.h b/bgpd/bgp_mpath.h index 267d729e06d4..a7107deb0e74 100644 --- a/bgpd/bgp_mpath.h +++ b/bgpd/bgp_mpath.h @@ -2,8 +2,9 @@ /* * BGP Multipath * Copyright (C) 2010 Google Inc. + * 2024 Nvidia Corporation * - * This file is part of Quagga + * This file is part of FRR */ #ifndef _FRR_BGP_MPATH_H @@ -13,12 +14,6 @@ * multipath selections, lazily allocated to save memory */ struct bgp_path_info_mpath { - /* Points to the first multipath (on bestpath) or the next multipath */ - struct bgp_path_info_mpath *mp_next; - - /* Points to the previous multipath or NULL on bestpath */ - struct bgp_path_info_mpath *mp_prev; - /* Points to bgp_path_info associated with this multipath info */ struct bgp_path_info *mp_info; @@ -50,16 +45,11 @@ extern int bgp_maximum_paths_unset(struct bgp *bgp, afi_t afi, safi_t safi, /* Functions used by bgp_best_selection to record current * multipath selections */ -extern int bgp_path_info_nexthop_cmp(struct bgp_path_info *bpi1, - struct bgp_path_info *bpi2); -extern void bgp_mp_list_init(struct list *mp_list); -extern void bgp_mp_list_clear(struct list *mp_list); -extern void bgp_mp_list_add(struct list *mp_list, struct bgp_path_info *mpinfo); +extern int bgp_path_info_nexthop_cmp(struct bgp_path_info *bpi1, struct bgp_path_info *bpi2); extern void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best); extern void bgp_path_info_mpath_update(struct bgp *bgp, struct bgp_dest *dest, struct bgp_path_info *new_best, - struct bgp_path_info *old_best, - struct list *mp_list, + struct bgp_path_info *old_best, uint32_t num_candidates, struct bgp_maxpaths_cfg *mpath_cfg); extern void bgp_path_info_mpath_aggregate_update(struct bgp_path_info *new_best, diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index f28c9adda218..00d128557009 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -2173,8 +2173,7 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, from = pi->peer; filter = &peer->filter[afi][safi]; bgp = SUBGRP_INST(subgrp); - piattr = bgp_path_info_mpath_count(pi) ? bgp_path_info_mpath_attr(pi) - : pi->attr; + piattr = bgp_path_info_mpath_count(pi) > 1 ? 
bgp_path_info_mpath_attr(pi) : pi->attr; if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_MAX_PREFIX_OUT) && peer->pmax_out[afi][safi] != 0 && @@ -2854,13 +2853,12 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest, struct bgp_path_info *pi2; int paths_eq, do_mpath; bool debug, any_comparisons; - struct list mp_list; char pfx_buf[PREFIX2STR_BUFFER] = {}; char path_buf[PATH_ADDPATH_STR_BUFFER]; enum bgp_path_selection_reason reason = bgp_path_selection_none; bool unsorted_items = true; + uint32_t num_candidates = 0; - bgp_mp_list_init(&mp_list); do_mpath = (mpath_cfg->maxpaths_ebgp > 1 || mpath_cfg->maxpaths_ibgp > 1); @@ -3235,7 +3233,8 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest, "%pBD(%s): %s is the bestpath, add to the multipath list", dest, bgp->name_pretty, path_buf); - bgp_mp_list_add(&mp_list, pi); + SET_FLAG(pi->flags, BGP_PATH_MULTIPATH_NEW); + num_candidates++; continue; } @@ -3258,15 +3257,14 @@ void bgp_best_selection(struct bgp *bgp, struct bgp_dest *dest, "%pBD(%s): %s is equivalent to the bestpath, add to the multipath list", dest, bgp->name_pretty, path_buf); - bgp_mp_list_add(&mp_list, pi); + SET_FLAG(pi->flags, BGP_PATH_MULTIPATH_NEW); + num_candidates++; } } } - bgp_path_info_mpath_update(bgp, dest, new_select, old_select, &mp_list, - mpath_cfg); + bgp_path_info_mpath_update(bgp, dest, new_select, old_select, num_candidates, mpath_cfg); bgp_path_info_mpath_aggregate_update(new_select, old_select); - bgp_mp_list_clear(&mp_list); bgp_addpath_update_ids(bgp, dest, afi, safi); @@ -11189,9 +11187,8 @@ void route_vty_out_detail(struct vty *vty, struct bgp *bgp, struct bgp_dest *bn, vty_out(vty, ", otc %u", attr->otc); } - if (CHECK_FLAG(path->flags, BGP_PATH_MULTIPATH) - || (CHECK_FLAG(path->flags, BGP_PATH_SELECTED) - && bgp_path_info_mpath_count(path))) { + if (CHECK_FLAG(path->flags, BGP_PATH_MULTIPATH) || + (CHECK_FLAG(path->flags, BGP_PATH_SELECTED) && bgp_path_info_mpath_count(path) > 1)) { if (json_paths) json_object_boolean_true_add(json_path, "multipath"); else diff --git a/bgpd/bgp_route.h b/bgpd/bgp_route.h index b6df2411812b..d71bfd3ebc62 100644 --- a/bgpd/bgp_route.h +++ b/bgpd/bgp_route.h @@ -313,6 +313,11 @@ struct bgp_path_info { #define BGP_PATH_STALE (1 << 8) #define BGP_PATH_REMOVED (1 << 9) #define BGP_PATH_COUNTED (1 << 10) +/* + * A BGP_PATH_MULTIPATH flag is not set on the best path + * it is set on every other node that is part of ECMP + * for that particular dest + */ #define BGP_PATH_MULTIPATH (1 << 11) #define BGP_PATH_MULTIPATH_CHG (1 << 12) #define BGP_PATH_RIB_ATTR_CHG (1 << 13) @@ -322,6 +327,15 @@ struct bgp_path_info { #define BGP_PATH_MPLSVPN_LABEL_NH (1 << 17) #define BGP_PATH_MPLSVPN_NH_LABEL_BIND (1 << 18) #define BGP_PATH_UNSORTED (1 << 19) +/* + * BGP_PATH_MULTIPATH_NEW is set on those bgp_path_info + * nodes that we have decided should possibly be in the + * ecmp path for a particular dest. This flag is + * removed when the bgp_path_info's are looked at to + * decide on whether or not a bgp_path_info is on + * the actual ecmp path. + */ +#define BGP_PATH_MULTIPATH_NEW (1 << 20) /* BGP route type. This can be static, RIP, OSPF, BGP etc. 
*/ uint8_t type; diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index ec60e5db86a3..583b9e7980f5 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -3220,7 +3220,7 @@ route_set_ecommunity_lb(void *rule, const struct prefix *prefix, void *object) return RMAP_OKAY; bw_bytes = (peer->bgp->lb_ref_bw * 1000 * 1000) / 8; - mpath_count = bgp_path_info_mpath_count(path) + 1; + mpath_count = bgp_path_info_mpath_count(path); bw_bytes *= mpath_count; } diff --git a/tests/bgpd/subdir.am b/tests/bgpd/subdir.am index 5148e7e4408a..97845ec1aa8b 100644 --- a/tests/bgpd/subdir.am +++ b/tests/bgpd/subdir.am @@ -52,17 +52,6 @@ tests_bgpd_test_mp_attr_LDADD = $(BGP_TEST_LDADD) tests_bgpd_test_mp_attr_SOURCES = tests/bgpd/test_mp_attr.c EXTRA_DIST += tests/bgpd/test_mp_attr.py - -if BGPD -check_PROGRAMS += tests/bgpd/test_mpath -endif -tests_bgpd_test_mpath_CFLAGS = $(TESTS_CFLAGS) -tests_bgpd_test_mpath_CPPFLAGS = $(TESTS_CPPFLAGS) -tests_bgpd_test_mpath_LDADD = $(BGP_TEST_LDADD) -tests_bgpd_test_mpath_SOURCES = tests/bgpd/test_mpath.c -EXTRA_DIST += tests/bgpd/test_mpath.py - - if BGPD check_PROGRAMS += tests/bgpd/test_packet endif diff --git a/tests/bgpd/test_mpath.c b/tests/bgpd/test_mpath.c deleted file mode 100644 index ebbe3ac3e28f..000000000000 --- a/tests/bgpd/test_mpath.c +++ /dev/null @@ -1,482 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-or-later -/* - * BGP Multipath Unit Test - * Copyright (C) 2010 Google Inc. - * - * This file is part of Quagga - */ - -#include - -#include "qobj.h" -#include "vty.h" -#include "stream.h" -#include "privs.h" -#include "linklist.h" -#include "memory.h" -#include "zclient.h" -#include "queue.h" -#include "filter.h" - -#include "bgpd/bgpd.h" -#include "bgpd/bgp_table.h" -#include "bgpd/bgp_route.h" -#include "bgpd/bgp_attr.h" -#include "bgpd/bgp_nexthop.h" -#include "bgpd/bgp_mpath.h" -#include "bgpd/bgp_evpn.h" -#include "bgpd/bgp_network.h" - -#define VT100_RESET "\x1b[0m" -#define VT100_RED "\x1b[31m" -#define VT100_GREEN "\x1b[32m" -#define VT100_YELLOW "\x1b[33m" -#define OK VT100_GREEN "OK" VT100_RESET -#define FAILED VT100_RED "failed" VT100_RESET - -#define TEST_PASSED 0 -#define TEST_FAILED -1 - -#define EXPECT_TRUE(expr, res) \ - if (!(expr)) { \ - printf("Test failure in %s line %u: %s\n", __func__, __LINE__, \ - #expr); \ - (res) = TEST_FAILED; \ - } - -typedef struct testcase_t__ testcase_t; - -typedef int (*test_setup_func)(testcase_t *); -typedef int (*test_run_func)(testcase_t *); -typedef int (*test_cleanup_func)(testcase_t *); - -struct testcase_t__ { - const char *desc; - void *test_data; - void *verify_data; - void *tmp_data; - test_setup_func setup; - test_run_func run; - test_cleanup_func cleanup; -}; - -/* need these to link in libbgp */ -struct event_loop *master = NULL; -extern struct zclient *zclient; -struct zebra_privs_t bgpd_privs = { - .user = NULL, - .group = NULL, - .vty_group = NULL, -}; - -static int tty = 0; - -/* Create fake bgp instance */ -static struct bgp *bgp_create_fake(as_t *as, const char *name) -{ - struct bgp *bgp; - afi_t afi; - safi_t safi; - - if ((bgp = XCALLOC(MTYPE_BGP, sizeof(struct bgp))) == NULL) - return NULL; - - bgp_lock(bgp); - // bgp->peer_self = peer_new (bgp); - // bgp->peer_self->host = XSTRDUP (MTYPE_BGP_PEER_HOST, "Static - // announcement"); - - bgp->peer = list_new(); - // bgp->peer->cmp = (int (*)(void *, void *)) peer_cmp; - - bgp->group = list_new(); - // bgp->group->cmp = (int (*)(void *, void *)) peer_group_cmp; - - bgp_evpn_init(bgp); - FOREACH_AFI_SAFI (afi, safi) { - 
bgp->route[afi][safi] = bgp_table_init(bgp, afi, safi); - bgp->aggregate[afi][safi] = bgp_table_init(bgp, afi, safi); - bgp->rib[afi][safi] = bgp_table_init(bgp, afi, safi); - bgp->maxpaths[afi][safi].maxpaths_ebgp = MULTIPATH_NUM; - bgp->maxpaths[afi][safi].maxpaths_ibgp = MULTIPATH_NUM; - } - - bgp_scan_init(bgp); - bgp->default_local_pref = BGP_DEFAULT_LOCAL_PREF; - bgp->default_holdtime = BGP_DEFAULT_HOLDTIME; - bgp->default_keepalive = BGP_DEFAULT_KEEPALIVE; - bgp->restart_time = BGP_DEFAULT_RESTART_TIME; - bgp->stalepath_time = BGP_DEFAULT_STALEPATH_TIME; - - bgp->as = *as; - - if (name) - bgp->name = strdup(name); - - return bgp; -} - -/*========================================================= - * Testcase for maximum-paths configuration - */ -static int setup_bgp_cfg_maximum_paths(testcase_t *t) -{ - as_t asn = 1; - t->tmp_data = bgp_create_fake(&asn, NULL); - if (!t->tmp_data) - return -1; - return 0; -} - -static int run_bgp_cfg_maximum_paths(testcase_t *t) -{ - afi_t afi; - safi_t safi; - struct bgp *bgp; - int api_result; - int test_result = TEST_PASSED; - - bgp = t->tmp_data; - FOREACH_AFI_SAFI (afi, safi) { - /* test bgp_maximum_paths_set */ - api_result = bgp_maximum_paths_set(bgp, afi, safi, - BGP_PEER_EBGP, 10, 0); - EXPECT_TRUE(api_result == 0, test_result); - api_result = bgp_maximum_paths_set(bgp, afi, safi, - BGP_PEER_IBGP, 10, 0); - EXPECT_TRUE(api_result == 0, test_result); - EXPECT_TRUE(bgp->maxpaths[afi][safi].maxpaths_ebgp == 10, - test_result); - EXPECT_TRUE(bgp->maxpaths[afi][safi].maxpaths_ibgp == 10, - test_result); - - /* test bgp_maximum_paths_unset */ - api_result = - bgp_maximum_paths_unset(bgp, afi, safi, BGP_PEER_EBGP); - EXPECT_TRUE(api_result == 0, test_result); - api_result = - bgp_maximum_paths_unset(bgp, afi, safi, BGP_PEER_IBGP); - EXPECT_TRUE(api_result == 0, test_result); - EXPECT_TRUE((bgp->maxpaths[afi][safi].maxpaths_ebgp - == MULTIPATH_NUM), - test_result); - EXPECT_TRUE((bgp->maxpaths[afi][safi].maxpaths_ibgp - == MULTIPATH_NUM), - test_result); - } - - return test_result; -} - -static int cleanup_bgp_cfg_maximum_paths(testcase_t *t) -{ - return bgp_delete((struct bgp *)t->tmp_data); -} - -testcase_t test_bgp_cfg_maximum_paths = { - .desc = "Test bgp maximum-paths config", - .setup = setup_bgp_cfg_maximum_paths, - .run = run_bgp_cfg_maximum_paths, - .cleanup = cleanup_bgp_cfg_maximum_paths, -}; - -/*========================================================= - * Testcase for bgp_mp_list - */ -struct peer test_mp_list_peer[] = { - {.local_as = 1, .as = 2}, {.local_as = 1, .as = 2}, - {.local_as = 1, .as = 2}, {.local_as = 1, .as = 2}, - {.local_as = 1, .as = 2}, -}; -int test_mp_list_peer_count = array_size(test_mp_list_peer); -struct attr test_mp_list_attr[4]; -struct bgp_path_info test_mp_list_info[] = { - {.peer = &test_mp_list_peer[0], .attr = &test_mp_list_attr[0]}, - {.peer = &test_mp_list_peer[1], .attr = &test_mp_list_attr[1]}, - {.peer = &test_mp_list_peer[2], .attr = &test_mp_list_attr[1]}, - {.peer = &test_mp_list_peer[3], .attr = &test_mp_list_attr[2]}, - {.peer = &test_mp_list_peer[4], .attr = &test_mp_list_attr[3]}, -}; -int test_mp_list_info_count = array_size(test_mp_list_info); - -static int setup_bgp_mp_list(testcase_t *t) -{ - test_mp_list_attr[0].nexthop.s_addr = 0x01010101; - test_mp_list_attr[1].nexthop.s_addr = 0x02020202; - test_mp_list_attr[2].nexthop.s_addr = 0x03030303; - test_mp_list_attr[3].nexthop.s_addr = 0x04040404; - - if ((test_mp_list_peer[0].su_remote = sockunion_str2su("1.1.1.1")) - == NULL) - return -1; - 
if ((test_mp_list_peer[1].su_remote = sockunion_str2su("2.2.2.2")) - == NULL) - return -1; - if ((test_mp_list_peer[2].su_remote = sockunion_str2su("3.3.3.3")) - == NULL) - return -1; - if ((test_mp_list_peer[3].su_remote = sockunion_str2su("4.4.4.4")) - == NULL) - return -1; - if ((test_mp_list_peer[4].su_remote = sockunion_str2su("5.5.5.5")) - == NULL) - return -1; - - return 0; -} - -static int run_bgp_mp_list(testcase_t *t) -{ - struct list mp_list; - struct listnode *mp_node; - struct bgp_path_info *info; - int i; - int test_result = TEST_PASSED; - bgp_mp_list_init(&mp_list); - EXPECT_TRUE(listcount(&mp_list) == 0, test_result); - - bgp_mp_list_add(&mp_list, &test_mp_list_info[1]); - bgp_mp_list_add(&mp_list, &test_mp_list_info[4]); - bgp_mp_list_add(&mp_list, &test_mp_list_info[2]); - bgp_mp_list_add(&mp_list, &test_mp_list_info[3]); - bgp_mp_list_add(&mp_list, &test_mp_list_info[0]); - - for (i = 0, mp_node = mp_list.head; i < test_mp_list_info_count; - i++, mp_node = listnextnode(mp_node)) { - info = listgetdata(mp_node); - info->lock++; - EXPECT_TRUE(info == &test_mp_list_info[i], test_result); - } - - bgp_mp_list_clear(&mp_list); - EXPECT_TRUE(listcount(&mp_list) == 0, test_result); - - return test_result; -} - -static int cleanup_bgp_mp_list(testcase_t *t) -{ - int i; - - for (i = 0; i < test_mp_list_peer_count; i++) - sockunion_free(test_mp_list_peer[i].su_remote); - - return 0; -} - -testcase_t test_bgp_mp_list = { - .desc = "Test bgp_mp_list", - .setup = setup_bgp_mp_list, - .run = run_bgp_mp_list, - .cleanup = cleanup_bgp_mp_list, -}; - -/*========================================================= - * Testcase for bgp_path_info_mpath_update - */ - -static struct bgp_dest *dest; - -static int setup_bgp_path_info_mpath_update(testcase_t *t) -{ - int i; - struct bgp *bgp; - struct bgp_table *rt; - struct prefix p; - as_t asn = 1; - - t->tmp_data = bgp_create_fake(&asn, NULL); - if (!t->tmp_data) - return -1; - - bgp = t->tmp_data; - rt = bgp->rib[AFI_IP][SAFI_UNICAST]; - - if (!rt) - return -1; - - str2prefix("42.1.1.0/24", &p); - dest = bgp_node_get(rt, &p); - - setup_bgp_mp_list(t); - for (i = 0; i < test_mp_list_info_count; i++) - bgp_path_info_add(dest, &test_mp_list_info[i]); - return 0; -} - -static int run_bgp_path_info_mpath_update(testcase_t *t) -{ - struct bgp_path_info *new_best, *old_best, *mpath; - struct list mp_list; - struct bgp_maxpaths_cfg mp_cfg = {3, 3}; - - int test_result = TEST_PASSED; - bgp_mp_list_init(&mp_list); - bgp_mp_list_add(&mp_list, &test_mp_list_info[4]); - bgp_mp_list_add(&mp_list, &test_mp_list_info[3]); - bgp_mp_list_add(&mp_list, &test_mp_list_info[0]); - bgp_mp_list_add(&mp_list, &test_mp_list_info[1]); - new_best = &test_mp_list_info[3]; - old_best = NULL; - bgp_path_info_mpath_update(NULL, dest, new_best, old_best, &mp_list, - &mp_cfg); - bgp_mp_list_clear(&mp_list); - EXPECT_TRUE(bgp_path_info_mpath_count(new_best) == 2, test_result); - mpath = bgp_path_info_mpath_first(new_best); - EXPECT_TRUE(mpath == &test_mp_list_info[0], test_result); - EXPECT_TRUE(CHECK_FLAG(mpath->flags, BGP_PATH_MULTIPATH), test_result); - mpath = bgp_path_info_mpath_next(mpath); - EXPECT_TRUE(mpath == &test_mp_list_info[1], test_result); - EXPECT_TRUE(CHECK_FLAG(mpath->flags, BGP_PATH_MULTIPATH), test_result); - - bgp_mp_list_add(&mp_list, &test_mp_list_info[0]); - bgp_mp_list_add(&mp_list, &test_mp_list_info[1]); - new_best = &test_mp_list_info[0]; - old_best = &test_mp_list_info[3]; - bgp_path_info_mpath_update(NULL, dest, new_best, old_best, &mp_list, - 
&mp_cfg); - bgp_mp_list_clear(&mp_list); - EXPECT_TRUE(bgp_path_info_mpath_count(new_best) == 1, test_result); - mpath = bgp_path_info_mpath_first(new_best); - EXPECT_TRUE(mpath == &test_mp_list_info[1], test_result); - EXPECT_TRUE(CHECK_FLAG(mpath->flags, BGP_PATH_MULTIPATH), test_result); - EXPECT_TRUE(!CHECK_FLAG(test_mp_list_info[0].flags, BGP_PATH_MULTIPATH), - test_result); - - return test_result; -} - -static int cleanup_bgp_path_info_mpath_update(testcase_t *t) -{ - int i; - - for (i = 0; i < test_mp_list_peer_count; i++) - sockunion_free(test_mp_list_peer[i].su_remote); - - return bgp_delete((struct bgp *)t->tmp_data); -} - -testcase_t test_bgp_path_info_mpath_update = { - .desc = "Test bgp_path_info_mpath_update", - .setup = setup_bgp_path_info_mpath_update, - .run = run_bgp_path_info_mpath_update, - .cleanup = cleanup_bgp_path_info_mpath_update, -}; - -/*========================================================= - * Set up testcase vector - */ -testcase_t *all_tests[] = { - &test_bgp_cfg_maximum_paths, &test_bgp_mp_list, - &test_bgp_path_info_mpath_update, -}; - -int all_tests_count = array_size(all_tests); - -/*========================================================= - * Test Driver Functions - */ -static int global_test_init(void) -{ - qobj_init(); - master = event_master_create(NULL); - zclient = zclient_new(master, &zclient_options_default, NULL, 0); - bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new()); - vrf_init(NULL, NULL, NULL, NULL); - bgp_option_set(BGP_OPT_NO_LISTEN); - - if (fileno(stdout) >= 0) - tty = isatty(fileno(stdout)); - return 0; -} - -static int global_test_cleanup(void) -{ - if (zclient != NULL) - zclient_free(zclient); - event_master_free(master); - return 0; -} - -static void display_result(testcase_t *test, int result) -{ - if (tty) - printf("%s: %s\n", test->desc, - result == TEST_PASSED ? OK : FAILED); - else - printf("%s: %s\n", test->desc, - result == TEST_PASSED ? "OK" : "FAILED"); -} - -static int setup_test(testcase_t *t) -{ - int res = 0; - if (t->setup) - res = t->setup(t); - return res; -} - -static int cleanup_test(testcase_t *t) -{ - int res = 0; - if (t->cleanup) - res = t->cleanup(t); - return res; -} - -static void run_tests(testcase_t *tests[], int num_tests, int *pass_count, - int *fail_count) -{ - int test_index, result; - testcase_t *cur_test; - - *pass_count = *fail_count = 0; - - for (test_index = 0; test_index < num_tests; test_index++) { - cur_test = tests[test_index]; - if (!cur_test->desc) { - printf("error: test %d has no description!\n", - test_index); - continue; - } - if (!cur_test->run) { - printf("error: test %s has no run function!\n", - cur_test->desc); - continue; - } - if (setup_test(cur_test) != 0) { - printf("error: setup failed for test %s\n", - cur_test->desc); - continue; - } - result = cur_test->run(cur_test); - if (result == TEST_PASSED) - *pass_count += 1; - else - *fail_count += 1; - display_result(cur_test, result); - if (cleanup_test(cur_test) != 0) { - printf("error: cleanup failed for test %s\n", - cur_test->desc); - continue; - } - } -} - -int main(void) -{ - int pass_count, fail_count; - time_t cur_time; - char buf[32]; - - time(&cur_time); - printf("BGP Multipath Tests Run at %s", ctime_r(&cur_time, buf)); - if (global_test_init() != 0) { - printf("Global init failed. 
Terminating.\n"); - exit(1); - } - run_tests(all_tests, all_tests_count, &pass_count, &fail_count); - global_test_cleanup(); - printf("Total pass/fail: %d/%d\n", pass_count, fail_count); - return fail_count; -} diff --git a/tests/bgpd/test_mpath.py b/tests/bgpd/test_mpath.py deleted file mode 100644 index 582fd25c2024..000000000000 --- a/tests/bgpd/test_mpath.py +++ /dev/null @@ -1,10 +0,0 @@ -import frrtest - - -class TestMpath(frrtest.TestMultiOut): - program = "./test_mpath" - - -TestMpath.okfail("bgp maximum-paths config") -TestMpath.okfail("bgp_mp_list") -TestMpath.okfail("bgp_path_info_mpath_update") From 5b8bca89b8101ba943b9a3b1c0d602690b78499f Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Tue, 1 Oct 2024 09:18:44 -0400 Subject: [PATCH 21/73] bgpd: Remove bgp_path_info_mpath_dequeue This function is no doing any work. Let's remove. Signed-off-by: Donald Sharp --- bgpd/bgp_mpath.c | 13 ------------- bgpd/bgp_mpath.h | 1 - bgpd/bgp_route.c | 4 ---- 3 files changed, 18 deletions(-) diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index e27b78977708..e0cc5f189a49 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -246,19 +246,6 @@ bgp_path_info_mpath_get(struct bgp_path_info *path) return path->mpath; } -/* - * bgp_path_info_mpath_dequeue - * - * Remove a path from the multipath list - */ -void bgp_path_info_mpath_dequeue(struct bgp_path_info *path) -{ - struct bgp_path_info_mpath *mpath = path->mpath; - - if (!mpath) - return; -} - /* * bgp_path_info_mpath_next * diff --git a/bgpd/bgp_mpath.h b/bgpd/bgp_mpath.h index a7107deb0e74..8201896593b0 100644 --- a/bgpd/bgp_mpath.h +++ b/bgpd/bgp_mpath.h @@ -56,7 +56,6 @@ bgp_path_info_mpath_aggregate_update(struct bgp_path_info *new_best, struct bgp_path_info *old_best); /* Unlink and free multipath information associated with a bgp_path_info */ -extern void bgp_path_info_mpath_dequeue(struct bgp_path_info *path); extern void bgp_path_info_mpath_free(struct bgp_path_info_mpath **mpath); /* Walk list of multipaths associated with a best path */ diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 00d128557009..8dbb4e3b0486 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -525,8 +525,6 @@ struct bgp_dest *bgp_path_info_reap(struct bgp_dest *dest, else bgp_dest_set_bgp_path_info(dest, pi->next); - bgp_path_info_mpath_dequeue(pi); - pi->next = NULL; pi->prev = NULL; @@ -541,8 +539,6 @@ struct bgp_dest *bgp_path_info_reap(struct bgp_dest *dest, static struct bgp_dest *bgp_path_info_reap_unsorted(struct bgp_dest *dest, struct bgp_path_info *pi) { - bgp_path_info_mpath_dequeue(pi); - pi->next = NULL; pi->prev = NULL; From 2c6eb34af88bb6fa94c13413a57c85d343782a5d Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Fri, 27 Sep 2024 12:55:39 +0300 Subject: [PATCH 22/73] tests: Drop test_bgp_with_loopback_with_same_subnet_p1 It's replaced and simplified by c3fd1e9520c619babb3004cea6df622ca67b0dfa. JSON topo is just horrible to debug. 
Signed-off-by: Donatas Abraitis --- .../test_bgp_basic_functionality.py | 289 ------------------ 1 file changed, 289 deletions(-) diff --git a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py index c97fc5f0eb48..5662e5935beb 100644 --- a/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py +++ b/tests/topotests/bgp_basic_functionality_topo1/test_bgp_basic_functionality.py @@ -831,7 +831,6 @@ def test_bgp_with_loopback_interface(request): for bgp_neighbor in topo["routers"][routerN]["bgp"]["address_family"]["ipv4"][ "unicast" ]["neighbor"].keys(): - # Adding ['source_link'] = 'lo' key:value pair topo["routers"][routerN]["bgp"]["address_family"]["ipv4"]["unicast"][ "neighbor" @@ -876,294 +875,6 @@ def test_bgp_with_loopback_interface(request): write_test_footer(tc_name) -def test_bgp_with_loopback_with_same_subnet_p1(request): - """ - Verify routes not installed in zebra when /32 routes received - with loopback BGP session subnet - """ - - tgen = get_topogen() - if BGP_CONVERGENCE is not True: - pytest.skip("skipped because of BGP Convergence failure") - - # test case name - tc_name = request.node.name - write_test_header(tc_name) - - # Creating configuration from JSON - reset_config_on_routers(tgen) - step("Delete BGP seesion created initially") - input_dict_r1 = { - "r1": {"bgp": {"delete": True}}, - "r2": {"bgp": {"delete": True}}, - "r3": {"bgp": {"delete": True}}, - "r4": {"bgp": {"delete": True}}, - } - result = create_router_bgp(tgen, topo, input_dict_r1) - assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - - step("Create BGP session over loop address") - topo_modify = deepcopy(topo) - - for routerN in sorted(topo["routers"].keys()): - for addr_type in ADDR_TYPES: - for bgp_neighbor in topo_modify["routers"][routerN]["bgp"][ - "address_family" - ][addr_type]["unicast"]["neighbor"].keys(): - - # Adding ['source_link'] = 'lo' key:value pair - topo_modify["routers"][routerN]["bgp"]["address_family"][addr_type][ - "unicast" - ]["neighbor"][bgp_neighbor]["dest_link"] = { - "lo": {"source_link": "lo", "ebgp_multihop": 2} - } - - result = create_router_bgp(tgen, topo_modify["routers"]) - assert result is True, "Testcase {} : Failed \n Error: {}".format(tc_name, result) - - step("Disable IPv6 BGP nbr from ipv4 address family") - raw_config = { - "r1": { - "raw_config": [ - "router bgp {}".format(topo["routers"]["r1"]["bgp"]["local_as"]), - "address-family ipv4 unicast", - "no neighbor {} activate".format( - topo["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0] - ), - "no neighbor {} activate".format( - topo["routers"]["r3"]["links"]["lo"]["ipv6"].split("/")[0] - ), - ] - }, - "r2": { - "raw_config": [ - "router bgp {}".format(topo["routers"]["r2"]["bgp"]["local_as"]), - "address-family ipv4 unicast", - "no neighbor {} activate".format( - topo["routers"]["r1"]["links"]["lo"]["ipv6"].split("/")[0] - ), - "no neighbor {} activate".format( - topo["routers"]["r3"]["links"]["lo"]["ipv6"].split("/")[0] - ), - ] - }, - "r3": { - "raw_config": [ - "router bgp {}".format(topo["routers"]["r3"]["bgp"]["local_as"]), - "address-family ipv4 unicast", - "no neighbor {} activate".format( - topo["routers"]["r1"]["links"]["lo"]["ipv6"].split("/")[0] - ), - "no neighbor {} activate".format( - topo["routers"]["r2"]["links"]["lo"]["ipv6"].split("/")[0] - ), - "no neighbor {} activate".format( - 
topo["routers"]["r4"]["links"]["lo"]["ipv6"].split("/")[0] - ), - ] - }, - "r4": { - "raw_config": [ - "router bgp {}".format(topo["routers"]["r4"]["bgp"]["local_as"]), - "address-family ipv4 unicast", - "no neighbor {} activate".format( - topo["routers"]["r3"]["links"]["lo"]["ipv6"].split("/")[0] - ), - ] - }, - } - - step("Configure kernel routes") - result = apply_raw_config(tgen, raw_config) - assert result is True, "Testcase {} : Failed Error: {}".format(tc_name, result) - - r1_ipv4_lo = topo["routers"]["r1"]["links"]["lo"]["ipv4"] - r1_ipv6_lo = topo["routers"]["r1"]["links"]["lo"]["ipv6"] - r2_ipv4_lo = topo["routers"]["r2"]["links"]["lo"]["ipv4"] - r2_ipv6_lo = topo["routers"]["r2"]["links"]["lo"]["ipv6"] - r3_ipv4_lo = topo["routers"]["r3"]["links"]["lo"]["ipv4"] - r3_ipv6_lo = topo["routers"]["r3"]["links"]["lo"]["ipv6"] - r4_ipv4_lo = topo["routers"]["r4"]["links"]["lo"]["ipv4"] - r4_ipv6_lo = topo["routers"]["r4"]["links"]["lo"]["ipv6"] - - r1_r2 = topo["routers"]["r1"]["links"]["r2"]["ipv6"].split("/")[0] - r2_r1 = topo["routers"]["r2"]["links"]["r1"]["ipv6"].split("/")[0] - r1_r3 = topo["routers"]["r1"]["links"]["r3"]["ipv6"].split("/")[0] - r3_r1 = topo["routers"]["r3"]["links"]["r1"]["ipv6"].split("/")[0] - r2_r3 = topo["routers"]["r2"]["links"]["r3"]["ipv6"].split("/")[0] - r3_r2 = topo["routers"]["r3"]["links"]["r2"]["ipv6"].split("/")[0] - r3_r4 = topo["routers"]["r3"]["links"]["r4"]["ipv6"].split("/")[0] - r4_r3 = topo["routers"]["r4"]["links"]["r3"]["ipv6"].split("/")[0] - - r1_r2_ipv4 = topo["routers"]["r1"]["links"]["r2"]["ipv4"].split("/")[0] - r2_r1_ipv4 = topo["routers"]["r2"]["links"]["r1"]["ipv4"].split("/")[0] - r1_r3_ipv4 = topo["routers"]["r1"]["links"]["r3"]["ipv4"].split("/")[0] - r3_r1_ipv4 = topo["routers"]["r3"]["links"]["r1"]["ipv4"].split("/")[0] - r2_r3_ipv4 = topo["routers"]["r2"]["links"]["r3"]["ipv4"].split("/")[0] - r3_r2_ipv4 = topo["routers"]["r3"]["links"]["r2"]["ipv4"].split("/")[0] - r3_r4_ipv4 = topo["routers"]["r3"]["links"]["r4"]["ipv4"].split("/")[0] - r4_r3_ipv4 = topo["routers"]["r4"]["links"]["r3"]["ipv4"].split("/")[0] - - r1_r2_intf = topo["routers"]["r1"]["links"]["r2"]["interface"] - r2_r1_intf = topo["routers"]["r2"]["links"]["r1"]["interface"] - r1_r3_intf = topo["routers"]["r1"]["links"]["r3"]["interface"] - r3_r1_intf = topo["routers"]["r3"]["links"]["r1"]["interface"] - r2_r3_intf = topo["routers"]["r2"]["links"]["r3"]["interface"] - r3_r2_intf = topo["routers"]["r3"]["links"]["r2"]["interface"] - r3_r4_intf = topo["routers"]["r3"]["links"]["r4"]["interface"] - r4_r3_intf = topo["routers"]["r4"]["links"]["r3"]["interface"] - - ipv4_list = [ - ("r1", r1_r2_intf, r2_ipv4_loopback), - ("r1", r1_r3_intf, r3_ipv4_loopback), - ("r2", r2_r1_intf, r1_ipv4_loopback), - ("r2", r2_r3_intf, r3_ipv4_loopback), - ("r3", r3_r1_intf, r1_ipv4_loopback), - ("r3", r3_r2_intf, r2_ipv4_loopback), - ("r3", r3_r4_intf, r4_ipv4_loopback), - ("r4", r4_r3_intf, r3_ipv4_loopback), - ] - - ipv6_list = [ - ("r1", r1_r2_intf, r2_ipv6_loopback, r2_r1), - ("r1", r1_r3_intf, r3_ipv6_loopback, r3_r1), - ("r2", r2_r1_intf, r1_ipv6_loopback, r1_r2), - ("r2", r2_r3_intf, r3_ipv6_loopback, r3_r2), - ("r3", r3_r1_intf, r1_ipv6_loopback, r1_r3), - ("r3", r3_r2_intf, r2_ipv6_loopback, r2_r3), - ("r3", r3_r4_intf, r4_ipv6_loopback, r4_r3), - ("r4", r4_r3_intf, r3_ipv6_loopback, r3_r4), - ] - - for dut, intf, loop_addr in ipv4_list: - result = addKernelRoute(tgen, dut, intf, loop_addr) - assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) - 
- for dut, intf, loop_addr, next_hop in ipv6_list: - result = addKernelRoute(tgen, dut, intf, loop_addr, next_hop) - assert result is True, "Testcase {}:Failed \n Error: {}".format(tc_name, result) - - step("Configure static routes") - - input_dict = { - "r1": { - "static_routes": [ - {"network": r2_ipv4_loopback, "next_hop": r2_r1_ipv4}, - {"network": r3_ipv4_loopback, "next_hop": r3_r1_ipv4}, - {"network": r2_ipv6_loopback, "next_hop": r2_r1}, - {"network": r3_ipv6_loopback, "next_hop": r3_r1}, - ] - }, - "r2": { - "static_routes": [ - {"network": r1_ipv4_loopback, "next_hop": r1_r2_ipv4}, - {"network": r3_ipv4_loopback, "next_hop": r3_r2_ipv4}, - {"network": r1_ipv6_loopback, "next_hop": r1_r2}, - {"network": r3_ipv6_loopback, "next_hop": r3_r2}, - ] - }, - "r3": { - "static_routes": [ - {"network": r1_ipv4_loopback, "next_hop": r1_r3_ipv4}, - {"network": r2_ipv4_loopback, "next_hop": r2_r3_ipv4}, - {"network": r4_ipv4_loopback, "next_hop": r4_r3_ipv4}, - {"network": r1_ipv6_loopback, "next_hop": r1_r3}, - {"network": r2_ipv6_loopback, "next_hop": r2_r3}, - {"network": r4_ipv6_loopback, "next_hop": r4_r3}, - ] - }, - "r4": { - "static_routes": [ - {"network": r3_ipv4_loopback, "next_hop": r3_r4_ipv4}, - {"network": r3_ipv6_loopback, "next_hop": r3_r4}, - ] - }, - } - result = create_static_routes(tgen, input_dict) - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - - step("Verify BGP session convergence") - - result = verify_bgp_convergence(tgen, topo_modify) - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - - step("Configure redistribute connected on R2 and R4") - input_dict_1 = { - "r2": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": {"redistribute": [{"redist_type": "connected"}]} - }, - "ipv6": { - "unicast": {"redistribute": [{"redist_type": "connected"}]} - }, - } - } - }, - "r4": { - "bgp": { - "address_family": { - "ipv4": { - "unicast": {"redistribute": [{"redist_type": "connected"}]} - }, - "ipv6": { - "unicast": {"redistribute": [{"redist_type": "connected"}]} - }, - } - } - }, - } - - result = create_router_bgp(tgen, topo, input_dict_1) - assert result is True, "Testcase {} :Failed \n Error: {}".format(tc_name, result) - - step("Verify Ipv4 and Ipv6 network installed in R1 RIB but not in FIB") - input_dict_r1 = { - "r1": { - "static_routes": [ - {"network": "1.0.2.17/32"}, - {"network": "2001:db8:f::2:17/128"}, - ] - } - } - - dut = "r1" - protocol = "bgp" - for addr_type in ADDR_TYPES: - result = verify_fib_routes( - tgen, addr_type, dut, input_dict_r1, expected=False - ) # pylint: disable=E1123 - assert result is not True, ( - "Testcase {} : Failed \n " - "Expected: Routes should not be present in {} FIB \n " - "Found: {}".format(tc_name, dut, result) - ) - - step("Verify Ipv4 and Ipv6 network installed in r3 RIB but not in FIB") - input_dict_r3 = { - "r3": { - "static_routes": [ - {"network": "1.0.4.17/32"}, - {"network": "2001:db8:f::4:17/128"}, - ] - } - } - dut = "r3" - protocol = "bgp" - for addr_type in ADDR_TYPES: - result = verify_fib_routes( - tgen, addr_type, dut, input_dict_r1, expected=False - ) # pylint: disable=E1123 - assert result is not True, ( - "Testcase {} : Failed \n " - "Expected: Routes should not be present in {} FIB \n " - "Found: {}".format(tc_name, dut, result) - ) - - write_test_footer(tc_name) - - if __name__ == "__main__": args = ["-s"] + sys.argv[1:] sys.exit(pytest.main(args)) From c66642d7f054a43e9843312d2155ae4e52c05ad8 Mon Sep 17 00:00:00 2001 From: Donatas 
Abraitis Date: Fri, 27 Sep 2024 10:52:55 +0300 Subject: [PATCH 23/73] bgpd: Relax the same prefix and nexthop to be valid Treat as next-hop invalid if import check is enabled. Fixes: 654a5978f695087af062bfc9a382321fa2ccc4ae ("bgpd: prevent routes loop through itself") Fixes: https://github.com/FRRouting/frr/issues/16877 Signed-off-by: Donatas Abraitis --- bgpd/bgp_nht.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index 8719af56b3f5..49042e8c238a 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -347,12 +347,11 @@ int bgp_find_or_add_nexthop(struct bgp *bgp_route, struct bgp *bgp_nexthop, &p.u.prefix6)) ifindex = pi->peer->connection->su.sin6.sin6_scope_id; - if (!is_bgp_static_route && orig_prefix - && prefix_same(&p, orig_prefix)) { + if (!is_bgp_static_route && orig_prefix && prefix_same(&p, orig_prefix) && + CHECK_FLAG(bgp_route->flags, BGP_FLAG_IMPORT_CHECK)) { if (BGP_DEBUG(nht, NHT)) { - zlog_debug( - "%s(%pFX): prefix loops through itself", - __func__, &p); + zlog_debug("%s(%pFX): prefix loops through itself (import-check enabled)", + __func__, &p); } return 0; } From dab1441128fe81457488cf75d8a640a7376aea7c Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Fri, 27 Sep 2024 10:52:22 +0300 Subject: [PATCH 24/73] tests: Check if loopback routes are considered valid for nexthop tracking Signed-off-by: Donatas Abraitis --- tests/topotests/bgp_self_prefix/__init__.py | 0 tests/topotests/bgp_self_prefix/r1/frr.conf | 19 +++ tests/topotests/bgp_self_prefix/r2/frr.conf | 20 ++++ tests/topotests/bgp_self_prefix/r3/frr.conf | 20 ++++ .../bgp_self_prefix/test_bgp_self_prefix.py | 111 ++++++++++++++++++ 5 files changed, 170 insertions(+) create mode 100644 tests/topotests/bgp_self_prefix/__init__.py create mode 100644 tests/topotests/bgp_self_prefix/r1/frr.conf create mode 100644 tests/topotests/bgp_self_prefix/r2/frr.conf create mode 100644 tests/topotests/bgp_self_prefix/r3/frr.conf create mode 100644 tests/topotests/bgp_self_prefix/test_bgp_self_prefix.py diff --git a/tests/topotests/bgp_self_prefix/__init__.py b/tests/topotests/bgp_self_prefix/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/bgp_self_prefix/r1/frr.conf b/tests/topotests/bgp_self_prefix/r1/frr.conf new file mode 100644 index 000000000000..879afb19478f --- /dev/null +++ b/tests/topotests/bgp_self_prefix/r1/frr.conf @@ -0,0 +1,19 @@ +! +int lo + ip address 10.0.0.1/32 +! +int r1-eth0 + ip address 192.168.1.1/24 +! +router bgp 65000 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 10.0.0.2 remote-as internal + neighbor 10.0.0.2 update-source lo + neighbor 10.0.0.2 next-hop-self + neighbor 10.0.0.3 remote-as external + neighbor 10.0.0.3 update-source lo + neighbor 10.0.0.3 next-hop-self +! +ip route 10.0.0.2/32 192.168.1.2 +ip route 10.0.0.3/32 192.168.1.3 diff --git a/tests/topotests/bgp_self_prefix/r2/frr.conf b/tests/topotests/bgp_self_prefix/r2/frr.conf new file mode 100644 index 000000000000..eb0db356ea0a --- /dev/null +++ b/tests/topotests/bgp_self_prefix/r2/frr.conf @@ -0,0 +1,20 @@ +! +int lo + ip address 10.0.0.2/32 +! +int r2-eth0 + ip address 192.168.1.2/24 +! 
+router bgp 65000 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 10.0.0.1 remote-as internal + neighbor 10.0.0.1 timers 1 3 + neighbor 10.0.0.1 timers connect 1 + neighbor 10.0.0.1 update-source lo + neighbor 10.0.0.1 next-hop-self + address-family ipv4 unicast + network 10.0.0.2/32 + exit-address-family +! +ip route 10.0.0.1/32 192.168.1.1 diff --git a/tests/topotests/bgp_self_prefix/r3/frr.conf b/tests/topotests/bgp_self_prefix/r3/frr.conf new file mode 100644 index 000000000000..e2348f4a68ba --- /dev/null +++ b/tests/topotests/bgp_self_prefix/r3/frr.conf @@ -0,0 +1,20 @@ +! +int lo + ip address 10.0.0.3/32 +! +int r3-eth0 + ip address 192.168.1.3/24 +! +router bgp 65003 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 10.0.0.1 remote-as external + neighbor 10.0.0.1 timers 1 3 + neighbor 10.0.0.1 timers connect 1 + neighbor 10.0.0.1 update-source lo + neighbor 10.0.0.1 next-hop-self + address-family ipv4 unicast + network 10.0.0.3/32 + exit-address-family +! +ip route 10.0.0.1/32 192.168.1.1 diff --git a/tests/topotests/bgp_self_prefix/test_bgp_self_prefix.py b/tests/topotests/bgp_self_prefix/test_bgp_self_prefix.py new file mode 100644 index 000000000000..104580036844 --- /dev/null +++ b/tests/topotests/bgp_self_prefix/test_bgp_self_prefix.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# Copyright (c) 2024 by +# Donatas Abraitis +# + +import os +import sys +import json +import pytest +import functools + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +from lib import topotest +from lib.topogen import Topogen, get_topogen + +pytestmark = [pytest.mark.bgpd] + + +def setup_module(mod): + topodef = {"s1": ("r1", "r2", "r3")} + tgen = Topogen(topodef, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for _, (rname, router) in enumerate(router_list.items(), 1): + router.load_frr_config(os.path.join(CWD, "{}/frr.conf".format(rname))) + + tgen.start_router() + + +def teardown_module(mod): + tgen = get_topogen() + tgen.stop_topology() + + +def test_bgp_self_prefix(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r1 = tgen.gears["r1"] + r3 = tgen.gears["r3"] + + def _bgp_converge(): + output = json.loads(r1.vtysh_cmd("show bgp ipv4 unicast json")) + expected = { + "routes": { + "10.0.0.2/32": [ + { + "valid": True, + "path": "", + "nexthops": [ + {"ip": "10.0.0.2", "hostname": "r2", "afi": "ipv4"} + ], + } + ], + "10.0.0.3/32": [ + { + "valid": True, + "path": "65003", + "nexthops": [ + {"ip": "10.0.0.3", "hostname": "r3", "afi": "ipv4"} + ], + } + ], + } + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_converge, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't converge" + + def _bgp_check_received_routes(): + output = json.loads(r3.vtysh_cmd("show bgp ipv4 unicast json")) + expected = { + "routes": { + "10.0.0.2/32": [ + { + "valid": True, + "bestpath": True, + "nexthops": [ + {"ip": "10.0.0.1", "hostname": "r1", "afi": "ipv4"} + ], + } + ], + } + } + + return topotest.json_cmp(output, expected) + + test_func = functools.partial( + _bgp_check_received_routes, + ) + _, result = topotest.run_and_expect(test_func, None, count=30, wait=1) + assert result is None, "Can't see 10.0.0.2/32" + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) 
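
Note on the test above: it relies on the usual topotest polling pattern, where the check
function returns None once the partial JSON from `show bgp ipv4 unicast json` matches, and
`topotest.run_and_expect()` retries it up to `count` times with `wait` seconds between tries.
The short sketch below is illustrative only and is not the library code -- the authoritative
helper lives in tests/topotests/lib/topotest.py, and the function name used here
(`run_and_expect_sketch`) is made up for the example.

    # Illustrative sketch, not the actual topotest helper.
    # Assumes `check` returns None on success (as the json_cmp wrappers above do)
    # and a mismatch description otherwise.
    import time

    def run_and_expect_sketch(check, expected=None, count=30, wait=1):
        result = check()
        while result != expected and count > 0:
            time.sleep(wait)
            result = check()
            count -= 1
        # Return the last result so the caller can assert on it,
        # mirroring `_, result = topotest.run_and_expect(...)` usage above.
        return (result == expected, result)

    # Usage mirroring the test:
    #   ok, result = run_and_expect_sketch(_bgp_converge)
    #   assert result is None, "Can't converge"
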
From 0bc79f5e51827fad611d714da5e926da2bd123a7 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Tue, 1 Oct 2024 14:31:08 -0400 Subject: [PATCH 25/73] lib: nexthop code should use uint16_t for nexthop counting It's possible to specify via the cli and configure how many nexthops that are allowed on the system. If you happen to have > 255 then things are about to get interesting otherwise. Let's allow up to 65k nexthops (ha!) Signed-off-by: Donald Sharp --- lib/nexthop_group.c | 21 +++++++++------------ lib/nexthop_group.h | 5 ++--- 2 files changed, 11 insertions(+), 15 deletions(-) diff --git a/lib/nexthop_group.c b/lib/nexthop_group.c index 3f408e0a71f9..cb1ebb5d09b9 100644 --- a/lib/nexthop_group.c +++ b/lib/nexthop_group.c @@ -70,10 +70,10 @@ static struct nexthop *nexthop_group_tail(const struct nexthop_group *nhg) return nexthop; } -uint8_t nexthop_group_nexthop_num(const struct nexthop_group *nhg) +uint16_t nexthop_group_nexthop_num(const struct nexthop_group *nhg) { struct nexthop *nhop; - uint8_t num = 0; + uint16_t num = 0; for (ALL_NEXTHOPS_PTR(nhg, nhop)) num++; @@ -81,11 +81,10 @@ uint8_t nexthop_group_nexthop_num(const struct nexthop_group *nhg) return num; } -static uint8_t -nexthop_group_nexthop_num_no_recurse(const struct nexthop_group *nhg) +static uint16_t nexthop_group_nexthop_num_no_recurse(const struct nexthop_group *nhg) { struct nexthop *nhop; - uint8_t num = 0; + uint16_t num = 0; for (nhop = nhg->nexthop; nhop; nhop = nhop->next) num++; @@ -93,10 +92,10 @@ nexthop_group_nexthop_num_no_recurse(const struct nexthop_group *nhg) return num; } -uint8_t nexthop_group_active_nexthop_num(const struct nexthop_group *nhg) +uint16_t nexthop_group_active_nexthop_num(const struct nexthop_group *nhg) { struct nexthop *nhop; - uint8_t num = 0; + uint16_t num = 0; for (ALL_NEXTHOPS_PTR(nhg, nhop)) { if (CHECK_FLAG(nhop->flags, NEXTHOP_FLAG_ACTIVE)) @@ -184,11 +183,9 @@ static struct nexthop *nhg_nh_find(const struct nexthop_group *nhg, return NULL; } -static bool -nexthop_group_equal_common(const struct nexthop_group *nhg1, - const struct nexthop_group *nhg2, - uint8_t (*nexthop_group_nexthop_num_func)( - const struct nexthop_group *nhg)) +static bool nexthop_group_equal_common( + const struct nexthop_group *nhg1, const struct nexthop_group *nhg2, + uint16_t (*nexthop_group_nexthop_num_func)(const struct nexthop_group *nhg)) { if (nhg1 && !nhg2) return false; diff --git a/lib/nexthop_group.h b/lib/nexthop_group.h index 822a35439cc0..910329941884 100644 --- a/lib/nexthop_group.h +++ b/lib/nexthop_group.h @@ -149,9 +149,8 @@ extern void nexthop_group_json_nexthop(json_object *j, const struct nexthop *nh); /* Return the number of nexthops in this nhg */ -extern uint8_t nexthop_group_nexthop_num(const struct nexthop_group *nhg); -extern uint8_t -nexthop_group_active_nexthop_num(const struct nexthop_group *nhg); +extern uint16_t nexthop_group_nexthop_num(const struct nexthop_group *nhg); +extern uint16_t nexthop_group_active_nexthop_num(const struct nexthop_group *nhg); extern bool nexthop_group_has_label(const struct nexthop_group *nhg); From b70835d690d98f1549202d89d9bab34297ba5bd0 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Tue, 1 Oct 2024 18:58:24 -0300 Subject: [PATCH 26/73] bfdd: add no variants to interval configurations Add missing no commands to various interval configurations. 
Signed-off-by: Rafael Zalamena --- bfdd/bfdd_cli.c | 41 +++++++++++++++++++++++++---------------- 1 file changed, 25 insertions(+), 16 deletions(-) diff --git a/bfdd/bfdd_cli.c b/bfdd/bfdd_cli.c index 75034d220c5e..e51aa21b4932 100644 --- a/bfdd/bfdd_cli.c +++ b/bfdd/bfdd_cli.c @@ -356,14 +356,15 @@ void bfd_cli_show_mult(struct vty *vty, const struct lyd_node *dnode, DEFPY_YANG( bfd_peer_rx, bfd_peer_rx_cmd, - "receive-interval (10-60000)$interval", + "[no] receive-interval ![(10-60000)$interval]", + NO_STR "Configure peer receive interval\n" "Configure peer receive interval value in milliseconds\n") { char value[32]; snprintf(value, sizeof(value), "%ld", interval * 1000); - nb_cli_enqueue_change(vty, "./required-receive-interval", NB_OP_MODIFY, + nb_cli_enqueue_change(vty, "./required-receive-interval", no ? NB_OP_DESTROY : NB_OP_MODIFY, value); return nb_cli_apply_changes(vty, NULL); @@ -379,7 +380,8 @@ void bfd_cli_show_rx(struct vty *vty, const struct lyd_node *dnode, DEFPY_YANG( bfd_peer_tx, bfd_peer_tx_cmd, - "transmit-interval (10-60000)$interval", + "[no] transmit-interval ![(10-60000)$interval]", + NO_STR "Configure peer transmit interval\n" "Configure peer transmit interval value in milliseconds\n") { @@ -387,7 +389,7 @@ DEFPY_YANG( snprintf(value, sizeof(value), "%ld", interval * 1000); nb_cli_enqueue_change(vty, "./desired-transmission-interval", - NB_OP_MODIFY, value); + no ? NB_OP_DESTROY : NB_OP_MODIFY, value); return nb_cli_apply_changes(vty, NULL); } @@ -436,7 +438,8 @@ void bfd_cli_show_echo(struct vty *vty, const struct lyd_node *dnode, DEFPY_YANG( bfd_peer_echo_interval, bfd_peer_echo_interval_cmd, - "echo-interval (10-60000)$interval", + "[no] echo-interval ![(10-60000)$interval]", + NO_STR "Configure peer echo intervals\n" "Configure peer echo rx/tx intervals value in milliseconds\n") { @@ -449,16 +452,17 @@ DEFPY_YANG( snprintf(value, sizeof(value), "%ld", interval * 1000); nb_cli_enqueue_change(vty, "./desired-echo-transmission-interval", - NB_OP_MODIFY, value); + no ? NB_OP_DESTROY : NB_OP_MODIFY, value); nb_cli_enqueue_change(vty, "./required-echo-receive-interval", - NB_OP_MODIFY, value); + no ? NB_OP_DESTROY : NB_OP_MODIFY, value); return nb_cli_apply_changes(vty, NULL); } DEFPY_YANG( bfd_peer_echo_transmit_interval, bfd_peer_echo_transmit_interval_cmd, - "echo transmit-interval (10-60000)$interval", + "[no] echo transmit-interval ![(10-60000)$interval]", + NO_STR "Configure peer echo intervals\n" "Configure desired transmit interval\n" "Configure interval value in milliseconds\n") @@ -472,7 +476,7 @@ DEFPY_YANG( snprintf(value, sizeof(value), "%ld", interval * 1000); nb_cli_enqueue_change(vty, "./desired-echo-transmission-interval", - NB_OP_MODIFY, value); + no ? NB_OP_DESTROY : NB_OP_MODIFY, value); return nb_cli_apply_changes(vty, NULL); } @@ -487,7 +491,8 @@ void bfd_cli_show_desired_echo_transmission_interval( DEFPY_YANG( bfd_peer_echo_receive_interval, bfd_peer_echo_receive_interval_cmd, - "echo receive-interval ", + "[no] echo receive-interval ![]", + NO_STR "Configure peer echo intervals\n" "Configure required receive interval\n" "Disable echo packets receive\n" @@ -504,9 +509,9 @@ DEFPY_YANG( snprintf(value, sizeof(value), "0"); else snprintf(value, sizeof(value), "%ld", interval * 1000); - + nb_cli_enqueue_change(vty, "./required-echo-receive-interval", - NB_OP_MODIFY, value); + no ? 
NB_OP_DESTROY : NB_OP_MODIFY, value); return nb_cli_apply_changes(vty, NULL); } @@ -576,12 +581,14 @@ ALIAS_YANG(bfd_peer_mult, bfd_profile_mult_cmd, "Configure peer detection multiplier value\n") ALIAS_YANG(bfd_peer_tx, bfd_profile_tx_cmd, - "transmit-interval (10-60000)$interval", + "[no] transmit-interval ![(10-60000)$interval]", + NO_STR "Configure peer transmit interval\n" "Configure peer transmit interval value in milliseconds\n") ALIAS_YANG(bfd_peer_rx, bfd_profile_rx_cmd, - "receive-interval (10-60000)$interval", + "[no] receive-interval ![(10-60000)$interval]", + NO_STR "Configure peer receive interval\n" "Configure peer receive interval value in milliseconds\n") @@ -618,14 +625,16 @@ ALIAS_YANG(bfd_peer_echo_interval, bfd_profile_echo_interval_cmd, ALIAS_YANG( bfd_peer_echo_transmit_interval, bfd_profile_echo_transmit_interval_cmd, - "echo transmit-interval (10-60000)$interval", + "[no] echo transmit-interval ![(10-60000)$interval]", + NO_STR "Configure peer echo intervals\n" "Configure desired transmit interval\n" "Configure interval value in milliseconds\n") ALIAS_YANG( bfd_peer_echo_receive_interval, bfd_profile_echo_receive_interval_cmd, - "echo receive-interval ", + "[no] echo receive-interval ![]", + NO_STR "Configure peer echo intervals\n" "Configure required receive interval\n" "Disable echo packets receive\n" From 0495cac837ad0f6ff1082746c37e4a48c1068035 Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Wed, 2 Oct 2024 13:57:30 +0300 Subject: [PATCH 27/73] bgpd: Actually make ` --v6-with-v4-nexthops` it work It was using `-v` which is actually a _version_. Fixes: 0435b31bb8ed55377f83d0e19bc085abc3c71b44 ("bgpd: Allow bgp to specify if it will allow v6 routing with v4 nexthops") Signed-off-by: Donatas Abraitis --- bgpd/bgp_main.c | 32 ++++++++++++++++---------------- doc/user/bgp.rst | 2 +- 2 files changed, 17 insertions(+), 17 deletions(-) diff --git a/bgpd/bgp_main.c b/bgpd/bgp_main.c index 5e6a62c9b929..535d2fc5f434 100644 --- a/bgpd/bgp_main.c +++ b/bgpd/bgp_main.c @@ -63,18 +63,16 @@ DEFINE_HOOK(bgp_hook_vrf_update, (struct vrf *vrf, bool enabled), (vrf, enabled)); /* bgpd options, we use GNU getopt library. 
*/ -static const struct option longopts[] = { - { "bgp_port", required_argument, NULL, 'p' }, - { "listenon", required_argument, NULL, 'l' }, - { "no_kernel", no_argument, NULL, 'n' }, - { "skip_runas", no_argument, NULL, 'S' }, - { "ecmp", required_argument, NULL, 'e' }, - { "int_num", required_argument, NULL, 'I' }, - { "no_zebra", no_argument, NULL, 'Z' }, - { "socket_size", required_argument, NULL, 's' }, - { "v6-with-v4-nexthops", no_argument, NULL, 'v' }, - { 0 } -}; +static const struct option longopts[] = { { "bgp_port", required_argument, NULL, 'p' }, + { "listenon", required_argument, NULL, 'l' }, + { "no_kernel", no_argument, NULL, 'n' }, + { "skip_runas", no_argument, NULL, 'S' }, + { "ecmp", required_argument, NULL, 'e' }, + { "int_num", required_argument, NULL, 'I' }, + { "no_zebra", no_argument, NULL, 'Z' }, + { "socket_size", required_argument, NULL, 's' }, + { "v6-with-v4-nexthops", no_argument, NULL, 'x' }, + { 0 } }; /* signal definitions */ void sighup(void); @@ -424,11 +422,12 @@ int main(int argc, char **argv) int buffer_size = BGP_SOCKET_SNDBUF_SIZE; char *address; struct listnode *node; + bool v6_with_v4_nexthops = false; addresses->cmp = (int (*)(void *, void *))strcmp; frr_preinit(&bgpd_di, argc, argv); - frr_opt_add("p:l:SnZe:I:s:" DEPRECATED_OPTIONS, longopts, + frr_opt_add("p:l:SnZe:I:s:x" DEPRECATED_OPTIONS, longopts, " -p, --bgp_port Set BGP listen port number (0 means do not listen).\n" " -l, --listenon Listen on specified address (implies -n)\n" " -n, --no_kernel Do not install route to kernel.\n" @@ -437,7 +436,7 @@ int main(int argc, char **argv) " -e, --ecmp Specify ECMP to use.\n" " -I, --int_num Set instance number (label-manager)\n" " -s, --socket_size Set BGP peer socket send buffer size\n" - " , --v6-with-v4-nexthop Allow BGP to form v6 neighbors using v4 nexthops\n"); + " -x, --v6-with-v4-nexthop Allow BGP to form v6 neighbors using v4 nexthops\n"); /* Command line argument treatment. */ while (1) { @@ -499,8 +498,8 @@ int main(int argc, char **argv) case 's': buffer_size = atoi(optarg); break; - case 'v': - bm->v6_with_v4_nexthops = true; + case 'x': + v6_with_v4_nexthops = true; break; default: frr_help_exit(1); @@ -513,6 +512,7 @@ int main(int argc, char **argv) bgp_master_init(frr_init(), buffer_size, addresses); bm->startup_time = monotime(NULL); bm->port = bgp_port; + bm->v6_with_v4_nexthops = v6_with_v4_nexthops; if (bgp_port == 0) bgp_option_set(BGP_OPT_NO_LISTEN); if (no_fib_flag || no_zebra_flag) diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index 438c60a3f651..4632c70d53c4 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -83,7 +83,7 @@ be specified (:ref:`common-invocation-options`). be done to see if this is helping or not at the scale you are running at. -.. option:: --v6-with-v4-nexthops +.. option:: -x, --v6-with-v4-nexthops Allow BGP to peer in the V6 afi, when the interface only has v4 addresses. This allows bgp to install the v6 routes with a v6 nexthop that has the From b56cfc6c808d330a85a06421c7fc5f5f9066938f Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Wed, 2 Oct 2024 14:38:15 +0200 Subject: [PATCH 28/73] bgpd: fix printfrr_bp for non initialized peers Fix printfrr_bp for non initialized peers. 
For example: > Sep 26 17:56:44 r1 bgpd[26295]: [GJPH1-W8PZV] Resetting peer (null)(Unknown) due to change in addpath config Is now: > Oct 02 14:00:59 r1 bgpd[12795]: [MNE5N-K0G4Z] Resetting peer 2.2.2.2 due to change in addpath config Signed-off-by: Louis Scalbert --- bgpd/bgpd.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index a3caa5a80626..80b1ae39d4d1 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -8901,6 +8901,12 @@ static ssize_t printfrr_bp(struct fbuf *buf, struct printfrr_eargs *ea, if (!peer) return bputs(buf, "(null)"); + if (!peer->host) { + if (peer->conf_if) + return bprintfrr(buf, "%s", peer->conf_if); + return bprintfrr(buf, "%pSU", &peer->connection->su); + } + return bprintfrr(buf, "%s(%s)", peer->host, peer->hostname ? peer->hostname : "Unknown"); } From 49944f77fe08c81c1770bbf21005a3d6c83b688c Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 2 Oct 2024 10:41:37 -0400 Subject: [PATCH 29/73] bgpd: Remove unused bgp_mp_dmed_deselect function Signed-off-by: Donald Sharp --- bgpd/bgp_mpath.c | 18 ------------------ bgpd/bgp_mpath.h | 1 - 2 files changed, 19 deletions(-) diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index e0cc5f189a49..84db7e88ce18 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -569,24 +569,6 @@ void bgp_path_info_mpath_update(struct bgp *bgp, struct bgp_dest *dest, } } -/* - * bgp_mp_dmed_deselect - * - * Clean up multipath information for BGP_PATH_DMED_SELECTED path that - * is not selected as best path - */ -void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best) -{ - if (!dmed_best) - return; - - bgp_path_info_mpath_count_set(dmed_best, 0); - UNSET_FLAG(dmed_best->flags, BGP_PATH_MULTIPATH_CHG); - UNSET_FLAG(dmed_best->flags, BGP_PATH_LINK_BW_CHG); - - assert(bgp_path_info_mpath_first(dmed_best) == NULL); -} - /* * bgp_path_info_mpath_aggregate_update * diff --git a/bgpd/bgp_mpath.h b/bgpd/bgp_mpath.h index 8201896593b0..c5a009a4c8bb 100644 --- a/bgpd/bgp_mpath.h +++ b/bgpd/bgp_mpath.h @@ -46,7 +46,6 @@ extern int bgp_maximum_paths_unset(struct bgp *bgp, afi_t afi, safi_t safi, * multipath selections */ extern int bgp_path_info_nexthop_cmp(struct bgp_path_info *bpi1, struct bgp_path_info *bpi2); -extern void bgp_mp_dmed_deselect(struct bgp_path_info *dmed_best); extern void bgp_path_info_mpath_update(struct bgp *bgp, struct bgp_dest *dest, struct bgp_path_info *new_best, struct bgp_path_info *old_best, uint32_t num_candidates, From 20710da797204ca75c36bdbcc8334906a63404fe Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Thu, 3 Oct 2024 09:20:40 +0300 Subject: [PATCH 30/73] bgpd: Print debug message about reaching maximum allowed multi paths Fixes: 421cf856ef86db250a86be01437d0a668b463dcc ("bgpd: Cleanup multipath figuring out in bgp") Signed-off-by: Donatas Abraitis --- bgpd/bgp_mpath.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/bgpd/bgp_mpath.c b/bgpd/bgp_mpath.c index 84db7e88ce18..609afa4245e6 100644 --- a/bgpd/bgp_mpath.c +++ b/bgpd/bgp_mpath.c @@ -490,11 +490,12 @@ void bgp_path_info_mpath_update(struct bgp *bgp, struct bgp_dest *dest, cur_iterator = cur_iterator->next; } - break; if (debug) zlog_debug("%pBD(%s): Mpath count %u is equal to maximum paths allowed, finished comparision for MPATHS", dest, bgp->name_pretty, mpath_count); + + break; } if (debug) From c9866909486ba0021ad73ed26ab5d495bb6f8a2e Mon Sep 17 00:00:00 2001 From: sri-mohan1 Date: Thu, 3 Oct 2024 16:25:36 +0530 Subject: [PATCH 31/73] bgpd: changes for code maintainability these changes are for 
improving the code maintainability and readability Signed-off-by: sri-mohan1 --- bgpd/bgp_pbr.c | 175 ++++++++++++++++++++++--------------------------- 1 file changed, 79 insertions(+), 96 deletions(-) diff --git a/bgpd/bgp_pbr.c b/bgpd/bgp_pbr.c index ec5b50a08ffc..2d61c0f00a1b 100644 --- a/bgpd/bgp_pbr.c +++ b/bgpd/bgp_pbr.c @@ -173,33 +173,33 @@ static int snprintf_bgp_pbr_match_val(char *str, int len, ptr += delta; len -= delta; } else { - if (mval->unary_operator & OPERATOR_UNARY_OR) { + if (CHECK_FLAG(mval->unary_operator, OPERATOR_UNARY_OR)) { delta = snprintf(ptr, len, ", or "); ptr += delta; len -= delta; } - if (mval->unary_operator & OPERATOR_UNARY_AND) { + if (CHECK_FLAG(mval->unary_operator, OPERATOR_UNARY_AND)) { delta = snprintf(ptr, len, ", and "); ptr += delta; len -= delta; } } - if (mval->compare_operator & OPERATOR_COMPARE_LESS_THAN) { + if (CHECK_FLAG(mval->compare_operator, OPERATOR_COMPARE_LESS_THAN)) { delta = snprintf(ptr, len, "<"); ptr += delta; len -= delta; } - if (mval->compare_operator & OPERATOR_COMPARE_GREATER_THAN) { + if (CHECK_FLAG(mval->compare_operator, OPERATOR_COMPARE_GREATER_THAN)) { delta = snprintf(ptr, len, ">"); ptr += delta; len -= delta; } - if (mval->compare_operator & OPERATOR_COMPARE_EQUAL_TO) { + if (CHECK_FLAG(mval->compare_operator, OPERATOR_COMPARE_EQUAL_TO)) { delta = snprintf(ptr, len, "="); ptr += delta; len -= delta; } - if (mval->compare_operator & OPERATOR_COMPARE_EXACT_MATCH) { + if (CHECK_FLAG(mval->compare_operator, OPERATOR_COMPARE_EXACT_MATCH)) { delta = snprintf(ptr, len, "match"); ptr += delta; len -= delta; @@ -287,9 +287,7 @@ static bool bgp_pbr_extract_enumerate_unary_opposite( { if (unary_operator == OPERATOR_UNARY_AND && and_valmask) { if (type_entry == FLOWSPEC_TCP_FLAGS) { - and_valmask->mask |= - TCP_HEADER_ALL_FLAGS & - ~(value); + SET_FLAG(and_valmask->mask, CHECK_FLAG(TCP_HEADER_ALL_FLAGS, ~(value))); } else if (type_entry == FLOWSPEC_DSCP || type_entry == FLOWSPEC_FLOW_LABEL || type_entry == FLOWSPEC_PKT_LEN || @@ -302,9 +300,7 @@ static bool bgp_pbr_extract_enumerate_unary_opposite( sizeof(struct bgp_pbr_val_mask)); if (type_entry == FLOWSPEC_TCP_FLAGS) { and_valmask->val = TCP_HEADER_ALL_FLAGS; - and_valmask->mask |= - TCP_HEADER_ALL_FLAGS & - ~(value); + SET_FLAG(and_valmask->mask, CHECK_FLAG(TCP_HEADER_ALL_FLAGS, ~(value))); } else if (type_entry == FLOWSPEC_DSCP || type_entry == FLOWSPEC_FLOW_LABEL || type_entry == FLOWSPEC_FRAGMENT || @@ -346,14 +342,10 @@ static bool bgp_pbr_extract_enumerate_unary(struct bgp_pbr_match_val list[], if (i != 0 && list[i].unary_operator != unary_operator) return false; - if (!(list[i].compare_operator & - OPERATOR_COMPARE_EQUAL_TO) && - !(list[i].compare_operator & - OPERATOR_COMPARE_EXACT_MATCH)) { - if ((list[i].compare_operator & - OPERATOR_COMPARE_LESS_THAN) && - (list[i].compare_operator & - OPERATOR_COMPARE_GREATER_THAN)) { + if (!CHECK_FLAG(list[i].compare_operator, OPERATOR_COMPARE_EQUAL_TO) && + !CHECK_FLAG(list[i].compare_operator, OPERATOR_COMPARE_EXACT_MATCH)) { + if (CHECK_FLAG(list[i].compare_operator, OPERATOR_COMPARE_LESS_THAN) && + CHECK_FLAG(list[i].compare_operator, OPERATOR_COMPARE_GREATER_THAN)) { ret = bgp_pbr_extract_enumerate_unary_opposite( unary_operator, and_valmask, or_valmask, list[i].value, @@ -366,15 +358,15 @@ static bool bgp_pbr_extract_enumerate_unary(struct bgp_pbr_match_val list[], } if (unary_operator == OPERATOR_UNARY_AND && and_valmask) { if (type_entry == FLOWSPEC_TCP_FLAGS) - and_valmask->mask |= - TCP_HEADER_ALL_FLAGS & 
list[i].value; + SET_FLAG(and_valmask->mask, + CHECK_FLAG(TCP_HEADER_ALL_FLAGS, list[i].value)); } else if (unary_operator == OPERATOR_UNARY_OR && or_valmask) { and_valmask = XCALLOC(MTYPE_PBR_VALMASK, sizeof(struct bgp_pbr_val_mask)); if (type_entry == FLOWSPEC_TCP_FLAGS) { and_valmask->val = TCP_HEADER_ALL_FLAGS; - and_valmask->mask |= - TCP_HEADER_ALL_FLAGS & list[i].value; + SET_FLAG(and_valmask->mask, + CHECK_FLAG(TCP_HEADER_ALL_FLAGS, list[i].value)); } else if (type_entry == FLOWSPEC_DSCP || type_entry == FLOWSPEC_FLOW_LABEL || type_entry == FLOWSPEC_ICMP_TYPE || @@ -402,8 +394,8 @@ static bool bgp_pbr_extract_enumerate(struct bgp_pbr_match_val list[], uint8_t unary_operator_val; bool double_check = false; - if ((unary_operator & OPERATOR_UNARY_OR) && - (unary_operator & OPERATOR_UNARY_AND)) { + if (CHECK_FLAG(unary_operator, OPERATOR_UNARY_OR) && + CHECK_FLAG(unary_operator, OPERATOR_UNARY_AND)) { unary_operator_val = OPERATOR_UNARY_AND; double_check = true; } else @@ -431,12 +423,12 @@ static uint8_t bgp_pbr_match_val_get_operator(struct bgp_pbr_match_val list[], for (i = 0; i < num; i++) { if (i == 0) continue; - if (list[i].unary_operator & OPERATOR_UNARY_OR) + if (CHECK_FLAG(list[i].unary_operator, OPERATOR_UNARY_OR)) unary_operator = OPERATOR_UNARY_OR; - if ((list[i].unary_operator & OPERATOR_UNARY_AND - && unary_operator == OPERATOR_UNARY_OR) || - (list[i].unary_operator & OPERATOR_UNARY_OR - && unary_operator == OPERATOR_UNARY_AND)) + if ((CHECK_FLAG(list[i].unary_operator, OPERATOR_UNARY_AND) && + unary_operator == OPERATOR_UNARY_OR) || + (CHECK_FLAG(list[i].unary_operator, OPERATOR_UNARY_OR) && + unary_operator == OPERATOR_UNARY_AND)) return 0; } return unary_operator; @@ -723,8 +715,8 @@ static int bgp_pbr_validate_policy_route(struct bgp_pbr_entry_main *api) } } - } else if (!(api->match_bitmask & PREFIX_SRC_PRESENT) && - !(api->match_bitmask & PREFIX_DST_PRESENT)) { + } else if (!CHECK_FLAG(api->match_bitmask, PREFIX_SRC_PRESENT) && + !CHECK_FLAG(api->match_bitmask, PREFIX_DST_PRESENT)) { if (BGP_DEBUG(pbr, PBR)) { bgp_pbr_print_policy_route(api); zlog_debug("BGP: match actions without src or dst address can not operate. 
ignoring."); @@ -786,8 +778,7 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, memcpy(&ecom_copy, ecom_eval, sizeof(struct ecommunity_val)); - ecom_copy.val[0] &= - ~ECOMMUNITY_ENCODE_TRANS_EXP; + UNSET_FLAG(ecom_copy.val[0], ECOMMUNITY_ENCODE_TRANS_EXP); ecom_copy.val[1] = ECOMMUNITY_ROUTE_TARGET; ecommunity_add_val(eckey, &ecom_copy, false, false); @@ -955,12 +946,12 @@ int bgp_pbr_build_and_validate_entry(const struct prefix *p, return -1; /* check inconsistency in the match rule */ - if (api->match_bitmask & PREFIX_SRC_PRESENT) { + if (CHECK_FLAG(api->match_bitmask, PREFIX_SRC_PRESENT)) { src = &api->src_prefix; afi = family2afi(src->family); valid_prefix = 1; } - if (api->match_bitmask & PREFIX_DST_PRESENT) { + if (CHECK_FLAG(api->match_bitmask, PREFIX_DST_PRESENT)) { dst = &api->dst_prefix; if (valid_prefix && afi != family2afi(dst->family)) { if (BGP_DEBUG(pbr, PBR)) { @@ -1204,12 +1195,10 @@ bool bgp_pbr_rule_hash_equal(const void *arg1, const void *arg2) if (r1->action != r2->action) return false; - if ((r1->flags & MATCH_IP_SRC_SET) && - !prefix_same(&r1->src, &r2->src)) + if (CHECK_FLAG(r1->flags, MATCH_IP_SRC_SET) && !prefix_same(&r1->src, &r2->src)) return false; - if ((r1->flags & MATCH_IP_DST_SET) && - !prefix_same(&r1->dst, &r2->dst)) + if (CHECK_FLAG(r1->flags, MATCH_IP_DST_SET) && !prefix_same(&r1->dst, &r2->dst)) return false; return true; @@ -1426,7 +1415,7 @@ void bgp_pbr_print_policy_route(struct bgp_pbr_entry_main *api) delta = snprintf(ptr, sizeof(return_string), "MATCH : "); len -= delta; ptr += delta; - if (api->match_bitmask & PREFIX_SRC_PRESENT) { + if (CHECK_FLAG(api->match_bitmask, PREFIX_SRC_PRESENT)) { struct prefix *p = &(api->src_prefix); if (api->src_prefix_offset) @@ -1438,7 +1427,7 @@ void bgp_pbr_print_policy_route(struct bgp_pbr_entry_main *api) ptr += delta; INCREMENT_DISPLAY(ptr, nb_items, len); } - if (api->match_bitmask & PREFIX_DST_PRESENT) { + if (CHECK_FLAG(api->match_bitmask, PREFIX_DST_PRESENT)) { struct prefix *p = &(api->dst_prefix); INCREMENT_DISPLAY(ptr, nb_items, len); @@ -1581,21 +1570,18 @@ void bgp_pbr_print_policy_route(struct bgp_pbr_entry_main *api) delta = snprintf(ptr, len, "@action "); len -= delta; ptr += delta; - if (api->actions[i].u.za.filter - & TRAFFIC_ACTION_TERMINATE) { + if (CHECK_FLAG(api->actions[i].u.za.filter, TRAFFIC_ACTION_TERMINATE)) { delta = snprintf(ptr, len, " terminate (apply filter(s))"); len -= delta; ptr += delta; } - if (api->actions[i].u.za.filter - & TRAFFIC_ACTION_DISTRIBUTE) { + if (CHECK_FLAG(api->actions[i].u.za.filter, TRAFFIC_ACTION_DISTRIBUTE)) { delta = snprintf(ptr, len, " distribute"); len -= delta; ptr += delta; } - if (api->actions[i].u.za.filter - & TRAFFIC_ACTION_SAMPLE) { + if (CHECK_FLAG(api->actions[i].u.za.filter, TRAFFIC_ACTION_SAMPLE)) { delta = snprintf(ptr, len, " sample"); len -= delta; ptr += delta; @@ -1746,12 +1732,10 @@ static int bgp_pbr_get_same_rule(struct hash_bucket *bucket, void *arg) if (r1->flags != r2->flags) return HASHWALK_CONTINUE; - if ((r1->flags & MATCH_IP_SRC_SET) && - !prefix_same(&r1->src, &r2->src)) + if (CHECK_FLAG(r1->flags, MATCH_IP_SRC_SET) && !prefix_same(&r1->src, &r2->src)) return HASHWALK_CONTINUE; - if ((r1->flags & MATCH_IP_DST_SET) && - !prefix_same(&r1->dst, &r2->dst)) + if (CHECK_FLAG(r1->flags, MATCH_IP_DST_SET) && !prefix_same(&r1->dst, &r2->dst)) return HASHWALK_CONTINUE; /* this function is used for two cases: @@ -1840,11 +1824,11 @@ static void bgp_pbr_policyroute_remove_from_zebra_unit( pbr_rule.vrf_id = bpf->vrf_id; if 
(bpf->src) { prefix_copy(&pbr_rule.src, bpf->src); - pbr_rule.flags |= MATCH_IP_SRC_SET; + SET_FLAG(pbr_rule.flags, MATCH_IP_SRC_SET); } if (bpf->dst) { prefix_copy(&pbr_rule.dst, bpf->dst); - pbr_rule.flags |= MATCH_IP_DST_SET; + SET_FLAG(pbr_rule.flags, MATCH_IP_DST_SET); } bpr = &pbr_rule; /* A previous entry may already exist @@ -1867,32 +1851,32 @@ static void bgp_pbr_policyroute_remove_from_zebra_unit( temp.family = bpf->family; if (bpf->src) { - temp.flags |= MATCH_IP_SRC_SET; + SET_FLAG(temp.flags, MATCH_IP_SRC_SET); prefix_copy(&temp2.src, bpf->src); } else temp2.src.family = bpf->family; if (bpf->dst) { - temp.flags |= MATCH_IP_DST_SET; + SET_FLAG(temp.flags, MATCH_IP_DST_SET); prefix_copy(&temp2.dst, bpf->dst); } else temp2.dst.family = bpf->family; if (src_port && (src_port->min_port || bpf->protocol == IPPROTO_ICMP)) { if (bpf->protocol == IPPROTO_ICMP) - temp.flags |= MATCH_ICMP_SET; - temp.flags |= MATCH_PORT_SRC_SET; + SET_FLAG(temp.flags, MATCH_ICMP_SET); + SET_FLAG(temp.flags, MATCH_PORT_SRC_SET); temp2.src_port_min = src_port->min_port; if (src_port->max_port) { - temp.flags |= MATCH_PORT_SRC_RANGE_SET; + SET_FLAG(temp.flags, MATCH_PORT_SRC_RANGE_SET); temp2.src_port_max = src_port->max_port; } } if (dst_port && (dst_port->min_port || bpf->protocol == IPPROTO_ICMP)) { if (bpf->protocol == IPPROTO_ICMP) - temp.flags |= MATCH_ICMP_SET; - temp.flags |= MATCH_PORT_DST_SET; + SET_FLAG(temp.flags, MATCH_ICMP_SET); + SET_FLAG(temp.flags, MATCH_PORT_DST_SET); temp2.dst_port_min = dst_port->min_port; if (dst_port->max_port) { - temp.flags |= MATCH_PORT_DST_RANGE_SET; + SET_FLAG(temp.flags, MATCH_PORT_DST_RANGE_SET); temp2.dst_port_max = dst_port->max_port; } } @@ -1904,7 +1888,7 @@ static void bgp_pbr_policyroute_remove_from_zebra_unit( temp.pkt_len_max = pkt_len->max_port; } else if (bpf->pkt_len_val) { if (bpf->pkt_len_val->mask) - temp.flags |= MATCH_PKT_LEN_INVERSE_SET; + SET_FLAG(temp.flags, MATCH_PKT_LEN_INVERSE_SET); temp.pkt_len_min = bpf->pkt_len_val->val; } if (bpf->tcp_flags) { @@ -1913,32 +1897,32 @@ static void bgp_pbr_policyroute_remove_from_zebra_unit( } if (bpf->dscp) { if (bpf->dscp->mask) - temp.flags |= MATCH_DSCP_INVERSE_SET; + SET_FLAG(temp.flags, MATCH_DSCP_INVERSE_SET); else - temp.flags |= MATCH_DSCP_SET; + SET_FLAG(temp.flags, MATCH_DSCP_SET); temp.dscp_value = bpf->dscp->val; } if (bpf->flow_label) { if (bpf->flow_label->mask) - temp.flags |= MATCH_FLOW_LABEL_INVERSE_SET; + SET_FLAG(temp.flags, MATCH_FLOW_LABEL_INVERSE_SET); else - temp.flags |= MATCH_FLOW_LABEL_SET; + SET_FLAG(temp.flags, MATCH_FLOW_LABEL_SET); temp.flow_label = bpf->flow_label->val; } if (bpf->fragment) { if (bpf->fragment->mask) - temp.flags |= MATCH_FRAGMENT_INVERSE_SET; + SET_FLAG(temp.flags, MATCH_FRAGMENT_INVERSE_SET); temp.fragment = bpf->fragment->val; } if (bpf->src == NULL || bpf->dst == NULL) { - if (temp.flags & (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET)) + if (CHECK_FLAG(temp.flags, (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET))) temp.type = IPSET_NET_PORT; else temp.type = IPSET_NET; } else { - if (temp.flags & (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET)) + if (CHECK_FLAG(temp.flags, (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET))) temp.type = IPSET_NET_PORT_NET; else temp.type = IPSET_NET_NET; @@ -2316,11 +2300,11 @@ static void bgp_pbr_policyroute_add_to_zebra_unit(struct bgp *bgp, pbr_rule.vrf_id = bpf->vrf_id; pbr_rule.priority = 20; if (bpf->src) { - pbr_rule.flags |= MATCH_IP_SRC_SET; + SET_FLAG(pbr_rule.flags, MATCH_IP_SRC_SET); prefix_copy(&pbr_rule.src, bpf->src); } if 
(bpf->dst) { - pbr_rule.flags |= MATCH_IP_DST_SET; + SET_FLAG(pbr_rule.flags, MATCH_IP_DST_SET); prefix_copy(&pbr_rule.dst, bpf->dst); } pbr_rule.action = bpa; @@ -2377,32 +2361,32 @@ static void bgp_pbr_policyroute_add_to_zebra_unit(struct bgp *bgp, temp.vrf_id = bpf->vrf_id; temp.family = bpf->family; if (bpf->src) - temp.flags |= MATCH_IP_SRC_SET; + SET_FLAG(temp.flags, MATCH_IP_SRC_SET); if (bpf->dst) - temp.flags |= MATCH_IP_DST_SET; + SET_FLAG(temp.flags, MATCH_IP_DST_SET); if (src_port && (src_port->min_port || bpf->protocol == IPPROTO_ICMP)) { if (bpf->protocol == IPPROTO_ICMP) - temp.flags |= MATCH_ICMP_SET; - temp.flags |= MATCH_PORT_SRC_SET; + SET_FLAG(temp.flags, MATCH_ICMP_SET); + SET_FLAG(temp.flags, MATCH_PORT_SRC_SET); } if (dst_port && (dst_port->min_port || bpf->protocol == IPPROTO_ICMP)) { if (bpf->protocol == IPPROTO_ICMP) - temp.flags |= MATCH_ICMP_SET; - temp.flags |= MATCH_PORT_DST_SET; + SET_FLAG(temp.flags, MATCH_ICMP_SET); + SET_FLAG(temp.flags, MATCH_PORT_DST_SET); } if (src_port && src_port->max_port) - temp.flags |= MATCH_PORT_SRC_RANGE_SET; + SET_FLAG(temp.flags, MATCH_PORT_SRC_RANGE_SET); if (dst_port && dst_port->max_port) - temp.flags |= MATCH_PORT_DST_RANGE_SET; + SET_FLAG(temp.flags, MATCH_PORT_DST_RANGE_SET); if (bpf->src == NULL || bpf->dst == NULL) { - if (temp.flags & (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET)) + if (CHECK_FLAG(temp.flags, (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET))) temp.type = IPSET_NET_PORT; else temp.type = IPSET_NET; } else { - if (temp.flags & (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET)) + if (CHECK_FLAG(temp.flags, (MATCH_PORT_DST_SET | MATCH_PORT_SRC_SET))) temp.type = IPSET_NET_PORT_NET; else temp.type = IPSET_NET_NET; @@ -2413,7 +2397,7 @@ static void bgp_pbr_policyroute_add_to_zebra_unit(struct bgp *bgp, temp.pkt_len_max = pkt_len->max_port; } else if (bpf->pkt_len_val) { if (bpf->pkt_len_val->mask) - temp.flags |= MATCH_PKT_LEN_INVERSE_SET; + SET_FLAG(temp.flags, MATCH_PKT_LEN_INVERSE_SET); temp.pkt_len_min = bpf->pkt_len_val->val; } if (bpf->tcp_flags) { @@ -2422,26 +2406,26 @@ static void bgp_pbr_policyroute_add_to_zebra_unit(struct bgp *bgp, } if (bpf->dscp) { if (bpf->dscp->mask) - temp.flags |= MATCH_DSCP_INVERSE_SET; + SET_FLAG(temp.flags, MATCH_DSCP_INVERSE_SET); else - temp.flags |= MATCH_DSCP_SET; + SET_FLAG(temp.flags, MATCH_DSCP_SET); temp.dscp_value = bpf->dscp->val; } if (bpf->flow_label) { if (bpf->flow_label->mask) - temp.flags |= MATCH_FLOW_LABEL_INVERSE_SET; + SET_FLAG(temp.flags, MATCH_FLOW_LABEL_INVERSE_SET); else - temp.flags |= MATCH_FLOW_LABEL_SET; + SET_FLAG(temp.flags, MATCH_FLOW_LABEL_SET); temp.flow_label = bpf->flow_label->val; } if (bpf->fragment) { if (bpf->fragment->mask) - temp.flags |= MATCH_FRAGMENT_INVERSE_SET; + SET_FLAG(temp.flags, MATCH_FRAGMENT_INVERSE_SET); temp.fragment = bpf->fragment->val; } if (bpf->protocol) { temp.protocol = bpf->protocol; - temp.flags |= MATCH_PROTOCOL_SET; + SET_FLAG(temp.flags, MATCH_PROTOCOL_SET); } temp.action = bpa; bpm = hash_get(bgp->pbr_match_hash, &temp, @@ -2658,13 +2642,13 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path, memset(&nh, 0, sizeof(nh)); memset(&bpf, 0, sizeof(bpf)); memset(&bpof, 0, sizeof(bpof)); - if (api->match_bitmask & PREFIX_SRC_PRESENT || + if (CHECK_FLAG(api->match_bitmask, PREFIX_SRC_PRESENT) || (api->type == BGP_PBR_IPRULE && - api->match_bitmask_iprule & PREFIX_SRC_PRESENT)) + CHECK_FLAG(api->match_bitmask_iprule, PREFIX_SRC_PRESENT))) src = &api->src_prefix; - if (api->match_bitmask & 
PREFIX_DST_PRESENT || + if (CHECK_FLAG(api->match_bitmask, PREFIX_DST_PRESENT) || (api->type == BGP_PBR_IPRULE && - api->match_bitmask_iprule & PREFIX_DST_PRESENT)) + CHECK_FLAG(api->match_bitmask_iprule, PREFIX_DST_PRESENT))) dst = &api->dst_prefix; if (api->type == BGP_PBR_IPRULE) bpf.type = api->type; @@ -2809,8 +2793,7 @@ static void bgp_pbr_handle_entry(struct bgp *bgp, struct bgp_path_info *path, } break; case ACTION_TRAFFIC_ACTION: - if (api->actions[i].u.za.filter - & TRAFFIC_ACTION_SAMPLE) { + if (CHECK_FLAG(api->actions[i].u.za.filter, TRAFFIC_ACTION_SAMPLE)) { if (BGP_DEBUG(pbr, PBR)) { bgp_pbr_print_policy_route(api); zlog_warn("PBR: Sample action Ignored"); From b5a23c029c8cd924b4f964620f340b5981ad7f6b Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Thu, 3 Oct 2024 13:08:43 +0200 Subject: [PATCH 32/73] bfdd: add no variants to all configurations Continue the work of b70835d690 ("bfdd: add no variants to interval configurations") Signed-off-by: Louis Scalbert --- bfdd/bfdd_cli.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/bfdd/bfdd_cli.c b/bfdd/bfdd_cli.c index e51aa21b4932..2e213a2237ea 100644 --- a/bfdd/bfdd_cli.c +++ b/bfdd/bfdd_cli.c @@ -338,11 +338,12 @@ void bfd_cli_show_minimum_ttl(struct vty *vty, const struct lyd_node *dnode, DEFPY_YANG( bfd_peer_mult, bfd_peer_mult_cmd, - "detect-multiplier (2-255)$multiplier", + "[no] detect-multiplier ![(2-255)$multiplier]", + NO_STR "Configure peer detection multiplier\n" "Configure peer detection multiplier value\n") { - nb_cli_enqueue_change(vty, "./detection-multiplier", NB_OP_MODIFY, + nb_cli_enqueue_change(vty, "./detection-multiplier", no ? NB_OP_DESTROY : NB_OP_MODIFY, multiplier_str); return nb_cli_apply_changes(vty, NULL); } @@ -576,7 +577,8 @@ void bfd_cli_show_profile(struct vty *vty, const struct lyd_node *dnode, } ALIAS_YANG(bfd_peer_mult, bfd_profile_mult_cmd, - "detect-multiplier (2-255)$multiplier", + "[no] detect-multiplier ![(2-255)$multiplier]", + NO_STR "Configure peer detection multiplier\n" "Configure peer detection multiplier value\n") @@ -619,7 +621,8 @@ ALIAS_YANG(bfd_peer_echo, bfd_profile_echo_cmd, "Configure echo mode\n") ALIAS_YANG(bfd_peer_echo_interval, bfd_profile_echo_interval_cmd, - "echo-interval (10-60000)$interval", + "[no] echo-interval ![(10-60000)$interval]", + NO_STR "Configure peer echo interval\n" "Configure peer echo interval value in milliseconds\n") From db7b9a5a7098c4d53740df012664972c1ddc33ae Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Thu, 3 Oct 2024 13:37:51 +0200 Subject: [PATCH 33/73] lib: remove unused bfd defines Remove unused bfd defines Signed-off-by: Louis Scalbert --- lib/bfd.h | 6 ------ 1 file changed, 6 deletions(-) diff --git a/lib/bfd.h b/lib/bfd.h index 48929a95642c..99790f96a54b 100644 --- a/lib/bfd.h +++ b/lib/bfd.h @@ -16,14 +16,8 @@ extern "C" { #endif #define BFD_DEF_MIN_RX 300 -#define BFD_MIN_MIN_RX 50 -#define BFD_MAX_MIN_RX 60000 #define BFD_DEF_MIN_TX 300 -#define BFD_MIN_MIN_TX 50 -#define BFD_MAX_MIN_TX 60000 #define BFD_DEF_DETECT_MULT 3 -#define BFD_MIN_DETECT_MULT 2 -#define BFD_MAX_DETECT_MULT 255 #define BFD_STATUS_UNKNOWN (1 << 0) /* BFD session status never received */ #define BFD_STATUS_DOWN (1 << 1) /* BFD session status is down */ From ddf09413d1aecd59981275bae959eabcc0046f52 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Tue, 27 Feb 2024 19:35:32 +0100 Subject: [PATCH 34/73] bgpd: export labels to pre-policy bmp Export labels to pre-policy BMP Signed-off-by: Louis Scalbert --- bgpd/bgp_bmp.c | 24 
+++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/bgpd/bgp_bmp.c b/bgpd/bgp_bmp.c index 556738a606dd..9d99c2c7fda8 100644 --- a/bgpd/bgp_bmp.c +++ b/bgpd/bgp_bmp.c @@ -1047,7 +1047,7 @@ static void bmp_monitor(struct bmp *bmp, struct peer *peer, uint8_t flags, static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr) { - uint8_t bpi_num_labels; + uint8_t bpi_num_labels, adjin_num_labels; afi_t afi; safi_t safi; @@ -1241,11 +1241,12 @@ static bool bmp_wrsync(struct bmp *bmp, struct pullwr *pullwr) bpi_num_labels ? bpi->extra->labels->label : NULL, bpi_num_labels); - if (adjin) - /* TODO: set label here when adjin supports labels */ - bmp_monitor(bmp, adjin->peer, 0, BMP_PEER_TYPE_GLOBAL_INSTANCE, - bn_p, prd, adjin->attr, afi, safi, adjin->uptime, - NULL, 0); + if (adjin) { + adjin_num_labels = adjin->labels ? adjin->labels->num_labels : 0; + bmp_monitor(bmp, adjin->peer, 0, BMP_PEER_TYPE_GLOBAL_INSTANCE, bn_p, prd, + adjin->attr, afi, safi, adjin->uptime, + adjin_num_labels ? &adjin->labels->label[0] : NULL, adjin_num_labels); + } if (bn) bgp_dest_unlock_node(bn); @@ -1382,7 +1383,7 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr) struct peer *peer; struct bgp_dest *bn = NULL; bool written = false; - uint8_t bpi_num_labels; + uint8_t bpi_num_labels, adjin_num_labels; bqe = bmp_pull(bmp); if (!bqe) @@ -1453,10 +1454,11 @@ static bool bmp_wrqueue(struct bmp *bmp, struct pullwr *pullwr) if (adjin->peer == peer) break; } - /* TODO: set label here when adjin supports labels */ - bmp_monitor(bmp, peer, 0, BMP_PEER_TYPE_GLOBAL_INSTANCE, - &bqe->p, prd, adjin ? adjin->attr : NULL, afi, safi, - adjin ? adjin->uptime : monotime(NULL), NULL, 0); + adjin_num_labels = adjin && adjin->labels ? adjin->labels->num_labels : 0; + bmp_monitor(bmp, peer, 0, BMP_PEER_TYPE_GLOBAL_INSTANCE, &bqe->p, prd, + adjin ? adjin->attr : NULL, afi, safi, + adjin ? adjin->uptime : monotime(NULL), + adjin_num_labels ? &adjin->labels->label[0] : NULL, adjin_num_labels); written = true; } From a3877e4444dc3a1253135d3b9479935fc9a966f8 Mon Sep 17 00:00:00 2001 From: Igor Zhukov Date: Fri, 4 Oct 2024 13:16:02 +0700 Subject: [PATCH 35/73] zebra: Fix crash during reconnect fpm_enqueue_rmac_table expects an fpm_rmac_arg* as its argument. 
The issue can be reproduced by dropping the TCP session using: ss -K dst 127.0.0.1 dport = 2620 I used Fedora 40 and frr 9.1.2 and I got the gdb backtrace: (gdb) bt 0 0x00007fdd7d6997ea in fpm_enqueue_rmac_table (bucket=0x2134dd0, arg=0x2132b60) at zebra/dplane_fpm_nl.c:1217 1 0x00007fdd7dd1560d in hash_iterate (hash=0x21335f0, func=0x7fdd7d6997a0 , arg=0x2132b60) at lib/hash.c:252 2 0x00007fdd7dd1560d in hash_iterate (hash=0x1e5bf10, func=func@entry=0x7fdd7d698900 , arg=arg@entry=0x7ffed983bef0) at lib/hash.c:252 3 0x00007fdd7d698b5c in fpm_rmac_send (t=) at zebra/dplane_fpm_nl.c:1262 4 0x00007fdd7dd6ce22 in event_call (thread=thread@entry=0x7ffed983c010) at lib/event.c:1970 5 0x00007fdd7dd20758 in frr_run (master=0x1d27f10) at lib/libfrr.c:1213 6 0x0000000000425588 in main (argc=10, argv=0x7ffed983c2e8) at zebra/main.c:492 Signed-off-by: Igor Zhukov --- zebra/dplane_fpm_nl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c index 1d2f9e695f30..d594fc2c8640 100644 --- a/zebra/dplane_fpm_nl.c +++ b/zebra/dplane_fpm_nl.c @@ -1336,7 +1336,7 @@ static void fpm_enqueue_l3vni_table(struct hash_bucket *bucket, void *arg) struct zebra_l3vni *zl3vni = bucket->data; fra->zl3vni = zl3vni; - hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, zl3vni); + hash_iterate(zl3vni->rmac_table, fpm_enqueue_rmac_table, fra); } static void fpm_rmac_send(struct event *t) From e3a846754d8fc973903cc64c9024a548ecb337a4 Mon Sep 17 00:00:00 2001 From: anlan_cs Date: Sat, 5 Oct 2024 16:43:53 +0800 Subject: [PATCH 36/73] tools: fix some special commands for reloading pim The issue is we can't remove all pim configurations including some special configurations (e.g., `no ip pim bsm`) for one interface. For one pim-disable interface, all such pim depdendent options (including `ip pim ` and `no ip pim `) should be completely removed. Also append `no ip multicast` for the same purpose, it is no use at present, but for future use. The running config: ``` interface A ip pim no ip pim bsm exit ``` Reload the new config: ``` interface A exit ``` Before: ``` 2024-10-05 20:52:33,467 INFO: Executed "interface A no ip pim exit" 2024-10-05 20:52:33,482 INFO: Executed "interface A ip pim bsm exit" ``` And the pim configurations in running configuration are not removed after reloading: ``` interface A ip pim <- Wrong exit ``` After: ``` 2024-10-05 20:56:27,489 INFO: Executed "interface A no ip pim exit" ``` And all the pim configuration are removed. 
Signed-off-by: anlan_cs --- tools/frr-reload.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/frr-reload.py b/tools/frr-reload.py index 53bb6513e21a..08a1f1e07eac 100755 --- a/tools/frr-reload.py +++ b/tools/frr-reload.py @@ -1163,7 +1163,12 @@ def pim_delete_move_lines(lines_to_add, lines_to_del): ctx_keys[0] in pim_disable and ctx_keys[0].startswith("interface") and line - and (line.startswith("ip pim ") or line.startswith("ip multicast ")) + and ( + line.startswith("ip pim ") + or line.startswith("no ip pim ") + or line.startswith("ip multicast ") + or line.startswith("no ip multicast ") + ) ): lines_to_del_to_del.append((ctx_keys, line)) From 8eb5f4f5069d5b5be4673b4b3f8a72efa4c51fb6 Mon Sep 17 00:00:00 2001 From: Donna Sharp Date: Sun, 6 Oct 2024 18:41:09 -0400 Subject: [PATCH 37/73] zebra: remove unused function rib_lookup_ipv4 Signed-off-by: Donna Sharp --- zebra/rib.h | 3 --- zebra/zebra_rib.c | 39 --------------------------------------- 2 files changed, 42 deletions(-) diff --git a/zebra/rib.h b/zebra/rib.h index 071cc7b3dee9..5fedb07335ef 100644 --- a/zebra/rib.h +++ b/zebra/rib.h @@ -408,9 +408,6 @@ extern struct route_entry *rib_match_multicast(afi_t afi, vrf_id_t vrf_id, union g_addr *gaddr, struct route_node **rn_out); -extern struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p, - vrf_id_t vrf_id); - extern void rib_update(enum rib_update_event event); extern void rib_update_table(struct route_table *table, enum rib_update_event event, int rtype); diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index 721eca70a493..2d2be4fc780b 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -611,45 +611,6 @@ struct route_entry *rib_match_multicast(afi_t afi, vrf_id_t vrf_id, return re; } -struct route_entry *rib_lookup_ipv4(struct prefix_ipv4 *p, vrf_id_t vrf_id) -{ - struct route_table *table; - struct route_node *rn; - struct route_entry *match = NULL; - rib_dest_t *dest; - - /* Lookup table. */ - table = zebra_vrf_table(AFI_IP, SAFI_UNICAST, vrf_id); - if (!table) - return 0; - - rn = route_node_lookup(table, (struct prefix *)p); - - /* No route for this prefix. */ - if (!rn) - return NULL; - - /* Unlock node. */ - route_unlock_node(rn); - dest = rib_dest_from_rnode(rn); - - if (dest && dest->selected_fib - && !CHECK_FLAG(dest->selected_fib->status, ROUTE_ENTRY_REMOVED)) - match = dest->selected_fib; - - if (!match) - return NULL; - - if (match->type == ZEBRA_ROUTE_CONNECT || - match->type == ZEBRA_ROUTE_LOCAL) - return match; - - if (CHECK_FLAG(match->status, ROUTE_ENTRY_INSTALLED)) - return match; - - return NULL; -} - /* * Is this RIB labeled-unicast? It must be of type BGP and all paths * (nexthops) must have a label. 
From b6dd4ff8bc7d52e2e5f379ecdaddaf44e37e8418 Mon Sep 17 00:00:00 2001 From: Donna Sharp Date: Sun, 6 Oct 2024 19:08:44 -0400 Subject: [PATCH 38/73] zebra: remove unused function from tc_netlink.c Signed-off-by: Donna Sharp --- zebra/tc_netlink.c | 19 ------------------- zebra/tc_netlink.h | 2 -- 2 files changed, 21 deletions(-) diff --git a/zebra/tc_netlink.c b/zebra/tc_netlink.c index 19667e66acc0..2f005d6b0a27 100644 --- a/zebra/tc_netlink.c +++ b/zebra/tc_netlink.c @@ -852,23 +852,4 @@ int netlink_qdisc_read(struct zebra_ns *zns) return 0; } -int netlink_tfilter_read_for_interface(struct zebra_ns *zns, ifindex_t ifindex) -{ - int ret; - struct zebra_dplane_info dp_info; - - zebra_dplane_info_from_zns(&dp_info, zns, true); - - ret = netlink_request_filters(zns, AF_UNSPEC, RTM_GETTFILTER, ifindex); - if (ret < 0) - return ret; - - ret = netlink_parse_info(netlink_tfilter_change, &zns->netlink_cmd, - &dp_info, 0, true); - if (ret < 0) - return ret; - - return 0; -} - #endif /* HAVE_NETLINK */ diff --git a/zebra/tc_netlink.h b/zebra/tc_netlink.h index 5e95e6c1d80d..300c53b6f587 100644 --- a/zebra/tc_netlink.h +++ b/zebra/tc_netlink.h @@ -50,8 +50,6 @@ netlink_put_tc_filter_update_msg(struct nl_batch *bth, */ extern int netlink_qdisc_read(struct zebra_ns *zns); -extern int netlink_tfilter_read_for_interface(struct zebra_ns *zns, - ifindex_t ifindex); extern int netlink_tfilter_change(struct nlmsghdr *h, ns_id_t ns_id, int startup); From 7a63799a843b3e001bef05f2fdd6f8082b778abf Mon Sep 17 00:00:00 2001 From: Donna Sharp Date: Sun, 6 Oct 2024 19:25:44 -0400 Subject: [PATCH 39/73] zebra: remove unused function from if_netlink.c Signed-off-by: Donna Sharp --- zebra/if_netlink.c | 206 -------------------------------------------- zebra/if_netlink.h | 3 - zebra/zebra_trace.h | 14 --- 3 files changed, 223 deletions(-) diff --git a/zebra/if_netlink.c b/zebra/if_netlink.c index 5fb908eb0d70..8beae125d2da 100644 --- a/zebra/if_netlink.c +++ b/zebra/if_netlink.c @@ -1032,212 +1032,6 @@ netlink_put_intf_update_msg(struct nl_batch *bth, struct zebra_dplane_ctx *ctx) return netlink_batch_add_msg(bth, ctx, netlink_intf_msg_encoder, false); } -int netlink_interface_addr(struct nlmsghdr *h, ns_id_t ns_id, int startup) -{ - int len; - struct ifaddrmsg *ifa; - struct rtattr *tb[IFA_MAX + 1]; - struct interface *ifp; - void *addr; - void *broad; - uint8_t flags = 0; - char *label = NULL; - struct zebra_ns *zns; - uint32_t metric = METRIC_MAX; - uint32_t kernel_flags = 0; - - frrtrace(3, frr_zebra, netlink_interface_addr, h, ns_id, startup); - - zns = zebra_ns_lookup(ns_id); - ifa = NLMSG_DATA(h); - - if (ifa->ifa_family != AF_INET && ifa->ifa_family != AF_INET6) { - flog_warn( - EC_ZEBRA_UNKNOWN_FAMILY, - "Invalid address family: %u received from kernel interface addr change: %s", - ifa->ifa_family, nl_msg_type_to_str(h->nlmsg_type)); - return 0; - } - - if (h->nlmsg_type != RTM_NEWADDR && h->nlmsg_type != RTM_DELADDR) - return 0; - - len = h->nlmsg_len - NLMSG_LENGTH(sizeof(struct ifaddrmsg)); - if (len < 0) { - zlog_err( - "%s: Message received from netlink is of a broken size: %d %zu", - __func__, h->nlmsg_len, - (size_t)NLMSG_LENGTH(sizeof(struct ifaddrmsg))); - return -1; - } - - netlink_parse_rtattr(tb, IFA_MAX, IFA_RTA(ifa), len); - - ifp = if_lookup_by_index_per_ns(zns, ifa->ifa_index); - if (ifp == NULL) { - if (startup) { - /* During startup, failure to lookup the referenced - * interface should not be an error, so we have - * downgraded this condition to warning, and we permit - * the startup 
interface state retrieval to continue. - */ - flog_warn(EC_LIB_INTERFACE, - "%s: can't find interface by index %d", - __func__, ifa->ifa_index); - return 0; - } else { - flog_err(EC_LIB_INTERFACE, - "%s: can't find interface by index %d", - __func__, ifa->ifa_index); - return -1; - } - } - - /* Flags passed through */ - if (tb[IFA_FLAGS]) - kernel_flags = *(int *)RTA_DATA(tb[IFA_FLAGS]); - else - kernel_flags = ifa->ifa_flags; - - if (IS_ZEBRA_DEBUG_KERNEL) /* remove this line to see initial ifcfg */ - { - char buf[BUFSIZ]; - zlog_debug("%s %s %s flags 0x%x:", __func__, - nl_msg_type_to_str(h->nlmsg_type), ifp->name, - kernel_flags); - if (tb[IFA_LOCAL]) - zlog_debug(" IFA_LOCAL %s/%d", - inet_ntop(ifa->ifa_family, - RTA_DATA(tb[IFA_LOCAL]), buf, - BUFSIZ), - ifa->ifa_prefixlen); - if (tb[IFA_ADDRESS]) - zlog_debug(" IFA_ADDRESS %s/%d", - inet_ntop(ifa->ifa_family, - RTA_DATA(tb[IFA_ADDRESS]), buf, - BUFSIZ), - ifa->ifa_prefixlen); - if (tb[IFA_BROADCAST]) - zlog_debug(" IFA_BROADCAST %s/%d", - inet_ntop(ifa->ifa_family, - RTA_DATA(tb[IFA_BROADCAST]), buf, - BUFSIZ), - ifa->ifa_prefixlen); - if (tb[IFA_LABEL] && strcmp(ifp->name, RTA_DATA(tb[IFA_LABEL]))) - zlog_debug(" IFA_LABEL %s", - (char *)RTA_DATA(tb[IFA_LABEL])); - - if (tb[IFA_CACHEINFO]) { - struct ifa_cacheinfo *ci = RTA_DATA(tb[IFA_CACHEINFO]); - zlog_debug(" IFA_CACHEINFO pref %d, valid %d", - ci->ifa_prefered, ci->ifa_valid); - } - } - - /* logic copied from iproute2/ip/ipaddress.c:print_addrinfo() */ - if (tb[IFA_LOCAL] == NULL) - tb[IFA_LOCAL] = tb[IFA_ADDRESS]; - if (tb[IFA_ADDRESS] == NULL) - tb[IFA_ADDRESS] = tb[IFA_LOCAL]; - - /* local interface address */ - addr = (tb[IFA_LOCAL] ? RTA_DATA(tb[IFA_LOCAL]) : NULL); - - /* is there a peer address? */ - if (tb[IFA_ADDRESS] - && memcmp(RTA_DATA(tb[IFA_ADDRESS]), RTA_DATA(tb[IFA_LOCAL]), - RTA_PAYLOAD(tb[IFA_ADDRESS]))) { - broad = RTA_DATA(tb[IFA_ADDRESS]); - SET_FLAG(flags, ZEBRA_IFA_PEER); - } else - /* seeking a broadcast address */ - broad = (tb[IFA_BROADCAST] ? RTA_DATA(tb[IFA_BROADCAST]) - : NULL); - - /* addr is primary key, SOL if we don't have one */ - if (addr == NULL) { - zlog_debug("%s: Local Interface Address is NULL for %s", - __func__, ifp->name); - return -1; - } - - /* Flags. */ - if (kernel_flags & IFA_F_SECONDARY) - SET_FLAG(flags, ZEBRA_IFA_SECONDARY); - - /* Label */ - if (tb[IFA_LABEL]) - label = (char *)RTA_DATA(tb[IFA_LABEL]); - - if (label && strcmp(ifp->name, label) == 0) - label = NULL; - - if (tb[IFA_RT_PRIORITY]) - metric = *(uint32_t *)RTA_DATA(tb[IFA_RT_PRIORITY]); - - /* Register interface address to the interface. 
*/ - if (ifa->ifa_family == AF_INET) { - if (ifa->ifa_prefixlen > IPV4_MAX_BITLEN) { - zlog_err( - "Invalid prefix length: %u received from kernel interface addr change: %s", - ifa->ifa_prefixlen, - nl_msg_type_to_str(h->nlmsg_type)); - return -1; - } - - if (h->nlmsg_type == RTM_NEWADDR) - connected_add_ipv4(ifp, flags, (struct in_addr *)addr, - ifa->ifa_prefixlen, - (struct in_addr *)broad, label, - metric); - else if (CHECK_FLAG(flags, ZEBRA_IFA_PEER)) { - /* Delete with a peer address */ - connected_delete_ipv4( - ifp, flags, (struct in_addr *)addr, - ifa->ifa_prefixlen, broad); - } else - connected_delete_ipv4( - ifp, flags, (struct in_addr *)addr, - ifa->ifa_prefixlen, NULL); - } - - if (ifa->ifa_family == AF_INET6) { - if (ifa->ifa_prefixlen > IPV6_MAX_BITLEN) { - zlog_err( - "Invalid prefix length: %u received from kernel interface addr change: %s", - ifa->ifa_prefixlen, - nl_msg_type_to_str(h->nlmsg_type)); - return -1; - } - if (h->nlmsg_type == RTM_NEWADDR) { - /* Only consider valid addresses; we'll not get a - * notification from - * the kernel till IPv6 DAD has completed, but at init - * time, Quagga - * does query for and will receive all addresses. - */ - if (!(kernel_flags - & (IFA_F_DADFAILED | IFA_F_TENTATIVE))) - connected_add_ipv6(ifp, flags, - (struct in6_addr *)addr, - (struct in6_addr *)broad, - ifa->ifa_prefixlen, label, - metric); - } else - connected_delete_ipv6(ifp, (struct in6_addr *)addr, - NULL, ifa->ifa_prefixlen); - } - - /* - * Linux kernel does not send route delete on interface down/addr del - * so we have to re-process routes it owns (i.e. kernel routes) - */ - if (h->nlmsg_type != RTM_NEWADDR) - rib_update(RIB_UPDATE_KERNEL); - - return 0; -} - /* * Parse and validate an incoming interface address change message, * generating a dplane context object. diff --git a/zebra/if_netlink.h b/zebra/if_netlink.h index 9b31906a17da..dc1f71cb7769 100644 --- a/zebra/if_netlink.h +++ b/zebra/if_netlink.h @@ -12,9 +12,6 @@ extern "C" { #endif -extern int netlink_interface_addr(struct nlmsghdr *h, ns_id_t ns_id, - int startup); - /* * Parse an incoming interface address change message, generate a dplane * context object for processing. 
diff --git a/zebra/zebra_trace.h b/zebra/zebra_trace.h index 17528c4bf1f8..645156246698 100644 --- a/zebra/zebra_trace.h +++ b/zebra/zebra_trace.h @@ -68,20 +68,6 @@ TRACEPOINT_EVENT( ) ) -TRACEPOINT_EVENT( - frr_zebra, - netlink_interface_addr, - TP_ARGS( - struct nlmsghdr *, header, - ns_id_t, ns_id, - int, startup), - TP_FIELDS( - ctf_integer_hex(intptr_t, header, header) - ctf_integer(uint32_t, ns_id, ns_id) - ctf_integer(uint32_t, startup, startup) - ) - ) - TRACEPOINT_EVENT( frr_zebra, netlink_route_change_read_unicast, From 103f24485c96b37cf80afce1694454693a69327c Mon Sep 17 00:00:00 2001 From: Donna Sharp Date: Sun, 6 Oct 2024 19:30:56 -0400 Subject: [PATCH 40/73] zebra: remove unsued function from tc_netlink.c Signed-off-by: Donna Sharp --- zebra/tc_netlink.c | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/zebra/tc_netlink.c b/zebra/tc_netlink.c index 2f005d6b0a27..3c4db0090ccb 100644 --- a/zebra/tc_netlink.c +++ b/zebra/tc_netlink.c @@ -660,27 +660,6 @@ netlink_put_tc_filter_update_msg(struct nl_batch *bth, return ret; } -/* - * Request filters from the kernel - */ -static int netlink_request_filters(struct zebra_ns *zns, int family, int type, - ifindex_t ifindex) -{ - struct { - struct nlmsghdr n; - struct tcmsg tc; - } req; - - memset(&req, 0, sizeof(req)); - req.n.nlmsg_type = type; - req.n.nlmsg_flags = NLM_F_ROOT | NLM_F_MATCH | NLM_F_REQUEST; - req.n.nlmsg_len = NLMSG_LENGTH(sizeof(struct tcmsg)); - req.tc.tcm_family = family; - req.tc.tcm_ifindex = ifindex; - - return netlink_request(&zns->netlink_cmd, &req); -} - /* * Request queue discipline from the kernel */ From f62dfc5d53ff0de904d475c1dd87b074e32ca480 Mon Sep 17 00:00:00 2001 From: Donna Sharp Date: Sun, 6 Oct 2024 19:40:49 -0400 Subject: [PATCH 41/73] lib,zebra: remove unused ZEBRA_VRF_UNREGISTER Signed-off-by: Donna Sharp --- lib/log.c | 1 - lib/zclient.h | 1 - zebra/zapi_msg.c | 17 ----------------- 3 files changed, 19 deletions(-) diff --git a/lib/log.c b/lib/log.c index 880180ae5a94..04b789b5da5d 100644 --- a/lib/log.c +++ b/lib/log.c @@ -351,7 +351,6 @@ static const struct zebra_desc_table command_types[] = { DESC_ENTRY(ZEBRA_BFD_DEST_REPLAY), DESC_ENTRY(ZEBRA_REDISTRIBUTE_ROUTE_ADD), DESC_ENTRY(ZEBRA_REDISTRIBUTE_ROUTE_DEL), - DESC_ENTRY(ZEBRA_VRF_UNREGISTER), DESC_ENTRY(ZEBRA_VRF_ADD), DESC_ENTRY(ZEBRA_VRF_DELETE), DESC_ENTRY(ZEBRA_VRF_LABEL), diff --git a/lib/zclient.h b/lib/zclient.h index 2877b347d8d0..91c0c9ed6d3d 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -124,7 +124,6 @@ typedef enum { ZEBRA_BFD_DEST_REPLAY, ZEBRA_REDISTRIBUTE_ROUTE_ADD, ZEBRA_REDISTRIBUTE_ROUTE_DEL, - ZEBRA_VRF_UNREGISTER, ZEBRA_VRF_ADD, ZEBRA_VRF_DELETE, ZEBRA_VRF_LABEL, diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index c45c61a208ac..b13d58f99d12 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -2477,22 +2477,6 @@ static void zread_hello(ZAPI_HANDLER_ARGS) return; } -/* Unregister all information in a VRF. 
*/ -static void zread_vrf_unregister(ZAPI_HANDLER_ARGS) -{ - int i; - afi_t afi; - - for (afi = AFI_IP; afi < AFI_MAX; afi++) { - for (i = 0; i < ZEBRA_ROUTE_MAX; i++) - vrf_bitmap_unset(&client->redist[afi][i], - zvrf_id(zvrf)); - vrf_bitmap_unset(&client->redist_default[afi], zvrf_id(zvrf)); - vrf_bitmap_unset(&client->ridinfo[afi], zvrf_id(zvrf)); - vrf_bitmap_unset(&client->neighinfo[afi], zvrf_id(zvrf)); - } -} - /* * Validate incoming zapi mpls lsp / labels message */ @@ -4055,7 +4039,6 @@ void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = { #if HAVE_BFDD > 0 [ZEBRA_BFD_DEST_REPLAY] = zebra_ptm_bfd_dst_replay, #endif /* HAVE_BFDD */ - [ZEBRA_VRF_UNREGISTER] = zread_vrf_unregister, [ZEBRA_VRF_LABEL] = zread_vrf_label, [ZEBRA_BFD_CLIENT_REGISTER] = zebra_ptm_bfd_client_register, [ZEBRA_INTERFACE_ENABLE_RADV] = zebra_interface_radv_enable, From 424cec61c0860c74436954a1f8ba6ee7fe8e04a5 Mon Sep 17 00:00:00 2001 From: anlan_cs Date: Sun, 6 Oct 2024 21:06:15 +0800 Subject: [PATCH 42/73] isisd: fix wrong check for MT commands ``` anlan# show run ! interface eth0 ip router isis A exit ! router isis A metric-style narrow <- NOT wide exit ! end anlan (config)# int eth0 anlan (config-if)# no isis topology ipv6-unicast % Configuration failed. Error type: validation Error description: Multi topology IS-IS can only be used with wide metrics ``` The MT commands are mainly controlled by the binded area, not by interface. Currently if there is any MT configuration in the area, `metric-style` must be with the `wide` mode, this requirement is sufficient. So, the unnecessary/wrong check for MT in the interface should be removed. Signed-off-by: anlan_cs --- isisd/isis_nb_config.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/isisd/isis_nb_config.c b/isisd/isis_nb_config.c index 7286a692f596..0f0c900ec243 100644 --- a/isisd/isis_nb_config.c +++ b/isisd/isis_nb_config.c @@ -4297,14 +4297,6 @@ static int lib_interface_isis_multi_topology_common( switch (event) { case NB_EV_VALIDATE: - circuit = nb_running_get_entry(dnode, NULL, false); - if (circuit && circuit->area && circuit->area->oldmetric) { - snprintf( - errmsg, errmsg_len, - "Multi topology IS-IS can only be used with wide metrics"); - return NB_ERR_VALIDATION; - } - break; case NB_EV_PREPARE: case NB_EV_ABORT: break; From e8648a0c72f89d4ee5abe271ba079df71c4c1418 Mon Sep 17 00:00:00 2001 From: Christian Hopps Date: Mon, 7 Oct 2024 03:23:31 +0000 Subject: [PATCH 43/73] lib: add flag to have libyang load internal ietf-yang-library module Mgmtd makes use of libyang's internal ietf-yang-library module to add support for said module to FRR management. Previously, mgmtd was loading this module explicitly; however, that required that libyang's `ietf-yang-library.yang` module definition file be co-located with FRR's yang files so that it (and ietf-datastore.yang) would be found when searched for by libyang using FRRs search path. This isn't always the case depending on how the user compiles and installs libyang so mgmtd was failing to run in some cases. Instead of doing it the above way we simply tell libyang to load it's internal version of ietf-yang-library when we initialize the libyang context. This required adding a boolean to a couple of the init functions which is why so many files are touched (although all the changes are minimal). 
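As a minimal sketch of the mechanism, the new boolean only decides whether LY_CTX_NO_YANGLIBRARY is added to the libyang context options; the helper name below is invented for illustration, while the real logic sits inline in the patched yang_ctx_new_setup() and the LY_CTX_* constants are the flags used there.

```
#include <stdbool.h>
#include <libyang/libyang.h>

/* Hypothetical helper summarizing the option computation. */
static unsigned int frr_ly_ctx_options(bool explicit_compile, bool load_library)
{
	unsigned int options = LY_CTX_DISABLE_SEARCHDIR_CWD;

	/* Only a daemon that sets FRR_LOAD_YANG_LIBRARY (mgmtd in this
	 * series) lets libyang implement its internal ietf-yang-library;
	 * every other daemon keeps suppressing it. */
	if (!load_library)
		options |= LY_CTX_NO_YANGLIBRARY;
	if (explicit_compile)
		options |= LY_CTX_EXPLICIT_COMPILE;

	return options;
}
```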
Signed-off-by: Christian Hopps --- lib/grammar_sandbox_main.c | 2 +- lib/libfrr.c | 3 ++- lib/libfrr.h | 5 +++++ lib/northbound.c | 4 ++-- lib/northbound.h | 8 +++++--- lib/yang.c | 10 ++++++---- lib/yang.h | 10 +++++++--- lib/yang_translator.c | 4 ++-- mgmtd/mgmt.c | 3 --- mgmtd/mgmt_main.c | 2 +- tests/bgpd/test_peer_attr.c | 2 +- tests/helpers/c/main.c | 2 +- tests/isisd/test_isis_spf.c | 2 +- tests/lib/cli/common_cli.c | 2 +- tests/lib/cli/test_commands.c | 2 +- tests/lib/northbound/test_oper_data.c | 2 +- tests/lib/test_grpc.cpp | 3 +-- tools/gen_northbound_callbacks.c | 2 +- tools/gen_yang_deviations.c | 2 +- 19 files changed, 40 insertions(+), 30 deletions(-) diff --git a/lib/grammar_sandbox_main.c b/lib/grammar_sandbox_main.c index abd42f359f05..05088d52d113 100644 --- a/lib/grammar_sandbox_main.c +++ b/lib/grammar_sandbox_main.c @@ -40,7 +40,7 @@ int main(int argc, char **argv) vty_init(master, true); lib_cmd_init(); - nb_init(master, NULL, 0, false); + nb_init(master, NULL, 0, false, false); vty_stdio(vty_do_exit); diff --git a/lib/libfrr.c b/lib/libfrr.c index a1982841d3a7..f2247a48e555 100644 --- a/lib/libfrr.c +++ b/lib/libfrr.c @@ -820,7 +820,8 @@ struct event_loop *frr_init(void) log_ref_vty_init(); lib_error_init(); - nb_init(master, di->yang_modules, di->n_yang_modules, true); + nb_init(master, di->yang_modules, di->n_yang_modules, true, + (di->flags & FRR_LOAD_YANG_LIBRARY) != 0); if (nb_db_init() != NB_OK) flog_warn(EC_LIB_NB_DATABASE, "%s: failed to initialize northbound database", diff --git a/lib/libfrr.h b/lib/libfrr.h index 7ed7be4d984d..df537e2e3b43 100644 --- a/lib/libfrr.h +++ b/lib/libfrr.h @@ -46,6 +46,11 @@ extern "C" { * is responsible for calling frr_vty_serv() itself. */ #define FRR_MANUAL_VTY_START (1 << 7) +/* If FRR_LOAD_YANG_LIBRARY is set then libyang will be told to load and + * implement it's internal ietf-yang-library implementation. This should + * normally only be done from mgmtd. + */ +#define FRR_LOAD_YANG_LIBRARY (1 << 8) PREDECL_DLIST(log_args); struct log_arg { diff --git a/lib/northbound.c b/lib/northbound.c index 2dae21341e59..a385cc9ece94 100644 --- a/lib/northbound.c +++ b/lib/northbound.c @@ -2701,7 +2701,7 @@ void nb_validate_callbacks(void) void nb_init(struct event_loop *tm, const struct frr_yang_module_info *const modules[], - size_t nmodules, bool db_enabled) + size_t nmodules, bool db_enabled, bool load_library) { struct yang_module *loaded[nmodules], **loadedp = loaded; @@ -2717,7 +2717,7 @@ void nb_init(struct event_loop *tm, nb_db_enabled = db_enabled; - yang_init(true, explicit_compile); + yang_init(true, explicit_compile, load_library); /* Load YANG modules and their corresponding northbound callbacks. */ for (size_t i = 0; i < nmodules; i++) { diff --git a/lib/northbound.h b/lib/northbound.h index dd3fbf8f7341..97a1d31e5792 100644 --- a/lib/northbound.h +++ b/lib/northbound.h @@ -1703,10 +1703,12 @@ void nb_validate_callbacks(void); * * db_enabled * Set this to record the transactions in the transaction log. + * + * load_library + * Set this to have libyang to load/implement the ietf-yang-library. */ -extern void nb_init(struct event_loop *tm, - const struct frr_yang_module_info *const modules[], - size_t nmodules, bool db_enabled); +extern void nb_init(struct event_loop *tm, const struct frr_yang_module_info *const modules[], + size_t nmodules, bool db_enabled, bool load_library); /* * Finish the northbound layer gracefully. 
Should be called only when the daemon diff --git a/lib/yang.c b/lib/yang.c index 14d5b118c652..b847b8b77b68 100644 --- a/lib/yang.c +++ b/lib/yang.c @@ -976,7 +976,7 @@ void yang_debugging_set(bool enable) } } -struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_compile) +struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_compile, bool load_library) { struct ly_ctx *ctx = NULL; const char *yang_models_path = YANG_MODELS_PATH; @@ -994,7 +994,9 @@ struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_compile) YANG_MODELS_PATH); } - options = LY_CTX_NO_YANGLIBRARY | LY_CTX_DISABLE_SEARCHDIR_CWD; + options = LY_CTX_DISABLE_SEARCHDIR_CWD; + if (!load_library) + options |= LY_CTX_NO_YANGLIBRARY; if (explicit_compile) options |= LY_CTX_EXPLICIT_COMPILE; err = ly_ctx_new(yang_models_path, options, &ctx); @@ -1007,7 +1009,7 @@ struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_compile) return ctx; } -void yang_init(bool embedded_modules, bool defer_compile) +void yang_init(bool embedded_modules, bool defer_compile, bool load_library) { /* Initialize libyang global parameters that affect all containers. */ ly_set_log_clb(ly_zlog_cb @@ -1019,7 +1021,7 @@ void yang_init(bool embedded_modules, bool defer_compile) ly_log_options(LY_LOLOG | LY_LOSTORE); /* Initialize libyang container for native models. */ - ly_native_ctx = yang_ctx_new_setup(embedded_modules, defer_compile); + ly_native_ctx = yang_ctx_new_setup(embedded_modules, defer_compile, load_library); if (!ly_native_ctx) { flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); exit(1); diff --git a/lib/yang.h b/lib/yang.h index c4fc78b8aed7..52857ecf003f 100644 --- a/lib/yang.h +++ b/lib/yang.h @@ -607,9 +607,11 @@ extern struct yang_data *yang_data_list_find(const struct list *list, * explicit_compile * True if the caller will later call ly_ctx_compile to compile all loaded * modules at once. + * load_library + * Set this to have libyang to load/implement the ietf-yang-library. */ -extern struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, - bool explicit_compile); +extern struct ly_ctx *yang_ctx_new_setup(bool embedded_modules, bool explicit_compile, + bool load_library); /* * Enable or disable libyang verbose debugging. @@ -727,8 +729,10 @@ extern const char *yang_print_errors(struct ly_ctx *ly_ctx, char *buf, * Specify whether libyang should attempt to look for embedded YANG modules. * defer_compile * Hold off on compiling modules until yang_init_loading_complete is called. + * load_library + * Set this to have libyang to load/implement the ietf-yang-library. */ -extern void yang_init(bool embedded_modules, bool defer_compile); +extern void yang_init(bool embedded_modules, bool defer_compile, bool load_library); /* * Should be called after yang_init and all yang_module_load()s have been done, diff --git a/lib/yang_translator.c b/lib/yang_translator.c index 005f6422f3b3..b7599e0a7120 100644 --- a/lib/yang_translator.c +++ b/lib/yang_translator.c @@ -166,7 +166,7 @@ struct yang_translator *yang_translator_load(const char *path) RB_INSERT(yang_translators, &yang_translators, translator); /* Initialize the translator libyang context. 
*/ - translator->ly_ctx = yang_ctx_new_setup(false, false); + translator->ly_ctx = yang_ctx_new_setup(false, false, false); if (!translator->ly_ctx) { flog_warn(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); goto error; @@ -512,7 +512,7 @@ static unsigned int yang_module_nodes_count(const struct lys_module *module) void yang_translator_init(void) { - ly_translator_ctx = yang_ctx_new_setup(true, false); + ly_translator_ctx = yang_ctx_new_setup(true, false, false); if (!ly_translator_ctx) { flog_err(EC_LIB_LIBYANG, "%s: ly_ctx_new() failed", __func__); exit(1); diff --git a/mgmtd/mgmt.c b/mgmtd/mgmt.c index 02c54b921542..cfadad4829e2 100644 --- a/mgmtd/mgmt.c +++ b/mgmtd/mgmt.c @@ -57,9 +57,6 @@ void mgmt_init(void) /* Initialize MGMTD Transaction module */ mgmt_txn_init(mm, mm->master); - /* Add yang-library module */ - yang_module_load("ietf-yang-library", NULL); - /* Initialize the MGMTD Frontend Adapter Module */ mgmt_fe_adapter_init(mm->master); diff --git a/mgmtd/mgmt_main.c b/mgmtd/mgmt_main.c index e181d0da5ef0..1880d94415fa 100644 --- a/mgmtd/mgmt_main.c +++ b/mgmtd/mgmt_main.c @@ -214,7 +214,7 @@ FRR_DAEMON_INFO(mgmtd, MGMTD, .n_yang_modules = array_size(mgmt_yang_modules), /* avoid libfrr trying to read our config file for us */ - .flags = FRR_MANUAL_VTY_START | FRR_NO_SPLIT_CONFIG, + .flags = FRR_MANUAL_VTY_START | FRR_NO_SPLIT_CONFIG | FRR_LOAD_YANG_LIBRARY, ); /* clang-format on */ diff --git a/tests/bgpd/test_peer_attr.c b/tests/bgpd/test_peer_attr.c index d5faa33ca875..17002464e14e 100644 --- a/tests/bgpd/test_peer_attr.c +++ b/tests/bgpd/test_peer_attr.c @@ -1355,7 +1355,7 @@ static void bgp_startup(void) zprivs_init(&bgpd_privs); master = event_master_create(NULL); - nb_init(master, NULL, 0, false); + nb_init(master, NULL, 0, false, false); bgp_master_init(master, BGP_SOCKET_SNDBUF_SIZE, list_new()); bgp_option_set(BGP_OPT_NO_LISTEN); vrf_init(NULL, NULL, NULL, NULL); diff --git a/tests/helpers/c/main.c b/tests/helpers/c/main.c index 9cb395bb1f73..344af82fca5a 100644 --- a/tests/helpers/c/main.c +++ b/tests/helpers/c/main.c @@ -143,7 +143,7 @@ int main(int argc, char **argv) vty_init(master, false); lib_cmd_init(); debug_init(); - nb_init(master, NULL, 0, false); + nb_init(master, NULL, 0, false, false); /* OSPF vty inits. 
*/ test_vty_init(); diff --git a/tests/isisd/test_isis_spf.c b/tests/isisd/test_isis_spf.c index 93009a1b84e1..e5a8f7a51392 100644 --- a/tests/isisd/test_isis_spf.c +++ b/tests/isisd/test_isis_spf.c @@ -546,7 +546,7 @@ int main(int argc, char **argv) cmd_init(1); cmd_hostname_set("test"); vty_init(master, false); - yang_init(true, false); + yang_init(true, false, false); if (debug) zlog_aux_init("NONE: ", LOG_DEBUG); else diff --git a/tests/lib/cli/common_cli.c b/tests/lib/cli/common_cli.c index 640197143590..342a91cc79a7 100644 --- a/tests/lib/cli/common_cli.c +++ b/tests/lib/cli/common_cli.c @@ -77,7 +77,7 @@ int main(int argc, char **argv) for (yangcount = 0; test_yang_modules && test_yang_modules[yangcount]; yangcount++) ; - nb_init(master, test_yang_modules, yangcount, false); + nb_init(master, test_yang_modules, yangcount, false, false); test_init(argc, argv); diff --git a/tests/lib/cli/test_commands.c b/tests/lib/cli/test_commands.c index 0034c2af8960..9873383fdcf7 100644 --- a/tests/lib/cli/test_commands.c +++ b/tests/lib/cli/test_commands.c @@ -197,7 +197,7 @@ static void test_init(void) cmd_init(1); debug_init(); - nb_init(master, NULL, 0, false); + nb_init(master, NULL, 0, false, false); install_node(&bgp_node); install_node(&rip_node); diff --git a/tests/lib/northbound/test_oper_data.c b/tests/lib/northbound/test_oper_data.c index 74a0dfe6cc25..fdc9e53ca3ea 100644 --- a/tests/lib/northbound/test_oper_data.c +++ b/tests/lib/northbound/test_oper_data.c @@ -461,7 +461,7 @@ int main(int argc, char **argv) vty_init(master, false); lib_cmd_init(); debug_init(); - nb_init(master, modules, array_size(modules), false); + nb_init(master, modules, array_size(modules), false, false); install_element(ENABLE_NODE, &test_rpc_cmd); diff --git a/tests/lib/test_grpc.cpp b/tests/lib/test_grpc.cpp index 2f0282704e15..379a8688a7b8 100644 --- a/tests/lib/test_grpc.cpp +++ b/tests/lib/test_grpc.cpp @@ -111,8 +111,7 @@ static void static_startup(void) static_debug_init(); master = event_master_create(NULL); - nb_init(master, staticd_yang_modules, array_size(staticd_yang_modules), - false); + nb_init(master, staticd_yang_modules, array_size(staticd_yang_modules), false, false); static_zebra_init(); vty_init(master, true); diff --git a/tools/gen_northbound_callbacks.c b/tools/gen_northbound_callbacks.c index a87981136357..046dc9e99ef1 100644 --- a/tools/gen_northbound_callbacks.c +++ b/tools/gen_northbound_callbacks.c @@ -448,7 +448,7 @@ int main(int argc, char *argv[]) if (argc != 1) usage(EXIT_FAILURE); - yang_init(false, true); + yang_init(false, true, false); if (search_path) ly_ctx_set_searchdir(ly_native_ctx, search_path); diff --git a/tools/gen_yang_deviations.c b/tools/gen_yang_deviations.c index 251643c69ebb..c2e7fd91c617 100644 --- a/tools/gen_yang_deviations.c +++ b/tools/gen_yang_deviations.c @@ -52,7 +52,7 @@ int main(int argc, char *argv[]) if (argc != 1) usage(EXIT_FAILURE); - yang_init(false, false); + yang_init(false, false, false); /* Load YANG module. */ module = yang_module_load(argv[0], NULL); From 5e1a3cd2ad1148f60f5a59474bcf3d176b9ab789 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Mon, 7 Oct 2024 13:02:44 -0300 Subject: [PATCH 44/73] vrrpd: iterate over all ancillary messages Assign the return of `CMSG_NXTHDR` so we can really iterate over the ancillary data. 
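Without the assignment, `c` never advances, so only the first ancillary header is ever
examined and the loop can spin forever when that header is not the IPV6_HOPLIMIT one
being searched for. For reference, a generic sketch of the canonical iteration idiom
(illustrative only, not the vrrpd code; the helper name is made up):

    #include <stddef.h>
    #include <sys/socket.h>

    /* Return the first ancillary header matching level/type, or NULL. */
    static struct cmsghdr *find_cmsg(struct msghdr *m, int level, int type)
    {
        struct cmsghdr *c;

        for (c = CMSG_FIRSTHDR(m); c != NULL; c = CMSG_NXTHDR(m, c)) {
            if (c->cmsg_level == level && c->cmsg_type == type)
                return c;
        }

        return NULL;
    }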
Signed-off-by: Rafael Zalamena --- vrrpd/vrrp_packet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vrrpd/vrrp_packet.c b/vrrpd/vrrp_packet.c index 36494c7df8b7..a2fb2bc32143 100644 --- a/vrrpd/vrrp_packet.c +++ b/vrrpd/vrrp_packet.c @@ -234,7 +234,7 @@ ssize_t vrrp_pkt_parse_datagram(int family, int version, bool ipv4_ph, } else if (family == AF_INET6) { struct cmsghdr *c; - for (c = CMSG_FIRSTHDR(m); c != NULL; CMSG_NXTHDR(m, c)) { + for (c = CMSG_FIRSTHDR(m); c != NULL; c = CMSG_NXTHDR(m, c)) { if (c->cmsg_level == IPPROTO_IPV6 && c->cmsg_type == IPV6_HOPLIMIT) break; From a49acba1d422e13235ca1b0891073e43af4a1605 Mon Sep 17 00:00:00 2001 From: Jafar Al-Gharaibeh Date: Mon, 7 Oct 2024 18:20:34 -0500 Subject: [PATCH 45/73] pimd: fix a possible crash when enabling debug autorp Signed-off-by: Jafar Al-Gharaibeh --- pimd/pim_autorp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pimd/pim_autorp.c b/pimd/pim_autorp.c index 1f4d0c65af67..35347a2790c8 100644 --- a/pimd/pim_autorp.c +++ b/pimd/pim_autorp.c @@ -290,8 +290,8 @@ static bool pim_autorp_add_rp(struct pim_autorp *autorp, pim_addr rpaddr, event_add_timer(router->master, autorp_rp_holdtime, trp, holdtime, &(trp->hold_timer)); if (PIM_DEBUG_AUTORP) - zlog_debug("%s: Started %u second hold timer for RP %pI4", - __func__, holdtime, &rp->addr); + zlog_debug("%s: Started %u second hold timer for RP %pI4", __func__, + holdtime, &trp->addr); } else { /* If hold time is zero, make sure there doesn't exist a hold timer for it already */ event_cancel(&trp->hold_timer); From f50b1f7c226753c1f4c0ccfeeb78271106924cd8 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Fri, 4 Oct 2024 09:38:25 -0400 Subject: [PATCH 46/73] zebra: Move pw status settting until after we get results Currently the pw code sets the status of the pw for install and uninstall immediately when notifying the dplane. This is incorrect in that we do not actually know the status at this point in time. When we get the result is when to set the status. 
Signed-off-by: Donald Sharp --- zebra/zebra_pw.c | 26 +++++++++++++++++++++----- zebra/zebra_pw.h | 1 + zebra/zebra_rib.c | 25 +------------------------ 3 files changed, 23 insertions(+), 29 deletions(-) diff --git a/zebra/zebra_pw.c b/zebra/zebra_pw.c index deed3b6ad3c3..d7128a1f3946 100644 --- a/zebra/zebra_pw.c +++ b/zebra/zebra_pw.c @@ -170,9 +170,6 @@ static void zebra_pw_install(struct zebra_pw *pw) zebra_pw_install_failure(pw, PW_NOT_FORWARDING); return; } - - if (pw->status != PW_FORWARDING) - zebra_pw_update_status(pw, PW_FORWARDING); } static void zebra_pw_uninstall(struct zebra_pw *pw) @@ -188,9 +185,28 @@ static void zebra_pw_uninstall(struct zebra_pw *pw) /* ignore any possible error */ hook_call(pw_uninstall, pw); dplane_pw_uninstall(pw); +} + +void zebra_pw_handle_dplane_results(struct zebra_dplane_ctx *ctx) +{ + struct zebra_pw *pw; + struct zebra_vrf *vrf; + enum dplane_op_e op; + + op = dplane_ctx_get_op(ctx); + + vrf = zebra_vrf_lookup_by_id(dplane_ctx_get_vrf(ctx)); + pw = zebra_pw_find(vrf, dplane_ctx_get_ifname(ctx)); - if (zebra_pw_enabled(pw)) - zebra_pw_update_status(pw, PW_NOT_FORWARDING); + if (dplane_ctx_get_status(ctx) != ZEBRA_DPLANE_REQUEST_SUCCESS) { + if (pw) + zebra_pw_install_failure(pw, dplane_ctx_get_pw_status(ctx)); + } else { + if (op == DPLANE_OP_PW_INSTALL && pw->status != PW_FORWARDING) + zebra_pw_update_status(pw, PW_FORWARDING); + else if (op == DPLANE_OP_PW_UNINSTALL && zebra_pw_enabled(pw)) + zebra_pw_update_status(pw, PW_NOT_FORWARDING); + } } /* diff --git a/zebra/zebra_pw.h b/zebra/zebra_pw.h index 431d663f7ce1..e037a55048d8 100644 --- a/zebra/zebra_pw.h +++ b/zebra/zebra_pw.h @@ -64,6 +64,7 @@ void zebra_pw_init_vrf(struct zebra_vrf *); void zebra_pw_exit_vrf(struct zebra_vrf *); void zebra_pw_terminate(void); void zebra_pw_vty_init(void); +void zebra_pw_handle_dplane_results(struct zebra_dplane_ctx *ctx); #ifdef __cplusplus } diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index 2d2be4fc780b..8ebc193fba99 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -4850,29 +4850,6 @@ void rib_close_table(struct route_table *table) } } -/* - * Handler for async dataplane results after a pseudowire installation - */ -static void handle_pw_result(struct zebra_dplane_ctx *ctx) -{ - struct zebra_pw *pw; - struct zebra_vrf *vrf; - - /* The pseudowire code assumes success - we act on an error - * result for installation attempts here. - */ - if (dplane_ctx_get_op(ctx) != DPLANE_OP_PW_INSTALL) - return; - - if (dplane_ctx_get_status(ctx) != ZEBRA_DPLANE_REQUEST_SUCCESS) { - vrf = zebra_vrf_lookup_by_id(dplane_ctx_get_vrf(ctx)); - pw = zebra_pw_find(vrf, dplane_ctx_get_ifname(ctx)); - if (pw) - zebra_pw_install_failure(pw, - dplane_ctx_get_pw_status(ctx)); - } -} - /* * Handle results from the dataplane system. Dequeue update context * structs, dispatch to appropriate internal handlers. @@ -4979,7 +4956,7 @@ static void rib_process_dplane_results(struct event *thread) case DPLANE_OP_PW_INSTALL: case DPLANE_OP_PW_UNINSTALL: - handle_pw_result(ctx); + zebra_pw_handle_dplane_results(ctx); break; case DPLANE_OP_SYS_ROUTE_ADD: From a8af2b2a9d0f9fe059f645ee033087a293ab6b35 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Fri, 4 Oct 2024 09:51:46 -0400 Subject: [PATCH 47/73] zebra: Do not retry in 30 seconds on pw reachability failure Currently the zebra pw code has setup a retry to install the pw after 30 seconds when it is decided that reachability to the pw is gone. 
This causes a failure mode where the pw code just goes and re-installs the pw after 30 seconds in the non-reachability case. Instead it should just be reinstalling after reachability is restored. Signed-off-by: Donald Sharp --- zebra/zebra_pw.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/zebra/zebra_pw.c b/zebra/zebra_pw.c index d7128a1f3946..c8ffaf0bbe36 100644 --- a/zebra/zebra_pw.c +++ b/zebra/zebra_pw.c @@ -147,7 +147,6 @@ void zebra_pw_update(struct zebra_pw *pw) { if (zebra_pw_check_reachability(pw) < 0) { zebra_pw_uninstall(pw); - zebra_pw_install_failure(pw, PW_NOT_FORWARDING); /* wait for NHT and try again later */ } else { /* @@ -167,6 +166,14 @@ static void zebra_pw_install(struct zebra_pw *pw) hook_call(pw_install, pw); if (dplane_pw_install(pw) == ZEBRA_DPLANE_REQUEST_FAILURE) { + /* + * Realistically this is never going to fail passing + * the pw data down to the dplane. The failure modes + * look like impossible events but we still return + * on them.... but I don't see a real clean way to remove this + * at all. So let's just leave the retry mechanism for + * the moment. + */ zebra_pw_install_failure(pw, PW_NOT_FORWARDING); return; } From 244155ac47ed0331727f05fd494029a6b2496f17 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Fri, 12 Jul 2019 10:32:42 +0200 Subject: [PATCH 48/73] bfdd, doc, yang: change bfd timer and multiplier values The minimum and maximum values for BFD timers and multiplier settings have been updated to align with RFC 5880 requirements. Since the values inputted via VTY are in milliseconds, the maximum permissible value on the VTY interface is 4,294,967 milliseconds. For the multiplier setting, the minimum value is now restricted to be greater than zero, as zero is not allowed. The minimum transmit interval has been set to 10 milliseconds to ensure reliable service performance. 
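The 4,294,967 millisecond ceiling on the CLI ranges follows from the YANG model, which
stores these intervals as 32-bit microsecond values (see the frr-bfdd.yang hunks below):

    maximum uint32 value:    4,294,967,295 microseconds
    4,294,967,295 / 1000   = 4,294,967 milliseconds (truncated)

Any larger millisecond value entered on the VTY could not be represented in the
underlying uint32 microsecond leaf.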
Signed-off-by: Philippe Guibert Signed-off-by: Louis Scalbert --- bfdd/bfdd_cli.c | 24 ++++++++++++------------ doc/user/bfd.rst | 10 +++++----- yang/frr-bfdd.yang | 10 +++++----- 3 files changed, 22 insertions(+), 22 deletions(-) diff --git a/bfdd/bfdd_cli.c b/bfdd/bfdd_cli.c index 2e213a2237ea..6527ec5f41a3 100644 --- a/bfdd/bfdd_cli.c +++ b/bfdd/bfdd_cli.c @@ -338,7 +338,7 @@ void bfd_cli_show_minimum_ttl(struct vty *vty, const struct lyd_node *dnode, DEFPY_YANG( bfd_peer_mult, bfd_peer_mult_cmd, - "[no] detect-multiplier ![(2-255)$multiplier]", + "[no] detect-multiplier ![(1-255)$multiplier]", NO_STR "Configure peer detection multiplier\n" "Configure peer detection multiplier value\n") @@ -357,7 +357,7 @@ void bfd_cli_show_mult(struct vty *vty, const struct lyd_node *dnode, DEFPY_YANG( bfd_peer_rx, bfd_peer_rx_cmd, - "[no] receive-interval ![(10-60000)$interval]", + "[no] receive-interval ![(10-4294967)$interval]", NO_STR "Configure peer receive interval\n" "Configure peer receive interval value in milliseconds\n") @@ -381,7 +381,7 @@ void bfd_cli_show_rx(struct vty *vty, const struct lyd_node *dnode, DEFPY_YANG( bfd_peer_tx, bfd_peer_tx_cmd, - "[no] transmit-interval ![(10-60000)$interval]", + "[no] transmit-interval ![(10-4294967)$interval]", NO_STR "Configure peer transmit interval\n" "Configure peer transmit interval value in milliseconds\n") @@ -439,7 +439,7 @@ void bfd_cli_show_echo(struct vty *vty, const struct lyd_node *dnode, DEFPY_YANG( bfd_peer_echo_interval, bfd_peer_echo_interval_cmd, - "[no] echo-interval ![(10-60000)$interval]", + "[no] echo-interval ![(10-4294967)$interval]", NO_STR "Configure peer echo intervals\n" "Configure peer echo rx/tx intervals value in milliseconds\n") @@ -462,7 +462,7 @@ DEFPY_YANG( DEFPY_YANG( bfd_peer_echo_transmit_interval, bfd_peer_echo_transmit_interval_cmd, - "[no] echo transmit-interval ![(10-60000)$interval]", + "[no] echo transmit-interval ![(10-4294967)$interval]", NO_STR "Configure peer echo intervals\n" "Configure desired transmit interval\n" @@ -492,7 +492,7 @@ void bfd_cli_show_desired_echo_transmission_interval( DEFPY_YANG( bfd_peer_echo_receive_interval, bfd_peer_echo_receive_interval_cmd, - "[no] echo receive-interval ![]", + "[no] echo receive-interval ![]", NO_STR "Configure peer echo intervals\n" "Configure required receive interval\n" @@ -577,19 +577,19 @@ void bfd_cli_show_profile(struct vty *vty, const struct lyd_node *dnode, } ALIAS_YANG(bfd_peer_mult, bfd_profile_mult_cmd, - "[no] detect-multiplier ![(2-255)$multiplier]", + "[no] detect-multiplier ![(1-255)$multiplier]", NO_STR "Configure peer detection multiplier\n" "Configure peer detection multiplier value\n") ALIAS_YANG(bfd_peer_tx, bfd_profile_tx_cmd, - "[no] transmit-interval ![(10-60000)$interval]", + "[no] transmit-interval ![(10-4294967)$interval]", NO_STR "Configure peer transmit interval\n" "Configure peer transmit interval value in milliseconds\n") ALIAS_YANG(bfd_peer_rx, bfd_profile_rx_cmd, - "[no] receive-interval ![(10-60000)$interval]", + "[no] receive-interval ![(10-4294967)$interval]", NO_STR "Configure peer receive interval\n" "Configure peer receive interval value in milliseconds\n") @@ -621,14 +621,14 @@ ALIAS_YANG(bfd_peer_echo, bfd_profile_echo_cmd, "Configure echo mode\n") ALIAS_YANG(bfd_peer_echo_interval, bfd_profile_echo_interval_cmd, - "[no] echo-interval ![(10-60000)$interval]", + "[no] echo-interval ![(10-4294967)$interval]", NO_STR "Configure peer echo interval\n" "Configure peer echo interval value in milliseconds\n") ALIAS_YANG( 
bfd_peer_echo_transmit_interval, bfd_profile_echo_transmit_interval_cmd, - "[no] echo transmit-interval ![(10-60000)$interval]", + "[no] echo transmit-interval ![(10-4294967)$interval]", NO_STR "Configure peer echo intervals\n" "Configure desired transmit interval\n" @@ -636,7 +636,7 @@ ALIAS_YANG( ALIAS_YANG( bfd_peer_echo_receive_interval, bfd_profile_echo_receive_interval_cmd, - "[no] echo receive-interval ![]", + "[no] echo receive-interval ![]", NO_STR "Configure peer echo intervals\n" "Configure required receive interval\n" diff --git a/doc/user/bfd.rst b/doc/user/bfd.rst index 4c142cfbbb06..5f698a1cd092 100644 --- a/doc/user/bfd.rst +++ b/doc/user/bfd.rst @@ -139,7 +139,7 @@ Peer / Profile Configuration BFD peers and profiles share the same BFD session configuration commands. -.. clicmd:: detect-multiplier (2-255) +.. clicmd:: detect-multiplier (1-255) Configures the detection multiplier to determine packet loss. The remote transmission interval will be multiplied by this value to @@ -151,23 +151,23 @@ BFD peers and profiles share the same BFD session configuration commands. detect failures only after 900 milliseconds without receiving packets. -.. clicmd:: receive-interval (10-60000) +.. clicmd:: receive-interval (10-4294967) Configures the minimum interval that this system is capable of receiving control packets. The default value is 300 milliseconds. -.. clicmd:: transmit-interval (10-60000) +.. clicmd:: transmit-interval (10-4294967) The minimum transmission interval (less jitter) that this system wants to use to send BFD control packets. Defaults to 300ms. -.. clicmd:: echo receive-interval +.. clicmd:: echo receive-interval Configures the minimum interval that this system is capable of receiving echo packets. Disabled means that this system doesn't want to receive echo packets. The default value is 50 milliseconds. -.. clicmd:: echo transmit-interval (10-60000) +.. clicmd:: echo transmit-interval (10-4294967) The minimum transmission interval (less jitter) that this system wants to use to send BFD echo packets. Defaults to 50ms. diff --git a/yang/frr-bfdd.yang b/yang/frr-bfdd.yang index 02ed9214599f..c5c824f7927f 100644 --- a/yang/frr-bfdd.yang +++ b/yang/frr-bfdd.yang @@ -65,7 +65,7 @@ module frr-bfdd { typedef multiplier { description "Detection multiplier"; type uint8 { - range "2..255"; + range "1..255"; } } @@ -169,7 +169,7 @@ module frr-bfdd { leaf desired-transmission-interval { type uint32 { - range "10000..60000000"; + range "10000..max"; } units microseconds; default 300000; @@ -178,7 +178,7 @@ module frr-bfdd { leaf required-receive-interval { type uint32 { - range "10000..60000000"; + range "10000..max"; } units microseconds; default 300000; @@ -210,7 +210,7 @@ module frr-bfdd { leaf desired-echo-transmission-interval { type uint32 { - range "10000..60000000"; + range "10000..max"; } units microseconds; default 50000; @@ -219,7 +219,7 @@ module frr-bfdd { leaf required-echo-receive-interval { type uint32 { - range "0 | 10000..60000000"; + range "0 | 10000..max"; } units microseconds; default 50000; From 9f8968fc5ae700a95867010bc69a51319027bd71 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Mon, 7 Oct 2024 12:40:46 -0400 Subject: [PATCH 49/73] *: Allow 16 bit size for nexthops Currently FRR is limiting the nexthop count to a uint8_t not a uint16_t. This leads to issues when the nexthop count is 256 which results in the count to overflow to 0 causing problems in the code. 
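A minimal standalone illustration of the wrap-around described above (plain C, not FRR
code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t count8 = 0;   /* old counter width */
        uint16_t count16 = 0; /* new counter width */

        /* Count the members of a 256-wide nexthop group. */
        for (int i = 0; i < 256; i++) {
            count8++;
            count16++;
        }

        /* Prints "count8=0 count16=256": the 8-bit counter wraps back to
         * zero, which is the overflow-to-zero behavior described above. */
        printf("count8=%u count16=%u\n", (unsigned)count8, (unsigned)count16);

        return 0;
    }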
Signed-off-by: Donald Sharp --- bgpd/bgp_addpath.c | 6 ++---- bgpd/bgp_addpath.h | 6 ++---- bgpd/bgp_nexthop.h | 2 +- bgpd/bgp_nht.c | 2 +- pimd/pim_nht.c | 4 ++-- pimd/pim_nht.h | 2 +- zebra/rt_netlink.c | 25 ++++++++++--------------- zebra/zapi_msg.c | 2 +- zebra/zebra_dplane.c | 4 ++-- zebra/zebra_dplane.h | 2 +- zebra/zebra_nhg.c | 32 ++++++++++++++------------------ zebra/zebra_nhg.h | 11 ++++------- 12 files changed, 41 insertions(+), 57 deletions(-) diff --git a/bgpd/bgp_addpath.c b/bgpd/bgp_addpath.c index f391c138472d..aada6e555f0b 100644 --- a/bgpd/bgp_addpath.c +++ b/bgpd/bgp_addpath.c @@ -361,8 +361,7 @@ void bgp_addpath_type_changed(struct bgp *bgp) } } -int bgp_addpath_capability_action(enum bgp_addpath_strat addpath_type, - uint8_t paths) +int bgp_addpath_capability_action(enum bgp_addpath_strat addpath_type, uint16_t paths) { int action = CAPABILITY_ACTION_UNSET; @@ -392,8 +391,7 @@ int bgp_addpath_capability_action(enum bgp_addpath_strat addpath_type, * change take effect. */ void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi, - enum bgp_addpath_strat addpath_type, - uint8_t paths) + enum bgp_addpath_strat addpath_type, uint16_t paths) { struct bgp *bgp = peer->bgp; enum bgp_addpath_strat old_type; diff --git a/bgpd/bgp_addpath.h b/bgpd/bgp_addpath.h index c267ebe43ece..f1ff98ea7a5f 100644 --- a/bgpd/bgp_addpath.h +++ b/bgpd/bgp_addpath.h @@ -62,13 +62,11 @@ bool bgp_addpath_tx_path(enum bgp_addpath_strat strat, * Change the type of addpath used for a peer. */ void bgp_addpath_set_peer_type(struct peer *peer, afi_t afi, safi_t safi, - enum bgp_addpath_strat addpath_type, - uint8_t paths); + enum bgp_addpath_strat addpath_type, uint16_t paths); void bgp_addpath_update_ids(struct bgp *bgp, struct bgp_dest *dest, afi_t afi, safi_t safi); void bgp_addpath_type_changed(struct bgp *bgp); -extern int bgp_addpath_capability_action(enum bgp_addpath_strat addpath_type, - uint8_t paths); +extern int bgp_addpath_capability_action(enum bgp_addpath_strat addpath_type, uint16_t paths); #endif diff --git a/bgpd/bgp_nexthop.h b/bgpd/bgp_nexthop.h index 0280960da8a5..5014eb8f3455 100644 --- a/bgpd/bgp_nexthop.h +++ b/bgpd/bgp_nexthop.h @@ -38,7 +38,7 @@ struct bgp_nexthop_cache { uint32_t metric; /* Nexthop number and nexthop linked list.*/ - uint8_t nexthop_num; + uint16_t nexthop_num; /* This flag is set to TRUE for a bnc that is gateway IP overlay index * nexthop. 
diff --git a/bgpd/bgp_nht.c b/bgpd/bgp_nht.c index 8719af56b3f5..aaa9e223c38d 100644 --- a/bgpd/bgp_nht.c +++ b/bgpd/bgp_nht.c @@ -761,7 +761,7 @@ static void bgp_nht_ifp_table_handle(struct bgp *bgp, { struct bgp_nexthop_cache *bnc; struct nexthop *nhop; - uint8_t other_nh_count; + uint16_t other_nh_count; bool nhop_ll_found = false; bool nhop_found = false; diff --git a/pimd/pim_nht.c b/pimd/pim_nht.c index 030b933e0948..5a161c4f16dc 100644 --- a/pimd/pim_nht.c +++ b/pimd/pim_nht.c @@ -573,7 +573,7 @@ static int pim_ecmp_nexthop_search(struct pim_instance *pim, ifindex_t first_ifindex; struct interface *ifp = NULL; uint32_t hash_val = 0, mod_val = 0; - uint8_t nh_iter = 0, found = 0; + uint16_t nh_iter = 0, found = 0; uint32_t i, num_nbrs = 0; struct pim_interface *pim_ifp; @@ -947,7 +947,7 @@ int pim_ecmp_nexthop_lookup(struct pim_instance *pim, struct interface *ifps[router->multipath], *ifp; int first_ifindex; int found = 0; - uint8_t i = 0; + uint16_t i = 0; uint32_t hash_val = 0, mod_val = 0; uint32_t num_nbrs = 0; struct pim_interface *pim_ifp; diff --git a/pimd/pim_nht.h b/pimd/pim_nht.h index e74b375dc6d2..d064f714a528 100644 --- a/pimd/pim_nht.h +++ b/pimd/pim_nht.h @@ -23,7 +23,7 @@ struct pim_nexthop_cache { uint32_t metric; uint32_t distance; /* Nexthop number and nexthop linked list. */ - uint8_t nexthop_num; + uint16_t nexthop_num; struct nexthop *nexthop; int64_t last_update; uint16_t flags; diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index 75e4396e920c..dc679ed4954d 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -591,12 +591,9 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb, return nh; } -static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id, - struct nexthop_group *ng, - struct rtmsg *rtm, - struct rtnexthop *rtnh, - struct rtattr **tb, - void *prefsrc, vrf_id_t vrf_id) +static uint16_t parse_multipath_nexthops_unicast(ns_id_t ns_id, struct nexthop_group *ng, + struct rtmsg *rtm, struct rtnexthop *rtnh, + struct rtattr **tb, void *prefsrc, vrf_id_t vrf_id) { void *gate = NULL; struct interface *ifp = NULL; @@ -721,7 +718,7 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id, rtnh = RTNH_NEXT(rtnh); } - uint8_t nhop_num = nexthop_group_nexthop_num(ng); + uint16_t nhop_num = nexthop_group_nexthop_num(ng); return nhop_num; } @@ -1000,7 +997,7 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, (struct rtnexthop *)RTA_DATA(tb[RTA_MULTIPATH]); if (!nhe_id) { - uint8_t nhop_num; + uint16_t nhop_num; /* Use temporary list of nexthops; parse * message payload's nexthops. 
@@ -2644,11 +2641,9 @@ int kernel_get_ipmr_sg_stats(struct zebra_vrf *zvrf, void *in) /* Char length to debug ID with */ #define ID_LENGTH 10 -static bool _netlink_nexthop_build_group(struct nlmsghdr *n, size_t req_size, - uint32_t id, - const struct nh_grp *z_grp, - const uint8_t count, bool resilient, - const struct nhg_resilience *nhgr) +static bool _netlink_nexthop_build_group(struct nlmsghdr *n, size_t req_size, uint32_t id, + const struct nh_grp *z_grp, const uint16_t count, + bool resilient, const struct nhg_resilience *nhgr) { struct nexthop_grp grp[count]; /* Need space for max group size, "/", and null term */ @@ -3285,7 +3280,7 @@ static int netlink_nexthop_process_group(struct rtattr **tb, struct nh_grp *z_grp, int z_grp_size, struct nhg_resilience *nhgr) { - uint8_t count = 0; + uint16_t count = 0; /* linux/nexthop.h group struct */ struct nexthop_grp *n_grp = NULL; @@ -3358,7 +3353,7 @@ int netlink_nexthop_change(struct nlmsghdr *h, ns_id_t ns_id, int startup) struct nexthop nh = {.weight = 1}; struct nh_grp grp[MULTIPATH_NUM] = {}; /* Count of nexthops in group array */ - uint8_t grp_count = 0; + uint16_t grp_count = 0; struct rtattr *tb[NHA_MAX + 1] = {}; frrtrace(3, frr_zebra, netlink_nexthop_change, h, ns_id, startup); diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index b13d58f99d12..7dae75baccd9 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -517,7 +517,7 @@ int zsend_redistribute_route(int cmd, struct zserv *client, struct zapi_nexthop *api_nh; struct nexthop *nexthop; const struct prefix *p, *src_p; - uint8_t count = 0; + uint16_t count = 0; afi_t afi; size_t stream_size = MAX(ZEBRA_MAX_PACKET_SIZ, sizeof(struct zapi_route)); diff --git a/zebra/zebra_dplane.c b/zebra/zebra_dplane.c index 75147e713649..00e990e856fd 100644 --- a/zebra/zebra_dplane.c +++ b/zebra/zebra_dplane.c @@ -86,7 +86,7 @@ struct dplane_nexthop_info { struct nexthop_group ng; struct nh_grp nh_grp[MULTIPATH_NUM]; - uint8_t nh_grp_count; + uint16_t nh_grp_count; }; /* @@ -2316,7 +2316,7 @@ dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx) return ctx->u.rinfo.nhe.nh_grp; } -uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx) +uint16_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx) { DPLANE_CTX_VALID(ctx); return ctx->u.rinfo.nhe.nh_grp_count; diff --git a/zebra/zebra_dplane.h b/zebra/zebra_dplane.h index 0e9a8bfb99f5..a3318bf5e995 100644 --- a/zebra/zebra_dplane.h +++ b/zebra/zebra_dplane.h @@ -597,7 +597,7 @@ const struct nexthop_group * dplane_ctx_get_nhe_ng(const struct zebra_dplane_ctx *ctx); const struct nh_grp * dplane_ctx_get_nhe_nh_grp(const struct zebra_dplane_ctx *ctx); -uint8_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx); +uint16_t dplane_ctx_get_nhe_nh_grp_count(const struct zebra_dplane_ctx *ctx); /* Accessors for LSP information */ diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 81f1411ee5c5..1519246c179e 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -625,9 +625,8 @@ bool zebra_nhg_hash_id_equal(const void *arg1, const void *arg2) return nhe1->id == nhe2->id; } -static int zebra_nhg_process_grp(struct nexthop_group *nhg, - struct nhg_connected_tree_head *depends, - struct nh_grp *grp, uint8_t count, +static int zebra_nhg_process_grp(struct nexthop_group *nhg, struct nhg_connected_tree_head *depends, + struct nh_grp *grp, uint16_t count, struct nhg_resilience *resilience) { nhg_connected_tree_init(depends); @@ -982,7 +981,7 @@ static struct nexthop *nhg_ctx_get_nh(struct nhg_ctx 
*ctx) return &ctx->u.nh; } -static uint8_t nhg_ctx_get_count(const struct nhg_ctx *ctx) +static uint16_t nhg_ctx_get_count(const struct nhg_ctx *ctx) { return ctx->count; } @@ -1028,9 +1027,8 @@ void nhg_ctx_free(struct nhg_ctx **ctx) XFREE(MTYPE_NHG_CTX, *ctx); } -static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh, - struct nh_grp *grp, vrf_id_t vrf_id, - afi_t afi, int type, uint8_t count, +static struct nhg_ctx *nhg_ctx_init(uint32_t id, struct nexthop *nh, struct nh_grp *grp, + vrf_id_t vrf_id, afi_t afi, int type, uint16_t count, struct nhg_resilience *resilience) { struct nhg_ctx *ctx = NULL; @@ -1204,7 +1202,7 @@ static int nhg_ctx_process_new(struct nhg_ctx *ctx) struct nhg_hash_entry *nhe = NULL; uint32_t id = nhg_ctx_get_id(ctx); - uint8_t count = nhg_ctx_get_count(ctx); + uint16_t count = nhg_ctx_get_count(ctx); vrf_id_t vrf_id = nhg_ctx_get_vrf_id(ctx); int type = nhg_ctx_get_type(ctx); afi_t afi = nhg_ctx_get_afi(ctx); @@ -1356,9 +1354,9 @@ int nhg_ctx_process(struct nhg_ctx *ctx) } /* Kernel-side, you either get a single new nexthop or a array of ID's */ -int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp, - uint8_t count, vrf_id_t vrf_id, afi_t afi, int type, - int startup, struct nhg_resilience *nhgr) +int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp, uint16_t count, + vrf_id_t vrf_id, afi_t afi, int type, int startup, + struct nhg_resilience *nhgr) { struct nhg_ctx *ctx = NULL; @@ -3180,15 +3178,14 @@ int nexthop_active_update(struct route_node *rn, struct route_entry *re, * I'm pretty sure we only allow ONE level of group within group currently. * But making this recursive just in case that ever changes. */ -static uint8_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp, uint8_t curr_index, - struct nhg_hash_entry *nhe, - struct nhg_hash_entry *original, - int max_num) +static uint16_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp, uint16_t curr_index, + struct nhg_hash_entry *nhe, + struct nhg_hash_entry *original, int max_num) { struct nhg_connected *rb_node_dep = NULL; struct nhg_hash_entry *depend = NULL; struct nexthop *nexthop; - uint8_t i = curr_index; + uint16_t i = curr_index; frr_each(nhg_connected_tree, &nhe->nhg_depends, rb_node_dep) { bool duplicate = false; @@ -3309,8 +3306,7 @@ static uint8_t zebra_nhg_nhe2grp_internal(struct nh_grp *grp, uint8_t curr_index } /* Convert a nhe into a group array */ -uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, - int max_num) +uint16_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, int max_num) { /* Call into the recursive function */ return zebra_nhg_nhe2grp_internal(grp, 0, nhe, nhe, max_num); diff --git a/zebra/zebra_nhg.h b/zebra/zebra_nhg.h index 435ccb0d01f5..0f90627a0d15 100644 --- a/zebra/zebra_nhg.h +++ b/zebra/zebra_nhg.h @@ -231,7 +231,7 @@ struct nhg_ctx { int type; /* If its a group array, how many? 
*/ - uint8_t count; + uint16_t count; /* Its either a single nexthop or an array of ID's */ union { @@ -317,10 +317,8 @@ extern int nhg_ctx_process(struct nhg_ctx *ctx); void nhg_ctx_free(struct nhg_ctx **ctx); /* Find via kernel nh creation */ -extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, - struct nh_grp *grp, uint8_t count, - vrf_id_t vrf_id, afi_t afi, int type, - int startup, +extern int zebra_nhg_kernel_find(uint32_t id, struct nexthop *nh, struct nh_grp *grp, + uint16_t count, vrf_id_t vrf_id, afi_t afi, int type, int startup, struct nhg_resilience *resilience); /* Del via kernel */ extern int zebra_nhg_kernel_del(uint32_t id, vrf_id_t vrf_id); @@ -379,8 +377,7 @@ extern void zebra_nhg_increment_ref(struct nhg_hash_entry *nhe); extern void zebra_nhg_check_valid(struct nhg_hash_entry *nhe); /* Convert nhe depends to a grp context that can be passed around safely */ -extern uint8_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, - int size); +extern uint16_t zebra_nhg_nhe2grp(struct nh_grp *grp, struct nhg_hash_entry *nhe, int size); /* Dataplane install/uninstall */ extern void zebra_nhg_install_kernel(struct nhg_hash_entry *nhe, uint8_t type); From 47cdfbda7640f9e10dc298cb98ea0411dc14e0ab Mon Sep 17 00:00:00 2001 From: Jafar Al-Gharaibeh Date: Tue, 8 Oct 2024 13:06:40 -0500 Subject: [PATCH 50/73] build: FRR 10.3 development version Signed-off-by: Jafar Al-Gharaibeh --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index e1aa05b869af..e8036fcff1f5 100644 --- a/configure.ac +++ b/configure.ac @@ -7,7 +7,7 @@ ## AC_PREREQ([2.69]) -AC_INIT([frr], [10.2-dev], [https://github.com/frrouting/frr/issues]) +AC_INIT([frr], [10.3-dev], [https://github.com/frrouting/frr/issues]) PACKAGE_URL="https://frrouting.org/" AC_SUBST([PACKAGE_URL]) PACKAGE_FULLNAME="FRRouting" From 154a89bc31ab676bf8d037a37a98587ca0236ba9 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Wed, 9 Oct 2024 07:16:37 -0400 Subject: [PATCH 51/73] zebra: Fix crash in pw code Recent PR #17009 introduced a crash in pw handing for deletion. Let's fix that problem. Fixes: #17041 Signed-off-by: Donald Sharp --- zebra/zebra_pw.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/zebra/zebra_pw.c b/zebra/zebra_pw.c index c8ffaf0bbe36..6adc0b1b4aa5 100644 --- a/zebra/zebra_pw.c +++ b/zebra/zebra_pw.c @@ -205,9 +205,11 @@ void zebra_pw_handle_dplane_results(struct zebra_dplane_ctx *ctx) vrf = zebra_vrf_lookup_by_id(dplane_ctx_get_vrf(ctx)); pw = zebra_pw_find(vrf, dplane_ctx_get_ifname(ctx)); + if (!pw) + return; + if (dplane_ctx_get_status(ctx) != ZEBRA_DPLANE_REQUEST_SUCCESS) { - if (pw) - zebra_pw_install_failure(pw, dplane_ctx_get_pw_status(ctx)); + zebra_pw_install_failure(pw, dplane_ctx_get_pw_status(ctx)); } else { if (op == DPLANE_OP_PW_INSTALL && pw->status != PW_FORWARDING) zebra_pw_update_status(pw, PW_FORWARDING); From c2b70369b641250363c919ccba80476cacd350a3 Mon Sep 17 00:00:00 2001 From: baozhen-H3C Date: Wed, 9 Oct 2024 17:58:05 +0800 Subject: [PATCH 52/73] isisd: Lsp fragments will delete the corresponding dyn_cache entry. When LSP fragments age, isis_dynhn_remove() is also called to remove the corresponding dyhn_cache entries. 
Signed-off-by: baozhen-H3C --- isisd/isis_lsp.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/isisd/isis_lsp.c b/isisd/isis_lsp.c index 391d42fba156..c00b7efed77a 100644 --- a/isisd/isis_lsp.c +++ b/isisd/isis_lsp.c @@ -119,6 +119,10 @@ static void lsp_destroy(struct isis_lsp *lsp) lsp_clear_data(lsp); if (!LSP_FRAGMENT(lsp->hdr.lsp_id)) { + /* Only non-pseudo nodes and non-fragment LSPs can delete nodes. */ + if (!LSP_PSEUDO_ID(lsp->hdr.lsp_id)) + isis_dynhn_remove(lsp->area->isis, lsp->hdr.lsp_id); + if (lsp->lspu.frags) { lsp_remove_frags(&lsp->area->lspdb[lsp->level - 1], lsp->lspu.frags); @@ -2226,10 +2230,6 @@ void lsp_tick(struct event *thread) &area->lspdb[level], next); - if (!LSP_PSEUDO_ID(lsp->hdr.lsp_id)) - isis_dynhn_remove(area->isis, - lsp->hdr.lsp_id); - lspdb_del(&area->lspdb[level], lsp); lsp_destroy(lsp); lsp = NULL; From 38ad215feb313775e066c807a9a2bdf672d3c067 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Tue, 8 Oct 2024 13:01:45 +0200 Subject: [PATCH 53/73] Revert "bgpd: Exclude case for remote prefix w/o link-local" This reverts commit 5f6a61f91fdbfa33df1b7112c961135156fbd894. Signed-off-by: Louis Scalbert --- bgpd/bgp_route.c | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 475b709a0766..18f1e5fd0436 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -2467,16 +2467,13 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, if (NEXTHOP_IS_V6) { attr->mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL; if ((CHECK_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED) && - IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_local)) || - (!reflect && !transparent && - IN6_IS_ADDR_LINKLOCAL(&peer->nexthop.v6_local) && - peer->shared_network && - ((from == bgp->peer_self && peer->sort == BGP_PEER_EBGP) || - (from == bgp->peer_self && peer->sort != BGP_PEER_EBGP) || - (from != bgp->peer_self && - IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_local) && - peer->sort == BGP_PEER_EBGP)))) { + PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED) + && IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_local)) + || (!reflect && !transparent + && IN6_IS_ADDR_LINKLOCAL(&peer->nexthop.v6_local) + && peer->shared_network + && (from == bgp->peer_self + || peer->sort == BGP_PEER_EBGP))) { if (safi == SAFI_MPLS_VPN) attr->mp_nexthop_len = BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL; From dbf39ad0766c60da92006bcbedf9b5d375b30c2b Mon Sep 17 00:00:00 2001 From: Chris Wiggins Date: Thu, 10 Oct 2024 17:39:54 +1300 Subject: [PATCH 54/73] doc: VRRP troubleshooting info for linux-hosted VMs Signed-off-by: Chris Wiggins --- doc/user/vrrp.rst | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/doc/user/vrrp.rst b/doc/user/vrrp.rst index d99fc23ef5be..cad850e7acbd 100644 --- a/doc/user/vrrp.rst +++ b/doc/user/vrrp.rst @@ -519,6 +519,7 @@ Check: - Do you have unusual ``sysctls`` enabled that could affect the operation of multicast traffic? - Are you running in ESXi? See below. +- Are you running in a linux VM with a bridged network? See below. 
My master router is not forwarding traffic @@ -552,6 +553,24 @@ feature instead, explained `here Issue reference: https://github.com/FRRouting/frr/issues/5386 +My router is running in a linux VM with a bridged host network and VRRP has issues +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Issues can arise with VRRP (especially IPv6) when you have a VM running on top +of a linux host, where your physical network is in a bridge, and the VM +has an interface attached to the bridge. By default, the linux bridge will +snoop multicast traffic, and you will likely see sporadic VRRP advertisements failing +to be received. IPv6 traffic was be particularly affected. + +This was observed on a VM running on proxmox, and the solution was to disable +multicast snooping on the bridge: + +.. code-block:: console + + echo 0 > /sys/devices/virtual/net/vmbr0/bridge/multicast_snooping + +Issue reference: https://github.com/FRRouting/frr/issues/5386 + My router cannot interoperate with branded routers / L3 switches ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 34c1dd076e5f9ea2a7eb8820b204696dc692667d Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Wed, 9 Oct 2024 17:06:19 +0200 Subject: [PATCH 55/73] bgpd: do not insert link-local with local unchanged Do not add an IPv6 link-local nexthop if the originating peer does not provide one and the nexthop-local unchanged setting is enabled. Signed-off-by: Louis Scalbert --- bgpd/bgp_route.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index 18f1e5fd0436..b0f91b58b9c9 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -2155,6 +2155,7 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, bool nh_reset = false; uint64_t cum_bw; mpls_label_t label; + bool global_and_ll = false; if (DISABLE_BGP_ANNOUNCE) return false; @@ -2465,22 +2466,26 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, * we do not announce LL address as `::`. */ if (NEXTHOP_IS_V6) { - attr->mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL; - if ((CHECK_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED) - && IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_local)) - || (!reflect && !transparent - && IN6_IS_ADDR_LINKLOCAL(&peer->nexthop.v6_local) - && peer->shared_network - && (from == bgp->peer_self - || peer->sort == BGP_PEER_EBGP))) { + if (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED)) { + /* nexthop local unchanged: only include the link-local nexthop if it + * was already present. 
+ */ + if (IN6_IS_ADDR_LINKLOCAL(&attr->mp_nexthop_local)) + global_and_ll = true; + } else if (!reflect && !transparent && + IN6_IS_ADDR_LINKLOCAL(&peer->nexthop.v6_local) && peer->shared_network && + (from == bgp->peer_self || peer->sort == BGP_PEER_EBGP)) + global_and_ll = true; + + if (global_and_ll) { if (safi == SAFI_MPLS_VPN) attr->mp_nexthop_len = BGP_ATTR_NHLEN_VPNV6_GLOBAL_AND_LL; else attr->mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL_AND_LL; - } + } else + attr->mp_nexthop_len = BGP_ATTR_NHLEN_IPV6_GLOBAL; /* Clear off link-local nexthop in source, whenever it is not * needed to From 5bb99ccad2935adb006f7f727c2779b2c6e746e5 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Wed, 9 Oct 2024 17:08:44 +0200 Subject: [PATCH 56/73] bgpd: reset ipv6 invalid link-local nexthop If the "nexthop-local unchanged" setting is enabled, it preserves the IPv6 link-local nexthop from the originating peer. However, if the originating and destination peers are not on the same network segment, the originating peer's IPv6 link-local address will be unreachable from the destination peer. In such cases, reset the IPv6 link-local nexthop, even if "nexthop-local unchanged" is set on the destination peer. Signed-off-by: Louis Scalbert --- bgpd/bgp_route.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/bgpd/bgp_route.c b/bgpd/bgp_route.c index b0f91b58b9c9..9cefec0706ee 100644 --- a/bgpd/bgp_route.c +++ b/bgpd/bgp_route.c @@ -2492,8 +2492,11 @@ bool subgroup_announce_check(struct bgp_dest *dest, struct bgp_path_info *pi, * ensure more prefixes share the same attribute for * announcement. */ - if (!(CHECK_FLAG(peer->af_flags[afi][safi], - PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED))) + if (!(CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED)) || + !IPV6_ADDR_SAME(&peer->nexthop.v6_global, &from->nexthop.v6_global)) + /* Reset if "nexthop-local unchanged" is not set or originating and destination peer + * does not share the same subnet. + */ memset(&attr->mp_nexthop_local, 0, IPV6_MAX_BYTELEN); } From 6dc4d9506181e370680283a4f9ec34d84b06d5e2 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Tue, 8 Oct 2024 15:04:34 +0200 Subject: [PATCH 57/73] topotests: add bgp_nexthop_ipv6 Add bgp_nexthop_ipv6 to check the ipv6 link-local nexthop conformity in several situations. 
Signed-off-by: Louis Scalbert --- tests/topotests/bgp_nexthop_ipv6/exabgp.env | 53 ++++ tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf | 22 ++ .../r1/show_bgp_ipv6_step1.json | 162 +++++++++++ .../r1/show_bgp_ipv6_step2.json | 162 +++++++++++ .../topotests/bgp_nexthop_ipv6/r1/zebra.conf | 14 + tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf | 22 ++ .../r2/show_bgp_ipv6_step1.json | 162 +++++++++++ .../r2/show_bgp_ipv6_step2.json | 162 +++++++++++ .../topotests/bgp_nexthop_ipv6/r2/zebra.conf | 14 + .../topotests/bgp_nexthop_ipv6/r3/exabgp.cfg | 16 ++ .../r3/show_bgp_ipv6_step1.json | 0 .../r3/show_bgp_ipv6_step2.json | 0 tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf | 22 ++ .../r4/show_bgp_ipv6_step1.json | 210 ++++++++++++++ .../r4/show_bgp_ipv6_step2.json | 222 +++++++++++++++ .../topotests/bgp_nexthop_ipv6/r4/zebra.conf | 14 + tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf | 22 ++ .../r5/show_bgp_ipv6_step1.json | 222 +++++++++++++++ .../r5/show_bgp_ipv6_step2.json | 222 +++++++++++++++ .../topotests/bgp_nexthop_ipv6/r5/zebra.conf | 14 + tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf | 22 ++ .../r6/show_bgp_ipv6_step1.json | 162 +++++++++++ .../r6/show_bgp_ipv6_step2.json | 162 +++++++++++ .../topotests/bgp_nexthop_ipv6/r6/zebra.conf | 14 + tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf | 30 ++ .../rr/show_bgp_ipv6_step1.json | 220 +++++++++++++++ .../rr/show_bgp_ipv6_step2.json | 220 +++++++++++++++ .../rr/show_bgp_ipv6_summary.json | 6 + .../topotests/bgp_nexthop_ipv6/rr/zebra.conf | 18 ++ .../test_bgp_nexthop_ipv6_topo1.py | 263 ++++++++++++++++++ 30 files changed, 2854 insertions(+) create mode 100644 tests/topotests/bgp_nexthop_ipv6/exabgp.env create mode 100644 tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step1.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r1/zebra.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step1.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r2/zebra.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r3/exabgp.cfg create mode 100644 tests/topotests/bgp_nexthop_ipv6/r3/show_bgp_ipv6_step1.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r3/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r4/show_bgp_ipv6_step1.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r4/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r4/zebra.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step1.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r5/zebra.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step1.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/r6/zebra.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_step1.json create mode 100644 
tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_summary.json create mode 100644 tests/topotests/bgp_nexthop_ipv6/rr/zebra.conf create mode 100644 tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py diff --git a/tests/topotests/bgp_nexthop_ipv6/exabgp.env b/tests/topotests/bgp_nexthop_ipv6/exabgp.env new file mode 100644 index 000000000000..28e642360a39 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/exabgp.env @@ -0,0 +1,53 @@ +[exabgp.api] +encoder = text +highres = false +respawn = false +socket = '' + +[exabgp.bgp] +openwait = 60 + +[exabgp.cache] +attributes = true +nexthops = true + +[exabgp.daemon] +daemonize = true +pid = '/var/run/exabgp/exabgp.pid' +user = 'exabgp' +##daemonize = false + +[exabgp.log] +all = false +configuration = true +daemon = true +destination = '/var/log/exabgp.log' +enable = true +level = INFO +message = false +network = true +packets = false +parser = false +processes = true +reactor = true +rib = false +routes = false +short = false +timers = false + +[exabgp.pdb] +enable = false + +[exabgp.profile] +enable = false +file = '' + +[exabgp.reactor] +speed = 1.0 + +[exabgp.tcp] +acl = false +bind = '' +delay = 0 +once = false +port = 179 diff --git a/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf new file mode 100644 index 000000000000..7efa1b79fa5d --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r1/bgpd.conf @@ -0,0 +1,22 @@ +router bgp 65000 + no bgp ebgp-requires-policy + neighbor fd00:0:2::9 remote-as internal + neighbor fd00:0:2::9 timers 3 10 + address-family ipv4 unicast + redistribute connected route-map RMAP4 + ! + address-family ipv6 unicast + redistribute connected route-map RMAP6 + neighbor fd00:0:2::9 activate + +ip prefix-list RANGE4 seq 10 permit 172.16.0.0/16 le 24 +ip prefix-list RANGE4 seq 20 permit 10.0.0.0/8 ge 32 + +ipv6 prefix-list RANGE6 seq 10 permit fd00:100::0/64 +ipv6 prefix-list RANGE6 seq 20 permit 2001:db8::0/64 ge 128 + +route-map RMAP4 permit 10 + match ip address prefix-list RANGE4 +! 
+route-map RMAP6 permit 10 + match ipv6 address prefix-list RANGE6 diff --git a/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..9923edb348da --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step1.json @@ -0,0 +1,162 @@ +{ + "routerId": "10.1.1.1", + "localAS": 65000, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r1", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r1", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step2.json b/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..9923edb348da --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step2.json @@ -0,0 +1,162 @@ +{ + "routerId": "10.1.1.1", + "localAS": 65000, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r1", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "rr", + 
"afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r1", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r1/zebra.conf b/tests/topotests/bgp_nexthop_ipv6/r1/zebra.conf new file mode 100644 index 000000000000..d06a3e194ba3 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r1/zebra.conf @@ -0,0 +1,14 @@ +ip forwarding +ipv6 forwarding + +int eth-dummy + ip addr 172.16.1.1/24 + ip addr fd00:100::1/64 + +int eth-sw + ip addr 192.168.2.1/24 + ipv6 address fd00:0:2::1/64 + +int lo + ip addr 10.1.1.1/32 + ipv6 address 2001:db8::1/128 diff --git a/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf new file mode 100644 index 000000000000..4d4ae44e284b --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r2/bgpd.conf @@ -0,0 +1,22 @@ +router bgp 65000 + no bgp ebgp-requires-policy + neighbor fd00:0:2::9 remote-as internal + neighbor fd00:0:2::9 timers 3 10 + address-family ipv4 unicast + redistribute connected route-map RMAP4 + ! + address-family ipv6 unicast + redistribute connected route-map RMAP6 + neighbor fd00:0:2::9 activate + +ip prefix-list RANGE4 seq 10 permit 172.16.0.0/16 le 24 +ip prefix-list RANGE4 seq 20 permit 10.0.0.0/8 ge 32 + +ipv6 prefix-list RANGE6 seq 10 permit fd00:200::0/64 +ipv6 prefix-list RANGE6 seq 20 permit 2001:db8::0/64 ge 128 + +route-map RMAP4 permit 10 + match ip address prefix-list RANGE4 +! 
+route-map RMAP6 permit 10 + match ipv6 address prefix-list RANGE6 diff --git a/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..bb2efa16d9e8 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step1.json @@ -0,0 +1,162 @@ +{ + "routerId": "10.2.2.2", + "localAS": 65000, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step2.json b/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..bb2efa16d9e8 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step2.json @@ -0,0 +1,162 @@ +{ + "routerId": "10.2.2.2", + "localAS": 65000, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "rr", + 
"afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r2/zebra.conf b/tests/topotests/bgp_nexthop_ipv6/r2/zebra.conf new file mode 100644 index 000000000000..de8746df98f8 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r2/zebra.conf @@ -0,0 +1,14 @@ +ip forwarding +ipv6 forwarding + +int eth-dummy + ip addr 172.16.2.2/24 + ip addr fd00:200::1/64 + +int eth-sw + ip addr 192.168.2.2/24 + ipv6 address fd00:0:2::2/64 + +int lo + ip addr 10.2.2.2/32 + ipv6 address 2001:db8::2/128 \ No newline at end of file diff --git a/tests/topotests/bgp_nexthop_ipv6/r3/exabgp.cfg b/tests/topotests/bgp_nexthop_ipv6/r3/exabgp.cfg new file mode 100644 index 000000000000..373169bcbab4 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r3/exabgp.cfg @@ -0,0 +1,16 @@ +neighbor fd00:0:2::9 { + router-id 10.3.3.3; + local-address fd00:0:2::3; + local-as 65000; + peer-as 65000; + + family { + ipv6 unicast; + } + + static { + route fd00:300::0/64 next-hop fd00:0:2::3; + route 2001:db8::3/128 next-hop fd00:0:2::3; + } + hold-time 10; +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r3/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r3/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/bgp_nexthop_ipv6/r3/show_bgp_ipv6_step2.json b/tests/topotests/bgp_nexthop_ipv6/r3/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf new file mode 100644 index 000000000000..b14c9bace4a6 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r4/bgpd.conf @@ -0,0 +1,22 @@ +router bgp 65001 + no bgp ebgp-requires-policy + neighbor fd00:0:2::9 remote-as external + neighbor fd00:0:2::9 timers 3 10 + address-family ipv4 unicast + redistribute connected route-map RMAP4 + ! + address-family ipv6 unicast + redistribute connected route-map RMAP6 + neighbor fd00:0:2::9 activate + +ip prefix-list RANGE4 seq 10 permit 172.16.0.0/16 le 24 +ip prefix-list RANGE4 seq 20 permit 10.0.0.0/8 ge 32 + +ipv6 prefix-list RANGE6 seq 10 permit fd00:400::0/64 +ipv6 prefix-list RANGE6 seq 20 permit 2001:db8::0/64 ge 128 + +route-map RMAP4 permit 10 + match ip address prefix-list RANGE4 +! 
+route-map RMAP6 permit 10 + match ipv6 address prefix-list RANGE6 diff --git a/tests/topotests/bgp_nexthop_ipv6/r4/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r4/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..dd8603f4e321 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r4/show_bgp_ipv6_step1.json @@ -0,0 +1,210 @@ +{ + "routerId": "10.4.4.4", + "localAS": 65001, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r4/show_bgp_ipv6_step2.json b/tests/topotests/bgp_nexthop_ipv6/r4/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..35a31e63f921 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r4/show_bgp_ipv6_step2.json @@ -0,0 +1,222 @@ +{ + "routerId": "10.4.4.4", + "localAS": 65001, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", 
+ "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r4/zebra.conf b/tests/topotests/bgp_nexthop_ipv6/r4/zebra.conf new file mode 100644 index 000000000000..edc39684e1f5 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r4/zebra.conf @@ -0,0 +1,14 @@ +ip forwarding +ipv6 forwarding + +int eth-dummy + ip addr 172.16.4.4/24 + ip addr fd00:400::4/64 + +int eth-sw + ip addr 192.168.2.4/24 + ipv6 address fd00:0:2::4/64 + +int lo + ip addr 10.4.4.4/32 + ipv6 address 2001:db8::4/128 \ No newline at end of file diff --git a/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf new file mode 100644 index 000000000000..becea2bbe648 --- /dev/null +++ 
b/tests/topotests/bgp_nexthop_ipv6/r5/bgpd.conf @@ -0,0 +1,22 @@ +router bgp 65002 + no bgp ebgp-requires-policy + neighbor fd00:0:3::9 remote-as external + neighbor fd00:0:3::9 timers 3 10 + address-family ipv4 unicast + redistribute connected route-map RMAP4 + ! + address-family ipv6 unicast + redistribute connected route-map RMAP6 + neighbor fd00:0:3::9 activate + +ip prefix-list RANGE4 seq 10 permit 172.16.0.0/16 le 24 +ip prefix-list RANGE4 seq 20 permit 10.0.0.0/8 ge 32 + +ipv6 prefix-list RANGE6 seq 10 permit fd00:500::0/64 +ipv6 prefix-list RANGE6 seq 20 permit 2001:db8::0/64 ge 128 + +route-map RMAP4 permit 10 + match ip address prefix-list RANGE4 +! +route-map RMAP6 permit 10 + match ipv6 address prefix-list RANGE6 diff --git a/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..d0875474ae94 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step1.json @@ -0,0 +1,222 @@ +{ + "routerId": "10.5.5.5", + "localAS": 65002, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r5", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": 
"link-local", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r5", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step2.json b/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..d0875474ae94 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step2.json @@ -0,0 +1,222 @@ +{ + "routerId": "10.5.5.5", + "localAS": 65002, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r5", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r5", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::9", + "hostname": "rr", + 
"afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r5", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r5/zebra.conf b/tests/topotests/bgp_nexthop_ipv6/r5/zebra.conf new file mode 100644 index 000000000000..d278889ed908 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r5/zebra.conf @@ -0,0 +1,14 @@ +ip forwarding +ipv6 forwarding + +int eth-dummy + ip addr 172.16.5.5/24 + ip addr fd00:500::5/64 + +int eth-rr + ip addr 192.168.3.5/24 + ipv6 address fd00:0:3::5/64 + +int lo + ip addr 10.5.5.5/32 + ipv6 address 2001:db8::5/128 diff --git a/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf new file mode 100644 index 000000000000..801736ab988e --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r6/bgpd.conf @@ -0,0 +1,22 @@ +router bgp 65000 + no bgp ebgp-requires-policy + neighbor fd00:0:4::9 remote-as internal + neighbor fd00:0:4::9 timers 3 10 + address-family ipv4 unicast + redistribute connected route-map RMAP4 + ! + address-family ipv6 unicast + redistribute connected route-map RMAP6 + neighbor fd00:0:4::9 activate + +ip prefix-list RANGE4 seq 10 permit 172.16.0.0/16 le 24 +ip prefix-list RANGE4 seq 20 permit 10.0.0.0/8 ge 32 + +ipv6 prefix-list RANGE6 seq 10 permit fd00:600::0/64 +ipv6 prefix-list RANGE6 seq 20 permit 2001:db8::0/64 ge 128 + +route-map RMAP4 permit 10 + match ip address prefix-list RANGE4 +! +route-map RMAP6 permit 10 + match ipv6 address prefix-list RANGE6 diff --git a/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..cd48dd4697ce --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step1.json @@ -0,0 +1,162 @@ +{ + "routerId": "10.6.6.6", + "localAS": 65000, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r6", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + 
"ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r6", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step2.json b/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..cd48dd4697ce --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step2.json @@ -0,0 +1,162 @@ +{ + "routerId": "10.6.6.6", + "localAS": 65000, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r6", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "rr", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r6", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/r6/zebra.conf b/tests/topotests/bgp_nexthop_ipv6/r6/zebra.conf new file mode 100644 index 000000000000..84bbb95a833a --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/r6/zebra.conf @@ -0,0 +1,14 @@ +ip forwarding +ipv6 forwarding + +int eth-dummy + ip addr 172.16.6.6/24 + ip addr fd00:600::6/64 + +int eth-rr + ip addr 192.168.4.6/24 + ipv6 address fd00:0:4::6/64 + +int lo + ip addr 10.6.6.6/32 + ipv6 address 2001:db8::6/128 diff --git a/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf new file mode 100644 index 000000000000..6dcded15258c --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf @@ -0,0 +1,30 @@ +router bgp 65000 + no bgp ebgp-requires-policy + neighbor fd00:0:2::1 remote-as internal + neighbor fd00:0:2::1 timers 3 10 + neighbor fd00:0:2::2 remote-as internal + neighbor fd00:0:2::2 timers 3 10 + neighbor fd00:0:2::3 remote-as internal + neighbor fd00:0:2::3 timers 3 10 + neighbor fd00:0:2::4 remote-as 
external + neighbor fd00:0:2::4 timers 3 10 + neighbor fd00:0:3::5 remote-as external + neighbor fd00:0:3::5 timers 3 10 + neighbor fd00:0:4::6 remote-as internal + neighbor fd00:0:4::6 timers 3 10 + address-family ipv4 unicast + neighbor fd00:0:2::1 route-reflector-client + neighbor fd00:0:2::2 route-reflector-client + neighbor fd00:0:2::3 route-reflector-client + neighbor fd00:0:4::6 route-reflector-client + address-family ipv6 unicast + neighbor fd00:0:2::1 route-reflector-client + neighbor fd00:0:2::1 activate + neighbor fd00:0:2::2 route-reflector-client + neighbor fd00:0:2::2 activate + neighbor fd00:0:2::3 route-reflector-client + neighbor fd00:0:2::3 activate + neighbor fd00:0:2::4 nexthop-local unchanged + neighbor fd00:0:2::4 activate + neighbor fd00:0:3::5 activate + neighbor fd00:0:4::6 activate diff --git a/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..c8b8e1e89356 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_step1.json @@ -0,0 +1,220 @@ +{ + "routerId": "10.9.9.9", + "localAS": 65000, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:eth-sw", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:eth-sw", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:eth-sw", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "r5", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r5:eth-rr", + "hostname": "r5", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "r6", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r6:eth-rr", + "hostname": "r6", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:eth-sw", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:eth-sw", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:eth-sw", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + 
"fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "r5", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r5:eth-rr", + "hostname": "r5", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "r6", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r6:eth-rr", + "hostname": "r6", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_step2.json b/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..c8b8e1e89356 --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_step2.json @@ -0,0 +1,220 @@ +{ + "routerId": "10.9.9.9", + "localAS": 65000, + "routes": { + "2001:db8::1/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:eth-sw", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::2/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:eth-sw", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::3/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8::4/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:eth-sw", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::5/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "r5", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r5:eth-rr", + "hostname": "r5", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8::6/128": [ + { + "nexthops": [ + { + "ip": "fd00:0:4::6", + "hostname": "r6", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r6:eth-rr", + "hostname": "r6", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:100::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::1", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:eth-sw", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:200::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:eth-sw", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:300::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::3", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "fd00:400::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:2::4", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:eth-sw", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:500::/64": [ + { + "nexthops": [ + { + "ip": "fd00:0:3::5", + "hostname": "r5", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r5:eth-rr", + "hostname": "r5", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "fd00:600::/64": [ + { + "nexthops": [ + { + 
"ip": "fd00:0:4::6", + "hostname": "r6", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r6:eth-rr", + "hostname": "r6", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_summary.json b/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_summary.json new file mode 100644 index 000000000000..ceac91215dad --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/rr/show_bgp_ipv6_summary.json @@ -0,0 +1,6 @@ +{ + "ipv6Unicast": { + "failedPeers": 0, + "totalPeers": 6 + } +} diff --git a/tests/topotests/bgp_nexthop_ipv6/rr/zebra.conf b/tests/topotests/bgp_nexthop_ipv6/rr/zebra.conf new file mode 100644 index 000000000000..21d44f3a15fa --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/rr/zebra.conf @@ -0,0 +1,18 @@ +ip forwarding +ipv6 forwarding + +int eth-sw + ip addr 192.168.2.9/24 + ipv6 address fd00:0:2::9/64 + +int eth-r5 + ip addr 192.168.3.9/24 + ipv6 address fd00:0:3::9/64 + +int eth-r6 + ip addr 192.168.4.9/24 + ipv6 address fd00:0:4::9/64 + +int lo + ip addr 10.9.9.9/32 + ipv6 address 2001:db8::9/128 diff --git a/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py b/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py new file mode 100644 index 000000000000..24d71f5622ae --- /dev/null +++ b/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_bgp_nexthop_ipv6_topo1.py +# +# Copyright (c) 2024 by +# Cumulus Networks, Inc. +# 6WIND S.A. +# + +""" +Ensure that BGP ipv6 nexthops are correct +""" + +import os +import sys +import pytest +from functools import partial +import json + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. + + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + """ + All peers are FRR BGP peers except r3 that is a exabgp peer. + rr is a route-reflector for AS 65000 iBGP peers. 
+ Exabgp does not send any IPv6 Link-Local nexthop + + iBGP peers | eBGP peers + | + AS 65000 | + | + +---+ | + | r6| | + +---+ | + | | + fd00:0:3::0/64 | + | | AS 65002 + +---+ | +---+ + |rr |----fd00:0:4::0/64---| r5| + +---+ |_________+---+ + | | +---+ + fd00:0:2::0/64----------------| r4| + / | \ | +---+ + +---+ +---+ +---+ | AS 65001 + | r1| | r2| |r3 | | + +---+ +---+ +---+ + """ + + def connect_routers(tgen, left, right): + for rname in [left, right]: + if rname not in tgen.routers().keys(): + tgen.add_router(rname) + + switch = tgen.add_switch("s-{}-{}".format(left, right)) + switch.add_link(tgen.gears[left], nodeif="eth-{}".format(right)) + switch.add_link(tgen.gears[right], nodeif="eth-{}".format(left)) + + def connect_switchs(tgen, rname, switch): + if rname not in tgen.routers().keys(): + tgen.add_router(rname) + + switch.add_link(tgen.gears[rname], nodeif="eth-{}".format(switch.name)) + + def connect_dummy(tgen, rname, switch): + if rname not in tgen.routers().keys(): + tgen.add_router(rname) + + switch.add_link(tgen.gears[rname], nodeif="eth-dummy") + + # sw_du switch is for a dummy interface (for local network) + for i in range(1, 7): + if i == 3: + # r3 is an exabgp peer + continue + sw_du = tgen.add_switch("sw%s" % i) + connect_dummy(tgen, "r%s" % i, sw_du) + + # sw switch is for interconnecting peers on the same subnet + sw = tgen.add_switch("sw") + connect_switchs(tgen, "rr", sw) + connect_switchs(tgen, "r1", sw) + connect_switchs(tgen, "r2", sw) + connect_switchs(tgen, "r4", sw) + + # directly connected without switch routers + connect_routers(tgen, "rr", "r5") + connect_routers(tgen, "rr", "r6") + + ## Add iBGP ExaBGP neighbor + peer_ip = "fd00:0:2::3" ## peer + peer_route = "via fd00:0:2::9" ## router + r3 = tgen.add_exabgp_peer("r3", ip=peer_ip, defaultRoute=peer_route) + sw.add_link(r3) + + +##################################################### +## +## Tests starting +## +##################################################### + + +def setup_module(module): + "Setup topology" + tgen = Topogen(build_topo, module.__name__) + tgen.start_topology() + + # This is a sample of configuration loading. + router_list = tgen.routers() + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + tgen.start_router() + + # Start r3 exabgp peer + r3 = tgen.gears["r3"] + r3.start(os.path.join(CWD, "r3"), os.path.join(CWD, "exabgp.env")) + + +def get_link_local(rname, ifname, cache): + ip = cache.get(rname, {}).get(ifname) + if ip: + return ip + + tgen = get_topogen() + out = tgen.gears[rname].vtysh_cmd("show interface %s json" % ifname, isjson=True) + for address in out[ifname]["ipAddresses"]: + if not address["address"].startswith("fe80::"): + continue + ip = address["address"].split("/")[0] + cache.setdefault(rname, {})[ifname] = ip + return ip + + +def replace_link_local(expected, cache): + for prefix, prefix_info in expected.get("routes", {}).items(): + for nexthop in prefix_info[0].get("nexthops", []): + ip = nexthop.get("ip", "") + if not ip.startswith("link-local:"): + continue + rname = ip.split(":")[1] + ifname = ip.split(":")[2] + ip = get_link_local(rname, ifname, cache) + nexthop["ip"] = ip + + +def teardown_module(_mod): + "Teardown the pytest environment" + tgen = get_topogen() + + # This function tears down the whole topology. 
+ tgen.stop_topology() + + +def test_converge_protocols(): + "Wait for protocol convergence" + + tgen = get_topogen() + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + rr = tgen.gears["rr"] + ref_file = "{}/{}/show_bgp_ipv6_summary.json".format(CWD, rr.name) + expected = json.loads(open(ref_file).read()) + + test_func = partial( + topotest.router_json_cmp, + rr, + "show bgp ipv6 summary json", + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = "{}: BGP convergence".format(rr.name) + assert res is None, assertmsg + + +def test_bgp_ipv6_table_step1(): + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + global link_local_cache + link_local_cache = {} + router_list = tgen.routers().values() + for router in router_list: + # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name) + ref_file = "{}/{}/show_bgp_ipv6_step1.json".format(CWD, router.name) + expected = json.loads(open(ref_file).read()) + replace_link_local(expected, link_local_cache) + + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp ipv6 unicast json", + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = "{}: BGP IPv6 Nexthop failure".format(router.name) + assert res is None, assertmsg + + +def test_bgp_ipv6_table_step2(): + tgen = get_topogen() + + # Don't run this test if we have any failure. + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + rr = tgen.gears["rr"] + rr.vtysh_cmd( + """ +configure terminal +router bgp 65000 + address-family ipv6 unicast + no neighbor fd00:0:2::4 nexthop-local unchanged +""" + ) + + router_list = tgen.routers().values() + for router in router_list: + # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name) + ref_file = "{}/{}/show_bgp_ipv6_step2.json".format(CWD, router.name) + expected = json.loads(open(ref_file).read()) + replace_link_local(expected, link_local_cache) + + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp ipv6 unicast json", + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = "{}: BGP IPv6 Nexthop failure".format(router.name) + assert res is None, assertmsg + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args)) From f57a56bca0093e52e6edce8a1829520f7f4f4fbb Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Thu, 10 Oct 2024 12:58:43 +0300 Subject: [PATCH 58/73] tools: Add missing mgmtd into logrotate/rsyslogd Signed-off-by: Donatas Abraitis --- redhat/frr.logrotate | 8 ++++++++ tools/etc/logrotate.d/frr | 2 +- tools/etc/rsyslog.d/45-frr.conf | 2 ++ 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/redhat/frr.logrotate b/redhat/frr.logrotate index 31061e3ae0fe..1224a332b46f 100644 --- a/redhat/frr.logrotate +++ b/redhat/frr.logrotate @@ -78,6 +78,14 @@ endscript } +/var/log/frr/mgmtd.log { + notifempty + missingok + postrotate + /bin/kill -USR1 `cat /var/run/frr/mgmtd.pid 2> /dev/null` 2> /dev/null || true + endscript +} + /var/log/frr/nhrpd.log { notifempty missingok diff --git a/tools/etc/logrotate.d/frr b/tools/etc/logrotate.d/frr index 735af6539b26..2da554350bc6 100644 --- a/tools/etc/logrotate.d/frr +++ b/tools/etc/logrotate.d/frr @@ -16,7 +16,7 @@ # between file and syslog, rsyslogd might still have file # open, as well as the 
daemons, so always signal the daemons. # It's safe, a NOP if (only) syslog is being used. - for i in babeld bgpd eigrpd isisd ldpd nhrpd ospf6d ospfd sharpd \ + for i in babeld bgpd eigrpd isisd ldpd mgmtd nhrpd ospf6d ospfd sharpd \ pimd pim6d ripd ripngd zebra pathd pbrd staticd bfdd fabricd vrrpd; do if [ -e /var/run/frr/$i.pid ] ; then pids="$pids $(cat /var/run/frr/$i.pid)" diff --git a/tools/etc/rsyslog.d/45-frr.conf b/tools/etc/rsyslog.d/45-frr.conf index 75b20d76bc10..ef37d66d820e 100644 --- a/tools/etc/rsyslog.d/45-frr.conf +++ b/tools/etc/rsyslog.d/45-frr.conf @@ -11,6 +11,7 @@ if $programname == 'babeld' or $programname == 'isisd' or $programname == 'fabricd' or $programname == 'ldpd' or + $programname == 'mgmtd' or $programname == 'nhrpd' or $programname == 'ospf6d' or $programname == 'ospfd' or @@ -33,6 +34,7 @@ if $programname == 'babeld' or $programname == 'isisd' or $programname == 'fabricd' or $programname == 'ldpd' or + $programname == 'mgmtd' or $programname == 'nhrpd' or $programname == 'ospf6d' or $programname == 'ospfd' or From 0f8842ed18ff0466b81267ff29cc8102906a9340 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 10 Oct 2024 09:13:39 -0400 Subject: [PATCH 59/73] bgpd: Allow specification of vrf in show bgp neighbor graceful-restart This command did not allow the operator to display neighbor information related to graceful-restart when used inside of a vrf. Signed-off-by: Donald Sharp --- bgpd/bgp_vty.c | 88 +++++++++++++++++++----------------------------- doc/user/bgp.rst | 8 +++++ 2 files changed, 42 insertions(+), 54 deletions(-) diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index e7be2a33d2b6..1877d451ad8a 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -161,9 +161,8 @@ static struct peer_group *listen_range_exists(struct bgp *bgp, static void bgp_show_global_graceful_restart_mode_vty(struct vty *vty, struct bgp *bgp); -static int bgp_show_neighbor_graceful_restart_afi_all(struct vty *vty, - enum show_type type, - const char *ip_str, +static int bgp_show_neighbor_graceful_restart_afi_all(struct vty *vty, struct bgp *bgp, + enum show_type type, const char *ip_str, afi_t afi, bool use_json); static enum node_type bgp_node_type(afi_t afi, safi_t safi) @@ -16144,21 +16143,13 @@ static int bgp_show_neighbor(struct vty *vty, struct bgp *bgp, return CMD_SUCCESS; } -static void bgp_show_neighbor_graceful_restart_vty(struct vty *vty, - enum show_type type, - const char *ip_str, +static void bgp_show_neighbor_graceful_restart_vty(struct vty *vty, struct bgp *bgp, + enum show_type type, const char *ip_str, afi_t afi, json_object *json) { - int ret; - struct bgp *bgp; union sockunion su; - bgp = bgp_get_default(); - - if (!bgp) - return; - if (!json) bgp_show_global_graceful_restart_mode_vty(vty, bgp); @@ -16313,48 +16304,41 @@ static int bgp_show_neighbor_vty(struct vty *vty, const char *name, return CMD_SUCCESS; } - - /* "show [ip] bgp neighbors graceful-restart" commands. 
*/ -DEFUN (show_ip_bgp_neighbors_graceful_restart, - show_ip_bgp_neighbors_graceful_restart_cmd, - "show bgp [] neighbors [] graceful-restart [json]", - SHOW_STR - BGP_STR - IP_STR - IPV6_STR - NEIGHBOR_STR - "Neighbor to display information about\n" - "Neighbor to display information about\n" - "Neighbor on BGP configured interface\n" - GR_SHOW +DEFPY (show_ip_bgp_neighbors_graceful_restart, + show_ip_bgp_neighbors_graceful_restart_cmd, + "show bgp []$afi [ VIEWVRFNAME$vrf] neighbors [$neigh] graceful-restart [json]$json", + SHOW_STR + BGP_STR + IP_STR + IPV6_STR + BGP_INSTANCE_HELP_STR + NEIGHBOR_STR + "Neighbor to display information about\n" + "Neighbor to display information about\n" + "Neighbor on BGP configured interface\n" + GR_SHOW JSON_STR) { - char *sh_arg = NULL; - enum show_type sh_type; - int idx = 0; - afi_t afi = AFI_MAX; - bool uj = use_json(argc, argv); - - if (!argv_find_and_parse_afi(argv, argc, &idx, &afi)) - afi = AFI_MAX; + enum show_type sh_type = show_all; + afi_t afiz = AFI_IP; + bool uj = !!json; + struct bgp *bgp; - idx++; + if (afi) + afiz = bgp_vty_afi_from_str(afi); - if (argv_find(argv, argc, "A.B.C.D", &idx) - || argv_find(argv, argc, "X:X::X:X", &idx) - || argv_find(argv, argc, "WORD", &idx)) { + if (neigh) sh_type = show_peer; - sh_arg = argv[idx]->arg; - } else - sh_type = show_all; - if (!argv_find(argv, argc, "graceful-restart", &idx)) - return CMD_SUCCESS; + bgp = vrf ? bgp_lookup_by_name(vrf) : bgp_get_default(); + if (!bgp) { + vty_out(vty, "No such bgp instance %s", vrf ? vrf : ""); + return CMD_WARNING; + } - return bgp_show_neighbor_graceful_restart_afi_all(vty, sh_type, sh_arg, - afi, uj); + return bgp_show_neighbor_graceful_restart_afi_all(vty, bgp, sh_type, neigh, afiz, uj); } /* "show [ip] bgp neighbors" commands. */ @@ -16528,9 +16512,8 @@ static void bgp_show_global_graceful_restart_mode_vty(struct vty *vty, vty_out(vty, "\n"); } -static int bgp_show_neighbor_graceful_restart_afi_all(struct vty *vty, - enum show_type type, - const char *ip_str, +static int bgp_show_neighbor_graceful_restart_afi_all(struct vty *vty, struct bgp *bgp, + enum show_type type, const char *ip_str, afi_t afi, bool use_json) { json_object *json = NULL; @@ -16542,14 +16525,11 @@ static int bgp_show_neighbor_graceful_restart_afi_all(struct vty *vty, afi = AFI_IP; while ((afi != AFI_L2VPN) && (afi < AFI_MAX)) { - - bgp_show_neighbor_graceful_restart_vty( - vty, type, ip_str, afi, json); + bgp_show_neighbor_graceful_restart_vty(vty, bgp, type, ip_str, afi, json); afi++; } } else if (afi != AFI_MAX) { - bgp_show_neighbor_graceful_restart_vty(vty, type, ip_str, afi, - json); + bgp_show_neighbor_graceful_restart_vty(vty, bgp, type, ip_str, afi, json); } else { if (json) json_object_free(json); diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index 4632c70d53c4..1e9b4f9d2764 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -1169,6 +1169,14 @@ BGP GR Peer Mode Commands at the peer level. +BGP GR Show Commands +^^^^^^^^^^^^^^^^^^^^ + +.. clicmd:: show bgp [] [ VRF] neighbors [] graceful-restart [json] + + This command will display information about the neighbors graceful-restart status + + Long-lived Graceful Restart --------------------------- From b9a9be492e8dde39a0040bd1140893ccab83c2be Mon Sep 17 00:00:00 2001 From: Donatas Abraitis Date: Thu, 10 Oct 2024 16:50:38 +0300 Subject: [PATCH 60/73] bgpd: Move some non BGP-specific route-map functions to lib They are managed under `frr-route-map`, not under `frr-bgp-route-map`. 
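As an illustration (the route-map and list names below are arbitrary examples, not part of this change), configurations using the moved commands keep working exactly as before; only the code that registers them moves from bgpd/bgp_routemap.c to lib/routemap_cli.c:

route-map RM6 permit 10
 match ipv6 next-hop ACL6-LIST
!
route-map RM6 permit 20
 match ipv6 next-hop prefix-list PFX6-LIST
!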
Fixes: https://github.com/FRRouting/frr/issues/17055 Signed-off-by: Donatas Abraitis --- bgpd/bgp_routemap.c | 80 --------------------------------------------- lib/routemap_cli.c | 73 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 73 insertions(+), 80 deletions(-) diff --git a/bgpd/bgp_routemap.c b/bgpd/bgp_routemap.c index 583b9e7980f5..4900bb3ce351 100644 --- a/bgpd/bgp_routemap.c +++ b/bgpd/bgp_routemap.c @@ -7232,43 +7232,6 @@ DEFUN_YANG (no_set_aggregator_as, return nb_cli_apply_changes(vty, NULL); } -DEFUN_YANG (match_ipv6_next_hop, - match_ipv6_next_hop_cmd, - "match ipv6 next-hop ACCESSLIST6_NAME", - MATCH_STR - IPV6_STR - "Match IPv6 next-hop address of route\n" - "IPv6 access-list name\n") -{ - const char *xpath = - "./match-condition[condition='frr-route-map:ipv6-next-hop-list']"; - char xpath_value[XPATH_MAXLEN]; - - nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); - snprintf(xpath_value, sizeof(xpath_value), - "%s/rmap-match-condition/list-name", xpath); - nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, - argv[argc - 1]->arg); - - return nb_cli_apply_changes(vty, NULL); -} - -DEFUN_YANG (no_match_ipv6_next_hop, - no_match_ipv6_next_hop_cmd, - "no match ipv6 next-hop [ACCESSLIST6_NAME]", - NO_STR - MATCH_STR - IPV6_STR - "Match IPv6 next-hop address of route\n" - "IPv6 access-list name\n") -{ - const char *xpath = - "./match-condition[condition='frr-route-map:ipv6-next-hop-list']"; - - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - return nb_cli_apply_changes(vty, NULL); -} - DEFUN_YANG (match_ipv6_next_hop_address, match_ipv6_next_hop_address_cmd, "match ipv6 next-hop address X:X::X:X", @@ -7326,45 +7289,6 @@ ALIAS_HIDDEN (no_match_ipv6_next_hop_address, "Match IPv6 next-hop address of route\n" "IPv6 address of next hop\n") -DEFUN_YANG (match_ipv6_next_hop_prefix_list, - match_ipv6_next_hop_prefix_list_cmd, - "match ipv6 next-hop prefix-list PREFIXLIST_NAME", - MATCH_STR - IPV6_STR - "Match IPv6 next-hop address of route\n" - "Match entries by prefix-list\n" - "IPv6 prefix-list name\n") -{ - const char *xpath = - "./match-condition[condition='frr-route-map:ipv6-next-hop-prefix-list']"; - char xpath_value[XPATH_MAXLEN]; - - nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); - snprintf(xpath_value, sizeof(xpath_value), - "%s/rmap-match-condition/list-name", xpath); - nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, - argv[argc - 1]->arg); - - return nb_cli_apply_changes(vty, NULL); -} - -DEFUN_YANG (no_match_ipv6_next_hop_prefix_list, - no_match_ipv6_next_hop_prefix_list_cmd, - "no match ipv6 next-hop prefix-list [PREFIXLIST_NAME]", - NO_STR - MATCH_STR - IPV6_STR - "Match IPv6 next-hop address of route\n" - "Match entries by prefix-list\n" - "IPv6 prefix-list name\n") -{ - const char *xpath = - "./match-condition[condition='frr-route-map:ipv6-next-hop-prefix-list']"; - - nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); - return nb_cli_apply_changes(vty, NULL); -} - DEFPY_YANG (match_ipv4_next_hop, match_ipv4_next_hop_cmd, "match ip next-hop address A.B.C.D", @@ -8037,12 +7961,8 @@ void bgp_route_map_init(void) route_map_install_set(&route_set_ipv6_nexthop_peer_cmd); route_map_install_match(&route_match_rpki_extcommunity_cmd); - install_element(RMAP_NODE, &match_ipv6_next_hop_cmd); install_element(RMAP_NODE, &match_ipv6_next_hop_address_cmd); - install_element(RMAP_NODE, &match_ipv6_next_hop_prefix_list_cmd); - install_element(RMAP_NODE, &no_match_ipv6_next_hop_cmd); install_element(RMAP_NODE, 
&no_match_ipv6_next_hop_address_cmd); - install_element(RMAP_NODE, &no_match_ipv6_next_hop_prefix_list_cmd); install_element(RMAP_NODE, &match_ipv6_next_hop_old_cmd); install_element(RMAP_NODE, &no_match_ipv6_next_hop_old_cmd); install_element(RMAP_NODE, &match_ipv4_next_hop_cmd); diff --git a/lib/routemap_cli.c b/lib/routemap_cli.c index f22d5880807c..f64c3c2376a9 100644 --- a/lib/routemap_cli.c +++ b/lib/routemap_cli.c @@ -417,6 +417,74 @@ DEFPY_YANG( return nb_cli_apply_changes(vty, NULL); } +DEFUN_YANG (match_ipv6_next_hop, + match_ipv6_next_hop_cmd, + "match ipv6 next-hop ACCESSLIST6_NAME", + MATCH_STR + IPV6_STR + "Match IPv6 next-hop address of route\n" + "IPv6 access-list name\n") +{ + const char *xpath = "./match-condition[condition='frr-route-map:ipv6-next-hop-list']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), "%s/rmap-match-condition/list-name", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, argv[argc - 1]->arg); + + return nb_cli_apply_changes(vty, NULL); +} + +DEFUN_YANG (no_match_ipv6_next_hop, + no_match_ipv6_next_hop_cmd, + "no match ipv6 next-hop [ACCESSLIST6_NAME]", + NO_STR + MATCH_STR + IPV6_STR + "Match IPv6 next-hop address of route\n" + "IPv6 access-list name\n") +{ + const char *xpath = "./match-condition[condition='frr-route-map:ipv6-next-hop-list']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + return nb_cli_apply_changes(vty, NULL); +} + +DEFUN_YANG (match_ipv6_next_hop_prefix_list, + match_ipv6_next_hop_prefix_list_cmd, + "match ipv6 next-hop prefix-list PREFIXLIST_NAME", + MATCH_STR + IPV6_STR + "Match IPv6 next-hop address of route\n" + "Match entries by prefix-list\n" + "IPv6 prefix-list name\n") +{ + const char *xpath = "./match-condition[condition='frr-route-map:ipv6-next-hop-prefix-list']"; + char xpath_value[XPATH_MAXLEN]; + + nb_cli_enqueue_change(vty, xpath, NB_OP_CREATE, NULL); + snprintf(xpath_value, sizeof(xpath_value), "%s/rmap-match-condition/list-name", xpath); + nb_cli_enqueue_change(vty, xpath_value, NB_OP_MODIFY, argv[argc - 1]->arg); + + return nb_cli_apply_changes(vty, NULL); +} + +DEFUN_YANG (no_match_ipv6_next_hop_prefix_list, + no_match_ipv6_next_hop_prefix_list_cmd, + "no match ipv6 next-hop prefix-list [PREFIXLIST_NAME]", + NO_STR + MATCH_STR + IPV6_STR + "Match IPv6 next-hop address of route\n" + "Match entries by prefix-list\n" + "IPv6 prefix-list name\n") +{ + const char *xpath = "./match-condition[condition='frr-route-map:ipv6-next-hop-prefix-list']"; + + nb_cli_enqueue_change(vty, xpath, NB_OP_DESTROY, NULL); + return nb_cli_apply_changes(vty, NULL); +} + DEFPY_YANG( match_ipv6_next_hop_type, match_ipv6_next_hop_type_cmd, "match ipv6 next-hop type $type", @@ -1665,6 +1733,11 @@ void route_map_cli_init(void) install_element(RMAP_NODE, &match_ipv6_next_hop_type_cmd); install_element(RMAP_NODE, &no_match_ipv6_next_hop_type_cmd); + install_element(RMAP_NODE, &match_ipv6_next_hop_cmd); + install_element(RMAP_NODE, &match_ipv6_next_hop_prefix_list_cmd); + install_element(RMAP_NODE, &no_match_ipv6_next_hop_cmd); + install_element(RMAP_NODE, &no_match_ipv6_next_hop_prefix_list_cmd); + install_element(RMAP_NODE, &match_metric_cmd); install_element(RMAP_NODE, &no_match_metric_cmd); From c853c8d13baeff86e99b3c5217fd32e09db10f80 Mon Sep 17 00:00:00 2001 From: sri-mohan1 Date: Wed, 9 Oct 2024 14:19:55 +0530 Subject: [PATCH 61/73] bgpd: changes for code maintainability these changes are for improving the code 
maintainability and readability Signed-off-by: sri-mohan1 --- bgpd/bgp_nexthop.c | 16 +++++------- bgpd/bgp_packet.c | 64 +++++++++++++++++++--------------------------- 2 files changed, 34 insertions(+), 46 deletions(-) diff --git a/bgpd/bgp_nexthop.c b/bgpd/bgp_nexthop.c index 98eb9565bf20..564ad118ebc3 100644 --- a/bgpd/bgp_nexthop.c +++ b/bgpd/bgp_nexthop.c @@ -528,14 +528,12 @@ bool bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, tmp_addr.p.prefixlen = p->prefixlen; } else { /* Here we need to find out which nexthop to be used*/ - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP))) { tmp_addr.p.u.prefix4 = attr->nexthop; tmp_addr.p.prefixlen = IPV4_MAX_BITLEN; - } else if ((attr->mp_nexthop_len) - && ((attr->mp_nexthop_len - == BGP_ATTR_NHLEN_IPV4) - || (attr->mp_nexthop_len - == BGP_ATTR_NHLEN_VPNV4))) { + } else if ((attr->mp_nexthop_len) && + ((attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV4) || + (attr->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV4))) { tmp_addr.p.u.prefix4 = attr->mp_nexthop_global_in; tmp_addr.p.prefixlen = IPV4_MAX_BITLEN; @@ -564,11 +562,11 @@ bool bgp_nexthop_self(struct bgp *bgp, afi_t afi, uint8_t type, memset(&tmp_tip, 0, sizeof(tmp_tip)); tmp_tip.addr = attr->nexthop; - if (attr->flag & ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP)) { + if (CHECK_FLAG(attr->flag, ATTR_FLAG_BIT(BGP_ATTR_NEXT_HOP))) { tmp_tip.addr = attr->nexthop; } else if ((attr->mp_nexthop_len) && - ((attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV4) - || (attr->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV4))) { + ((attr->mp_nexthop_len == BGP_ATTR_NHLEN_IPV4) || + (attr->mp_nexthop_len == BGP_ATTR_NHLEN_VPNV4))) { tmp_tip.addr = attr->mp_nexthop_global_in; } diff --git a/bgpd/bgp_packet.c b/bgpd/bgp_packet.c index 62be7ffbf733..646ab1d95f62 100644 --- a/bgpd/bgp_packet.c +++ b/bgpd/bgp_packet.c @@ -2935,35 +2935,31 @@ static int bgp_route_refresh_receive(struct peer_connection *connection, if (bgp_debug_neighbor_events(peer)) { char buf[INET6_BUFSIZ]; - zlog_debug( - "%pBP rcvd %s %s seq %u %s/%d ge %d le %d%s", - peer, - (common & ORF_COMMON_PART_REMOVE - ? "Remove" - : "Add"), - (common & ORF_COMMON_PART_DENY - ? "deny" - : "permit"), - orfp.seq, - inet_ntop( - orfp.p.family, - &orfp.p.u.prefix, - buf, - INET6_BUFSIZ), - orfp.p.prefixlen, - orfp.ge, orfp.le, - ok ? "" : " MALFORMED"); + zlog_debug("%pBP rcvd %s %s seq %u %s/%d ge %d le %d%s", + peer, + (CHECK_FLAG(common, ORF_COMMON_PART_REMOVE) + ? "Remove" + : "Add"), + (CHECK_FLAG(common, ORF_COMMON_PART_DENY) + ? "deny" + : "permit"), + orfp.seq, + inet_ntop(orfp.p.family, &orfp.p.u.prefix, + buf, INET6_BUFSIZ), + orfp.p.prefixlen, orfp.ge, orfp.le, + ok ? "" : " MALFORMED"); } if (ok) - ret = prefix_bgp_orf_set( - name, afi, &orfp, - (common & ORF_COMMON_PART_DENY - ? 0 - : 1), - (common & ORF_COMMON_PART_REMOVE - ? 0 - : 1)); + ret = prefix_bgp_orf_set(name, afi, &orfp, + (CHECK_FLAG(common, + ORF_COMMON_PART_DENY) + ? 0 + : 1), + (CHECK_FLAG(common, + ORF_COMMON_PART_REMOVE) + ? 0 + : 1)); if (!ok || (ok && ret != CMD_SUCCESS)) { zlog_info( @@ -3190,17 +3186,11 @@ static void bgp_dynamic_capability_addpath(uint8_t *pnt, int action, if (bgp_debug_neighbor_events(peer)) zlog_debug("%s OPEN has %s capability for afi/safi: %s/%s%s%s", - peer->host, - lookup_msg(capcode_str, hdr->code, - NULL), - iana_afi2str(pkt_afi), - iana_safi2str(pkt_safi), - (bac.flags & BGP_ADDPATH_RX) - ? ", receive" - : "", - (bac.flags & BGP_ADDPATH_TX) - ? 
", transmit" - : ""); + peer->host, lookup_msg(capcode_str, hdr->code, NULL), + iana_afi2str(pkt_afi), iana_safi2str(pkt_safi), + CHECK_FLAG(bac.flags, BGP_ADDPATH_RX) ? ", receive" : "", + CHECK_FLAG(bac.flags, BGP_ADDPATH_TX) ? ", transmit" + : ""); if (bgp_map_afi_safi_iana2int(pkt_afi, pkt_safi, &afi, &safi)) { From 400cc7055aca4c2813afaa4d29ac1c42378daea6 Mon Sep 17 00:00:00 2001 From: Jafar Al-Gharaibeh Date: Wed, 31 Jul 2024 23:31:57 -0500 Subject: [PATCH 62/73] debian, redhat: updating changelog for 10.1 release Signed-off-by: Jafar Al-Gharaibeh --- debian/changelog | 6 +++--- redhat/frr.spec.in | 15 ++++++++++++++- 2 files changed, 17 insertions(+), 4 deletions(-) diff --git a/debian/changelog b/debian/changelog index 96c66db89bf6..a556f00f41b5 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,8 @@ -frr (10.1~dev-1) UNRELEASED; urgency=medium +frr (10.1-0) unstable; urgency=medium - * FRR Dev 10.1 + * New upstream release FRR 10.1 - -- Jafar Al-Gharaibeh Tue, 26 Mar 2024 02:00:00 -0600 + -- Jafar Al-Gharaibeh Fri, 26 Jul 2024 02:00:00 -0600 frr (10.0-0) unstable; urgency=medium diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in index d6775e6e9cae..955a73e30795 100644 --- a/redhat/frr.spec.in +++ b/redhat/frr.spec.in @@ -834,7 +834,20 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons %changelog -* Mon Mar 25 2024 Jafar Al-Gharaibeh - %{version} +* Fri Jul 26 2024 Jafar Al-Gharaibeh - %{version} + +* Fri Jul 26 2024 Jafar Al-Gharaibeh - 10.1 +- Breaking changes +- Enable BGP dynamic capability by default for datacenter profile +- Split BGP `rpki cache` command into separate per SSH/TCP +- Add deprecation cycle for OSPF `router-info X [A.B.C.D]` command +- Major highlights: +- BGP dampening per-neighbor support +- BMP send-experimental stats +- Implement extended link-bandwidth for BGP +- Paths Limit for Multiple Paths in BGP +- New command for OSPFv2 `ip ospf neighbor-filter NAME [A.B.C.D]` +- Implement non-broadcast support for point-to-multipoint networks * Mon Mar 25 2024 Jafar Al-Gharaibeh - 10.0 - Major highlights: From 426a566cb619b2850dbcecefc5fcfb60043ffaa1 Mon Sep 17 00:00:00 2001 From: Jafar Al-Gharaibeh Date: Thu, 10 Oct 2024 15:44:30 -0500 Subject: [PATCH 63/73] debian, redhat: frr dev 10.3 package housekeeping Signed-off-by: Jafar Al-Gharaibeh --- debian/changelog | 6 ++++++ redhat/frr.spec.in | 5 ++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index a556f00f41b5..9917b5bcb5fe 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +frr (10.3~dev-1) UNRELEASED; urgency=medium + + * FRR 10.3 Development + + -- Jafar Al-Gharaibeh Thu, 10 Oct 2024 02:00:00 -0600 + frr (10.1-0) unstable; urgency=medium * New upstream release FRR 10.1 diff --git a/redhat/frr.spec.in b/redhat/frr.spec.in index 955a73e30795..b65ebf966c9f 100644 --- a/redhat/frr.spec.in +++ b/redhat/frr.spec.in @@ -834,7 +834,10 @@ sed -i 's/ -M rpki//' %{_sysconfdir}/frr/daemons %changelog -* Fri Jul 26 2024 Jafar Al-Gharaibeh - %{version} +* Thu Oct 10 2024 Jafar Al-Gharaibeh - %{version} + +* Thu Oct 10 2024 Jafar Al-Gharaibeh - 10.3-dev +- FRR 10.3 Development * Fri Jul 26 2024 Jafar Al-Gharaibeh - 10.1 - Breaking changes From 8e818bf76c377553e7de305ff55539c2719e4322 Mon Sep 17 00:00:00 2001 From: Simon Ruderich Date: Fri, 11 Oct 2024 08:23:48 +0200 Subject: [PATCH 64/73] doc: routemap: fix typos Signed-off-by: Simon Ruderich --- doc/user/routemap.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git 
a/doc/user/routemap.rst b/doc/user/routemap.rst index 1d2f4e352f38..9034af39c560 100644 --- a/doc/user/routemap.rst +++ b/doc/user/routemap.rst @@ -314,11 +314,11 @@ Route Map Set Command .. clicmd:: set min-metric <(0-4294967295)> - Set the minimum meric for the route. + Set the minimum metric for the route. .. clicmd:: set max-metric <(0-4294967295)> - Set the maximum meric for the route. + Set the maximum metric for the route. .. clicmd:: set aigp-metric From dd135843ad3454f4a12c70b729ff8ba04f2aa4d6 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Thu, 10 Oct 2024 13:29:43 +0200 Subject: [PATCH 65/73] tests: rework bgp_route_server_client Rework bgp_route_server_client in a more standard form in order to facilitate the next commut changes. Cosmetic change. Signed-off-by: Louis Scalbert --- .../r1/show_bgp_ipv6.json | 58 ++++++++++ .../r2/show_bgp_ipv6.json | 78 ++++++++++++++ .../r2/show_bgp_ipv6_summary.json | 6 ++ .../r3/show_bgp_ipv6.json | 32 ++++++ .../test_bgp_route_server_client.py | 100 ++++++++---------- 5 files changed, 219 insertions(+), 55 deletions(-) create mode 100644 tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6.json create mode 100644 tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6.json create mode 100644 tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_summary.json create mode 100644 tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json diff --git a/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6.json b/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6.json new file mode 100644 index 000000000000..1776b1924802 --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6.json @@ -0,0 +1,58 @@ +{ + "routerId": "10.10.10.1", + "localAS": 65001, + "routes": { + "2001:db8:1::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r1", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::1/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r1", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::3/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6.json b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6.json new file mode 100644 index 000000000000..78b4aaefda3f --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6.json @@ -0,0 +1,78 @@ +{ + "routerId": "10.10.10.2", + "localAS": 65000, + "routes": { + "2001:db8:1::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r3", + "afi": "ipv6", + "scope": "global" + }, + { + "hostname": "r3", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::1/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::3/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": 
"r3", + "afi": "ipv6", + "scope": "global" + }, + { + "hostname": "r3", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_summary.json b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_summary.json new file mode 100644 index 000000000000..b40192b452e0 --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_summary.json @@ -0,0 +1,6 @@ +{ + "ipv6Unicast": { + "failedPeers": 0, + "totalPeers": 2 + } +} diff --git a/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json b/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json new file mode 100644 index 000000000000..aebffb2c552f --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json @@ -0,0 +1,32 @@ +{ + "routerId": "10.10.10.3", + "localAS": 65003, + "routes": { + "2001:db8:3::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r3", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::3/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r3", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py b/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py index 29d9842d59dd..8582b09c8601 100644 --- a/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py +++ b/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py @@ -13,6 +13,7 @@ import sys import json import pytest +from functools import partial import functools pytestmark = [pytest.mark.bgpd] @@ -60,67 +61,56 @@ def teardown_module(mod): tgen.stop_topology() -def test_bgp_route_server_client(): - tgen = get_topogen() +def test_converge_protocols(): + "Wait for protocol convergence" + tgen = get_topogen() + # Don't run this test if we have any failure. 
if tgen.routers_have_failure(): pytest.skip(tgen.errors) - r1 = tgen.gears["r1"] r2 = tgen.gears["r2"] + ref_file = "{}/{}/show_bgp_ipv6_summary.json".format(CWD, r2.name) + expected = json.loads(open(ref_file).read()) + + test_func = partial( + topotest.router_json_cmp, + r2, + "show bgp view RS ipv6 summary json", + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = "{}: BGP convergence failed".format(r2.name) + assert res is None, assertmsg + + +def test_bgp_route_server_client_step1(): + tgen = get_topogen() - def _bgp_converge(router): - output = json.loads(router.vtysh_cmd("show bgp ipv6 unicast summary json")) - expected = {"peers": {"2001:db8:1::1": {"state": "Established", "pfxRcd": 2}}} - return topotest.json_cmp(output, expected) - - test_func = functools.partial(_bgp_converge, r1) - _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, "Cannot see BGP sessions to be up" - - def _bgp_prefix_received(router): - output = json.loads(router.vtysh_cmd("show bgp 2001:db8:f::3/128 json")) - expected = { - "prefix": "2001:db8:f::3/128", - "paths": [{"nexthops": [{"ip": "2001:db8:3::2"}]}], - } - return topotest.json_cmp(output, expected) - - test_func = functools.partial(_bgp_prefix_received, r1) - _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, "Cannot see BGP GUA next hop from r3 in r1" - - def _bgp_single_next_hop(router): - output = json.loads(router.vtysh_cmd("show bgp 2001:db8:f::3/128 json")) - return len(output["paths"][0]["nexthops"]) - - assert ( - _bgp_single_next_hop(r1) == 1 - ), "Not ONLY one Next Hop received for 2001:db8:f::3/128" - - def _bgp_gua_lla_next_hop(router): - output = json.loads(router.vtysh_cmd("show bgp view RS 2001:db8:f::3/128 json")) - expected = { - "prefix": "2001:db8:f::3/128", - "paths": [ - { - "nexthops": [ - { - "ip": "2001:db8:3::2", - "hostname": "r3", - "afi": "ipv6", - "scope": "global", - }, - {"hostname": "r3", "afi": "ipv6", "scope": "link-local"}, - ] - } - ], - } - return topotest.json_cmp(output, expected) - - test_func = functools.partial(_bgp_gua_lla_next_hop, r2) - _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) - assert result is None, "Cannot see BGP LLA next hop from r3 in r2" + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router_list = tgen.routers().values() + for router in router_list: + if router.name == "r2": + # route-server + cmd = "show bgp view RS ipv6 unicast json" + else: + cmd = "show bgp ipv6 unicast json" + + # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name) + ref_file = "{}/{}/show_bgp_ipv6.json".format(CWD, router.name) + expected = json.loads(open(ref_file).read()) + + test_func = partial( + topotest.router_json_cmp, + router, + cmd, + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = "{}: BGP IPv6 table failure".format(router.name) + assert res is None, assertmsg if __name__ == "__main__": From da7b2d9831fb87c6b20b41a803214b06354f5cd8 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Thu, 10 Oct 2024 14:59:50 +0200 Subject: [PATCH 66/73] tests: unset r3 enforce-first-as bgp_route_server_client Unset enforce-first-as on r3 of bgp_route_server_client to enable the reception of routes on this router. 
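Because the route server is transparent (it does not prepend its own AS 65000 to the
AS_PATH), routes relayed from the other clients arrive at r3 with the originating
client's AS (for example 65001 from r1) as the leftmost AS. With first-AS enforcement
left on, r3 would reject those updates. A minimal sketch of the resulting r3
configuration, using the addresses and AS numbers from this test's topology and
matching the change below:

router bgp 65003
 bgp router-id 10.10.10.3
 no bgp ebgp-requires-policy
 ! routes learned through the route server do not have 65000 as the leftmost AS
 no bgp enforce-first-as
 neighbor 2001:db8:3::1 remote-as external
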
Signed-off-by: Louis Scalbert --- .../bgp_route_server_client/r3/bgpd.conf | 1 + .../r3/show_bgp_ipv6.json | 26 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/tests/topotests/bgp_route_server_client/r3/bgpd.conf b/tests/topotests/bgp_route_server_client/r3/bgpd.conf index 60a5ffc559a0..f7daba87face 100644 --- a/tests/topotests/bgp_route_server_client/r3/bgpd.conf +++ b/tests/topotests/bgp_route_server_client/r3/bgpd.conf @@ -2,6 +2,7 @@ router bgp 65003 bgp router-id 10.10.10.3 no bgp ebgp-requires-policy + no bgp enforce-first-as neighbor 2001:db8:3::1 remote-as external neighbor 2001:db8:3::1 timers 3 10 neighbor 2001:db8:3::1 timers connect 5 diff --git a/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json b/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json index aebffb2c552f..5ebcbe4c6856 100644 --- a/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json +++ b/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json @@ -2,6 +2,19 @@ "routerId": "10.10.10.3", "localAS": 65003, "routes": { + "2001:db8:1::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], "2001:db8:3::/64": [ { "nexthops": [ @@ -15,6 +28,19 @@ ] } ], + "2001:db8:f::1/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], "2001:db8:f::3/128": [ { "nexthops": [ From 91512c30487ceb1d306529886f4e5a629cd0e2f8 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Fri, 11 Oct 2024 07:12:23 +0200 Subject: [PATCH 67/73] bgpd: split nexthop-local unchanged peer subgroup 5bb99ccad2 ("bgpd: reset ipv6 invalid link-local nexthop") now resets the link-local when originating and destination peers are not on the same network segment. However, it does not work all the time. The fix compares the 'from' and 'peer' global IPv6 address. However, 'peer' refers to one of the peers of subgroup. The subgroup may contain peers located on different network segment. Split nexthop-local unchanged peer subgroup by network segment. Fixes: 5bb99ccad2 ("bgpd: reset ipv6 invalid link-local nexthop") Signed-off-by: Louis Scalbert --- bgpd/bgp_updgrp.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/bgpd/bgp_updgrp.c b/bgpd/bgp_updgrp.c index 90c43b938ffd..ef036067078f 100644 --- a/bgpd/bgp_updgrp.c +++ b/bgpd/bgp_updgrp.c @@ -455,6 +455,10 @@ static unsigned int updgrp_hash_key_make(const void *p) key = jhash_1word(jhash(soo_str, strlen(soo_str), SEED1), key); } + if (afi == AFI_IP6 && + (CHECK_FLAG(peer->af_flags[afi][safi], PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED))) + key = jhash(&peer->nexthop.v6_global, IPV6_MAX_BYTELEN, key); + /* * ANY NEW ITEMS THAT ARE ADDED TO THE key, ENSURE DEBUG * STATEMENT STAYS UP TO DATE @@ -521,6 +525,12 @@ static unsigned int updgrp_hash_key_make(const void *p) peer->soo[afi][safi] ? 
ecommunity_str(peer->soo[afi][safi]) : "(NONE)"); + zlog_debug("%pBP Update Group Hash: IPv6 nexthop-local unchanged: %d IPv6 global %pI6", + peer, + afi == AFI_IP6 && (CHECK_FLAG(peer->af_flags[afi][safi], + PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED)), + &peer->nexthop.v6_global); + zlog_debug("%pBP Update Group Hash key: %u", peer, key); } return key; @@ -655,6 +665,12 @@ static bool updgrp_hash_cmp(const void *p1, const void *p2) !sockunion_same(&pe1->connection->su, &pe2->connection->su)) return false; + if (afi == AFI_IP6 && + (CHECK_FLAG(flags1, PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED) || + CHECK_FLAG(flags2, PEER_FLAG_NEXTHOP_LOCAL_UNCHANGED)) && + !IPV6_ADDR_SAME(&pe1->nexthop.v6_global, &pe2->nexthop.v6_global)) + return false; + return true; } From 5f035edf25119c416951aba1ddac941d59edae75 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Thu, 10 Oct 2024 14:51:11 +0200 Subject: [PATCH 68/73] tests: test nexthop-local unchanged with route-server Test nexthop-local unchanged with route-server. Signed-off-by: Louis Scalbert --- .../bgp_route_server_client/exabgp.env | 53 +++++ ...bgp_ipv6.json => show_bgp_ipv6_step1.json} | 37 +++ .../r1/show_bgp_ipv6_step2.json | 113 ++++++++++ .../bgp_route_server_client/r2/bgpd.conf | 10 + .../r2/show_bgp_ipv6_step1.json | 208 +++++++++++++++++ .../r2/show_bgp_ipv6_step2.json | 208 +++++++++++++++++ .../r2/show_bgp_ipv6_summary.json | 2 +- ...bgp_ipv6.json => show_bgp_ipv6_step1.json} | 28 ++- .../show_bgp_ipv6_step2.json} | 60 +++-- .../bgp_route_server_client/r4/bgpd.conf | 13 ++ .../r4/show_bgp_ipv6_step1.json | 95 ++++++++ .../r4/show_bgp_ipv6_step2.json | 113 ++++++++++ .../bgp_route_server_client/r4/zebra.conf | 7 + .../bgp_route_server_client/r5/exabgp.cfg | 16 ++ .../test_bgp_route_server_client.py | 212 +++++++++++++++++- 15 files changed, 1144 insertions(+), 31 deletions(-) create mode 100644 tests/topotests/bgp_route_server_client/exabgp.env rename tests/topotests/bgp_route_server_client/r1/{show_bgp_ipv6.json => show_bgp_ipv6_step1.json} (59%) create mode 100644 tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_step1.json create mode 100644 tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_step2.json rename tests/topotests/bgp_route_server_client/r3/{show_bgp_ipv6.json => show_bgp_ipv6_step1.json} (65%) rename tests/topotests/bgp_route_server_client/{r2/show_bgp_ipv6.json => r3/show_bgp_ipv6_step2.json} (56%) create mode 100644 tests/topotests/bgp_route_server_client/r4/bgpd.conf create mode 100644 tests/topotests/bgp_route_server_client/r4/show_bgp_ipv6_step1.json create mode 100644 tests/topotests/bgp_route_server_client/r4/show_bgp_ipv6_step2.json create mode 100644 tests/topotests/bgp_route_server_client/r4/zebra.conf create mode 100644 tests/topotests/bgp_route_server_client/r5/exabgp.cfg diff --git a/tests/topotests/bgp_route_server_client/exabgp.env b/tests/topotests/bgp_route_server_client/exabgp.env new file mode 100644 index 000000000000..28e642360a39 --- /dev/null +++ b/tests/topotests/bgp_route_server_client/exabgp.env @@ -0,0 +1,53 @@ +[exabgp.api] +encoder = text +highres = false +respawn = false +socket = '' + +[exabgp.bgp] +openwait = 60 + +[exabgp.cache] +attributes = true +nexthops = true + +[exabgp.daemon] +daemonize = true +pid = '/var/run/exabgp/exabgp.pid' +user = 'exabgp' +##daemonize = false + +[exabgp.log] +all = false +configuration = true +daemon = true +destination = '/var/log/exabgp.log' +enable = true +level = INFO 
+message = false +network = true +packets = false +parser = false +processes = true +reactor = true +rib = false +routes = false +short = false +timers = false + +[exabgp.pdb] +enable = false + +[exabgp.profile] +enable = false +file = '' + +[exabgp.reactor] +speed = 1.0 + +[exabgp.tcp] +acl = false +bind = '' +delay = 0 +once = false +port = 179 diff --git a/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6.json b/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6_step1.json similarity index 59% rename from tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6.json rename to tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6_step1.json index 1776b1924802..387d7b3374b7 100644 --- a/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6.json +++ b/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6_step1.json @@ -13,6 +13,17 @@ "used": true } ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] } ], "2001:db8:3::/64": [ @@ -53,6 +64,32 @@ } ] } + ], + "2001:db8:f::4/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::5/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } ] } } diff --git a/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6_step2.json b/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..f9e68b897723 --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r1/show_bgp_ipv6_step2.json @@ -0,0 +1,113 @@ +{ + "routerId": "10.10.10.1", + "localAS": 65001, + "routes": { + "2001:db8:1::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r1", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:r2-eth0", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::1/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r1", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::3/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:r2-eth0", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::4/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::5/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_route_server_client/r2/bgpd.conf b/tests/topotests/bgp_route_server_client/r2/bgpd.conf index 3b0a24b8ba5b..19607660f98f 100644 --- a/tests/topotests/bgp_route_server_client/r2/bgpd.conf +++ b/tests/topotests/bgp_route_server_client/r2/bgpd.conf @@ -4,14 +4,24 @@ router bgp 65000 view RS neighbor 2001:db8:1::2 remote-as external 
neighbor 2001:db8:1::2 timers 3 10 neighbor 2001:db8:1::2 timers connect 5 + neighbor 2001:db8:1::3 remote-as external + neighbor 2001:db8:1::3 timers 3 10 + neighbor 2001:db8:1::3 timers connect 5 + neighbor 2001:db8:1::4 remote-as external + neighbor 2001:db8:1::4 timers 3 10 + neighbor 2001:db8:1::4 timers connect 5 neighbor 2001:db8:3::2 remote-as external neighbor 2001:db8:3::2 timers 3 10 neighbor 2001:db8:3::2 timers connect 5 address-family ipv6 unicast redistribute connected neighbor 2001:db8:1::2 activate + neighbor 2001:db8:1::3 activate + neighbor 2001:db8:1::4 activate neighbor 2001:db8:3::2 activate neighbor 2001:db8:1::2 route-server-client + neighbor 2001:db8:1::3 route-server-client + neighbor 2001:db8:1::4 route-server-client neighbor 2001:db8:3::2 route-server-client exit-address-family ! diff --git a/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_step1.json b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..c2f31f8c32ed --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_step1.json @@ -0,0 +1,208 @@ +{ + "routerId": "10.10.10.2", + "localAS": 65000, + "routes": { + "2001:db8:1::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r3", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r3:r3-eth0", + "hostname": "r3", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::1/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::3/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r3", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r3:r3-eth0", + "hostname": "r3", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::4/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::5/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + 
"hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_step2.json b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..c2f31f8c32ed --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_step2.json @@ -0,0 +1,208 @@ +{ + "routerId": "10.10.10.2", + "localAS": 65000, + "routes": { + "2001:db8:1::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r3", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r3:r3-eth0", + "hostname": "r3", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::1/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::3/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r3", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r3:r3-eth0", + "hostname": "r3", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::4/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r4", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::5/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r1", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r1", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r4", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r4:r4-eth0", + "hostname": "r4", + "afi": "ipv6", + "scope": 
"link-local", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_summary.json b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_summary.json index b40192b452e0..8a42a11c47a9 100644 --- a/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_summary.json +++ b/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6_summary.json @@ -1,6 +1,6 @@ { "ipv6Unicast": { "failedPeers": 0, - "totalPeers": 2 + "totalPeers": 4 } } diff --git a/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json b/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6_step1.json similarity index 65% rename from tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json rename to tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6_step1.json index 5ebcbe4c6856..bf8d74801dc3 100644 --- a/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6.json +++ b/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6_step1.json @@ -6,7 +6,7 @@ { "nexthops": [ { - "ip": "2001:db8:1::2", + "ip": "2001:db8:1::4", "hostname": "r2", "afi": "ipv6", "scope": "global", @@ -53,6 +53,32 @@ } ] } + ], + "2001:db8:f::4/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::5/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } ] } } diff --git a/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6.json b/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6_step2.json similarity index 56% rename from tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6.json rename to tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6_step2.json index 78b4aaefda3f..31c1eb781611 100644 --- a/tests/topotests/bgp_route_server_client/r2/show_bgp_ipv6.json +++ b/tests/topotests/bgp_route_server_client/r3/show_bgp_ipv6_step2.json @@ -1,20 +1,15 @@ { - "routerId": "10.10.10.2", - "localAS": 65000, + "routerId": "10.10.10.3", + "localAS": 65003, "routes": { "2001:db8:1::/64": [ { "nexthops": [ { - "ip": "2001:db8:1::2", - "hostname": "r1", - "afi": "ipv6", - "scope": "global" - }, - { - "hostname": "r1", + "ip": "2001:db8:1::4", + "hostname": "r2", "afi": "ipv6", - "scope": "link-local", + "scope": "global", "used": true } ] @@ -24,15 +19,10 @@ { "nexthops": [ { - "ip": "2001:db8:3::2", - "hostname": "r3", - "afi": "ipv6", - "scope": "global" - }, - { + "ip": "::", "hostname": "r3", "afi": "ipv6", - "scope": "link-local", + "scope": "global", "used": true } ] @@ -43,12 +33,13 @@ "nexthops": [ { "ip": "2001:db8:1::2", - "hostname": "r1", + "hostname": "r2", "afi": "ipv6", "scope": "global" }, { - "hostname": "r1", + "ip": "link-local:r2:r2-eth1", + "hostname": "r2", "afi": "ipv6", "scope": "link-local", "used": true @@ -60,19 +51,46 @@ { "nexthops": [ { - "ip": "2001:db8:3::2", + "ip": "::", "hostname": "r3", "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::4/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::3", + "hostname": "r2", + "afi": "ipv6", "scope": "global" }, { - "hostname": "r3", + "ip": "link-local:r2:r2-eth1", + "hostname": "r2", "afi": "ipv6", "scope": "link-local", "used": true } ] } + ], + "2001:db8:f::5/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } ] } } diff --git a/tests/topotests/bgp_route_server_client/r4/bgpd.conf 
b/tests/topotests/bgp_route_server_client/r4/bgpd.conf new file mode 100644 index 000000000000..c907d7284e22 --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r4/bgpd.conf @@ -0,0 +1,13 @@ +! +router bgp 65004 + bgp router-id 10.10.10.4 + no bgp ebgp-requires-policy + no bgp enforce-first-as + neighbor 2001:db8:1::1 remote-as external + neighbor 2001:db8:1::1 timers 3 10 + neighbor 2001:db8:1::1 timers connect 5 + address-family ipv6 unicast + redistribute connected + neighbor 2001:db8:1::1 activate + exit-address-family +! diff --git a/tests/topotests/bgp_route_server_client/r4/show_bgp_ipv6_step1.json b/tests/topotests/bgp_route_server_client/r4/show_bgp_ipv6_step1.json new file mode 100644 index 000000000000..5c090d9cf947 --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r4/show_bgp_ipv6_step1.json @@ -0,0 +1,95 @@ +{ + "routerId": "10.10.10.4", + "localAS": 65004, + "routes": { + "2001:db8:1::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::1/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::3/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::4/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::5/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_route_server_client/r4/show_bgp_ipv6_step2.json b/tests/topotests/bgp_route_server_client/r4/show_bgp_ipv6_step2.json new file mode 100644 index 000000000000..01db18e926d4 --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r4/show_bgp_ipv6_step2.json @@ -0,0 +1,113 @@ +{ + "routerId": "10.10.10.4", + "localAS": 65004, + "routes": { + "2001:db8:1::/64": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": "r4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + }, + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:3::/64": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:r2-eth0", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::1/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r1:r1-eth0", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::3/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:3::2", + "hostname": "r2", + "afi": "ipv6", + "scope": "global" + }, + { + "ip": "link-local:r2:r2-eth0", + "hostname": "r2", + "afi": "ipv6", + "scope": "link-local", + "used": true + } + ] + } + ], + "2001:db8:f::4/128": [ + { + "nexthops": [ + { + "ip": "::", + "hostname": 
"r4", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ], + "2001:db8:f::5/128": [ + { + "nexthops": [ + { + "ip": "2001:db8:1::4", + "hostname": "r2", + "afi": "ipv6", + "scope": "global", + "used": true + } + ] + } + ] + } +} diff --git a/tests/topotests/bgp_route_server_client/r4/zebra.conf b/tests/topotests/bgp_route_server_client/r4/zebra.conf new file mode 100644 index 000000000000..849884045d98 --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r4/zebra.conf @@ -0,0 +1,7 @@ +! +int lo + ipv6 address 2001:db8:f::4/128 +! +int r4-eth0 + ipv6 address 2001:db8:1::3/64 +! diff --git a/tests/topotests/bgp_route_server_client/r5/exabgp.cfg b/tests/topotests/bgp_route_server_client/r5/exabgp.cfg new file mode 100644 index 000000000000..b151f16caaeb --- /dev/null +++ b/tests/topotests/bgp_route_server_client/r5/exabgp.cfg @@ -0,0 +1,16 @@ +neighbor 2001:db8:1::1{ + router-id 10.10.10.5; + local-address 2001:db8:1::4; + local-as 65005; + peer-as 65000; + + family { + ipv6 unicast; + } + + static { + route 2001:db8:1::0/64 next-hop 2001:db8:1::4; + route 2001:db8:f::5/128 next-hop 2001:db8:1::4; + } + hold-time 10; +} diff --git a/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py b/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py index 8582b09c8601..a6334918dfcb 100644 --- a/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py +++ b/tests/topotests/bgp_route_server_client/test_bgp_route_server_client.py @@ -27,16 +27,49 @@ def build_topo(tgen): - for routern in range(1, 4): + """ + All peers are FRR BGP peers except r5 that is a exabgp peer. + Exabgp does not send any IPv6 Link-Local nexthop + + r2 is a route-server view RS AS 65000 + Other routers rX has AS 6500X + + +---+ + | r3| + +---+ + | + 2001:db8:3::0/64 + | + eth1 + +---+ + |r2 | + +---+ + eth0 + | + 2001:db8:1::0/64 + / | \ + +---+ +---+ +---+ + | r1| | r4| |r5 | + +---+ +---+ +---+ + """ + + for routern in range(1, 5): tgen.add_router("r{}".format(routern)) - switch = tgen.add_switch("s1") - switch.add_link(tgen.gears["r1"]) - switch.add_link(tgen.gears["r2"]) + sw1 = tgen.add_switch("s1") + sw1.add_link(tgen.gears["r1"]) + sw1.add_link(tgen.gears["r2"]) + sw1.add_link(tgen.gears["r4"]) - switch = tgen.add_switch("s2") - switch.add_link(tgen.gears["r2"]) - switch.add_link(tgen.gears["r3"]) + sw2 = tgen.add_switch("s2") + sw2.add_link(tgen.gears["r2"]) + sw2.add_link(tgen.gears["r3"]) + + ## Add iBGP ExaBGP neighbor + peer_ip = "2001:db8:1::4" ## peer + peer_route = "via 2001:db8:1::1" ## router + r5 = tgen.add_exabgp_peer("r5", ip=peer_ip, defaultRoute=peer_route) + sw1.add_link(r5) def setup_module(mod): @@ -55,12 +88,59 @@ def setup_module(mod): tgen.start_router() + # Start r5 exabgp peer + r5 = tgen.gears["r5"] + r5.start(os.path.join(CWD, "r5"), os.path.join(CWD, "exabgp.env")) + def teardown_module(mod): tgen = get_topogen() tgen.stop_topology() +def get_link_local(rname, ifname, cache): + ip = cache.get(rname, {}).get(ifname) + if ip: + return ip + + tgen = get_topogen() + out = tgen.gears[rname].vtysh_cmd("show interface %s json" % ifname, isjson=True) + for address in out[ifname]["ipAddresses"]: + if not address["address"].startswith("fe80::"): + continue + ip = address["address"].split("/")[0] + cache.setdefault(rname, {})[ifname] = ip + return ip + + +def replace_link_local(expected, cache): + for prefix, prefix_infos in expected.get("routes", {}).items(): + for prefix_info in prefix_infos: + for nexthop in 
prefix_info.get("nexthops", []): + ip = nexthop.get("ip", "") + if not ip.startswith("link-local:"): + continue + rname = ip.split(":")[1] + ifname = ip.split(":")[2] + ip = get_link_local(rname, ifname, cache) + nexthop["ip"] = ip + + +def check_r2_sub_group(expected): + tgen = get_topogen() + + r2 = tgen.gears["r2"] + + output = json.loads(r2.vtysh_cmd("show bgp view RS update-groups json")) + actual = [ + subgroup["peers"] + for entry in output.get("RS", {}).values() + for subgroup in entry["subGroup"] + ] + + return topotest.json_cmp(actual, expected) + + def test_converge_protocols(): "Wait for protocol convergence" @@ -90,6 +170,58 @@ def test_bgp_route_server_client_step1(): if tgen.routers_have_failure(): pytest.skip(tgen.errors) + global link_local_cache + link_local_cache = {} + router_list = tgen.routers().values() + for router in router_list: + if router.name == "r2": + # route-server + cmd = "show bgp view RS ipv6 unicast json" + else: + cmd = "show bgp ipv6 unicast json" + + # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name) + ref_file = "{}/{}/show_bgp_ipv6_step1.json".format(CWD, router.name) + expected = json.loads(open(ref_file).read()) + replace_link_local(expected, link_local_cache) + + test_func = partial( + topotest.router_json_cmp, + router, + cmd, + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = "{}: BGP IPv6 table failure".format(router.name) + assert res is None, assertmsg + + # check r2 sub-groups + expected = [["2001:db8:1::4"], ["2001:db8:1::3", "2001:db8:1::2", "2001:db8:3::2"]] + + test_func = functools.partial(check_r2_sub_group, expected) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Peer group split failed" + + +def test_bgp_route_server_client_step2(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r2 = tgen.gears["r2"] + r2.vtysh_cmd( + """ +configure terminal +router bgp 65000 view RS + address-family ipv6 unicast + neighbor 2001:db8:1::2 nexthop-local unchanged + neighbor 2001:db8:1::3 nexthop-local unchanged + neighbor 2001:db8:1::4 nexthop-local unchanged + neighbor 2001:db8:3::2 nexthop-local unchanged +""" + ) + router_list = tgen.routers().values() for router in router_list: if router.name == "r2": @@ -99,8 +231,9 @@ def test_bgp_route_server_client_step1(): cmd = "show bgp ipv6 unicast json" # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name) - ref_file = "{}/{}/show_bgp_ipv6.json".format(CWD, router.name) + ref_file = "{}/{}/show_bgp_ipv6_step2.json".format(CWD, router.name) expected = json.loads(open(ref_file).read()) + replace_link_local(expected, link_local_cache) test_func = partial( topotest.router_json_cmp, @@ -112,6 +245,69 @@ def test_bgp_route_server_client_step1(): assertmsg = "{}: BGP IPv6 table failure".format(router.name) assert res is None, assertmsg + # check r2 sub-groups + expected = [ + ["2001:db8:1::4"], + ["2001:db8:1::3", "2001:db8:1::2"], + ["2001:db8:3::2"], + ] + + test_func = functools.partial(check_r2_sub_group, expected) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Peer group split failed" + + +def test_bgp_route_server_client_step3(): + tgen = get_topogen() + + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + r2 = tgen.gears["r2"] + r2.vtysh_cmd( + """ +configure terminal +router bgp 65000 view RS + address-family ipv6 unicast + no 
neighbor 2001:db8:1::2 nexthop-local unchanged + no neighbor 2001:db8:1::3 nexthop-local unchanged + no neighbor 2001:db8:1::4 nexthop-local unchanged + no neighbor 2001:db8:3::2 nexthop-local unchanged +""" + ) + + global link_local_cache + link_local_cache = {} + router_list = tgen.routers().values() + for router in router_list: + if router.name == "r2": + # route-server + cmd = "show bgp view RS ipv6 unicast json" + else: + cmd = "show bgp ipv6 unicast json" + + # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name) + ref_file = "{}/{}/show_bgp_ipv6_step1.json".format(CWD, router.name) + expected = json.loads(open(ref_file).read()) + replace_link_local(expected, link_local_cache) + + test_func = partial( + topotest.router_json_cmp, + router, + cmd, + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = "{}: BGP IPv6 table failure".format(router.name) + assert res is None, assertmsg + + # check r2 sub-groups + expected = [["2001:db8:1::4"], ["2001:db8:1::3", "2001:db8:1::2", "2001:db8:3::2"]] + + test_func = functools.partial(check_r2_sub_group, expected) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Peer group split failed" + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] From 1005c147684024a71412e44146e6f62673496cf2 Mon Sep 17 00:00:00 2001 From: Louis Scalbert Date: Fri, 11 Oct 2024 13:14:25 +0200 Subject: [PATCH 69/73] tests: test nexthop-local unchanged with reflector Test nexthop-local unchanged with route-reflector. Signed-off-by: Louis Scalbert --- .../r1/show_bgp_ipv6_step1.json | 64 ++++++++++-- .../r2/show_bgp_ipv6_step1.json | 64 ++++++++++-- .../r5/show_bgp_ipv6_step1.json | 16 +-- .../r6/show_bgp_ipv6_step1.json | 64 ++++++++++-- tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf | 5 + .../test_bgp_nexthop_ipv6_topo1.py | 98 +++++++++++++++++++ 6 files changed, 273 insertions(+), 38 deletions(-) diff --git a/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step1.json index 9923edb348da..f468ae1b3e37 100644 --- a/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step1.json +++ b/tests/topotests/bgp_nexthop_ipv6/r1/show_bgp_ipv6_step1.json @@ -22,7 +22,13 @@ "ip": "fd00:0:2::2", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:r2:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -48,7 +54,13 @@ "ip": "fd00:0:2::4", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:r4:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -61,7 +73,13 @@ "ip": "fd00:0:3::5", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -74,7 +92,13 @@ "ip": "fd00:0:4::6", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -100,7 +124,13 @@ "ip": "fd00:0:2::2", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:r2:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -126,7 +156,13 @@ "ip": "fd00:0:2::4", "hostname": "rr", "afi": "ipv6", - "scope": 
"global", + "scope": "global" + }, + { + "ip": "link-local:r4:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -139,7 +175,13 @@ "ip": "fd00:0:3::5", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -152,7 +194,13 @@ "ip": "fd00:0:4::6", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] diff --git a/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step1.json index bb2efa16d9e8..824db383a9ef 100644 --- a/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step1.json +++ b/tests/topotests/bgp_nexthop_ipv6/r2/show_bgp_ipv6_step1.json @@ -9,7 +9,13 @@ "ip": "fd00:0:2::1", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:r1:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -48,7 +54,13 @@ "ip": "fd00:0:2::4", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:r4:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -61,7 +73,13 @@ "ip": "fd00:0:3::5", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -74,7 +92,13 @@ "ip": "fd00:0:4::6", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -87,7 +111,13 @@ "ip": "fd00:0:2::1", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:r1:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -126,7 +156,13 @@ "ip": "fd00:0:2::4", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:r4:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -139,7 +175,13 @@ "ip": "fd00:0:3::5", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -152,7 +194,13 @@ "ip": "fd00:0:4::6", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-sw", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] diff --git a/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step1.json index d0875474ae94..88e3efb61757 100644 --- a/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step1.json +++ b/tests/topotests/bgp_nexthop_ipv6/r5/show_bgp_ipv6_step1.json @@ -47,13 +47,7 @@ "ip": "fd00:0:3::9", "hostname": "rr", "afi": "ipv6", - "scope": "global" - }, - { - "ip": "link-local:rr:eth-r5", - "hostname": "rr", - "afi": "ipv6", - "scope": "link-local", + "scope": "global", "used": true } ] @@ -155,13 +149,7 @@ "ip": "fd00:0:3::9", "hostname": "rr", "afi": "ipv6", - "scope": "global" - }, - { - "ip": "link-local:rr:eth-r5", - "hostname": "rr", - "afi": "ipv6", - "scope": "link-local", + 
"scope": "global", "used": true } ] diff --git a/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step1.json b/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step1.json index cd48dd4697ce..1407eca359f1 100644 --- a/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step1.json +++ b/tests/topotests/bgp_nexthop_ipv6/r6/show_bgp_ipv6_step1.json @@ -9,7 +9,13 @@ "ip": "fd00:0:2::1", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r6", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -22,7 +28,13 @@ "ip": "fd00:0:2::2", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r6", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -48,7 +60,13 @@ "ip": "fd00:0:2::4", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r6", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -61,7 +79,13 @@ "ip": "fd00:0:3::5", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r6", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -87,7 +111,13 @@ "ip": "fd00:0:2::1", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r6", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -100,7 +130,13 @@ "ip": "fd00:0:2::2", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r6", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -126,7 +162,13 @@ "ip": "fd00:0:2::4", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r6", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] @@ -139,7 +181,13 @@ "ip": "fd00:0:3::5", "hostname": "rr", "afi": "ipv6", - "scope": "global", + "scope": "global" + }, + { + "ip": "link-local:rr:eth-r6", + "hostname": "rr", + "afi": "ipv6", + "scope": "link-local", "used": true } ] diff --git a/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf b/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf index 6dcded15258c..705ae78b8e1e 100644 --- a/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf +++ b/tests/topotests/bgp_nexthop_ipv6/rr/bgpd.conf @@ -19,12 +19,17 @@ router bgp 65000 neighbor fd00:0:4::6 route-reflector-client address-family ipv6 unicast neighbor fd00:0:2::1 route-reflector-client + neighbor fd00:0:2::1 nexthop-local unchanged neighbor fd00:0:2::1 activate neighbor fd00:0:2::2 route-reflector-client + neighbor fd00:0:2::2 nexthop-local unchanged neighbor fd00:0:2::2 activate neighbor fd00:0:2::3 route-reflector-client + neighbor fd00:0:2::3 nexthop-local unchanged neighbor fd00:0:2::3 activate neighbor fd00:0:2::4 nexthop-local unchanged neighbor fd00:0:2::4 activate + neighbor fd00:0:3::5 nexthop-local unchanged neighbor fd00:0:3::5 activate + neighbor fd00:0:4::6 nexthop-local unchanged neighbor fd00:0:4::6 activate diff --git a/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py b/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py index 24d71f5622ae..e478139eb1e6 100644 --- a/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py +++ b/tests/topotests/bgp_nexthop_ipv6/test_bgp_nexthop_ipv6_topo1.py @@ -165,6 +165,21 @@ def replace_link_local(expected, 
cache): nexthop["ip"] = ip +def check_rr_sub_group(expected): + tgen = get_topogen() + + rr = tgen.gears["rr"] + + output = json.loads(rr.vtysh_cmd("show bgp update-groups json")) + actual = [ + subgroup["peers"] + for entry in output.get("default", {}).values() + for subgroup in entry["subGroup"] + ] + + return topotest.json_cmp(actual, expected) + + def teardown_module(_mod): "Teardown the pytest environment" tgen = get_topogen() @@ -222,6 +237,19 @@ def test_bgp_ipv6_table_step1(): assertmsg = "{}: BGP IPv6 Nexthop failure".format(router.name) assert res is None, assertmsg + # check rr sub-groups + expected = [ + ["fd00:0:2::1", "fd00:0:2::2"], + ["fd00:0:2::3"], + ["fd00:0:2::4"], + ["fd00:0:3::5"], + ["fd00:0:4::6"], + ] + + test_func = partial(check_rr_sub_group, expected) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Peer group split failed" + def test_bgp_ipv6_table_step2(): tgen = get_topogen() @@ -236,7 +264,12 @@ def test_bgp_ipv6_table_step2(): configure terminal router bgp 65000 address-family ipv6 unicast + no neighbor fd00:0:2::1 nexthop-local unchanged + no neighbor fd00:0:2::2 nexthop-local unchanged + no neighbor fd00:0:2::3 nexthop-local unchanged no neighbor fd00:0:2::4 nexthop-local unchanged + no neighbor fd00:0:3::5 nexthop-local unchanged + no neighbor fd00:0:4::6 nexthop-local unchanged """ ) @@ -257,6 +290,71 @@ def test_bgp_ipv6_table_step2(): assertmsg = "{}: BGP IPv6 Nexthop failure".format(router.name) assert res is None, assertmsg + # check rr sub-groups + expected = [ + ["fd00:0:2::1", "fd00:0:2::2"], + ["fd00:0:2::3"], + ["fd00:0:3::5", "fd00:0:2::4"], + ["fd00:0:4::6"], + ] + + test_func = partial(check_rr_sub_group, expected) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Peer group split failed" + + +def test_bgp_ipv6_table_step3(): + tgen = get_topogen() + + # Don't run this test if we have any failure. 
+ if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + rr = tgen.gears["rr"] + rr.vtysh_cmd( + """ +configure terminal +router bgp 65000 + address-family ipv6 unicast + neighbor fd00:0:2::1 nexthop-local unchanged + neighbor fd00:0:2::2 nexthop-local unchanged + neighbor fd00:0:2::3 nexthop-local unchanged + neighbor fd00:0:2::4 nexthop-local unchanged + neighbor fd00:0:3::5 nexthop-local unchanged + neighbor fd00:0:4::6 nexthop-local unchanged +""" + ) + + router_list = tgen.routers().values() + for router in router_list: + # router.cmd("vtysh -c 'sh bgp ipv6 json' >/tmp/show_bgp_ipv6_%s.json" % router.name) + ref_file = "{}/{}/show_bgp_ipv6_step1.json".format(CWD, router.name) + expected = json.loads(open(ref_file).read()) + replace_link_local(expected, link_local_cache) + + test_func = partial( + topotest.router_json_cmp, + router, + "show bgp ipv6 unicast json", + expected, + ) + _, res = topotest.run_and_expect(test_func, None, count=30, wait=1) + assertmsg = "{}: BGP IPv6 Nexthop failure".format(router.name) + assert res is None, assertmsg + + # check rr sub-groups + expected = [ + ["fd00:0:2::1", "fd00:0:2::2"], + ["fd00:0:2::3"], + ["fd00:0:2::4"], + ["fd00:0:3::5"], + ["fd00:0:4::6"], + ] + + test_func = partial(check_rr_sub_group, expected) + _, result = topotest.run_and_expect(test_func, None, count=60, wait=0.5) + assert result is None, "Peer group split failed" + if __name__ == "__main__": args = ["-s"] + sys.argv[1:] From 963792e8c5ba834f1f7bb2555e90aa4b2ff6f33c Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 10 Oct 2024 16:00:08 -0400 Subject: [PATCH 70/73] zebra: Only notify dplane work pthread when needed The fpm_nl_process function was getting the count of the total number of ctx's processed. This leads to after having processed 1 context to always signal the dataplane that there is work to do. Change the code to only notify the dplane worker when a context was actually added to the outgoing context queue. Signed-off-by: Donald Sharp --- zebra/dplane_fpm_nl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c index d594fc2c8640..0e54952eea2c 100644 --- a/zebra/dplane_fpm_nl.c +++ b/zebra/dplane_fpm_nl.c @@ -1525,7 +1525,7 @@ static void fpm_process_queue(struct event *t) * until the dataplane thread gets scheduled for new, * unrelated work. */ - if (dplane_provider_out_ctx_queue_len(fnc->prov) > 0) + if (processed_contexts) dplane_provider_work_ready(); } From 8aa97a439fc21c66132fdaf8ce0113e16801be04 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Thu, 10 Oct 2024 20:08:32 -0400 Subject: [PATCH 71/73] zebra: Slow down fpm_process_queue When the fpm_process_queue has run out of space but has written to the fpm output buffer, schedule it to wake up immediately, as that the write will go out pretty much immediately, since it was scheduled first. If the fpm_process_queue has not written to the output buffer then delay the processing by 10 milliseconds to allow a possibly backed up write processing to have a chance to complete it's work. 
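In rough outline, and simplified from the hunk below (the wedged-timer handling is
unchanged and omitted), the dequeue rescheduling decision becomes:

	if (no_bufs) {
		if (processed_contexts)
			/* this pass wrote into the fpm output buffer; the write
			 * event was scheduled first and will go out almost
			 * immediately, so re-run the dequeue right away */
			event_add_event(fnc->fthread->master, fpm_process_queue,
					fnc, 0, &fnc->t_dequeue);
		else
			/* nothing could be written out; back off 10 milliseconds
			 * so the backed-up write processing gets a chance to
			 * drain the buffer first */
			event_add_timer_msec(fnc->fthread->master,
					     fpm_process_queue, fnc, 10,
					     &fnc->t_dequeue);
	}
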
Signed-off-by: Donald Sharp --- zebra/dplane_fpm_nl.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/zebra/dplane_fpm_nl.c b/zebra/dplane_fpm_nl.c index 0e54952eea2c..4fb57d84d98f 100644 --- a/zebra/dplane_fpm_nl.c +++ b/zebra/dplane_fpm_nl.c @@ -1512,8 +1512,12 @@ static void fpm_process_queue(struct event *t) /* Re-schedule if we ran out of buffer space */ if (no_bufs) { - event_add_event(fnc->fthread->master, fpm_process_queue, fnc, 0, - &fnc->t_dequeue); + if (processed_contexts) + event_add_event(fnc->fthread->master, fpm_process_queue, fnc, 0, + &fnc->t_dequeue); + else + event_add_timer_msec(fnc->fthread->master, fpm_process_queue, fnc, 10, + &fnc->t_dequeue); event_add_timer(fnc->fthread->master, fpm_process_wedged, fnc, DPLANE_FPM_NL_WEDGIE_TIME, &fnc->t_wedged); } else From cf2624a993fd6992fbbce75434a5aefe22ce0bc2 Mon Sep 17 00:00:00 2001 From: Donald Sharp Date: Fri, 11 Oct 2024 09:33:35 -0400 Subject: [PATCH 72/73] fpm: Allow max fpm message size to float based on ecmp Currently the max message size is 4k. With a 256 way ecmp FRR is seeing message sizes that are in the 6k size. There is desire to allow this to increase as well to 512. Since the multipath size directly effects how big the message may be when sending the routes ecmp let's give a bit of headroom for this value when compiling FRR at greater sizes. Additionally since we know not everyone is using such large ecmp, allow them to build as appropriate for their use cases. Signed-off-by: Donald Sharp --- fpm/fpm.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fpm/fpm.h b/fpm/fpm.h index 70c0df57157e..9003c643b00c 100644 --- a/fpm/fpm.h +++ b/fpm/fpm.h @@ -65,7 +65,7 @@ /* * Largest message that can be sent to or received from the FPM. */ -#define FPM_MAX_MSG_LEN 4096 +#define FPM_MAX_MSG_LEN MAX(MULTIPATH_NUM * 32, 4096) #ifdef __SUNPRO_C #pragma pack(1) From 05e2472de7f5bca8686cb38be042949a51d0c6a4 Mon Sep 17 00:00:00 2001 From: anlan_cs Date: Sun, 13 Oct 2024 21:26:02 +0800 Subject: [PATCH 73/73] zebra: add back one field for debug The `flags` field is removed recently, so add back it for debug. Signed-off-by: anlan_cs --- zebra/interface.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/zebra/interface.c b/zebra/interface.c index d146004781a5..f1f1b17209a7 100644 --- a/zebra/interface.c +++ b/zebra/interface.c @@ -1999,10 +1999,9 @@ static void zebra_if_dplane_ifp_handling(struct zebra_dplane_ctx *ctx) !CHECK_FLAG(ifp->status, ZEBRA_INTERFACE_ACTIVE)) { /* Add interface notification from kernel */ if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "RTM_NEWLINK ADD for %s(%u) vrf_id %u type %d sl_type %d master %u", - name, ifindex, vrf_id, zif_type, - zif_slave_type, master_ifindex); + zlog_debug("RTM_NEWLINK ADD for %s(%u) vrf_id %u type %d sl_type %d master %u flags 0x%llx", + name, ifindex, vrf_id, zif_type, zif_slave_type, + master_ifindex, (unsigned long long)flags); if (ifp == NULL) { /* unknown interface */ @@ -2087,10 +2086,9 @@ static void zebra_if_dplane_ifp_handling(struct zebra_dplane_ctx *ctx) /* Interface update. */ if (IS_ZEBRA_DEBUG_KERNEL) - zlog_debug( - "RTM_NEWLINK update for %s(%u) sl_type %d master %u", - name, ifp->ifindex, zif_slave_type, - master_ifindex); + zlog_debug("RTM_NEWLINK update for %s(%u) sl_type %d master %u flags 0x%llx", + name, ifp->ifindex, zif_slave_type, master_ifindex, + (unsigned long long)flags); set_ifindex(ifp, ifindex, zns); ifp->mtu6 = ifp->mtu = mtu;