From 7b970f187621882c7d60b40e1ce25b123c030ef2 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sat, 23 Mar 2024 19:07:33 +0100 Subject: [PATCH 01/45] bgpd: Add API to get SRv6 locator info Add an API to request information from the SRv6 SID Manager (zebra) regarding a specific SRv6 locator. Signed-off-by: Carmine Scarpitta --- bgpd/bgp_zebra.c | 18 ++++++++++++++++++ bgpd/bgp_zebra.h | 2 ++ 2 files changed, 20 insertions(+) diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 718dc2de06bb..229a04b2f4b5 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -4093,6 +4093,24 @@ int bgp_zebra_srv6_manager_release_locator_chunk(const char *name) return srv6_manager_release_locator_chunk(zclient, name); } +/** + * Ask the SRv6 Manager (zebra) about a specific locator + * + * @param name Locator name + * @return 0 on success, -1 otherwise + */ +int bgp_zebra_srv6_manager_get_locator(const char *name) +{ + if (!name) + return -1; + + /* + * Send the Get Locator request to the SRv6 Manager and return the + * result + */ + return srv6_manager_get_locator(zclient, name); +} + void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label, ifindex_t ifindex, vrf_id_t vrf_id, enum lsp_types_t ltype, struct prefix *p, diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h index 55a4185bde71..737ffb6b99f1 100644 --- a/bgpd/bgp_zebra.h +++ b/bgpd/bgp_zebra.h @@ -117,6 +117,8 @@ extern int bgp_zebra_update(struct bgp *bgp, afi_t afi, safi_t safi, extern int bgp_zebra_stale_timer_update(struct bgp *bgp); extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name); extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name); +extern int bgp_zebra_srv6_manager_get_locator(const char *name); + extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label, ifindex_t index, vrf_id_t vrfid, enum lsp_types_t ltype, From 213a3b95e5deeee0483af6083441ccaa49c2703f Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Thu, 9 May 2024 15:45:10 +0200 Subject: 
[PATCH 02/45] bgpd: Deal with SRv6 locator instead of chunk Currently, when SRv6 is enabled in BGP, BGP requests a locator chunk from Zebra. Zebra assigns a locator chunk to BGP, and then BGP can allocate SIDs from the locator chunk. Recently, the implementation of SRv6 in Zebra has been improved, and a new API has been introduced for obtaining/releasing the SIDs. Now, the daemons no longer need to request a chunk. Instead, the daemons interact with Zebra to obtain information about the locator and subsequently to allocate/release the SIDs. This commit extends BGP to use the new SRv6 API. In particular, it removes the chunk throughout the BGP code and modifies BGP to request/save/advertise the locator instead of the chunk. Signed-off-by: Carmine Scarpitta --- bgpd/bgp_mplsvpn.c | 37 ++++++++++++++++++++++--------------- bgpd/bgp_vty.c | 38 +++++++++++++++++++++++++------------- bgpd/bgp_zebra.c | 36 ++++++++++++++++++++++++------------ bgpd/bgpd.c | 17 ++++++++++++----- bgpd/bgpd.h | 5 +++-- 5 files changed, 86 insertions(+), 47 deletions(-) diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index ad774b2b008c..b74ed657d59b 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -733,7 +733,7 @@ void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi) { int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); - struct srv6_locator_chunk *tovpn_sid_locator; + struct srv6_locator *tovpn_sid_locator; struct in6_addr *tovpn_sid; uint32_t tovpn_sid_index = 0, tovpn_sid_transpose_label; bool tovpn_sid_auto = false; @@ -748,9 +748,9 @@ void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, /* * skip when bgp vpn instance ins't allocated - * or srv6 locator chunk isn't allocated + * or srv6 locator isn't allocated */ - if (!bgp_vpn || !bgp_vpn->srv6_locator_chunks) + if (!bgp_vpn || !bgp_vpn->srv6_locator) return; tovpn_sid_index = bgp_vrf->vpn_policy[afi].tovpn_sid_index; @@ -768,7 +768,9 @@ void 
ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, return; } - tovpn_sid_locator = srv6_locator_chunk_alloc(); + tovpn_sid_locator = srv6_locator_alloc(bgp_vpn->srv6_locator_name); + srv6_locator_copy(tovpn_sid_locator, bgp_vpn->srv6_locator); + tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); tovpn_sid_transpose_label = alloc_new_sid(bgp_vpn, tovpn_sid_index, @@ -779,7 +781,7 @@ void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, zlog_debug( "%s: not allocated new sid for vrf %s: afi %s", __func__, bgp_vrf->name_pretty, afi2str(afi)); - srv6_locator_chunk_free(&tovpn_sid_locator); + srv6_locator_free(tovpn_sid_locator); XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid); return; } @@ -798,7 +800,7 @@ void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) { int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); - struct srv6_locator_chunk *tovpn_sid_locator; + struct srv6_locator *tovpn_sid_locator; struct in6_addr *tovpn_sid; uint32_t tovpn_sid_index = 0, tovpn_sid_transpose_label; bool tovpn_sid_auto = false; @@ -813,9 +815,9 @@ void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) /* * skip when bgp vpn instance ins't allocated - * or srv6 locator chunk isn't allocated + * or srv6 locator isn't allocated */ - if (!bgp_vpn || !bgp_vpn->srv6_locator_chunks) + if (!bgp_vpn || !bgp_vpn->srv6_locator) return; tovpn_sid_index = bgp_vrf->tovpn_sid_index; @@ -832,7 +834,9 @@ void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) return; } - tovpn_sid_locator = srv6_locator_chunk_alloc(); + tovpn_sid_locator = srv6_locator_alloc(bgp_vpn->srv6_locator_name); + srv6_locator_copy(tovpn_sid_locator, bgp_vpn->srv6_locator); + tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); tovpn_sid_transpose_label = alloc_new_sid(bgp_vpn, tovpn_sid_index, @@ -842,7 +846,7 @@ void 
ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) if (debug) zlog_debug("%s: not allocated new sid for vrf %s", __func__, bgp_vrf->name_pretty); - srv6_locator_chunk_free(&tovpn_sid_locator); + srv6_locator_free(tovpn_sid_locator); XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid); return; } @@ -889,7 +893,8 @@ void delete_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, if (tovpn_sid_index != 0 || tovpn_sid_auto) return; - srv6_locator_chunk_free(&bgp_vrf->vpn_policy[afi].tovpn_sid_locator); + srv6_locator_free(bgp_vrf->vpn_policy[afi].tovpn_sid_locator); + bgp_vrf->vpn_policy[afi].tovpn_sid_locator = NULL; if (bgp_vrf->vpn_policy[afi].tovpn_sid) { sid_unregister(bgp_vpn, bgp_vrf->vpn_policy[afi].tovpn_sid); @@ -916,7 +921,8 @@ void delete_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) if (tovpn_sid_index != 0 || tovpn_sid_auto) return; - srv6_locator_chunk_free(&bgp_vrf->tovpn_sid_locator); + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + bgp_vrf->tovpn_sid_locator = NULL; if (bgp_vrf->tovpn_sid) { sid_unregister(bgp_vpn, bgp_vrf->tovpn_sid); @@ -1763,8 +1769,9 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */ /* Set SID for SRv6 VPN */ if (from_bgp->vpn_policy[afi].tovpn_sid_locator) { - struct srv6_locator_chunk *locator = + struct srv6_locator *locator = from_bgp->vpn_policy[afi].tovpn_sid_locator; + encode_label( from_bgp->vpn_policy[afi].tovpn_sid_transpose_label, &label); @@ -1805,8 +1812,8 @@ void vpn_leak_from_vrf_update(struct bgp *to_bgp, /* to */ .tovpn_sid_locator->prefix.prefix, sizeof(struct in6_addr)); } else if (from_bgp->tovpn_sid_locator) { - struct srv6_locator_chunk *locator = - from_bgp->tovpn_sid_locator; + struct srv6_locator *locator = from_bgp->tovpn_sid_locator; + encode_label(from_bgp->tovpn_sid_transpose_label, &label); static_attr.srv6_l3vpn = XCALLOC(MTYPE_BGP_SRV6_L3VPN, diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index c9c7b8049644..f5d02eda9b67 100644 --- a/bgpd/bgp_vty.c +++ 
b/bgpd/bgp_vty.c @@ -302,18 +302,11 @@ static const char *get_afi_safi_json_str(afi_t afi, safi_t safi) /* unset srv6 locator */ static int bgp_srv6_locator_unset(struct bgp *bgp) { - int ret; struct listnode *node, *nnode; struct srv6_locator_chunk *chunk; struct bgp_srv6_function *func; struct bgp *bgp_vrf; - /* release chunk notification via ZAPI */ - ret = bgp_zebra_srv6_manager_release_locator_chunk( - bgp->srv6_locator_name); - if (ret < 0) - return -1; - /* refresh chunks */ for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk)) { listnode_delete(bgp->srv6_locator_chunks, chunk); @@ -352,20 +345,28 @@ static int bgp_srv6_locator_unset(struct bgp *bgp) continue; /* refresh vpnv4 tovpn_sid_locator */ - srv6_locator_chunk_free( - &bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator); + srv6_locator_free(bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator = NULL; /* refresh vpnv6 tovpn_sid_locator */ - srv6_locator_chunk_free( - &bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator); + srv6_locator_free( + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator = NULL; /* refresh per-vrf tovpn_sid_locator */ - srv6_locator_chunk_free(&bgp_vrf->tovpn_sid_locator); + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + bgp_vrf->tovpn_sid_locator = NULL; } /* clear locator name */ memset(bgp->srv6_locator_name, 0, sizeof(bgp->srv6_locator_name)); + /* clear SRv6 locator */ + if (bgp->srv6_locator) { + srv6_locator_free(bgp->srv6_locator); + bgp->srv6_locator = NULL; + } + return 0; } @@ -10878,7 +10879,7 @@ DEFPY (bgp_srv6_locator, snprintf(bgp->srv6_locator_name, sizeof(bgp->srv6_locator_name), "%s", name); - ret = bgp_zebra_srv6_manager_get_locator_chunk(name); + ret = bgp_zebra_srv6_manager_get_locator(name); if (ret < 0) return CMD_WARNING_CONFIG_FAILED; @@ -10929,6 +10930,17 @@ DEFPY (show_bgp_srv6, return CMD_SUCCESS; vty_out(vty, "locator_name: %s\n", 
bgp->srv6_locator_name); + if (bgp->srv6_locator) { + vty_out(vty, " prefix: %pFX\n", &bgp->srv6_locator->prefix); + vty_out(vty, " block-length: %d\n", + bgp->srv6_locator->block_bits_length); + vty_out(vty, " node-length: %d\n", + bgp->srv6_locator->node_bits_length); + vty_out(vty, " func-length: %d\n", + bgp->srv6_locator->function_bits_length); + vty_out(vty, " arg-length: %d\n", + bgp->srv6_locator->argument_bits_length); + } vty_out(vty, "locator_chunks:\n"); for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) { vty_out(vty, "- %pFX\n", &chunk->prefix); diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 229a04b2f4b5..fc50d120d756 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -3402,7 +3402,8 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) struct srv6_locator loc = {}; struct bgp *bgp = bgp_get_default(); struct listnode *node, *nnode; - struct srv6_locator_chunk *chunk, *tovpn_sid_locator; + struct srv6_locator_chunk *chunk; + struct srv6_locator *tovpn_sid_locator; struct bgp_srv6_function *func; struct bgp *bgp_vrf; struct in6_addr *tovpn_sid; @@ -3414,6 +3415,12 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0) return -1; + // clear SRv6 locator + if (bgp->srv6_locator) { + srv6_locator_free(bgp->srv6_locator); + bgp->srv6_locator = NULL; + } + // refresh chunks for (ALL_LIST_ELEMENTS(bgp->srv6_locator_chunks, node, nnode, chunk)) if (prefix_match((struct prefix *)&loc.prefix, @@ -3490,10 +3497,12 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) tmp_prefi.prefixlen = IPV6_MAX_BITLEN; tmp_prefi.prefix = tovpn_sid_locator->prefix.prefix; if (prefix_match((struct prefix *)&loc.prefix, - (struct prefix *)&tmp_prefi)) - srv6_locator_chunk_free( - &bgp_vrf->vpn_policy[AFI_IP] - .tovpn_sid_locator); + (struct prefix *)&tmp_prefi)) { + srv6_locator_free(bgp_vrf->vpn_policy[AFI_IP] + .tovpn_sid_locator); + 
bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator = + NULL; + } } /* refresh vpnv6 tovpn_sid_locator */ @@ -3504,10 +3513,12 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) tmp_prefi.prefixlen = IPV6_MAX_BITLEN; tmp_prefi.prefix = tovpn_sid_locator->prefix.prefix; if (prefix_match((struct prefix *)&loc.prefix, - (struct prefix *)&tmp_prefi)) - srv6_locator_chunk_free( - &bgp_vrf->vpn_policy[AFI_IP6] - .tovpn_sid_locator); + (struct prefix *)&tmp_prefi)) { + srv6_locator_free(bgp_vrf->vpn_policy[AFI_IP6] + .tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator = + NULL; + } } /* refresh per-vrf tovpn_sid_locator */ @@ -3517,9 +3528,10 @@ static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) tmp_prefi.prefixlen = IPV6_MAX_BITLEN; tmp_prefi.prefix = tovpn_sid_locator->prefix.prefix; if (prefix_match((struct prefix *)&loc.prefix, - (struct prefix *)&tmp_prefi)) - srv6_locator_chunk_free( - &bgp_vrf->tovpn_sid_locator); + (struct prefix *)&tmp_prefi)) { + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + bgp_vrf->tovpn_sid_locator = NULL; + } } } diff --git a/bgpd/bgpd.c b/bgpd/bgpd.c index a88de651f50c..f0c92824d618 100644 --- a/bgpd/bgpd.c +++ b/bgpd/bgpd.c @@ -1497,9 +1497,11 @@ static void bgp_srv6_init(struct bgp *bgp) static void bgp_srv6_cleanup(struct bgp *bgp) { for (afi_t afi = AFI_IP; afi < AFI_MAX; afi++) { - if (bgp->vpn_policy[afi].tovpn_sid_locator != NULL) - srv6_locator_chunk_free( - &bgp->vpn_policy[afi].tovpn_sid_locator); + if (bgp->vpn_policy[afi].tovpn_sid_locator != NULL) { + srv6_locator_free( + bgp->vpn_policy[afi].tovpn_sid_locator); + bgp->vpn_policy[afi].tovpn_sid_locator = NULL; + } if (bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent != NULL) XFREE(MTYPE_BGP_SRV6_SID, bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent); @@ -1510,8 +1512,10 @@ static void bgp_srv6_cleanup(struct bgp *bgp) } } - if (bgp->tovpn_sid_locator != NULL) - srv6_locator_chunk_free(&bgp->tovpn_sid_locator); + if 
(bgp->tovpn_sid_locator != NULL) { + srv6_locator_free(bgp->tovpn_sid_locator); + bgp->tovpn_sid_locator = NULL; + } if (bgp->tovpn_zebra_vrf_sid_last_sent != NULL) XFREE(MTYPE_BGP_SRV6_SID, bgp->tovpn_zebra_vrf_sid_last_sent); if (bgp->tovpn_sid != NULL) { @@ -1523,6 +1527,9 @@ static void bgp_srv6_cleanup(struct bgp *bgp) list_delete(&bgp->srv6_locator_chunks); if (bgp->srv6_functions) list_delete(&bgp->srv6_functions); + + srv6_locator_free(bgp->srv6_locator); + bgp->srv6_locator = NULL; } /* Allocate new peer object, implicitely locked. */ diff --git a/bgpd/bgpd.h b/bgpd/bgpd.h index 7f1b82d9c763..7d0810c84e6c 100644 --- a/bgpd/bgpd.h +++ b/bgpd/bgpd.h @@ -270,7 +270,7 @@ struct vpn_policy { */ uint32_t tovpn_sid_index; /* unset => set to 0 */ struct in6_addr *tovpn_sid; - struct srv6_locator_chunk *tovpn_sid_locator; + struct srv6_locator *tovpn_sid_locator; uint32_t tovpn_sid_transpose_label; struct in6_addr *tovpn_zebra_vrf_sid_last_sent; }; @@ -836,11 +836,12 @@ struct bgp { /* BGP VPN SRv6 backend */ bool srv6_enabled; char srv6_locator_name[SRV6_LOCNAME_SIZE]; + struct srv6_locator *srv6_locator; struct list *srv6_locator_chunks; struct list *srv6_functions; uint32_t tovpn_sid_index; /* unset => set to 0 */ struct in6_addr *tovpn_sid; - struct srv6_locator_chunk *tovpn_sid_locator; + struct srv6_locator *tovpn_sid_locator; uint32_t tovpn_sid_transpose_label; struct in6_addr *tovpn_zebra_vrf_sid_last_sent; From d735da42785c75389afebd75712155f4753079b2 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sat, 23 Mar 2024 19:16:36 +0100 Subject: [PATCH 03/45] bgpd: Receive SRv6 locator info from zebra This commit extends BGP to process locator information received from SRv6 Manager (zebra) and save the locator info in the SRv6 database. 
Signed-off-by: Carmine Scarpitta --- bgpd/bgp_zebra.c | 46 +++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 41 insertions(+), 5 deletions(-) diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index fc50d120d756..0d7e08825408 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -3379,11 +3379,50 @@ static int bgp_zebra_process_srv6_locator_chunk(ZAPI_CALLBACK_ARGS) return 0; } +/** + * Internal function to process an SRv6 locator + * + * @param locator The locator to be processed + */ +static int bgp_zebra_process_srv6_locator_internal(struct srv6_locator *locator) +{ + struct bgp *bgp = bgp_get_default(); + + if (!bgp || !bgp->srv6_enabled || !locator) + return -1; + + /* + * Check if the main BGP instance is configured to use the received + * locator + */ + if (strcmp(bgp->srv6_locator_name, locator->name) != 0) { + zlog_err("%s: SRv6 Locator name unmatch %s:%s", __func__, + bgp->srv6_locator_name, locator->name); + return 0; + } + + zlog_info("%s: Received SRv6 locator %s %pFX, loc-block-len=%u, loc-node-len=%u func-len=%u, arg-len=%u", + __func__, locator->name, &locator->prefix, + locator->block_bits_length, locator->node_bits_length, + locator->function_bits_length, locator->argument_bits_length); + + /* Store the locator in the main BGP instance */ + bgp->srv6_locator = srv6_locator_alloc(locator->name); + srv6_locator_copy(bgp->srv6_locator, locator); + + /* + * Process VPN-to-VRF and VRF-to-VPN leaks to advertise new locator + * and SIDs. 
+ */ + vpn_leak_postchange_all(); + + return 0; +} + static int bgp_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) { struct srv6_locator loc = {}; struct bgp *bgp = bgp_get_default(); - const char *loc_name = bgp->srv6_locator_name; if (!bgp || !bgp->srv6_enabled) return 0; @@ -3391,10 +3430,7 @@ static int bgp_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0) return -1; - if (bgp_zebra_srv6_manager_get_locator_chunk(loc_name) < 0) - return -1; - - return 0; + return bgp_zebra_process_srv6_locator_internal(&loc); } static int bgp_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) From b72640a12536396ebac4e36127801b622811a706 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sat, 23 Mar 2024 19:17:48 +0100 Subject: [PATCH 04/45] bgpd: Add API to get/release SRv6 SIDs Add an API to get/release SRv6 SIDs through the SRv6 SID Manager. Signed-off-by: Carmine Scarpitta --- bgpd/bgp_zebra.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++ bgpd/bgp_zebra.h | 5 ++++ 2 files changed, 70 insertions(+) diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 0d7e08825408..66125b4ef76d 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -4159,6 +4159,71 @@ int bgp_zebra_srv6_manager_get_locator(const char *name) return srv6_manager_get_locator(zclient, name); } +/** + * Ask the SRv6 Manager (zebra) to allocate a SID. + * + * Optionally, it is possible to provide an IPv6 address (sid_value parameter). + * + * When sid_value is provided, the SRv6 Manager allocates the requested SID + * address, if the request can be satisfied (explicit allocation). + * + * When sid_value is not provided, the SRv6 Manager allocates any available SID + * from the provided locator (dynamic allocation). 
+ * + * @param ctx Context to be associated with the request SID + * @param sid_value IPv6 address to be associated with the requested SID (optional) + * @param locator_name Name of the locator from which the SID must be allocated + * @param sid_func SID Function allocated by the SRv6 Manager. + */ +bool bgp_zebra_request_srv6_sid(const struct srv6_sid_ctx *ctx, + struct in6_addr *sid_value, + const char *locator_name, uint32_t *sid_func) +{ + int ret; + + if (!ctx || !locator_name) + return false; + + /* + * Send the Get SRv6 SID request to the SRv6 Manager and check the + * result + */ + ret = srv6_manager_get_sid(zclient, ctx, sid_value, locator_name, + sid_func); + if (ret < 0) { + zlog_warn("%s: error getting SRv6 SID!", __func__); + return false; + } + + return true; +} + +/** + * Ask the SRv6 Manager (zebra) to release a previously allocated SID. + * + * This function is used to tell the SRv6 Manager that BGP no longer intends + * to use the SID. + * + * @param ctx Context to be associated with the SID to be released + */ +void bgp_zebra_release_srv6_sid(const struct srv6_sid_ctx *ctx) +{ + int ret; + + if (!ctx) + return; + + /* + * Send the Release SRv6 SID request to the SRv6 Manager and check the + * result + */ + ret = srv6_manager_release_sid(zclient, ctx); + if (ret < 0) { + zlog_warn("%s: error releasing SRv6 SID!", __func__); + return; + } +} + void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label, ifindex_t ifindex, vrf_id_t vrf_id, enum lsp_types_t ltype, struct prefix *p, diff --git a/bgpd/bgp_zebra.h b/bgpd/bgp_zebra.h index 737ffb6b99f1..8deecba747b3 100644 --- a/bgpd/bgp_zebra.h +++ b/bgpd/bgp_zebra.h @@ -118,6 +118,11 @@ extern int bgp_zebra_stale_timer_update(struct bgp *bgp); extern int bgp_zebra_srv6_manager_get_locator_chunk(const char *name); extern int bgp_zebra_srv6_manager_release_locator_chunk(const char *name); extern int bgp_zebra_srv6_manager_get_locator(const char *name); +extern bool bgp_zebra_request_srv6_sid(const 
struct srv6_sid_ctx *ctx, + struct in6_addr *sid_value, + const char *locator_name, + uint32_t *sid_func); +extern void bgp_zebra_release_srv6_sid(const struct srv6_sid_ctx *ctx); extern void bgp_zebra_send_nexthop_label(int cmd, mpls_label_t label, ifindex_t index, vrf_id_t vrfid, From 79210c744b0b3f94c9f09e4017f1e613873ff6b6 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sat, 23 Mar 2024 19:23:52 +0100 Subject: [PATCH 05/45] bgpd: Release SIDs when disabling SRv6 in BGP When SRv6 VPN is unconfigured in BGP, BGP needs to interact with SID Manager to release the SID and make it available to other daemons Signed-off-by: Carmine Scarpitta --- bgpd/bgp_mplsvpn.c | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index b74ed657d59b..14fd0c6e4b19 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -474,6 +474,7 @@ void vpn_leak_zebra_vrf_sid_update(struct bgp *bgp, afi_t afi) void vpn_leak_zebra_vrf_sid_withdraw_per_af(struct bgp *bgp, afi_t afi) { int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL); + struct srv6_sid_ctx ctx = {}; if (bgp->vrf_id == VRF_UNKNOWN) { if (debug) @@ -492,6 +493,11 @@ void vpn_leak_zebra_vrf_sid_withdraw_per_af(struct bgp *bgp, afi_t afi) XFREE(MTYPE_BGP_SRV6_SID, bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent); bgp->vpn_policy[afi].tovpn_zebra_vrf_sid_last_sent = NULL; + + ctx.vrf_id = bgp->vrf_id; + ctx.behavior = afi == AFI_IP ? 
ZEBRA_SEG6_LOCAL_ACTION_END_DT4 + : ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + bgp_zebra_release_srv6_sid(&ctx); } /* @@ -501,6 +507,7 @@ void vpn_leak_zebra_vrf_sid_withdraw_per_af(struct bgp *bgp, afi_t afi) void vpn_leak_zebra_vrf_sid_withdraw_per_vrf(struct bgp *bgp) { int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL); + struct srv6_sid_ctx ctx = {}; if (bgp->vrf_id == VRF_UNKNOWN) { if (debug) @@ -519,6 +526,10 @@ void vpn_leak_zebra_vrf_sid_withdraw_per_vrf(struct bgp *bgp) NULL); XFREE(MTYPE_BGP_SRV6_SID, bgp->tovpn_zebra_vrf_sid_last_sent); bgp->tovpn_zebra_vrf_sid_last_sent = NULL; + + ctx.vrf_id = bgp->vrf_id; + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + bgp_zebra_release_srv6_sid(&ctx); } /* @@ -880,6 +891,7 @@ void delete_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); uint32_t tovpn_sid_index = 0; bool tovpn_sid_auto = false; + struct srv6_sid_ctx ctx = {}; if (debug) zlog_debug("%s: try to remove SID for vrf %s: afi %s", __func__, @@ -893,10 +905,22 @@ void delete_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, if (tovpn_sid_index != 0 || tovpn_sid_auto) return; + if (bgp_vrf->vrf_id == VRF_UNKNOWN) { + if (debug) + zlog_debug("%s: vrf %s: vrf_id not set, can't set zebra vrf label", + __func__, bgp_vrf->name_pretty); + return; + } + srv6_locator_free(bgp_vrf->vpn_policy[afi].tovpn_sid_locator); bgp_vrf->vpn_policy[afi].tovpn_sid_locator = NULL; if (bgp_vrf->vpn_policy[afi].tovpn_sid) { + ctx.vrf_id = bgp_vrf->vrf_id; + ctx.behavior = afi == AFI_IP ? 
ZEBRA_SEG6_LOCAL_ACTION_END_DT4 + : ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + bgp_zebra_release_srv6_sid(&ctx); + sid_unregister(bgp_vpn, bgp_vrf->vpn_policy[afi].tovpn_sid); XFREE(MTYPE_BGP_SRV6_SID, bgp_vrf->vpn_policy[afi].tovpn_sid); } @@ -908,6 +932,7 @@ void delete_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); uint32_t tovpn_sid_index = 0; bool tovpn_sid_auto = false; + struct srv6_sid_ctx ctx = {}; if (debug) zlog_debug("%s: try to remove SID for vrf %s", __func__, @@ -921,10 +946,21 @@ void delete_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) if (tovpn_sid_index != 0 || tovpn_sid_auto) return; + if (bgp_vrf->vrf_id == VRF_UNKNOWN) { + if (debug) + zlog_debug("%s: vrf %s: vrf_id not set, can't set zebra vrf label", + __func__, bgp_vrf->name_pretty); + return; + } + srv6_locator_free(bgp_vrf->tovpn_sid_locator); bgp_vrf->tovpn_sid_locator = NULL; if (bgp_vrf->tovpn_sid) { + ctx.vrf_id = bgp_vrf->vrf_id; + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + bgp_zebra_release_srv6_sid(&ctx); + sid_unregister(bgp_vpn, bgp_vrf->tovpn_sid); XFREE(MTYPE_BGP_SRV6_SID, bgp_vrf->tovpn_sid); } From 0e243657fd26aaefd8bfc3835b080bf873c71f01 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sat, 23 Mar 2024 19:24:59 +0100 Subject: [PATCH 06/45] bgpd: Request SRv6 SIDs to SID Manager Currently, BGP allocates SIDs without interacting with Zebra. Recently, the SRv6 implementation has been improved. Now, the daemons need to interact with Zebra through ZAPI to obtain and release SIDs. This commit extends BGP to request SIDs from Zebra instead of allocating the SIDs on its own. 
Signed-off-by: Carmine Scarpitta --- bgpd/bgp_mplsvpn.c | 184 ++++++++++++++++++++++++++++++++------------- 1 file changed, 133 insertions(+), 51 deletions(-) diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index 14fd0c6e4b19..c9430b58822c 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -740,14 +740,97 @@ static uint32_t alloc_new_sid(struct bgp *bgp, uint32_t index, return label; } +/** + * Return the SRv6 SID value obtained by composing the LOCATOR and FUNCTION. + * + * @param sid_value SRv6 SID value returned + * @param locator Parent locator of the SRv6 SID + * @param sid_func Function part of the SID + * @return True if success, False otherwise + */ +static bool srv6_sid_compose(struct in6_addr *sid_value, + struct srv6_locator *locator, uint32_t sid_func) +{ + int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL); + int label = 0; + uint8_t offset = 0; + uint8_t func_len = 0, shift_len = 0; + uint32_t sid_func_max = 0; + + if (!locator || !sid_value) + return false; + + if (locator->function_bits_length > + BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH) { + if (debug) + zlog_debug("%s: invalid SRv6 Locator (%pFX): Function Length must be less or equal to %d", + __func__, &locator->prefix, + BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH); + return false; + } + + /* Max value that can be encoded in the Function part of the SID */ + sid_func_max = (1 << locator->function_bits_length) - 1; + + if (sid_func > sid_func_max) { + if (debug) + zlog_debug("%s: invalid SRv6 Locator (%pFX): Function Length is too short to support specified function (%u)", + __func__, &locator->prefix, sid_func); + return false; + } + + /** + * Let's build the SID value. + * sid_value = LOC:FUNC:: + */ + + /* First, we put the locator (LOC) in the most significant bits of sid_value */ + *sid_value = locator->prefix.prefix; + + /* + * Then, we compute the offset at which we have to place the function (FUNC). + * FUNC will be placed immediately after LOC, i.e. 
at block_bits_length + node_bits_length + */ + offset = locator->block_bits_length + locator->node_bits_length; + + /* + * The FUNC part of the SID is advertised in the label field of SRv6 Service TLV. + * (see SID Transposition Scheme, RFC 9252 section #4). + * Therefore, we need to encode the FUNC in the most significant bits of the + * 20-bit label. + */ + func_len = locator->function_bits_length; + shift_len = BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH - func_len; + + label = sid_func << shift_len; + if (label < MPLS_LABEL_UNRESERVED_MIN) { + if (debug) + zlog_debug("%s: skipped to allocate SRv6 SID (%pFX): Label (%u) is too small to use", + __func__, &locator->prefix, label); + return false; + } + + if (sid_exist(bgp_get_default(), sid_value)) { + zlog_warn("%s: skipped to allocate SRv6 SID (%pFX): SID %pI6 already in use", + __func__, &locator->prefix, sid_value); + return false; + } + + /* Finally, we put the FUNC in sid_value at the computed offset */ + transpose_sid(sid_value, label, offset, func_len); + + return true; +} + void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, afi_t afi) { int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); - struct srv6_locator *tovpn_sid_locator; - struct in6_addr *tovpn_sid; - uint32_t tovpn_sid_index = 0, tovpn_sid_transpose_label; + struct in6_addr tovpn_sid = {}; + uint32_t tovpn_sid_index = 0; bool tovpn_sid_auto = false; + struct srv6_sid_ctx ctx = {}; + uint32_t sid_func; if (debug) zlog_debug("%s: try to allocate new SID for vrf %s: afi %s", @@ -764,6 +847,13 @@ void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, if (!bgp_vpn || !bgp_vpn->srv6_locator) return; + if (bgp_vrf->vrf_id == VRF_UNKNOWN) { + if (debug) + zlog_debug("%s: vrf %s: vrf_id not set, can't set zebra vrf SRv6 SID", + __func__, bgp_vrf->name_pretty); + return; + } + tovpn_sid_index = bgp_vrf->vpn_policy[afi].tovpn_sid_index; tovpn_sid_auto = CHECK_FLAG(bgp_vrf->vpn_policy[afi].flags, 
BGP_VPN_POLICY_TOVPN_SID_AUTO); @@ -779,42 +869,34 @@ void ensure_vrf_tovpn_sid_per_af(struct bgp *bgp_vpn, struct bgp *bgp_vrf, return; } - tovpn_sid_locator = srv6_locator_alloc(bgp_vpn->srv6_locator_name); - srv6_locator_copy(tovpn_sid_locator, bgp_vpn->srv6_locator); - - tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); - - tovpn_sid_transpose_label = alloc_new_sid(bgp_vpn, tovpn_sid_index, - tovpn_sid_locator, tovpn_sid); + if (!tovpn_sid_auto) { + if (!srv6_sid_compose(&tovpn_sid, bgp_vpn->srv6_locator, + tovpn_sid_index)) { + zlog_err("%s: failed to compose sid for vrf %s: afi %s", + __func__, bgp_vrf->name_pretty, afi2str(afi)); + return; + } + } - if (tovpn_sid_transpose_label == 0) { - if (debug) - zlog_debug( - "%s: not allocated new sid for vrf %s: afi %s", - __func__, bgp_vrf->name_pretty, afi2str(afi)); - srv6_locator_free(tovpn_sid_locator); - XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid); + ctx.vrf_id = bgp_vrf->vrf_id; + ctx.behavior = afi == AFI_IP ? ZEBRA_SEG6_LOCAL_ACTION_END_DT4 + : ZEBRA_SEG6_LOCAL_ACTION_END_DT6; + if (!bgp_zebra_request_srv6_sid(&ctx, &tovpn_sid, + bgp_vpn->srv6_locator_name, &sid_func)) { + zlog_err("%s: failed to request sid for vrf %s: afi %s", + __func__, bgp_vrf->name_pretty, afi2str(afi)); return; } - - if (debug) - zlog_debug("%s: new sid %pI6 allocated for vrf %s: afi %s", - __func__, tovpn_sid, bgp_vrf->name_pretty, - afi2str(afi)); - - bgp_vrf->vpn_policy[afi].tovpn_sid = tovpn_sid; - bgp_vrf->vpn_policy[afi].tovpn_sid_locator = tovpn_sid_locator; - bgp_vrf->vpn_policy[afi].tovpn_sid_transpose_label = - tovpn_sid_transpose_label; } void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) { int debug = BGP_DEBUG(vpn, VPN_LEAK_FROM_VRF); - struct srv6_locator *tovpn_sid_locator; - struct in6_addr *tovpn_sid; - uint32_t tovpn_sid_index = 0, tovpn_sid_transpose_label; + struct in6_addr tovpn_sid = {}; + uint32_t tovpn_sid_index = 0; bool tovpn_sid_auto = false; + struct srv6_sid_ctx ctx = {}; 
+ uint32_t sid_func; if (debug) zlog_debug("%s: try to allocate new SID for vrf %s", __func__, @@ -831,6 +913,13 @@ void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) if (!bgp_vpn || !bgp_vpn->srv6_locator) return; + if (bgp_vrf->vrf_id == VRF_UNKNOWN) { + if (debug) + zlog_debug("%s: vrf %s: vrf_id not set, can't set zebra vrf SRv6 SID", + __func__, bgp_vrf->name_pretty); + return; + } + tovpn_sid_index = bgp_vrf->tovpn_sid_index; tovpn_sid_auto = CHECK_FLAG(bgp_vrf->vrf_flags, BGP_VRF_TOVPN_SID_AUTO); @@ -845,30 +934,23 @@ void ensure_vrf_tovpn_sid_per_vrf(struct bgp *bgp_vpn, struct bgp *bgp_vrf) return; } - tovpn_sid_locator = srv6_locator_alloc(bgp_vpn->srv6_locator_name); - srv6_locator_copy(tovpn_sid_locator, bgp_vpn->srv6_locator); - - tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); - - tovpn_sid_transpose_label = alloc_new_sid(bgp_vpn, tovpn_sid_index, - tovpn_sid_locator, tovpn_sid); + if (!tovpn_sid_auto) { + if (!srv6_sid_compose(&tovpn_sid, bgp_vpn->srv6_locator, + bgp_vrf->tovpn_sid_index)) { + zlog_err("%s: failed to compose new sid for vrf %s", + __func__, bgp_vrf->name_pretty); + return; + } + } - if (tovpn_sid_transpose_label == 0) { - if (debug) - zlog_debug("%s: not allocated new sid for vrf %s", - __func__, bgp_vrf->name_pretty); - srv6_locator_free(tovpn_sid_locator); - XFREE(MTYPE_BGP_SRV6_SID, tovpn_sid); + ctx.vrf_id = bgp_vrf->vrf_id; + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_DT46; + if (!bgp_zebra_request_srv6_sid(&ctx, &tovpn_sid, + bgp_vpn->srv6_locator_name, &sid_func)) { + zlog_err("%s: failed to request new sid for vrf %s", __func__, + bgp_vrf->name_pretty); return; } - - if (debug) - zlog_debug("%s: new sid %pI6 allocated for vrf %s", __func__, - tovpn_sid, bgp_vrf->name_pretty); - - bgp_vrf->tovpn_sid = tovpn_sid; - bgp_vrf->tovpn_sid_locator = tovpn_sid_locator; - bgp_vrf->tovpn_sid_transpose_label = tovpn_sid_transpose_label; } void ensure_vrf_tovpn_sid(struct bgp *bgp_vpn, struct bgp 
*bgp_vrf, afi_t afi) From e493b5f4f7efa799541566d3fb073019c26c5ba0 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Fri, 10 May 2024 12:50:27 +0200 Subject: [PATCH 07/45] bgpd: Make `sid_register()` non-static Make the `sid_register()` function non-static to allow other BGP modules (e.g. bgp_zebra.c) to register SIDs. Signed-off-by: Carmine Scarpitta --- bgpd/bgp_mplsvpn.c | 4 ++-- bgpd/bgp_mplsvpn.h | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index c9430b58822c..5cce1f01b02f 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -606,8 +606,8 @@ int vpn_leak_label_callback( return 0; } -static void sid_register(struct bgp *bgp, const struct in6_addr *sid, - const char *locator_name) +void sid_register(struct bgp *bgp, const struct in6_addr *sid, + const char *locator_name) { struct bgp_srv6_function *func; func = XCALLOC(MTYPE_BGP_SRV6_FUNCTION, diff --git a/bgpd/bgp_mplsvpn.h b/bgpd/bgp_mplsvpn.h index 92a9fba887ae..39fed667818a 100644 --- a/bgpd/bgp_mplsvpn.h +++ b/bgpd/bgp_mplsvpn.h @@ -419,6 +419,8 @@ struct bgp_mplsvpn_nh_label_bind_cache *bgp_mplsvpn_nh_label_bind_find( struct bgp_mplsvpn_nh_label_bind_cache_head *tree, struct prefix *p, mpls_label_t orig_label); void bgp_mplsvpn_nexthop_init(void); +extern void sid_register(struct bgp *bgp, const struct in6_addr *sid, + const char *locator_name); extern void sid_unregister(struct bgp *bgp, const struct in6_addr *sid); #endif /* _QUAGGA_BGP_MPLSVPN_H */ From 8a99927d0e6054a750c1dfedfc1ba324f88d2459 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Thu, 2 May 2024 17:08:07 +0200 Subject: [PATCH 08/45] bgpd: Receive SRv6 SIDs notification from zebra Zebra sends a `SRV6_SID_NOTIFY` notification to inform clients about the result of a SID alloc/release operation. This commit adds a handler to process a `SRV6_SID_NOTIFY` notification received from zebra. 
If the notification indicates that a SID allocation operation was successful, then it stores the allocated SID in the SRv6 database, installs the SID into the RIB, and advertises the SID to the other BGP routers. If the notification indicates that an operation has failed, it logs the error. Signed-off-by: Carmine Scarpitta --- bgpd/bgp_zebra.c | 227 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 227 insertions(+) diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 66125b4ef76d..f7e9b8beb274 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -3419,6 +3419,232 @@ static int bgp_zebra_process_srv6_locator_internal(struct srv6_locator *locator) return 0; } +static int bgp_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) +{ + struct bgp *bgp = bgp_get_default(); + struct srv6_locator *locator; + struct srv6_sid_ctx ctx; + struct in6_addr sid_addr; + enum zapi_srv6_sid_notify note; + struct bgp *bgp_vrf; + struct vrf *vrf; + struct listnode *node, *nnode; + char buf[256]; + struct in6_addr *tovpn_sid; + struct prefix_ipv6 tmp_prefix; + uint32_t sid_func; + bool found = false; + + if (!bgp || !bgp->srv6_enabled) + return -1; + + if (!bgp->srv6_locator) { + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("%s: ignoring SRv6 SID notify: locator not set", + __func__); + return -1; + } + + /* Decode the received notification message */ + if (!zapi_srv6_sid_notify_decode(zclient->ibuf, &ctx, &sid_addr, + &sid_func, NULL, ¬e)) { + zlog_err("%s : error in msg decode", __func__); + return -1; + } + + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("%s: received SRv6 SID notify: ctx %s sid_value %pI6 %s", + __func__, srv6_sid_ctx2str(buf, sizeof(buf), &ctx), + &sid_addr, zapi_srv6_sid_notify2str(note)); + + /* Get the BGP instance for which the SID has been requested, if any */ + for (ALL_LIST_ELEMENTS(bm->bgp, node, nnode, bgp_vrf)) { + vrf = vrf_lookup_by_id(bgp_vrf->vrf_id); + if (!vrf) + continue; + + if (vrf->vrf_id == ctx.vrf_id) { + found = true; + break; + } + } + + 
if (!found) { + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("%s: ignoring SRv6 SID notify: No VRF suitable for received SID ctx %s sid_value %pI6", + __func__, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx), + &sid_addr); + return -1; + } + + /* Handle notification */ + switch (note) { + case ZAPI_SRV6_SID_ALLOCATED: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("SRv6 SID %pI6 %s : ALLOCATED", &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Verify that the received SID belongs to the configured locator */ + tmp_prefix.family = AF_INET6; + tmp_prefix.prefixlen = IPV6_MAX_BITLEN; + tmp_prefix.prefix = sid_addr; + + if (!prefix_match((struct prefix *)&bgp->srv6_locator->prefix, + (struct prefix *)&tmp_prefix)) + return -1; + + /* Get label */ + uint8_t func_len = bgp->srv6_locator->function_bits_length; + uint8_t shift_len = BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH - + func_len; + + int label = sid_func << shift_len; + + /* Un-export VPN to VRF routes */ + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, bgp, + bgp_vrf); + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP6, bgp, + bgp_vrf); + + locator = srv6_locator_alloc(bgp->srv6_locator_name); + srv6_locator_copy(locator, bgp->srv6_locator); + + /* Store SID, locator, and label */ + tovpn_sid = XCALLOC(MTYPE_BGP_SRV6_SID, sizeof(struct in6_addr)); + *tovpn_sid = sid_addr; + if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT6) { + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid); + srv6_locator_free( + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator); + sid_unregister(bgp, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid); + + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid = tovpn_sid; + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator = locator; + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_transpose_label = + label; + } else if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT4) { + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid); + srv6_locator_free( + 
bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator); + sid_unregister(bgp, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid); + + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid = tovpn_sid; + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator = locator; + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_transpose_label = + label; + } else if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT46) { + XFREE(MTYPE_BGP_SRV6_SID, bgp_vrf->tovpn_sid); + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + sid_unregister(bgp, bgp_vrf->tovpn_sid); + + bgp_vrf->tovpn_sid = tovpn_sid; + bgp_vrf->tovpn_sid_locator = locator; + bgp_vrf->tovpn_sid_transpose_label = label; + } else { + srv6_locator_free(locator); + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("Unsupported behavior. Not assigned SRv6 SID: %s %pI6", + srv6_sid_ctx2str(buf, sizeof(buf), + &ctx), + &sid_addr); + return -1; + } + + /* Register the new SID */ + sid_register(bgp, tovpn_sid, bgp->srv6_locator_name); + + /* Export VPN to VRF routes */ + vpn_leak_postchange_all(); + + break; + case ZAPI_SRV6_SID_RELEASED: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("SRv6 SID %pI6 %s: RELEASED", &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Un-export VPN to VRF routes */ + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP, bgp, + bgp_vrf); + vpn_leak_prechange(BGP_VPN_POLICY_DIR_TOVPN, AFI_IP6, bgp, + bgp_vrf); + + /* Remove SID, locator, and label */ + if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT6) { + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid); + if (bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator) { + srv6_locator_free(bgp->vpn_policy[AFI_IP6] + .tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_locator = + NULL; + } + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid_transpose_label = + 0; + + /* Unregister the SID */ + sid_unregister(bgp, + bgp_vrf->vpn_policy[AFI_IP6].tovpn_sid); + } else if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT4) { + XFREE(MTYPE_BGP_SRV6_SID, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid); + if 
(bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator) { + srv6_locator_free(bgp->vpn_policy[AFI_IP] + .tovpn_sid_locator); + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_locator = + NULL; + } + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid_transpose_label = + 0; + + /* Unregister the SID */ + sid_unregister(bgp, + bgp_vrf->vpn_policy[AFI_IP].tovpn_sid); + } else if (ctx.behavior == ZEBRA_SEG6_LOCAL_ACTION_END_DT46) { + XFREE(MTYPE_BGP_SRV6_SID, bgp_vrf->tovpn_sid); + if (bgp_vrf->tovpn_sid_locator) { + srv6_locator_free(bgp_vrf->tovpn_sid_locator); + bgp_vrf->tovpn_sid_locator = NULL; + } + bgp_vrf->tovpn_sid_transpose_label = 0; + + /* Unregister the SID */ + sid_unregister(bgp, bgp_vrf->tovpn_sid); + } else { + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("Unsupported behavior. Not assigned SRv6 SID: %s %pI6", + srv6_sid_ctx2str(buf, sizeof(buf), + &ctx), + &sid_addr); + return -1; + } + + /* Export VPN to VRF routes*/ + vpn_leak_postchange_all(); + break; + case ZAPI_SRV6_SID_FAIL_ALLOC: + if (BGP_DEBUG(zebra, ZEBRA)) + zlog_debug("SRv6 SID %pI6 %s: Failed to allocate", + &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Error will be logged by zebra module */ + break; + case ZAPI_SRV6_SID_FAIL_RELEASE: + zlog_warn("%s: SRv6 SID %pI6 %s failure to release", __func__, + &sid_addr, srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Error will be logged by zebra module */ + break; + } + + return 0; +} + static int bgp_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) { struct srv6_locator loc = {}; @@ -3604,6 +3830,7 @@ static zclient_handler *const bgp_handlers[] = { [ZEBRA_SRV6_LOCATOR_DELETE] = bgp_zebra_process_srv6_locator_delete, [ZEBRA_SRV6_MANAGER_GET_LOCATOR_CHUNK] = bgp_zebra_process_srv6_locator_chunk, + [ZEBRA_SRV6_SID_NOTIFY] = bgp_zebra_srv6_sid_notify, }; static int bgp_if_new_hook(struct interface *ifp) From 1bb07d2ea15ee8f6b561ccc90ecc2cfdacfbd339 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Sat, 23 Mar 2024 19:25:31 +0100 Subject: [PATCH 09/45] 
bgpd: Cleanup related to SRv6 Remove unused SRv6 code. Signed-off-by: Carmine Scarpitta --- bgpd/bgp_mplsvpn.c | 94 ---------------------------------------------- 1 file changed, 94 deletions(-) diff --git a/bgpd/bgp_mplsvpn.c b/bgpd/bgp_mplsvpn.c index 5cce1f01b02f..432ead793617 100644 --- a/bgpd/bgp_mplsvpn.c +++ b/bgpd/bgp_mplsvpn.c @@ -646,100 +646,6 @@ static bool sid_exist(struct bgp *bgp, const struct in6_addr *sid) return false; } -/* - * This function generates a new SID based on bgp->srv6_locator_chunks and - * index. The locator and generated SID are stored in arguments sid_locator - * and sid, respectively. - * - * if index != 0: try to allocate as index-mode - * else: try to allocate as auto-mode - */ -static uint32_t alloc_new_sid(struct bgp *bgp, uint32_t index, - struct srv6_locator_chunk *sid_locator_chunk, - struct in6_addr *sid) -{ - int debug = BGP_DEBUG(vpn, VPN_LEAK_LABEL); - struct listnode *node; - struct srv6_locator_chunk *chunk; - bool alloced = false; - int label = 0; - uint8_t offset = 0; - uint8_t func_len = 0, shift_len = 0; - uint32_t index_max = 0; - - if (!bgp || !sid_locator_chunk || !sid) - return false; - - for (ALL_LIST_ELEMENTS_RO(bgp->srv6_locator_chunks, node, chunk)) { - if (chunk->function_bits_length > - BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH) { - if (debug) - zlog_debug( - "%s: invalid SRv6 Locator chunk (%pFX): Function Length must be less or equal to %d", - __func__, &chunk->prefix, - BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH); - continue; - } - - index_max = (1 << chunk->function_bits_length) - 1; - - if (index > index_max) { - if (debug) - zlog_debug( - "%s: skipped SRv6 Locator chunk (%pFX): Function Length is too short to support specified index (%u)", - __func__, &chunk->prefix, index); - continue; - } - - *sid = chunk->prefix.prefix; - *sid_locator_chunk = *chunk; - offset = chunk->block_bits_length + chunk->node_bits_length; - func_len = chunk->function_bits_length; - shift_len = 
BGP_PREFIX_SID_SRV6_MAX_FUNCTION_LENGTH - func_len; - - if (index != 0) { - label = index << shift_len; - if (label < MPLS_LABEL_UNRESERVED_MIN) { - if (debug) - zlog_debug( - "%s: skipped to allocate SRv6 SID (%pFX): Label (%u) is too small to use", - __func__, &chunk->prefix, - label); - continue; - } - - transpose_sid(sid, label, offset, func_len); - if (sid_exist(bgp, sid)) - continue; - alloced = true; - break; - } - - for (uint32_t i = 1; i < index_max; i++) { - label = i << shift_len; - if (label < MPLS_LABEL_UNRESERVED_MIN) { - if (debug) - zlog_debug( - "%s: skipped to allocate SRv6 SID (%pFX): Label (%u) is too small to use", - __func__, &chunk->prefix, - label); - continue; - } - transpose_sid(sid, label, offset, func_len); - if (sid_exist(bgp, sid)) - continue; - alloced = true; - break; - } - } - - if (!alloced) - return 0; - - sid_register(bgp, sid, bgp->srv6_locator_name); - return label; -} - /** * Return the SRv6 SID value obtained by composing the LOCATOR and FUNCTION. * From d787836b4bf0726bcff1f22268232f6ce916acc7 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Sat, 8 Jun 2024 07:15:47 +0200 Subject: [PATCH 10/45] bgpd: add locator name in sid notify messages In the near future, some daemons may only register SIDs. This may be the case for the pathd daemon when creating SRv6 binding SIDs. When a locator is getting deleted at ZEBRA level, the daemon may have an easy way to find out the SIDs to unregister. This commit proposes to add the locator name to the SID_SRV6_NOTIFY message whenever possible. Only in the case where an allocation failure happens, the locator will not be present. In all other places, the notify API at protocol level has the locator name extra-parameter.
Signed-off-by: Philippe Guibert Signed-off-by: Carmine Scarpitta --- bgpd/bgp_zebra.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index f7e9b8beb274..1f468ef2794a 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -3447,7 +3447,7 @@ static int bgp_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) /* Decode the received notification message */ if (!zapi_srv6_sid_notify_decode(zclient->ibuf, &ctx, &sid_addr, - &sid_func, NULL, ¬e)) { + &sid_func, NULL, ¬e, NULL)) { zlog_err("%s : error in msg decode", __func__); return -1; } From f0c5d0dea6665678277e8a4aff994296f7d1e5f7 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Mon, 4 Dec 2023 16:06:26 +0100 Subject: [PATCH 11/45] zclient: add srv6 segs to zapi_srte_tunnel Add SRv6 segments to zapi_srte_tunnel structure. Signed-off-by: Dmytro Shytyi --- lib/zclient.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/zclient.h b/lib/zclient.h index 2877b347d8d0..0c5155a496f4 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -637,9 +637,14 @@ struct zapi_labels { struct zapi_srte_tunnel { enum lsp_types_t type; + + /* MPLS-TE */ mpls_label_t local_label; uint8_t label_num; mpls_label_t labels[MPLS_MAX_LABELS]; + + /* SRv6-TE */ + struct seg6_segs srv6_segs; }; struct zapi_sr_policy { From 8d8c5d151cbbb1f58457021998b4f599e9d3b827 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Thu, 30 Nov 2023 19:14:08 +0100 Subject: [PATCH 12/45] yang,pathd: add srv6-sid-value and nb handlers Add SRv6 SID value (srv6-sid-value) to the YANG model. Implement nb handlers for this new value.
Signed-off-by: Dmytro Shytyi --- pathd/path_cli.c | 16 ++++++++++++++++ pathd/path_nb.c | 8 ++++++++ pathd/path_nb.h | 4 ++++ pathd/path_nb_config.c | 32 ++++++++++++++++++++++++++++++++ pathd/pathd.h | 3 +++ yang/frr-pathd.yang | 7 +++++++ 6 files changed, 70 insertions(+) diff --git a/pathd/path_cli.c b/pathd/path_cli.c index bf8a9ea02841..0d68926e0202 100644 --- a/pathd/path_cli.c +++ b/pathd/path_cli.c @@ -466,6 +466,8 @@ int segment_list_has_prefix( DEFPY(srte_segment_list_segment, srte_segment_list_segment_cmd, "index (0-4294967295)$index <[mpls$has_mpls_label label (16-1048575)$label] " "|" + "[ipv6-address$has_ipv6_address X:X::X:X$ipv6_address]" + "|" "[nai$has_nai <" "prefix " "" @@ -478,6 +480,8 @@ DEFPY(srte_segment_list_segment, srte_segment_list_segment_cmd, "MPLS or IP Label\n" "Label\n" "Label Value\n" + "IPv6 address\n" + "IPv6 address Value\n" "Segment NAI\n" "NAI prefix identifier\n" "NAI IPv4 prefix identifier\n" @@ -507,6 +511,14 @@ DEFPY(srte_segment_list_segment, srte_segment_list_segment_cmd, return nb_cli_apply_changes(vty, NULL); } + if (has_ipv6_address != NULL) { + snprintf(xpath, sizeof(xpath), + "./segment[index='%s']/srv6-sid-value", index_str); + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, + ipv6_address_str); + return nb_cli_apply_changes(vty, NULL); + } + if (has_adj != NULL) { status = segment_list_has_src_dst(vty, xpath, index, index_str, adj_src_ipv4, adj_dst_ipv4, @@ -551,6 +563,10 @@ void cli_show_srte_segment_list_segment(struct vty *vty, vty_out(vty, " mpls label %s", yang_dnode_get_string(dnode, "sid-value")); } + if (yang_dnode_exists(dnode, "srv6-sid-value")) { + vty_out(vty, " ipv6-address %s", + yang_dnode_get_string(dnode, "srv6-sid-value")); + } if (yang_dnode_exists(dnode, "nai")) { struct ipaddr addr; struct ipaddr addr_rmt; diff --git a/pathd/path_nb.c b/pathd/path_nb.c index e1c0cc3efa6c..29a143a3e1af 100644 --- a/pathd/path_nb.c +++ b/pathd/path_nb.c @@ -82,6 +82,14 @@ const struct frr_yang_module_info 
frr_pathd_info = { }, .priority = NB_DFLT_PRIORITY - 1 }, + { + .xpath = "/frr-pathd:pathd/srte/segment-list/segment/srv6-sid-value", + .cbs = { + .modify = pathd_srte_segment_list_segment_srv6_sid_value_modify, + .destroy = pathd_srte_segment_list_segment_srv6_sid_value_destroy, + }, + .priority = NB_DFLT_PRIORITY - 1 + }, { .xpath = "/frr-pathd:pathd/srte/segment-list/segment/nai", .cbs = { diff --git a/pathd/path_nb.h b/pathd/path_nb.h index 21876d788303..b89851e73929 100644 --- a/pathd/path_nb.h +++ b/pathd/path_nb.h @@ -32,6 +32,10 @@ void pathd_srte_segment_list_segment_nai_apply_finish( struct nb_cb_apply_finish_args *args); int pathd_srte_segment_list_segment_sid_value_destroy( struct nb_cb_destroy_args *args); +int pathd_srte_segment_list_segment_srv6_sid_value_modify( + struct nb_cb_modify_args *args); +int pathd_srte_segment_list_segment_srv6_sid_value_destroy( + struct nb_cb_destroy_args *args); int pathd_srte_policy_create(struct nb_cb_create_args *args); int pathd_srte_policy_destroy(struct nb_cb_destroy_args *args); const void *pathd_srte_policy_get_next(struct nb_cb_get_next_args *args); diff --git a/pathd/path_nb_config.c b/pathd/path_nb_config.c index 48531ba43339..7368e726d8df 100644 --- a/pathd/path_nb_config.c +++ b/pathd/path_nb_config.c @@ -162,6 +162,38 @@ int pathd_srte_segment_list_segment_sid_value_destroy( return NB_OK; } +/* + * XPath: /frr-pathd:pathd/srte/segment-list/segment/srv6-sid-value + */ +int pathd_srte_segment_list_segment_srv6_sid_value_modify( + struct nb_cb_modify_args *args) +{ + struct srte_segment_entry *segment; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + segment = nb_running_get_entry(args->dnode, NULL, true); + yang_dnode_get_ipv6(&segment->srv6_sid_value, args->dnode, NULL); + SET_FLAG(segment->segment_list->flags, F_SEGMENT_LIST_MODIFIED); + + return NB_OK; +} + +int pathd_srte_segment_list_segment_srv6_sid_value_destroy( + struct nb_cb_destroy_args *args) +{ + struct srte_segment_entry *segment; + + if 
(args->event != NB_EV_APPLY) + return NB_OK; + + segment = nb_running_get_entry(args->dnode, NULL, true); + memset(&segment->srv6_sid_value, 0, sizeof(segment->srv6_sid_value)); + SET_FLAG(segment->segment_list->flags, F_SEGMENT_LIST_MODIFIED); + + return NB_OK; +} int pathd_srte_segment_list_segment_nai_destroy(struct nb_cb_destroy_args *args) { diff --git a/pathd/pathd.h b/pathd/pathd.h index 75e7eff920e7..f0080caa0a1b 100644 --- a/pathd/pathd.h +++ b/pathd/pathd.h @@ -167,6 +167,9 @@ struct srte_segment_entry { /* Label Value. */ mpls_label_t sid_value; + /* SRv6 SID. */ + struct in6_addr srv6_sid_value; + /* NAI Type */ enum srte_segment_nai_type nai_type; /* NAI local address when nai type is not NONE */ diff --git a/yang/frr-pathd.yang b/yang/frr-pathd.yang index 5beda769c1a3..7aca37beb36f 100644 --- a/yang/frr-pathd.yang +++ b/yang/frr-pathd.yang @@ -89,6 +89,13 @@ module frr-pathd { type rt-types:mpls-label; description "MPLS label value"; } + leaf srv6-sid-value { + type inet:ipv6-address; + description "SRv6 SID value"; + } + must "not(../segment/sid-value) or not(../segment/srv6-sid-value)" { + error-message "Only MPLS label or SRv6 SID value can be configured in the same moment."; + } container nai { presence "The segment has a Node or Adjacency Identifier"; leaf type { From 4cc248ec334cc43634703c3ac2599b89c189b136 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Mon, 4 Dec 2023 15:33:59 +0100 Subject: [PATCH 13/45] lib,pathd,zebra: fill segment_list with sr types Add structure sr_types_t, that contains the Segment Routing types.
Fill the zapi_sr_policy's segment_list with values such as: - value - local_label - num_segs - srv6_segs Signed-off-by: Dmytro Shytyi --- lib/srte.h | 19 +++++++++++++++++++ lib/zclient.h | 2 +- pathd/path_zebra.c | 43 ++++++++++++++++++++++++++++++++++--------- zebra/zapi_msg.c | 2 +- zebra/zebra_nhg.c | 2 +- zebra/zebra_srte.c | 13 +++++++------ 6 files changed, 63 insertions(+), 18 deletions(-) diff --git a/lib/srte.h b/lib/srte.h index 69c3dbcd87db..2b008958ab1f 100644 --- a/lib/srte.h +++ b/lib/srte.h @@ -19,6 +19,25 @@ enum zebra_sr_policy_status { ZEBRA_SR_POLICY_DOWN, }; +/* SR types. */ +enum sr_types { + ZEBRA_SR_LSP_NONE = 0, /* No LSP. */ + ZEBRA_SR_LSP_SRTE = 1, /* SR-TE LSP */ + ZEBRA_SR_SRV6_SRTE = 2, /* SRv6 SID List*/ +}; + +static inline enum lsp_types_t lsp_type_from_sr_type(enum sr_types sr_type) +{ + switch (sr_type) { + case ZEBRA_SR_LSP_SRTE: + return ZEBRA_LSP_SRTE; + case ZEBRA_SR_LSP_NONE: + case ZEBRA_SR_SRV6_SRTE: + default: + return ZEBRA_LSP_NONE; + } +}; + static inline int sr_policy_compare(const struct ipaddr *a_endpoint, const struct ipaddr *b_endpoint, uint32_t a_color, uint32_t b_color) diff --git a/lib/zclient.h b/lib/zclient.h index 0c5155a496f4..05cfd39328f9 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -636,7 +636,7 @@ struct zapi_labels { }; struct zapi_srte_tunnel { - enum lsp_types_t type; + enum sr_types type; /* MPLS-TE */ mpls_label_t local_label; diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c index ba03315c82f7..ee5da867d85e 100644 --- a/pathd/path_zebra.c +++ b/pathd/path_zebra.c @@ -181,12 +181,27 @@ void path_zebra_add_sr_policy(struct srte_policy *policy, zp.color = policy->color; zp.endpoint = policy->endpoint; strlcpy(zp.name, policy->name, sizeof(zp.name)); - zp.segment_list.type = ZEBRA_LSP_SRTE; - zp.segment_list.local_label = policy->binding_sid; - zp.segment_list.label_num = 0; - RB_FOREACH (segment, srte_segment_entry_head, &segment_list->segments) - 
zp.segment_list.labels[zp.segment_list.label_num++] = - segment->sid_value; + segment = RB_MIN(srte_segment_entry_head, &segment_list->segments); + + if (sid_zero_ipv6(&segment->srv6_sid_value)) { + zp.segment_list.type = ZEBRA_SR_LSP_SRTE; + zp.segment_list.local_label = policy->binding_sid; + zp.segment_list.label_num = 0; + RB_FOREACH (segment, srte_segment_entry_head, + &segment_list->segments) + zp.segment_list.labels[zp.segment_list.label_num++] = + segment->sid_value; + } else { + zp.segment_list.type = ZEBRA_SR_SRV6_SRTE; + zp.segment_list.local_label = MPLS_LABEL_NONE; + zp.segment_list.srv6_segs.num_segs = 0; + RB_FOREACH (segment, srte_segment_entry_head, + &segment_list->segments) + IPV6_ADDR_COPY(&zp.segment_list.srv6_segs + .segs[zp.segment_list.srv6_segs + .num_segs++], + &segment->srv6_sid_value); + } policy->status = SRTE_POLICY_STATUS_GOING_UP; (void)zebra_send_sr_policy(zclient, ZEBRA_SR_POLICY_SET, &zp); @@ -200,13 +215,23 @@ void path_zebra_add_sr_policy(struct srte_policy *policy, void path_zebra_delete_sr_policy(struct srte_policy *policy) { struct zapi_sr_policy zp = {}; + struct srte_segment_entry *segment; zp.color = policy->color; zp.endpoint = policy->endpoint; strlcpy(zp.name, policy->name, sizeof(zp.name)); - zp.segment_list.type = ZEBRA_LSP_SRTE; - zp.segment_list.local_label = policy->binding_sid; - zp.segment_list.label_num = 0; + segment = RB_MIN(srte_segment_entry_head, + &policy->best_candidate->segment_list->segments); + + if (sid_zero_ipv6(&segment->srv6_sid_value)) { + zp.segment_list.type = ZEBRA_SR_LSP_SRTE; + zp.segment_list.local_label = policy->binding_sid; + zp.segment_list.label_num = 0; + } else { + zp.segment_list.local_label = MPLS_LABEL_NONE; + zp.segment_list.type = ZEBRA_SR_SRV6_SRTE; + zp.segment_list.srv6_segs.num_segs = 0; + } policy->status = SRTE_POLICY_STATUS_DOWN; (void)zebra_send_sr_policy(zclient, ZEBRA_SR_POLICY_DELETE, &zp); diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index 
aecbba2ebc5f..0765f1ed8958 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -2680,7 +2680,7 @@ static void zread_sr_policy_set(ZAPI_HANDLER_ARGS) return; } - if (!mpls_enabled) + if (!mpls_enabled && zt->type != ZEBRA_SR_SRV6_SRTE) return; policy = zebra_sr_policy_find(zp.color, &zp.endpoint); diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 637eabde8d2d..adc247f8d83a 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -1872,7 +1872,7 @@ static struct nexthop *nexthop_set_resolved(afi_t afi, for (; label_num < policy->segment_list.label_num; label_num++) labels[num_labels++] = policy->segment_list.labels[label_num]; - label_type = policy->segment_list.type; + label_type = lsp_type_from_sr_type(policy->segment_list.type); } else if (newhop->nh_label) { for (i = 0; i < newhop->nh_label->num_labels; i++) { /* Be a bit picky about overrunning the local array */ diff --git a/zebra/zebra_srte.c b/zebra/zebra_srte.c index c0b83382c457..28a7b2fbfeea 100644 --- a/zebra/zebra_srte.c +++ b/zebra/zebra_srte.c @@ -325,11 +325,11 @@ int zebra_sr_policy_bsid_install(struct zebra_sr_policy *policy) out_labels = zt->labels; } - if (mpls_lsp_install( - policy->zvrf, zt->type, zt->local_label, - num_out_labels, out_labels, nhlfe->nexthop->type, - &nhlfe->nexthop->gate, nhlfe->nexthop->ifindex) - < 0) + if (mpls_lsp_install(policy->zvrf, + lsp_type_from_sr_type(zt->type), + zt->local_label, num_out_labels, out_labels, + nhlfe->nexthop->type, &nhlfe->nexthop->gate, + nhlfe->nexthop->ifindex) < 0) return -1; } @@ -341,7 +341,8 @@ void zebra_sr_policy_bsid_uninstall(struct zebra_sr_policy *policy, { struct zapi_srte_tunnel *zt = &policy->segment_list; - mpls_lsp_uninstall_all_vrf(policy->zvrf, zt->type, old_bsid); + mpls_lsp_uninstall_all_vrf(policy->zvrf, + lsp_type_from_sr_type(zt->type), old_bsid); } int zebra_sr_policy_label_update(mpls_label_t label, From 83edae6d40a5501b071307a6e648082aeff5514d Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Thu, 30 Nov 2023 
19:57:04 +0100 Subject: [PATCH 14/45] zebra: zapi, at least one label or srv6 sid check SR-TE tunnel must contain at least one label or SRv6 SID. Check this condition. Signed-off-by: Dmytro Shytyi --- zebra/zapi_msg.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index 0765f1ed8958..936429a10094 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -2671,12 +2671,12 @@ static void zread_sr_policy_set(ZAPI_HANDLER_ARGS) __func__); return; } + zt = &zp.segment_list; - if (zt->label_num < 1) { + if (!(zt->label_num > 0 || zt->srv6_segs.num_segs > 0)) { if (IS_ZEBRA_DEBUG_RECV) - zlog_debug( - "%s: SR-TE tunnel must contain at least one label", - __func__); + zlog_debug("%s: SR-TE tunnel must contain at least one label or SRv6 SID", + __func__); return; } From 84c7ff8d5b2178ff9c4ebca3d3cc36982fdf98b0 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Thu, 30 Nov 2023 19:58:21 +0100 Subject: [PATCH 15/45] zebra: fill nexthop with srv6 segs same as for labels When the segment list is not empty add SRv6 segments to a nexthop. 
Signed-off-by: Dmytro Shytyi --- zebra/zebra_nhg.c | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index adc247f8d83a..adc87792ec0e 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -1910,8 +1910,15 @@ static struct nexthop *nexthop_set_resolved(afi_t afi, if (num_labels) nexthop_add_labels(resolved_hop, label_type, num_labels, labels); - - if (nexthop->nh_srv6) { + if (policy) { + if (!sid_zero_ipv6(&policy->segment_list.srv6_segs.segs[0])) { + nexthop_add_srv6_seg6(resolved_hop, + &policy->segment_list.srv6_segs + .segs[0], + policy->segment_list.srv6_segs + .num_segs); + } + } else if (nexthop->nh_srv6) { if (nexthop->nh_srv6->seg6local_action != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) nexthop_add_srv6_seg6local(resolved_hop, From 4ede640d9b2105c7e1ee765a8c0f040c3e1954e8 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Mon, 4 Dec 2023 16:15:31 +0100 Subject: [PATCH 16/45] zebra: encode, decode srv6 segs Encode and Decode SRv6-TE. It includes: - number of segments - list of segments Signed-off-by: Dmytro Shytyi --- lib/zclient.c | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/lib/zclient.c b/lib/zclient.c index 0e832f0d8fe5..c1ba41f78256 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -3878,6 +3878,19 @@ int zapi_sr_policy_encode(struct stream *s, int cmd, struct zapi_sr_policy *zp) for (int i = 0; i < zt->label_num; i++) stream_putl(s, zt->labels[i]); + /* Encode SRv6-TE */ + if (zt->srv6_segs.num_segs > SRV6_MAX_SEGS) { + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: can't encode %zu SRv6 SIDS (maximum is %u)", + __func__, zt->srv6_segs.num_segs, SRV6_MAX_SEGS); + return -1; + } + + stream_putw(s, zt->srv6_segs.num_segs); + if (zt->srv6_segs.num_segs) + stream_put(s, &zt->srv6_segs.segs[0], + zt->srv6_segs.num_segs * sizeof(struct in6_addr)); + /* Put length at the first point of the stream. 
*/ stream_putw_at(s, 0, stream_get_endp(s)); @@ -3908,6 +3921,19 @@ int zapi_sr_policy_decode(struct stream *s, struct zapi_sr_policy *zp) for (int i = 0; i < zt->label_num; i++) STREAM_GETL(s, zt->labels[i]); + /* Decode SRv6-TE */ + STREAM_GETW(s, zt->srv6_segs.num_segs); + + if (zt->srv6_segs.num_segs > SRV6_MAX_SEGS) { + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: can't encode %zu SRv6 SIDS (maximum is %u)", + __func__, zt->srv6_segs.num_segs, SRV6_MAX_SEGS); + return -1; + } + if (zt->srv6_segs.num_segs) + STREAM_GET(&zt->srv6_segs.segs[0], s, + zt->srv6_segs.num_segs * sizeof(struct in6_addr)); + return 0; stream_failure: From b4b83699c1265eec15c55b05dfa845d7319675b5 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Thu, 15 Feb 2024 13:37:31 +0100 Subject: [PATCH 17/45] zebra: fix srv6 segment-list sid order when read from kernel When configuring an SRv6 SID list via iproute2, the segment list that appears in vtysh, learned from the kernel, is inversed. Fix the order of SRv6 SIDs in segment list, learned from the kernel. 
Fixes: f20cf14 ("bgpd,lib,sharpd,zebra: srv6 introduce multiple segs/SIDs in nexthop") Signed-off-by: Dmytro Shytyi --- zebra/rt_netlink.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index ddcb83cd8ce7..900c999fe8d9 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -490,7 +490,8 @@ static int parse_encap_seg6(struct rtattr *tb, struct in6_addr *segs) RTA_DATA(tb_encap[SEG6_IPTUNNEL_SRH]); for (i = ipt->srh[0].first_segment; i >= 0; i--) - memcpy(&segs[i], &ipt->srh[0].segments[i], + memcpy(&segs[ipt->srh[0].first_segment - i], + &ipt->srh[0].segments[i], sizeof(struct in6_addr)); return ipt->srh[0].first_segment + 1; From 2e079111f0bad5bbb510afc6fa945fa162a471c8 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 1 Mar 2024 00:35:49 +0100 Subject: [PATCH 18/45] pathd: add registration mechanism Pathd needs to track the reachability of the first SID of any SRv6 segment-list. Use the nexthop tracking facility and add the pathd daemon as a client to nht service from zebra. 
Signed-off-by: Philippe Guibert Signed-off-by: Dmytro Shytyi --- lib/zclient.c | 4 +- lib/zclient.h | 6 + pathd/path_nb_config.c | 1 + pathd/path_pcep_config.c | 2 + pathd/path_zebra.c | 295 +++++++++++++++++++++++++++++++++++++-- pathd/path_zebra.h | 1 + pathd/pathd.c | 4 +- pathd/pathd.h | 1 + 8 files changed, 300 insertions(+), 14 deletions(-) diff --git a/lib/zclient.c b/lib/zclient.c index c1ba41f78256..79629e3920d6 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -2330,8 +2330,8 @@ const char *zapi_nexthop2str(const struct zapi_nexthop *znh, char *buf, /* * Decode the nexthop-tracking update message */ -static bool zapi_nexthop_update_decode(struct stream *s, struct prefix *match, - struct zapi_route *nhr) +bool zapi_nexthop_update_decode(struct stream *s, struct prefix *match, + struct zapi_route *nhr) { uint32_t i; diff --git a/lib/zclient.h b/lib/zclient.h index 05cfd39328f9..8a8a46a134f8 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -645,6 +645,9 @@ struct zapi_srte_tunnel { /* SRv6-TE */ struct seg6_segs srv6_segs; + + uint32_t metric; + uint8_t distance; }; struct zapi_sr_policy { @@ -1404,6 +1407,9 @@ extern int zapi_client_close_notify_decode(struct stream *s, extern int zclient_send_zebra_gre_request(struct zclient *client, struct interface *ifp); + +extern bool zapi_nexthop_update_decode(struct stream *s, struct prefix *match, + struct zapi_route *nhr); #ifdef __cplusplus } #endif diff --git a/pathd/path_nb_config.c b/pathd/path_nb_config.c index 7368e726d8df..f9dc82b58f9b 100644 --- a/pathd/path_nb_config.c +++ b/pathd/path_nb_config.c @@ -734,6 +734,7 @@ int pathd_srte_policy_candidate_path_segment_list_name_modify( candidate = nb_running_get_entry(args->dnode, NULL, true); segment_list_name = yang_dnode_get_string(args->dnode, NULL); + path_nht_removed(candidate); candidate->segment_list = srte_segment_list_find(segment_list_name); candidate->lsp->segment_list = candidate->segment_list; assert(candidate->segment_list); diff --git 
a/pathd/path_pcep_config.c b/pathd/path_pcep_config.c index da7ee89f2fef..e0dcc07260e3 100644 --- a/pathd/path_pcep_config.c +++ b/pathd/path_pcep_config.c @@ -13,6 +13,7 @@ #include "pathd/path_pcep.h" #include "pathd/path_pcep_config.h" #include "pathd/path_pcep_debug.h" +#include "pathd/path_zebra.h" #include "frrevent.h" #define MAX_XPATH 256 @@ -421,6 +422,7 @@ int path_pcep_config_update_path(struct path *path) number_of_sid_clashed++; } + path_nht_removed(candidate); candidate->lsp->segment_list = segment_list; SET_FLAG(candidate->flags, F_CANDIDATE_MODIFIED); diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c index ee5da867d85e..d849d63bbdcc 100644 --- a/pathd/path_zebra.c +++ b/pathd/path_zebra.c @@ -10,6 +10,7 @@ #include "lib_errors.h" #include "if.h" #include "prefix.h" +#include "jhash.h" #include "zclient.h" #include "network.h" #include "stream.h" @@ -48,6 +49,90 @@ struct in6_addr g_router_id_v6; pthread_mutex_t g_router_id_v4_mtx = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t g_router_id_v6_mtx = PTHREAD_MUTEX_INITIALIZER; +DEFINE_MTYPE_STATIC(PATHD, PATH_NHT_DATA, "Pathd Nexthop tracking data"); +PREDECL_HASH(path_nht_hash); + +struct path_nht_data { + struct path_nht_hash_item itm; + + struct prefix nh; + + vrf_id_t nh_vrf_id; + + uint32_t refcount; + uint8_t nh_num; + struct nexthop *nexthop; + bool registered; + + uint32_t metric; + uint8_t distance; +}; + +static int path_nht_data_cmp(const struct path_nht_data *nhtd1, + const struct path_nht_data *nhtd2) +{ + if (nhtd1->nh_vrf_id != nhtd2->nh_vrf_id) + return numcmp(nhtd1->nh_vrf_id, nhtd2->nh_vrf_id); + + return prefix_cmp(&nhtd1->nh, &nhtd2->nh); +} + +static unsigned int path_nht_data_hash(const struct path_nht_data *nhtd) +{ + unsigned int key = 0; + + key = prefix_hash_key(&nhtd->nh); + return jhash_1word(nhtd->nh_vrf_id, key); +} + +DECLARE_HASH(path_nht_hash, struct path_nht_data, itm, path_nht_data_cmp, + path_nht_data_hash); + +static struct path_nht_hash_head path_nht_hash[1]; + 
+static struct path_nht_data *path_nht_hash_getref(const struct path_nht_data *ref) +{ + struct path_nht_data *nhtd; + + nhtd = path_nht_hash_find(path_nht_hash, ref); + if (!nhtd) { + nhtd = XCALLOC(MTYPE_PATH_NHT_DATA, sizeof(*nhtd)); + + prefix_copy(&nhtd->nh, &ref->nh); + nhtd->nh_vrf_id = ref->nh_vrf_id; + + path_nht_hash_add(path_nht_hash, nhtd); + } + + nhtd->refcount++; + return nhtd; +} + +static bool path_nht_hash_decref(struct path_nht_data **nhtd_p) +{ + struct path_nht_data *nhtd = *nhtd_p; + + *nhtd_p = NULL; + + if (--nhtd->refcount > 0) + return true; + + path_nht_hash_del(path_nht_hash, nhtd); + XFREE(MTYPE_PATH_NHT_DATA, nhtd); + return false; +} + +static void path_nht_hash_clear(void) +{ + struct path_nht_data *nhtd; + + while ((nhtd = path_nht_hash_pop(path_nht_hash))) { + if (nhtd->nexthop) + nexthops_free(nhtd->nexthop); + XFREE(MTYPE_PATH_NHT_DATA, nhtd); + } +} + /** * Gives the IPv4 router ID received from Zebra. * @@ -57,6 +142,7 @@ pthread_mutex_t g_router_id_v6_mtx = PTHREAD_MUTEX_INITIALIZER; bool get_ipv4_router_id(struct in_addr *router_id) { bool retval = false; + assert(router_id != NULL); pthread_mutex_lock(&g_router_id_v4_mtx); if (g_has_router_id_v4) { @@ -76,6 +162,7 @@ bool get_ipv4_router_id(struct in_addr *router_id) bool get_ipv6_router_id(struct in6_addr *router_id) { bool retval = false; + assert(router_id != NULL); pthread_mutex_lock(&g_router_id_v6_mtx); if (g_has_router_id_v6) { @@ -86,6 +173,96 @@ bool get_ipv6_router_id(struct in6_addr *router_id) return retval; } +static bool path_zebra_segment_list_srv6(struct srte_segment_list *segment_list) +{ + struct srte_segment_entry *segment; + + segment = RB_MIN(srte_segment_entry_head, &segment_list->segments); + if (segment && !IPV6_ADDR_SAME(&segment->srv6_sid_value, &in6addr_any)) + return true; + + return false; +} + +static bool path_zebra_nht_get_srv6_prefix(struct srte_segment_list *segment_list, + struct prefix *nh) +{ + struct srte_segment_entry *segment; + bool 
found = false; + + if (!segment_list) + return false; + + segment = RB_MIN(srte_segment_entry_head, &segment_list->segments); + if (segment && !IPV6_ADDR_SAME(&segment->srv6_sid_value, &in6addr_any)) { + nh->family = AF_INET6; + nh->prefixlen = IPV6_MAX_BITLEN; + memcpy(&nh->u.prefix6, &segment->srv6_sid_value, + sizeof(struct in6_addr)); + found = true; + } + return found; +} + +static void path_zebra_add_srv6_policy_internal(struct srte_policy *policy) +{ + struct path_nht_data *nhtd, lookup = {}; + uint32_t cmd; + struct srte_candidate *candidate; + struct srte_segment_list *segment_list = NULL; + + candidate = policy->best_candidate; + if (candidate && candidate->lsp) + segment_list = candidate->lsp->segment_list; + + if (!segment_list) + return; + + if (!path_zebra_nht_get_srv6_prefix(segment_list, &lookup.nh)) + return; + + lookup.nh_vrf_id = VRF_DEFAULT; + + if (CHECK_FLAG(segment_list->flags, F_SEGMENT_LIST_NHT_REGISTERED)) { + /* nh->nh_registered means we own a reference on the nhtd */ + nhtd = path_nht_hash_find(path_nht_hash, &lookup); + + assertf(nhtd, "BUG: NH %pFX registered but not in hashtable", + &lookup.nh); + } else { + nhtd = path_nht_hash_getref(&lookup); + + if (nhtd->refcount > 1) + zlog_debug("Reusing registered nexthop(%pFX) for candidate %s pref %u (num %d)", + &lookup.nh, candidate->name, + candidate->preference, nhtd->nh_num); + } + + SET_FLAG(segment_list->flags, F_SEGMENT_LIST_NHT_REGISTERED); + + if (nhtd->nh_num) { + path_zebra_add_sr_policy(candidate->policy, segment_list); + return; + } + path_zebra_delete_sr_policy(candidate->policy); + + if (nhtd->registered) + /* have no data, but did send register */ + return; + + cmd = ZEBRA_NEXTHOP_REGISTER; + zlog_debug("Registering nexthop(%pFX) for candidate %s pref %u", + &lookup.nh, candidate->name, candidate->preference); + + if (zclient_send_rnh(zclient, cmd, &lookup.nh, SAFI_UNICAST, false, + false, VRF_DEFAULT) == ZCLIENT_SEND_FAILURE) + zlog_warn("%s: Failure to send nexthop %pFX 
for candidate %s pref %u to zebra", + __func__, &lookup.nh, candidate->name, + candidate->preference); + else + nhtd->registered = true; +} + static void path_zebra_connected(struct zclient *zclient) { struct srte_policy *policy; @@ -105,8 +282,10 @@ static void path_zebra_connected(struct zclient *zclient) segment_list = candidate->lsp->segment_list; if (!segment_list) continue; - - path_zebra_add_sr_policy(policy, segment_list); + if (path_zebra_segment_list_srv6(segment_list)) + path_zebra_add_srv6_policy_internal(policy); + else + path_zebra_add_sr_policy(policy, segment_list); } } @@ -140,6 +319,7 @@ static int path_zebra_router_id_update(ZAPI_CALLBACK_ARGS) struct prefix pref; const char *family; char buf[PREFIX2STR_BUFFER]; + zebra_router_id_update_read(zclient->ibuf, &pref); if (pref.family == AF_INET) { pthread_mutex_lock(&g_router_id_v4_mtx); @@ -166,14 +346,66 @@ static int path_zebra_router_id_update(ZAPI_CALLBACK_ARGS) return 0; } +/** + * Disconnect from NHT + */ +void path_nht_removed(struct srte_candidate *candidate) +{ + struct path_nht_data *nhtd, lookup; + struct srte_segment_list *segment_list; + bool was_zebra_registered; + + if (!candidate || !candidate->lsp) + return; + + segment_list = candidate->lsp->segment_list; + if (!segment_list) + return; + + if (!CHECK_FLAG(segment_list->flags, F_SEGMENT_LIST_NHT_REGISTERED)) + return; + + if (!path_zebra_nht_get_srv6_prefix(segment_list, &lookup.nh)) + return; + + lookup.nh_vrf_id = VRF_DEFAULT; + + /* nh->nh_registered means we own a reference on the nhtd */ + nhtd = path_nht_hash_find(path_nht_hash, &lookup); + + assertf(nhtd, "BUG: NH %pFX registered but not in hashtable", + &lookup.nh); + + was_zebra_registered = nhtd->registered; + UNSET_FLAG(segment_list->flags, F_SEGMENT_LIST_NHT_REGISTERED); + if (path_nht_hash_decref(&nhtd)) + /* still got references alive */ + return; + + /* NB: nhtd is now NULL. 
*/ + if (!was_zebra_registered) + return; + + zlog_debug("Unregistering nexthop(%pFX) for candidate %s pref %u", + &lookup.nh, candidate->name, candidate->preference); + + if (zclient_send_rnh(zclient, ZEBRA_NEXTHOP_UNREGISTER, &lookup.nh, + SAFI_UNICAST, false, false, + VRF_DEFAULT) == ZCLIENT_SEND_FAILURE) + zlog_warn("%s: Failure to send nexthop %pFX for candidate %s pref %u to zebra", + __func__, &lookup.nh, candidate->name, + candidate->preference); +} + /** * Adds a segment routing policy to Zebra. * * @param policy The policy to add * @param segment_list The segment list for the policy */ -void path_zebra_add_sr_policy(struct srte_policy *policy, - struct srte_segment_list *segment_list) +static void +path_zebra_add_sr_policy_internal(struct srte_policy *policy, + struct srte_segment_list *segment_list) { struct zapi_sr_policy zp = {}; struct srte_segment_entry *segment; @@ -181,9 +413,8 @@ void path_zebra_add_sr_policy(struct srte_policy *policy, zp.color = policy->color; zp.endpoint = policy->endpoint; strlcpy(zp.name, policy->name, sizeof(zp.name)); - segment = RB_MIN(srte_segment_entry_head, &segment_list->segments); - if (sid_zero_ipv6(&segment->srv6_sid_value)) { + if (!path_zebra_segment_list_srv6(segment_list)) { zp.segment_list.type = ZEBRA_SR_LSP_SRTE; zp.segment_list.local_label = policy->binding_sid; zp.segment_list.label_num = 0; @@ -207,6 +438,21 @@ void path_zebra_add_sr_policy(struct srte_policy *policy, (void)zebra_send_sr_policy(zclient, ZEBRA_SR_POLICY_SET, &zp); } +/** + * Adds a segment routing policy to Zebra. + * + * @param policy The policy to add + * @param segment_list The segment list for the policy + */ +void path_zebra_add_sr_policy(struct srte_policy *policy, + struct srte_segment_list *segment_list) +{ + if (path_zebra_segment_list_srv6(segment_list)) + path_zebra_add_srv6_policy_internal(policy); + else + path_zebra_add_sr_policy_internal(policy, segment_list); +} + /** * Deletes a segment policy from Zebra. 
* @@ -215,15 +461,18 @@ void path_zebra_add_sr_policy(struct srte_policy *policy, void path_zebra_delete_sr_policy(struct srte_policy *policy) { struct zapi_sr_policy zp = {}; - struct srte_segment_entry *segment; + struct srte_segment_entry *segment = NULL; zp.color = policy->color; zp.endpoint = policy->endpoint; strlcpy(zp.name, policy->name, sizeof(zp.name)); - segment = RB_MIN(srte_segment_entry_head, - &policy->best_candidate->segment_list->segments); - if (sid_zero_ipv6(&segment->srv6_sid_value)) { + if (policy->best_candidate && policy->best_candidate->segment_list) + segment = + RB_MIN(srte_segment_entry_head, + &policy->best_candidate->segment_list->segments); + + if (segment && sid_zero_ipv6(&segment->srv6_sid_value)) { zp.segment_list.type = ZEBRA_SR_LSP_SRTE; zp.segment_list.local_label = policy->binding_sid; zp.segment_list.label_num = 0; @@ -317,6 +566,24 @@ static void path_zebra_label_manager_connect(struct event *event) } } +static void path_zebra_nexthop_update(struct vrf *vrf, struct prefix *match, + struct zapi_route *nhr) +{ + struct path_nht_data *nhtd, lookup; + + if (match->family != AF_INET6) + return; + + memset(&lookup, 0, sizeof(lookup)); + prefix_copy(&lookup.nh, match); + lookup.nh_vrf_id = vrf->vrf_id; + + nhtd = path_nht_hash_find(path_nht_hash, &lookup); + + if (!nhtd) + zlog_err("Unable to find next-hop data for the given route."); +} + static int path_zebra_opaque_msg_handler(ZAPI_CALLBACK_ARGS) { int ret = 0; @@ -391,13 +658,21 @@ void path_zebra_init(struct event_loop *master) zclient_sync->instance = 1; zclient_sync->privs = &pathd_privs; + zclient->nexthop_update = path_zebra_nexthop_update; + /* Connect to the LM. 
*/ t_sync_connect = NULL; path_zebra_label_manager_connect(NULL); + + /* Pathd nht init */ + path_nht_hash_init(path_nht_hash); } void path_zebra_stop(void) { + path_nht_hash_clear(); + path_nht_hash_fini(path_nht_hash); + zclient_stop(zclient); zclient_free(zclient); event_cancel(&t_sync_connect); diff --git a/pathd/path_zebra.h b/pathd/path_zebra.h index 74a62e38b328..25995b8304f7 100644 --- a/pathd/path_zebra.h +++ b/pathd/path_zebra.h @@ -18,5 +18,6 @@ int path_zebra_request_label(mpls_label_t label); void path_zebra_release_label(mpls_label_t label); void path_zebra_init(struct event_loop *master); void path_zebra_stop(void); +void path_nht_removed(struct srte_candidate *candidate); #endif /* _FRR_PATH_MPLS_H_ */ diff --git a/pathd/pathd.c b/pathd/pathd.c index 431fe4d1e31e..bfd9b35c3f53 100644 --- a/pathd/pathd.c +++ b/pathd/pathd.c @@ -616,7 +616,8 @@ void srte_policy_apply_changes(struct srte_policy *policy) UNSET_FLAG(old_best_candidate->flags, F_CANDIDATE_BEST); SET_FLAG(old_best_candidate->flags, F_CANDIDATE_MODIFIED); - + if (old_best_candidate->lsp) + path_nht_removed(old_best_candidate); /* * Rely on replace semantics if there's a new best * candidate. 
@@ -629,7 +630,6 @@ void srte_policy_apply_changes(struct srte_policy *policy) SET_FLAG(new_best_candidate->flags, F_CANDIDATE_BEST); SET_FLAG(new_best_candidate->flags, F_CANDIDATE_MODIFIED); - path_zebra_add_sr_policy( policy, new_best_candidate->lsp->segment_list); } diff --git a/pathd/pathd.h b/pathd/pathd.h index f0080caa0a1b..e7b840a88490 100644 --- a/pathd/pathd.h +++ b/pathd/pathd.h @@ -210,6 +210,7 @@ struct srte_segment_list { #define F_SEGMENT_LIST_MODIFIED 0x0004 #define F_SEGMENT_LIST_DELETED 0x0008 #define F_SEGMENT_LIST_SID_CONFLICT 0x0010 +#define F_SEGMENT_LIST_NHT_REGISTERED 0x0020 }; RB_HEAD(srte_segment_list_head, srte_segment_list); RB_PROTOTYPE(srte_segment_list_head, srte_segment_list, entry, From 65f98600c00fc7b84e044581bea52f229de85684 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Sun, 3 Dec 2023 22:33:55 +0100 Subject: [PATCH 19/45] pathd, lib: add srv6 policy with resolved nexthop info The Pathd daemon is aware of the SID reachability, but still once the sr policy installed in zebra, there is need to re-resolve the SID reachability. Extend the Pathd NHT service, by extracting the resolved next-hop. This information will be conveyed in the SR_POLICY_SET message, and will be very helpful to simplify code in zebra. 
Signed-off-by: Philippe Guibert Signed-off-by: Dmytro Shytyi --- lib/zclient.c | 38 +++++++++++++- lib/zclient.h | 7 +++ pathd/path_zebra.c | 121 +++++++++++++++++++++++++++++++++++++++++++-- 3 files changed, 160 insertions(+), 6 deletions(-) diff --git a/lib/zclient.c b/lib/zclient.c index 79629e3920d6..ee6a7fdee044 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -3855,6 +3855,8 @@ enum zclient_send_status zebra_send_sr_policy(struct zclient *zclient, int cmd, int zapi_sr_policy_encode(struct stream *s, int cmd, struct zapi_sr_policy *zp) { struct zapi_srte_tunnel *zt = &zp->segment_list; + struct zapi_nexthop *znh; + int i; stream_reset(s); @@ -3891,6 +3893,18 @@ int zapi_sr_policy_encode(struct stream *s, int cmd, struct zapi_sr_policy *zp) stream_put(s, &zt->srv6_segs.segs[0], zt->srv6_segs.num_segs * sizeof(struct in6_addr)); + stream_putw(s, zt->nexthop_resolved_num); + + for (i = 0; i < zt->nexthop_resolved_num; i++) { + znh = &zt->nexthop_resolved[i]; + + if (zapi_nexthop_encode(s, znh, 0, 0) < 0) + return -1; + } + + stream_putl(s, zt->metric); + stream_putc(s, zt->distance); + /* Put length at the first point of the stream. 
*/ stream_putw_at(s, 0, stream_get_endp(s)); @@ -3899,9 +3913,11 @@ int zapi_sr_policy_encode(struct stream *s, int cmd, struct zapi_sr_policy *zp) int zapi_sr_policy_decode(struct stream *s, struct zapi_sr_policy *zp) { - memset(zp, 0, sizeof(*zp)); - struct zapi_srte_tunnel *zt = &zp->segment_list; + struct zapi_nexthop *znh; + int i; + + memset(zp, 0, sizeof(*zp)); STREAM_GETL(s, zp->color); STREAM_GET_IPADDR(s, &zp->endpoint); @@ -3934,6 +3950,24 @@ int zapi_sr_policy_decode(struct stream *s, struct zapi_sr_policy *zp) STREAM_GET(&zt->srv6_segs.segs[0], s, zt->srv6_segs.num_segs * sizeof(struct in6_addr)); + STREAM_GETW(s, zt->nexthop_resolved_num); + if (zt->nexthop_resolved_num > MULTIPATH_NUM) { + flog_err(EC_LIB_ZAPI_ENCODE, + "%s: invalid number of nexthops (%u)", __func__, + zt->nexthop_resolved_num); + return -1; + } + + for (i = 0; i < zt->nexthop_resolved_num; i++) { + znh = &zt->nexthop_resolved[i]; + + if (zapi_nexthop_decode(s, znh, 0, 0) != 0) + return -1; + } + + STREAM_GETL(s, zt->metric); + STREAM_GETC(s, zt->distance); + return 0; stream_failure: diff --git a/lib/zclient.h b/lib/zclient.h index 8a8a46a134f8..eba89a4daa70 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -646,6 +646,13 @@ struct zapi_srte_tunnel { /* SRv6-TE */ struct seg6_segs srv6_segs; + /* For SRv6 resolution. 
contains + * the resolved next-hop obtained by nexthop tracking + * the original metric and distance values + */ + uint16_t nexthop_resolved_num; + struct zapi_nexthop nexthop_resolved[MULTIPATH_NUM]; + uint32_t metric; uint8_t distance; }; diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c index d849d63bbdcc..5dcae859da96 100644 --- a/pathd/path_zebra.c +++ b/pathd/path_zebra.c @@ -68,6 +68,11 @@ struct path_nht_data { uint8_t distance; }; +static void +path_zebra_add_sr_policy_internal(struct srte_policy *policy, + struct srte_segment_list *segment_list, + struct path_nht_data *nhtd); + static int path_nht_data_cmp(const struct path_nht_data *nhtd1, const struct path_nht_data *nhtd2) { @@ -241,7 +246,8 @@ static void path_zebra_add_srv6_policy_internal(struct srte_policy *policy) SET_FLAG(segment_list->flags, F_SEGMENT_LIST_NHT_REGISTERED); if (nhtd->nh_num) { - path_zebra_add_sr_policy(candidate->policy, segment_list); + path_zebra_add_sr_policy_internal(candidate->policy, + segment_list, nhtd); return; } path_zebra_delete_sr_policy(candidate->policy); @@ -285,7 +291,8 @@ static void path_zebra_connected(struct zclient *zclient) if (path_zebra_segment_list_srv6(segment_list)) path_zebra_add_srv6_policy_internal(policy); else - path_zebra_add_sr_policy(policy, segment_list); + path_zebra_add_sr_policy_internal(policy, segment_list, + NULL); } } @@ -405,10 +412,14 @@ void path_nht_removed(struct srte_candidate *candidate) */ static void path_zebra_add_sr_policy_internal(struct srte_policy *policy, - struct srte_segment_list *segment_list) + struct srte_segment_list *segment_list, + struct path_nht_data *nhtd) { struct zapi_sr_policy zp = {}; struct srte_segment_entry *segment; + struct zapi_nexthop *znh; + struct nexthop *nexthop; + int num = 0; zp.color = policy->color; zp.endpoint = policy->endpoint; @@ -435,6 +446,16 @@ path_zebra_add_sr_policy_internal(struct srte_policy *policy, } policy->status = SRTE_POLICY_STATUS_GOING_UP; + if (nhtd && nhtd->nexthop) { 
+ zp.segment_list.distance = nhtd->distance; + zp.segment_list.metric = nhtd->metric; + for (ALL_NEXTHOPS_PTR(nhtd, nexthop)) { + znh = &zp.segment_list.nexthop_resolved[num++]; + zapi_nexthop_from_nexthop(znh, nexthop); + } + zp.segment_list.nexthop_resolved_num = nhtd->nh_num; + } + (void)zebra_send_sr_policy(zclient, ZEBRA_SR_POLICY_SET, &zp); } @@ -450,7 +471,7 @@ void path_zebra_add_sr_policy(struct srte_policy *policy, if (path_zebra_segment_list_srv6(segment_list)) path_zebra_add_srv6_policy_internal(policy); else - path_zebra_add_sr_policy_internal(policy, segment_list); + path_zebra_add_sr_policy_internal(policy, segment_list, NULL); } /** @@ -566,6 +587,96 @@ static void path_zebra_label_manager_connect(struct event *event) } } +static void path_nht_srv6_update(struct prefix *nh, struct path_nht_data *nhtd) +{ + struct srte_policy *policy; + struct prefix sid_srv6 = {}; + struct srte_candidate *candidate; + struct srte_segment_list *segment_list; + + RB_FOREACH (policy, srte_policy_head, &srte_policies) { + if (policy->endpoint.ipa_type != AF_INET6) + continue; + + candidate = policy->best_candidate; + if (!candidate) + continue; + if (!candidate->lsp) + continue; + segment_list = candidate->lsp->segment_list; + if (!segment_list) + continue; + + /* srv6 segment lists are registered */ + if (!CHECK_FLAG(segment_list->flags, + F_SEGMENT_LIST_NHT_REGISTERED)) + continue; + + if (!path_zebra_nht_get_srv6_prefix(segment_list, &sid_srv6)) + continue; + if (!IPV6_ADDR_SAME(&sid_srv6.u.prefix6, &nh->u.prefix6)) + continue; + if (nhtd->nh_num) + path_zebra_add_sr_policy_internal(policy, segment_list, + nhtd); + else + path_zebra_delete_sr_policy(policy); + } +} + +static bool path_zebra_srv6_nexthop_info_update(struct path_nht_data *nhtd, + struct zapi_route *nhr) +{ + struct nexthop *nexthop; + struct nexthop *nhlist_head = NULL; + struct nexthop *nhlist_tail = NULL; + struct nexthop *oldnh; + bool nh_changed = false; + int i; + + if (nhtd && nhr) + nhtd->nh_num 
= nhr->nexthop_num; + + if (!nhr->nexthop_num) { + nhtd->nh_num = nhr->nexthop_num; + if (nhtd->nexthop) + nexthop_free(nhtd->nexthop); + nhtd->nexthop = NULL; + return true; + } + + if (nhtd->distance != nhr->distance || nhtd->metric != nhr->metric) { + nhtd->distance = nhr->distance; + nhtd->metric = nhr->metric; + nh_changed = true; + } + + for (i = 0; i < nhr->nexthop_num; i++) { + nexthop = nexthop_from_zapi_nexthop(&nhr->nexthops[i]); + + if (nhlist_tail) { + nhlist_tail->next = nexthop; + nhlist_tail = nexthop; + } else { + nhlist_tail = nexthop; + nhlist_head = nexthop; + } + + for (oldnh = nhtd->nexthop; oldnh; oldnh = oldnh->next) + if (nexthop_same(oldnh, nexthop)) + break; + + if (!oldnh) + nh_changed = true; + } + if (nhtd->nexthop) + nexthop_free(nhtd->nexthop); + nhtd->nexthop = nhlist_head; + nhr->nexthop_num = nhr->nexthop_num; + + return nh_changed; +} + static void path_zebra_nexthop_update(struct vrf *vrf, struct prefix *match, struct zapi_route *nhr) { @@ -582,6 +693,8 @@ static void path_zebra_nexthop_update(struct vrf *vrf, struct prefix *match, if (!nhtd) zlog_err("Unable to find next-hop data for the given route."); + else if (path_zebra_srv6_nexthop_info_update(nhtd, nhr)) + path_nht_srv6_update(&nhr->prefix, nhtd); } static int path_zebra_opaque_msg_handler(ZAPI_CALLBACK_ARGS) From f09d80127be3927be5915e70c2b44f772fcaefac Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Mon, 4 Dec 2023 16:31:41 +0100 Subject: [PATCH 20/45] zebra: change srv6 policy based on segs change. When SRv6 list of segments was changed, change SRv6 policy. 
Signed-off-by: Dmytro Shytyi --- zebra/zebra_srte.c | 50 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/zebra/zebra_srte.c b/zebra/zebra_srte.c index 28a7b2fbfeea..b93951aae72f 100644 --- a/zebra/zebra_srte.c +++ b/zebra/zebra_srte.c @@ -208,6 +208,14 @@ static void zebra_sr_policy_notify_update(struct zebra_sr_policy *policy) } } +static void zebra_sr_policy_srv6_activate(struct zebra_sr_policy *policy) +{ + policy->status = ZEBRA_SR_POLICY_UP; + zsend_sr_policy_notify_status(policy->color, &policy->endpoint, + policy->name, ZEBRA_SR_POLICY_UP); + zebra_sr_policy_notify_update(policy); +} + static void zebra_sr_policy_activate(struct zebra_sr_policy *policy, struct zebra_lsp *lsp) { @@ -223,21 +231,29 @@ static void zebra_sr_policy_update(struct zebra_sr_policy *policy, struct zebra_lsp *lsp, struct zapi_srte_tunnel *old_tunnel) { - bool bsid_changed; - bool segment_list_changed; + bool bsid_mpls_changed; + bool segment_list_mpls_changed, segment_list_srv6_changed; policy->lsp = lsp; - bsid_changed = - policy->segment_list.local_label != old_tunnel->local_label; - segment_list_changed = - policy->segment_list.label_num != old_tunnel->label_num - || memcmp(policy->segment_list.labels, old_tunnel->labels, - sizeof(mpls_label_t) - * policy->segment_list.label_num); + bsid_mpls_changed = policy->segment_list.local_label != + old_tunnel->local_label; + + segment_list_mpls_changed = + policy->segment_list.label_num != old_tunnel->label_num || + memcmp(policy->segment_list.labels, old_tunnel->labels, + sizeof(mpls_label_t) * policy->segment_list.label_num); + + segment_list_srv6_changed = + policy->segment_list.srv6_segs.num_segs != + old_tunnel->srv6_segs.num_segs || + memcmp(policy->segment_list.srv6_segs.segs, + old_tunnel->srv6_segs.segs, + sizeof(struct in6_addr) * + policy->segment_list.srv6_segs.num_segs); /* Re-install label stack if necessary. 
*/ - if (bsid_changed || segment_list_changed) { + if (bsid_mpls_changed || segment_list_mpls_changed) { zebra_sr_policy_bsid_uninstall(policy, old_tunnel->local_label); (void)zebra_sr_policy_bsid_install(policy); } @@ -246,7 +262,7 @@ static void zebra_sr_policy_update(struct zebra_sr_policy *policy, policy->name, ZEBRA_SR_POLICY_UP); /* Handle segment-list update. */ - if (segment_list_changed) + if (segment_list_mpls_changed || segment_list_srv6_changed) zebra_sr_policy_notify_update(policy); } @@ -254,8 +270,11 @@ static void zebra_sr_policy_deactivate(struct zebra_sr_policy *policy) { policy->status = ZEBRA_SR_POLICY_DOWN; policy->lsp = NULL; - zebra_sr_policy_bsid_uninstall(policy, - policy->segment_list.local_label); + + if (policy->segment_list.local_label) + zebra_sr_policy_bsid_uninstall(policy, + policy->segment_list.local_label); + zsend_sr_policy_notify_status(policy->color, &policy->endpoint, policy->name, ZEBRA_SR_POLICY_DOWN); zebra_sr_policy_notify_update(policy); @@ -280,9 +299,10 @@ int zebra_sr_policy_validate(struct zebra_sr_policy *policy, } /* First label was resolved successfully. */ - if (policy->status == ZEBRA_SR_POLICY_DOWN) + if (policy->status == ZEBRA_SR_POLICY_DOWN) { zebra_sr_policy_activate(policy, lsp); - else + zebra_sr_policy_srv6_activate(policy); + } else zebra_sr_policy_update(policy, lsp, &old_tunnel); return 0; From cd5e5925c14d012cca92fe2e8ffabd38b5770a92 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Tue, 5 Dec 2023 22:59:56 +0100 Subject: [PATCH 21/45] zebra: if srv6 segs>0, put them in policy update message When SRv6 segment list size is bigger than 0, send segments and other elements via zserv_send_message(). Also, an error happens when BGP receives a nexthop update from ZEBRA SRTE. > Mar 11 08:19:40 north-vm bgpd[3842]: [KM298-5MMJ8] bgp_parse_nexthop_update[VRF default]: Failure to decode nexthop update The ZEBRA SRTE code can not rely on the routing table. 
Instead, use the resolved nexthop information gathered by the SRv6 SRTE. Signed-off-by: Philippe Guibert Signed-off-by: Dmytro Shytyi --- zebra/zebra_srte.c | 51 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/zebra/zebra_srte.c b/zebra/zebra_srte.c index b93951aae72f..6bf753552a1a 100644 --- a/zebra/zebra_srte.c +++ b/zebra/zebra_srte.c @@ -7,6 +7,7 @@ #include "lib/zclient.h" #include "lib/lib_errors.h" +#include "frrdistance.h" #include "zebra/zebra_srte.h" #include "zebra/zebra_mpls.h" @@ -81,6 +82,49 @@ struct zebra_sr_policy *zebra_sr_policy_find_by_name(char *name) return NULL; } +static int process_routes_for_policy(struct zebra_sr_policy *policy, + struct zserv *client, uint32_t message, + struct stream *s, struct zapi_nexthop *znh, + unsigned long *nump) +{ + int num = 0; + int ret, i; + struct prefix p = {}; + + assert(policy->status == ZEBRA_SR_POLICY_UP); + p.family = AF_INET6; + p.prefixlen = IPV6_MAX_BITLEN; + memcpy(&p.u.prefix6, &policy->endpoint.ipaddr_v6, sizeof(p.u.prefix6)); + + stream_putc(s, ZEBRA_ROUTE_SRTE); + stream_putw(s, 0); /* instance - not available */ + stream_putc(s, policy->segment_list.distance); + stream_putl(s, policy->segment_list.metric); + *nump = stream_get_endp(s); + stream_putc(s, 0); + + for (i = 0; i < policy->segment_list.nexthop_resolved_num; i++) { + znh = &policy->segment_list.nexthop_resolved[i]; + /* add SRTE in znh */ + if (CHECK_FLAG(message, ZAPI_MESSAGE_SRTE)) + znh->srte_color = policy->color; + ret = zapi_nexthop_encode(s, znh, 0, message); + if (ret < 0) + goto failure; + num++; + } + stream_putc_at(s, *nump, num); + stream_putw_at(s, 0, stream_get_endp(s)); + + client->nh_last_upd_time = monotime(NULL); + return zserv_send_message(client, s); + +failure: + stream_free(s); + /* Handle failure as needed */ + return ret; +} + static int zebra_sr_policy_notify_update_client(struct zebra_sr_policy *policy, struct zserv *client) { @@ -133,6 +177,13 @@ static 
int zebra_sr_policy_notify_update_client(struct zebra_sr_policy *policy, } stream_putl(s, policy->color); + if (policy->segment_list.srv6_segs.num_segs > SRV6_MAX_SIDS) + policy->segment_list.srv6_segs.num_segs = SRV6_MAX_SIDS; + + if (policy->segment_list.srv6_segs.num_segs > 0) + return process_routes_for_policy(policy, client, message, s, + &znh, &nump); + num = 0; frr_each (nhlfe_list_const, &policy->lsp->nhlfe_list, nhlfe) { if (!CHECK_FLAG(nhlfe->flags, NHLFE_FLAG_SELECTED) From aadcee8a075283b1d54f4fd9594c6579e2ce1d04 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Tue, 9 Jul 2024 18:47:36 +0200 Subject: [PATCH 22/45] zebra: add srv6 nexthop resolution given by sr policy Consider an srv6 policy active, when sr policy set is received. The sr_policy_validate() function is however called, the default state value is down, and immediately sent to UP. Signed-off-by: Philippe Guibert Signed-off-by: Dmytro Shytyi --- zebra/zebra_nhg.c | 33 +++++++++++++++++++++++---------- zebra/zebra_srte.c | 17 +++++++++++++---- zebra/zebra_srv6.h | 1 + 3 files changed, 37 insertions(+), 14 deletions(-) diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index adc87792ec0e..1fe1b7cdb9e0 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -2314,6 +2314,7 @@ static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe, if (nexthop->srte_color) { struct ipaddr endpoint = {0}; struct zebra_sr_policy *policy; + struct nexthop *nexthop_resolved; switch (afi) { case AFI_IP: @@ -2336,19 +2337,31 @@ static int nexthop_active(struct nexthop *nexthop, struct nhg_hash_entry *nhe, policy = zebra_sr_policy_find(nexthop->srte_color, &endpoint); if (policy && policy->status == ZEBRA_SR_POLICY_UP) { resolved = 0; - frr_each_safe (nhlfe_list, &policy->lsp->nhlfe_list, - nhlfe) { - if (!CHECK_FLAG(nhlfe->flags, - NHLFE_FLAG_SELECTED) - || CHECK_FLAG(nhlfe->flags, - NHLFE_FLAG_DELETED)) - continue; - SET_FLAG(nexthop->flags, - NEXTHOP_FLAG_RECURSIVE); - 
nexthop_set_resolved(afi, nhlfe->nexthop, + if (policy->segment_list.label_num > 0) { + frr_each_safe (nhlfe_list, + &policy->lsp->nhlfe_list, nhlfe) { + if (!CHECK_FLAG(nhlfe->flags, + NHLFE_FLAG_SELECTED) || + CHECK_FLAG(nhlfe->flags, + NHLFE_FLAG_DELETED)) + continue; + SET_FLAG(nexthop->flags, + NEXTHOP_FLAG_RECURSIVE); + nexthop_set_resolved(afi, nhlfe->nexthop, + nexthop, policy); + resolved = 1; + } + } else if (policy->segment_list.nexthop_resolved_num) { + nexthop_resolved = nexthop_from_zapi_nexthop( + &policy->segment_list.nexthop_resolved[0]); + + SET_FLAG(nexthop->flags, NEXTHOP_FLAG_RECURSIVE); + nexthop_set_resolved(afi, nexthop_resolved, nexthop, policy); resolved = 1; + nexthop_free(nexthop_resolved); } + if (resolved) return 1; } diff --git a/zebra/zebra_srte.c b/zebra/zebra_srte.c index 6bf753552a1a..7cb3d0dbc603 100644 --- a/zebra/zebra_srte.c +++ b/zebra/zebra_srte.c @@ -335,15 +335,24 @@ int zebra_sr_policy_validate(struct zebra_sr_policy *policy, struct zapi_srte_tunnel *new_tunnel) { struct zapi_srte_tunnel old_tunnel = policy->segment_list; - struct zebra_lsp *lsp; + struct zebra_lsp *lsp = NULL; + bool srv6_sid_resolved = false; if (new_tunnel) policy->segment_list = *new_tunnel; /* Try to resolve the Binding-SID nexthops. */ - lsp = mpls_lsp_find(policy->zvrf, policy->segment_list.labels[0]); - if (!lsp || !lsp->best_nhlfe - || lsp->addr_family != ipaddr_family(&policy->endpoint)) { + if (policy->segment_list.type == ZEBRA_SR_LSP_SRTE) + lsp = mpls_lsp_find(policy->zvrf, + policy->segment_list.labels[0]); + + /* Check if there are resolved nexthops in the segment list. */ + srv6_sid_resolved = policy->segment_list.nexthop_resolved_num ? 
true + : false; + + if ((!lsp || !lsp->best_nhlfe || + lsp->addr_family != ipaddr_family(&policy->endpoint)) && + !srv6_sid_resolved) { if (policy->status == ZEBRA_SR_POLICY_UP) zebra_sr_policy_deactivate(policy); return -1; diff --git a/zebra/zebra_srv6.h b/zebra/zebra_srv6.h index 1599fd7adfbd..b3b6af2b5e61 100644 --- a/zebra/zebra_srv6.h +++ b/zebra/zebra_srv6.h @@ -10,6 +10,7 @@ #include #include #include +#include "zebra_srte.h" #include "qobj.h" #include "prefix.h" From 375cb5d52ef09387b0f4542ff7df2774945e9783 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Mon, 11 Dec 2023 15:39:07 +0100 Subject: [PATCH 23/45] doc: add pathd srv6 segment-list support Add pathd srv6 segment list support. Signed-off-by: Philippe Guibert Signed-off-by: Dmytro Shytyi --- doc/user/pathd.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/user/pathd.rst b/doc/user/pathd.rst index 2519ac491228..e98bbfaa9a9b 100644 --- a/doc/user/pathd.rst +++ b/doc/user/pathd.rst @@ -288,6 +288,7 @@ Configuration Commands .. clicmd:: index INDEX nai adjacency A.B.C.D A.B.C.D .. clicmd:: index INDEX nai prefix A.B.C.D/M algorithm <0|1> .. clicmd:: index INDEX nai prefix A.B.C.D/M iface (0-65535) +.. clicmd:: index INDEX ipv6-address X:X::X:X Delete or specify a segment in a segment list definition. From 1523c1b010d573ca9c66a20cf162239dee0cdade Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Mon, 11 Dec 2023 16:15:37 +0100 Subject: [PATCH 24/45] topotest: isis_srv6_te_topo1 SRv6 Policy is an ordered list of segments that represent a source-routed policy. Packet flows are steered into an SRv6 Policy. 
Signed-off-by: Dmytro Shytyi Signed-off-by: Philippe Guibert --- .../isis_srv6_te_topo1/dst/zebra.conf | 22 + .../isis_srv6_te_topo1/rt1/bgpd.conf | 45 ++ .../isis_srv6_te_topo1/rt1/isisd.conf | 34 ++ .../isis_srv6_te_topo1/rt1/pathd.conf | 0 .../isis_srv6_te_topo1/rt1/sharpd.conf | 0 .../isis_srv6_te_topo1/rt1/staticd.conf | 0 .../rt1/step1/show_ipv6_route.ref | 246 +++++++++ .../rt1/step1/show_srv6_locator_table.ref | 18 + .../show_yang_interface_isis_adjacencies.ref | 32 ++ .../rt1/step2/show_srv6_route.ref | 36 ++ .../rt1/step3/show_srv6_additional_route.ref | 37 ++ .../rt1/step4/show_ipv6_route.ref | 37 ++ .../isis_srv6_te_topo1/rt1/zebra.conf | 23 + .../isis_srv6_te_topo1/rt2/isisd.conf | 48 ++ .../isis_srv6_te_topo1/rt2/zebra.conf | 32 ++ .../isis_srv6_te_topo1/rt3/isisd.conf | 48 ++ .../isis_srv6_te_topo1/rt3/zebra.conf | 31 ++ .../isis_srv6_te_topo1/rt4/isisd.conf | 56 +++ .../isis_srv6_te_topo1/rt4/zebra.conf | 34 ++ .../isis_srv6_te_topo1/rt5/isisd.conf | 56 +++ .../isis_srv6_te_topo1/rt5/zebra.conf | 34 ++ .../isis_srv6_te_topo1/rt6/bgpd.conf | 45 ++ .../isis_srv6_te_topo1/rt6/isisd.conf | 42 ++ .../isis_srv6_te_topo1/rt6/sharpd.conf | 0 .../isis_srv6_te_topo1/rt6/zebra.conf | 44 ++ .../isis_srv6_te_topo1/src/zebra.conf | 6 + .../test_isis_srv6_te_topo1.py | 472 ++++++++++++++++++ 27 files changed, 1478 insertions(+) create mode 100644 tests/topotests/isis_srv6_te_topo1/dst/zebra.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/bgpd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/isisd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/pathd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/sharpd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/staticd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/step1/show_ipv6_route.ref create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/step1/show_srv6_locator_table.ref create mode 100644 
tests/topotests/isis_srv6_te_topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/step2/show_srv6_route.ref create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/step3/show_srv6_additional_route.ref create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/step4/show_ipv6_route.ref create mode 100644 tests/topotests/isis_srv6_te_topo1/rt1/zebra.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt2/isisd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt2/zebra.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt3/isisd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt3/zebra.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt4/isisd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt4/zebra.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt5/isisd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt5/zebra.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt6/bgpd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt6/isisd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt6/sharpd.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/rt6/zebra.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/src/zebra.conf create mode 100644 tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py diff --git a/tests/topotests/isis_srv6_te_topo1/dst/zebra.conf b/tests/topotests/isis_srv6_te_topo1/dst/zebra.conf new file mode 100644 index 000000000000..60fe528e1513 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/dst/zebra.conf @@ -0,0 +1,22 @@ +log file zebra.log +! +hostname dst +! +! debug zebra kernel +! debug zebra packet +! debug zebra mpls +! +interface lo + ipv6 address fc00:0:9::1/128 +! +interface eth-rt6 + ip address 10.0.10.2/24 + ipv6 address 2001:db8:10::2/64 +! +ip forwarding +! +ip route 10.8.0.2/24 10.0.10.1 +ipv6 route 2001:db8:1::/64 2001:db8:10::1 +! +line vty +! 
diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/bgpd.conf b/tests/topotests/isis_srv6_te_topo1/rt1/bgpd.conf new file mode 100644 index 000000000000..40cea2d86fbd --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/bgpd.conf @@ -0,0 +1,45 @@ +frr defaults traditional +! +hostname r1 +password zebra +! +log stdout notifications +log commands +! +router bgp 65001 + bgp router-id 192.0.2.1 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + neighbor fc00:0:6b::1 remote-as 65002 + neighbor fc00:0:6b::1 ebgp-multihop + neighbor fc00:0:6b::1 update-source fc00:0:1b::1 + neighbor fc00:0:6b::1 timers 3 10 + neighbor fc00:0:6b::1 timers connect 1 + neighbor fc00:0:6b::1 capability extended-nexthop + ! + segment-routing srv6 + locator loc2 + ! + address-family ipv6 + neighbor fc00:0:6b::1 activate + exit-address-family +! +address-family ipv4 vpn + neighbor fc00:0:6b::1 activate + exit-address-family + ! +! +router bgp 65001 vrf vrf10 + bgp router-id 192.0.2.1 + ! + address-family ipv4 unicast + redistribute connected + sid vpn export 1 + rd vpn export 65001:10 + rt vpn both 0:10 + import vpn + export vpn + exit-address-family + ! +! +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/isisd.conf b/tests/topotests/isis_srv6_te_topo1/rt1/isisd.conf new file mode 100644 index 000000000000..4c5ef881ed64 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/isisd.conf @@ -0,0 +1,34 @@ +hostname rt1 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-interval 1 + isis hello-multiplier 10 +! 
+router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0001.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/pathd.conf b/tests/topotests/isis_srv6_te_topo1/rt1/pathd.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/sharpd.conf b/tests/topotests/isis_srv6_te_topo1/rt1/sharpd.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/staticd.conf b/tests/topotests/isis_srv6_te_topo1/rt1/staticd.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_ipv6_route.ref b/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_ipv6_route.ref new file mode 100644 index 000000000000..4311cb260dd6 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_ipv6_route.ref @@ -0,0 +1,246 @@ +{ + "fc00:0:1::/128": [ + { + "prefix": "fc00:0:1::/128", + "prefixLen": 128, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "active": true, + "weight": 1, + "seg6local": { + "action": "End" + } + } + ] + } + ], + "fc00:0:1:1::/128": [ + { + "prefix": "fc00:0:1:1::/128", + "prefixLen": 128, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "active": true, + "weight": 1, + "seg6local": { + "action": "End.X" + } + } + ] + } + ], + "fc00:0:1:2::/128": [ + { + "prefix": "fc00:0:1:2::/128", + "prefixLen": 128, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "directlyConnected": true, + "active": true, + "weight": 1, + 
"seg6local": { + "action": "End.X" + } + } + ] + } + ], + "fc00:0:2::/48": [ + { + "prefix": "fc00:0:2::/48", + "prefixLen": 48, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ], + "fc00:0:2::1/128": [ + { + "prefix": "fc00:0:2::1/128", + "prefixLen": 128, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ], + "fc00:0:3::/48": [ + { + "prefix": "fc00:0:3::/48", + "prefixLen": 48, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ], + "fc00:0:3::1/128": [ + { + "prefix": "fc00:0:3::1/128", + "prefixLen": 128, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ], + "fc00:0:4::/48": [ + { + "prefix": "fc00:0:4::/48", + "prefixLen": 48, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ], + "fc00:0:4::1/128": [ + { + "prefix": "fc00:0:4::1/128", + "prefixLen": 128, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ], + "fc00:0:5::/48": [ + { + "prefix": "fc00:0:5::/48", + "prefixLen": 48, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ], + "fc00:0:5::1/128": [ + { + "prefix": 
"fc00:0:5::1/128", + "prefixLen": 128, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ], + "fc00:0:6::/48": [ + { + "prefix": "fc00:0:6::/48", + "prefixLen": 48, + "protocol": "isis", + "selected": true, + "distance": 115, + "installed": true, + "nexthops": [ + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_srv6_locator_table.ref b/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_srv6_locator_table.ref new file mode 100644 index 000000000000..cd4e45fcd4f1 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_srv6_locator_table.ref @@ -0,0 +1,18 @@ +{ + "locators":[ + { + "name":"loc1", + "prefix":"fc00:0:1::/48", + "blockBitsLength":32, + "nodeBitsLength":16, + "functionBitsLength":16, + "argumentBitsLength":0, + "statusUp":true, + "chunks":[ + { + "prefix":"fc00:0:1::/48" + } + ] + } + ] +} diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref b/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref new file mode 100644 index 000000000000..9c5901b90ff1 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/step1/show_yang_interface_isis_adjacencies.ref @@ -0,0 +1,32 @@ +{ + "frr-interface:lib": { + "interface": [ + { + "name": "eth-sw1", + "vrf": "default", + "state": { + "frr-isisd:isis": { + "adjacencies": { + "adjacency": [ + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0003", + "hold-timer": 10, + "neighbor-priority": 64, + "state": "up" + }, + { + "neighbor-sys-type": "level-1", + "neighbor-sysid": "0000.0000.0002", + "hold-timer": 10, + "neighbor-priority": 64, + "state": 
"up" + } + ] + } + } + } + } + ] + } +} diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/step2/show_srv6_route.ref b/tests/topotests/isis_srv6_te_topo1/rt1/step2/show_srv6_route.ref new file mode 100644 index 000000000000..a57c04d5a982 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/step2/show_srv6_route.ref @@ -0,0 +1,36 @@ +{ + "2001:db8:10::/64": [ + { + "prefix": "2001:db8:10::/64", + "prefixLen": 64, + "protocol": "static", + "distance": 1, + "installed": true, + "nexthops": [ + { + "flags": 133, + "ip": "fc00:0:6::", + "afi": "ipv6", + "active": true, + "recursive": true, + "weight": 1, + "srteColor": 1 + }, + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "weight": 1, + "seg6local": { + "action": "unspec" + }, + "seg6": [ + "fc00:0:3::", + "fc00:0:5::", + "fc00:0:6::" + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/step3/show_srv6_additional_route.ref b/tests/topotests/isis_srv6_te_topo1/rt1/step3/show_srv6_additional_route.ref new file mode 100644 index 000000000000..fc4bd55d37fd --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/step3/show_srv6_additional_route.ref @@ -0,0 +1,37 @@ +{ + "fc00:0:6b::/48": [ + { + "prefix": "fc00:0:6b::/48", + "prefixLen": 48, + "protocol": "static", + "selected": true, + "nexthops": [ + { + "flags": 133, + "ip": "fc00:0:6::", + "afi": "ipv6", + "active":true, + "recursive": true, + "weight": 1, + "srteColor": 1 + }, + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "resolver": true, + "active": true, + "weight": 1, + "seg6local": { + "action": "unspec" + }, + "seg6": [ + "fc00:0:3::", + "fc00:0:5::", + "fc00:0:6::" + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/step4/show_ipv6_route.ref b/tests/topotests/isis_srv6_te_topo1/rt1/step4/show_ipv6_route.ref new file mode 100644 index 000000000000..fffa60e950bc --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/step4/show_ipv6_route.ref 
@@ -0,0 +1,37 @@ +{ + "fc00:0:6b::/48": [ + { + "prefix": "fc00:0:6b::/48", + "prefixLen": 48, + "protocol": "static", + "distance": 1, + "metric": 0, + "installed": true, + "nexthops": [ + { + "flags": 133, + "ip": "fc00:0:6::", + "afi": "ipv6", + "active": true, + "recursive": true, + "weight": 1, + "srteColor": 1 + }, + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + }, + { + "flags": 3, + "fib": true, + "afi": "ipv6", + "active": true, + "weight": 1 + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/topotests/isis_srv6_te_topo1/rt1/zebra.conf b/tests/topotests/isis_srv6_te_topo1/rt1/zebra.conf new file mode 100644 index 000000000000..35be716f664b --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt1/zebra.conf @@ -0,0 +1,23 @@ +log file zebra.log +! +hostname rt1 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ipv6 address fc00:0:1::1/48 + ipv6 address fc00:0:1b::1/48 +! +interface eth-sw1 + ip address 10.0.1.1/24 + ipv6 address 2001:db8:1::1/64 +! +interface eth-src + ip address 10.8.0.1/24 +exit +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt2/isisd.conf b/tests/topotests/isis_srv6_te_topo1/rt2/isisd.conf new file mode 100644 index 000000000000..b095f049101f --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt2/isisd.conf @@ -0,0 +1,48 @@ +hostname rt2 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt4-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! 
+interface eth-rt4-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0002.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt2/zebra.conf b/tests/topotests/isis_srv6_te_topo1/rt2/zebra.conf new file mode 100644 index 000000000000..16e11bd5fbd3 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt2/zebra.conf @@ -0,0 +1,32 @@ +log file zebra.log +! +hostname rt2 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ipv6 address fc00:0:2::1/128 +! +interface eth-sw1 + ip address 10.0.1.2/24 + ipv6 address 2001:db8:1::2/64 +! +interface eth-rt4-1 + ip address 10.0.2.2/24 +! +interface eth-rt4-2 + ip address 10.0.3.2/24 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:2::/48 block-len 32 node-len 16 func-bits 16 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt3/isisd.conf b/tests/topotests/isis_srv6_te_topo1/rt3/isisd.conf new file mode 100644 index 000000000000..e237db2f4940 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt3/isisd.conf @@ -0,0 +1,48 @@ +hostname rt3 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-sw1 + ip router isis 1 + ipv6 router isis 1 + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt5-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt5-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! 
+router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0003.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt3/zebra.conf b/tests/topotests/isis_srv6_te_topo1/rt3/zebra.conf new file mode 100644 index 000000000000..f1fd260823e9 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt3/zebra.conf @@ -0,0 +1,31 @@ +log file zebra.log +! +hostname rt3 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ipv6 address fc00:0:3::1/128 +! +interface eth-sw1 + ip address 10.0.1.3/24 +! +interface eth-rt5-1 + ip address 10.0.4.3/24 +! +interface eth-rt5-2 + ip address 10.0.5.3/24 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:3::/48 block-len 32 node-len 16 func-bits 16 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt4/isisd.conf b/tests/topotests/isis_srv6_te_topo1/rt4/isisd.conf new file mode 100644 index 000000000000..b4c92146a1f6 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt4/isisd.conf @@ -0,0 +1,56 @@ +hostname rt4 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt2-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt2-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt5 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt6 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! 
+router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0004.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt4/zebra.conf b/tests/topotests/isis_srv6_te_topo1/rt4/zebra.conf new file mode 100644 index 000000000000..4d8a351aedb7 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt4/zebra.conf @@ -0,0 +1,34 @@ +log file zebra.log +! +hostname rt4 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ipv6 address fc00:0:4::1/128 +! +interface eth-rt2-1 + ip address 10.0.2.4/24 +! +interface eth-rt2-2 + ip address 10.0.3.4/24 +! +interface eth-rt5 + ip address 10.0.6.4/24 +! +interface eth-rt6 + ip address 10.0.7.4/24 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:4::/48 block-len 32 node-len 16 func-bits 16 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt5/isisd.conf b/tests/topotests/isis_srv6_te_topo1/rt5/isisd.conf new file mode 100644 index 000000000000..26f895dd82ea --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt5/isisd.conf @@ -0,0 +1,56 @@ +hostname rt5 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt3-1 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt3-2 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt4 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! 
+interface eth-rt6 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0005.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt5/zebra.conf b/tests/topotests/isis_srv6_te_topo1/rt5/zebra.conf new file mode 100644 index 000000000000..b441fe6a9222 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt5/zebra.conf @@ -0,0 +1,34 @@ +log file zebra.log +! +hostname rt5 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ipv6 address fc00:0:5::1/128 +! +interface eth-rt3-1 + ip address 10.0.4.5/24 +! +interface eth-rt3-2 + ip address 10.0.5.5/24 +! +interface eth-rt4 + ip address 10.0.6.5/24 +! +interface eth-rt6 + ip address 10.0.8.5/24 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:5::/48 block-len 32 node-len 16 func-bits 16 + ! + ! +! +ip forwarding +! +line vty +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt6/bgpd.conf b/tests/topotests/isis_srv6_te_topo1/rt6/bgpd.conf new file mode 100644 index 000000000000..78a3279fc4c4 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt6/bgpd.conf @@ -0,0 +1,45 @@ +frr defaults traditional +! +hostname r2 +password zebra +! +log stdout notifications +log commands +! +router bgp 65002 + bgp router-id 192.0.2.2 + no bgp ebgp-requires-policy + no bgp default ipv4-unicast + neighbor fc00:0:1b::1 remote-as 65001 + neighbor fc00:0:1b::1 ebgp-multihop + neighbor fc00:0:1b::1 update-source fc00:0:6b::1 + neighbor fc00:0:1b::1 timers 3 10 + neighbor fc00:0:1b::1 timers connect 1 + neighbor fc00:0:1b::1 capability extended-nexthop + ! + segment-routing srv6 + locator loc2 + ! + address-family ipv6 + neighbor fc00:0:1b::1 activate + network 2001:db8:11::1/64 + exit-address-family + ! 
+ address-family ipv4 vpn + neighbor fc00:0:1b::1 activate + exit-address-family + ! +! +router bgp 65002 vrf vrf10 + bgp router-id 192.0.2.2 + ! + address-family ipv4 unicast + redistribute connected + sid vpn export 1 + rd vpn export 65002:10 + rt vpn both 0:10 + import vpn + export vpn + exit-address-family + ! +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt6/isisd.conf b/tests/topotests/isis_srv6_te_topo1/rt6/isisd.conf new file mode 100644 index 000000000000..f8816db43aac --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt6/isisd.conf @@ -0,0 +1,42 @@ +hostname rt6 +log file isisd.log +! +! debug isis events +! debug isis route-events +! debug isis spf-events +! debug isis sr-events +! debug isis lsp-gen +! +interface lo + ip router isis 1 + ipv6 router isis 1 + isis passive +! +interface eth-rt4 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +interface eth-rt5 + ip router isis 1 + ipv6 router isis 1 + isis network point-to-point + isis hello-interval 1 + isis hello-multiplier 10 +! +router isis 1 + lsp-gen-interval 2 + net 49.0000.0000.0000.0006.00 + is-type level-1 + topology ipv6-unicast + segment-routing srv6 + locator loc1 + node-msd + max-segs-left 3 + max-end-pop 3 + max-h-encaps 2 + max-end-d 5 + interface sr0 +! diff --git a/tests/topotests/isis_srv6_te_topo1/rt6/sharpd.conf b/tests/topotests/isis_srv6_te_topo1/rt6/sharpd.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/isis_srv6_te_topo1/rt6/zebra.conf b/tests/topotests/isis_srv6_te_topo1/rt6/zebra.conf new file mode 100644 index 000000000000..5c18c3ec50c8 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt6/zebra.conf @@ -0,0 +1,44 @@ +log file zebra.log +! +hostname rt6 +! +! debug zebra kernel +! debug zebra packet +! +interface lo + ipv6 address fc00:0:6::1/48 + ipv6 address fc00:0:6b::1/48 +! +interface eth-rt4 + ip address 10.0.7.6/24 +! 
+interface eth-rt5 + ip address 10.0.8.6/24 +! +interface eth-dst + ip address 10.0.10.1/24 + ip address 2001:db8:10::1/64 + ip address 2001:db8:11::1/64 +! +segment-routing + srv6 + locators + locator loc1 + prefix fc00:0:6::/48 block-len 32 node-len 16 func-bits 16 + exit + ! + locator loc2 + prefix fc00:0:6b::/48 block-len 32 node-len 16 func-bits 16 + exit + ! + exit + ! + exit + ! +exit +ip forwarding +! +ip route fc00:0:9::1/128 2001:db8:10::2 +! +line vty +! diff --git a/tests/topotests/isis_srv6_te_topo1/src/zebra.conf b/tests/topotests/isis_srv6_te_topo1/src/zebra.conf new file mode 100644 index 000000000000..dd6b394a164c --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/src/zebra.conf @@ -0,0 +1,6 @@ +ip route 10.0.10.0/24 10.8.0.1 +! +interface eth-rt1 + ip address 10.8.0.2/24 +exit +! diff --git a/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py b/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py new file mode 100644 index 000000000000..5af35a5c4256 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py @@ -0,0 +1,472 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# Copyright 2023 6WIND S.A. 
+# Dmytro Shytyi +# + + +""" +test_isis_srv6_te_topo1.py: + + + +---------+ + | | + | SRC | + | | + +---------+ + |eth-rt1 + | + |10.8.0.0/24 + | + |eth-src + +---------+ + | | + | RT1 | + | | + | | + +---------+ + |eth-sw1 + | + | + | + +---------+ | +---------+ + | | | | | + | RT2 |eth-sw1 | eth-sw1| RT3 | + | +----------+----------+ | + | | 10.0.1.0/24 | | + +---------+ +---------+ + eth-rt4-1| |eth-rt4-2 eth-rt5-1| |eth-rt5-2 + | | | | + 10.0.2.0/24| |10.0.3.0/24 10.0.4.0/24| |10.0.5.0/24 + | | | | + eth-rt2-1| |eth-rt2-2 eth-rt3-1| |eth-rt3-2 + +---------+ +---------+ + | | | | + | RT4 | 10.0.6.0/24 | RT5 | + | +---------------------+ | + | |eth-rt5 eth-rt4| | + +---------+ +---------+ + eth-rt6| |eth-rt6 + | | + 10.0.7.0/24| |10.0.8.0/24 + | +---------+ | + | | | | + | | RT6 | | + +----------+ +-----------+ + eth-rt4| |eth-rt5 + +---------+ + |eth-dst (.1) + | + |10.0.10.0/24 + | + |eth-rt6 (.2) + +---------+ + | | + | DST | + | | + +---------+ + +""" + +import os +import re +import sys +import json +import functools +import pytest + +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger +from lib.common_config import ( + required_linux_kernel_version, + create_interface_in_kernel, +) + +pytestmark = [pytest.mark.isisd, pytest.mark.bgpd] + + +def build_topo(tgen): + """Build function""" + + # Define FRR Routers + tgen.add_router("rt1") + tgen.add_router("rt2") + tgen.add_router("rt3") + tgen.add_router("rt4") + tgen.add_router("rt5") + tgen.add_router("rt6") + tgen.add_router("dst") + tgen.add_router("src") + + # Define connections + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["rt1"], nodeif="eth-sw1") + switch.add_link(tgen.gears["rt2"], nodeif="eth-sw1") + switch.add_link(tgen.gears["rt3"], nodeif="eth-sw1") + + switch = 
tgen.add_switch("s2") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-1") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-1") + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["rt2"], nodeif="eth-rt4-2") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt2-2") + + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-1") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-1") + + switch = tgen.add_switch("s5") + switch.add_link(tgen.gears["rt3"], nodeif="eth-rt5-2") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt3-2") + + switch = tgen.add_switch("s6") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt5") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt4") + + switch = tgen.add_switch("s7") + switch.add_link(tgen.gears["rt4"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt4") + + switch = tgen.add_switch("s8") + switch.add_link(tgen.gears["rt5"], nodeif="eth-rt6") + switch.add_link(tgen.gears["rt6"], nodeif="eth-rt5") + + switch = tgen.add_switch("s9") + switch.add_link(tgen.gears["rt6"], nodeif="eth-dst") + switch.add_link(tgen.gears["dst"], nodeif="eth-rt6") + + switch = tgen.add_switch("s10") + switch.add_link(tgen.gears["rt1"], nodeif="eth-src") + switch.add_link(tgen.gears["src"], nodeif="eth-rt1") + + # Add dummy interface for SRv6 + create_interface_in_kernel( + tgen, + "rt1", + "sr0", + "2001:db8::1", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt2", + "sr0", + "2001:db8::2", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt3", + "sr0", + "2001:db8::3", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt4", + "sr0", + "2001:db8::4", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt5", + "sr0", + "2001:db8::5", + netmask="128", + create=True, + ) + create_interface_in_kernel( + tgen, + "rt6", + "sr0", + "2001:db8::6", + netmask="128", + create=True, + ) + 
+ +def setup_module(mod): + """Sets up the pytest environment""" + + # Verify if kernel requirements are satisfied + result = required_linux_kernel_version("4.10") + if result is not True: + pytest.skip("Kernel requirements are not met") + + # Build the topology + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + # For all registered routers, load the zebra and isis configuration files + for rname, router in tgen.routers().items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + if os.path.exists("{}/isisd.conf".format(rname)): + router.load_config( + TopoRouter.RD_ISIS, os.path.join(CWD, "{}/isisd.conf".format(rname)) + ) + if os.path.exists("{}/bgpd.conf".format(rname)): + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + if os.path.exists("{}/pathd.conf".format(rname)): + router.load_config( + TopoRouter.RD_PATH, os.path.join(CWD, "{}/pathd.conf".format(rname)) + ) + if os.path.exists("{}/staticd.conf".format(rname)): + router.load_config( + TopoRouter.RD_STATIC, os.path.join(CWD, "{}/staticd.conf".format(rname)) + ) + + tgen.gears["rt1"].run("sysctl net.vrf.strict_mode=1") + tgen.gears["rt1"].run("ip link add vrf10 type vrf table 10") + tgen.gears["rt1"].run("ip link set vrf10 up") + tgen.gears["rt1"].run("ip link set eth-src master vrf10") + + tgen.gears["rt6"].run("sysctl net.vrf.strict_mode=1") + tgen.gears["rt6"].run("ip link add vrf10 type vrf table 10") + tgen.gears["rt6"].run("ip link set vrf10 up") + tgen.gears["rt6"].run("sysctl -w net.ipv6.conf.all.seg6_enabled=1") + tgen.gears["rt6"].run("sysctl -w net.ipv6.conf.default.seg6_enabled=1") + tgen.gears["rt6"].run("sysctl -w net.ipv6.conf.eth-rt4.seg6_enabled=1") + tgen.gears["rt6"].run("sysctl -w net.ipv6.conf.eth-rt5.seg6_enabled=1") + + # Start routers + tgen.start_router() + + +def teardown_module(mod): + "Teardown the pytest environment" + + # Teardown the topology + tgen = 
get_topogen() + tgen.stop_topology() + + +def router_compare_json_output(rname, command, reference): + "Compare router JSON output" + + logger.info('Comparing router "%s" "%s" output', rname, command) + + tgen = get_topogen() + filename = "{}/{}/{}".format(CWD, rname, reference) + expected = json.loads(open(filename).read()) + + # Run test function until we get an result. Wait at most 60 seconds. + test_func = functools.partial( + topotest.router_json_cmp, tgen.gears[rname], command, expected + ) + _, diff = topotest.run_and_expect(test_func, None, count=120, wait=0.5) + assertmsg = '"{}" JSON output mismatches the expected result'.format(rname) + assert diff is None, assertmsg + + +# +# Step 1 +# +# Test initial network convergence +# +def test_isis_adjacencies_step1(): + logger.info("Test (step 1): check IS-IS adjacencies") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + router_compare_json_output( + rname, + "show yang operational-data /frr-interface:lib isisd", + "step1/show_yang_interface_isis_adjacencies.ref", + ) + + +def test_configure_srv6_locators(): + tgen = get_topogen() + tgen.gears["rt1"].vtysh_cmd( + "configure \n \ + segment-routing \n \ + traffic-eng \n \ + segment-list srv6-header \n \ + index 1 ipv6-address fc00:0:3:: \n \ + index 2 ipv6-address fc00:0:5:: \n \ + index 3 ipv6-address fc00:0:6:: \n \ + exit \n \ + exit \n \ + srv6 \n \ + locators \n \ + locator loc1 \n \ + prefix fc00:0:1::/48 block-len 32 node-len 16 func-bits 16 \n \ + exit \n \ + locator loc2 \n \ + prefix fc00:0:1b::/48 block-len 32 node-len 16 func-bits 16 \n \ + exit \n \ + exit \n \ + exit \n \ + exit" + ) + + +def test_rib_ipv6_step1(): + logger.info("Test (step 1): verify IPv6 RIB") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + for rname in ["rt1"]: + 
router_compare_json_output(
+ rname, "show ipv6 route isis json", "step1/show_ipv6_route.ref"
+ )
+
+
+def test_srv6_locator_step1():
+ logger.info("Test (step 1): verify SRv6 Locator")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show segment-routing srv6 locator json",
+ "step1/show_srv6_locator_table.ref",
+ )
+
+
+#
+# Step 2
+#
+# Test SRv6 TE Policy activated
+#
+
+
+def test_srv6_te_policy_activated():
+ logger.info("Test (step 2): verify SRv6 TE policy activated")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["rt1"].vtysh_cmd(
+ "configure \n \
+ ipv6 route 2001:db8:10::/64 fc00:0:6:: color 1 \n \
+ segment-routing \n \
+ traffic-eng \n \
+ policy color 1 endpoint fc00:0:6:: \n \
+ candidate-path preference 1 name srv6 explicit segment-list srv6-header \n \
+ exit \
+ exit \
+ exit \
+ !"
+ )
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route static json",
+ "step2/show_srv6_route.ref",
+ )
+
+
+#
+# Step 3
+#
+# Test SRv6 additional srv6 route.
+#
+
+
+def test_srv6_te_policy_additional_route():
+ logger.info("Test (step 3): verify SRv6 TE additional route")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["rt1"].vtysh_cmd(
+ "configure \n \
+ no ipv6 route 2001:db8:10::/64 fc00:0:6:: color 1 \n \
+ ipv6 route fc00:0:6b::/48 fc00:0:6:: color 1 \n \
+ exit \
+ !"
+ )
+
+ # Add this to use the 'eth-dst' interface for BGP tests.
+ tgen.gears["rt6"].run("ip link set eth-dst master vrf10")
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route static json",
+ "step3/show_srv6_additional_route.ref",
+ )
+
+
+#
+# Step 4
+#
+# Test SRv6 TE Policy removed
+#
+
+
+def test_srv6_te_policy_removed():
+ logger.info("Test (step 4): verify SRv6 TE policy removed")
+ tgen = get_topogen()
+
+ # Skip if previous fatal error condition is raised
+ if tgen.routers_have_failure():
+ pytest.skip(tgen.errors)
+
+ tgen.gears["rt1"].vtysh_cmd(
+ "configure \n \
+ segment-routing \n \
+ traffic-eng \n \
+ no policy color 1 endpoint fc00:0:6:: \n \
+ exit \
+ exit \
+ exit \
+ !"
+ )
+
+ for rname in ["rt1"]:
+ router_compare_json_output(
+ rname,
+ "show ipv6 route static json",
+ "step4/show_ipv6_route.ref",
+ )
+
+
+# Memory leak test template
+def test_memory_leak():
+ "Run the memory leak test and report results."
+ tgen = get_topogen()
+ if not tgen.is_memleak_enabled():
+ pytest.skip("Memory leak test/report is disabled")
+
+ tgen.report_memory_leaks()
+
+
+if __name__ == "__main__":
+ args = ["-s"] + sys.argv[1:]
+ sys.exit(pytest.main(args))
From 3dba3f5d97bbedd7f597e85e8ba8c74549f179ff Mon Sep 17 00:00:00 2001
From: Carmine Scarpitta
Date: Mon, 24 Jun 2024 20:28:50 +0200
Subject: [PATCH 25/45] lib: Add ifindex to SRv6 SID ctx

When daemons allocate End.X SIDs they should also pass the interface index.
Signed-off-by: Carmine Scarpitta --- lib/srv6.h | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/srv6.h b/lib/srv6.h index f25c5cfcaa5c..c6b2a1853711 100644 --- a/lib/srv6.h +++ b/lib/srv6.h @@ -255,6 +255,7 @@ struct srv6_sid_ctx { struct in_addr nh4; struct in6_addr nh6; vrf_id_t vrf_id; + ifindex_t ifindex; }; static inline const char *seg6_mode2str(enum seg6_mode_t mode) From e18b1102663dea48f599cdf818e8374b9df52c94 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Mon, 24 Jun 2024 20:30:28 +0200 Subject: [PATCH 26/45] isisd: Pass ifindex when allocate End.X SIDs When isisd allocates an End.X SID it should also pass the interface index down to zebra. Signed-off-by: Carmine Scarpitta --- isisd/isis_zebra.c | 1 + 1 file changed, 1 insertion(+) diff --git a/isisd/isis_zebra.c b/isisd/isis_zebra.c index caf7d3ddfbfc..aa00f3933035 100644 --- a/isisd/isis_zebra.c +++ b/isisd/isis_zebra.c @@ -669,6 +669,7 @@ void isis_zebra_request_srv6_sid_endx(struct isis_adjacency *adj) ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_X; ctx.nh6 = nexthop; + ctx.ifindex = circuit->interface->ifindex; ret = isis_zebra_request_srv6_sid(&ctx, &sid_value, area->srv6db.config.srv6_locator_name); if (!ret) { From a9839cbc429a7c79458de736c1f1ba1f09cecf17 Mon Sep 17 00:00:00 2001 From: Carmine Scarpitta Date: Tue, 25 Jun 2024 18:10:48 +0200 Subject: [PATCH 27/45] zebra: Add CLI to show SRv6 SIDs Add a command to show SRv6 SIDs allocated. 
Output examples: ``` router# show segment-routing srv6 sid SID Behavior Context Daemon/Instance ----------------- ---------- --------------------- ----------------- fc00:0:1:: uN - isis(0) fc00:0:1:fe00:: uDT6 VRF 'vrf10' bgp(0) fc00:0:1:fe01:: uDT6 VRF 'vrf20' bgp(0) fc00:0:1:e000:: uA Interface 'eth-sw1' isis(0) fc00:0:1:e001:: uA Interface 'eth-sw1' isis(0) ``` ``` router# show segment-routing srv6 sid fc00:0:1:e000:: detail SID Behavior Context Daemon/Instance ----------------- ---------- --------------------- ----------------- fc00:0:1:e000:: uA Interface 'eth-sw1' isis(0) Locator: loc1 Allocation type: dynamic ``` ``` router# show segment-routing srv6 sid json [ { "sid":"fc00:0:1::", "behavior":"uN", "context":"-", "daemons":"isis(0)" }, { "sid":"fc00:0:1:fe00::", "behavior":"uDT6", "context":"VRF 'vrf10'", "daemons":"bgp(0)" }, { "sid":"fc00:0:1:fe01::", "behavior":"uDT6", "context":"VRF 'vrf20'", "daemons":"bgp(0)" }, { "sid":"fc00:0:1:e000::", "behavior":"uA", "context":"Interface 'eth-sw1'", "daemons":"isis(0)" }, { "sid":"fc00:0:1:e001::", "behavior":"uA", "context":"Interface 'eth-sw1'", "daemons":"isis(0)" } ] ``` Signed-off-by: Carmine Scarpitta --- zebra/zebra_srv6_vty.c | 302 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 302 insertions(+) diff --git a/zebra/zebra_srv6_vty.c b/zebra/zebra_srv6_vty.c index 5a8052414902..a0f0bcb1f50d 100644 --- a/zebra/zebra_srv6_vty.c +++ b/zebra/zebra_srv6_vty.c @@ -16,6 +16,7 @@ #include "vrf.h" #include "srv6.h" #include "lib/json.h" +#include "termtable.h" #include "zebra/zserv.h" #include "zebra/zebra_router.h" @@ -259,6 +260,305 @@ DEFUN (show_srv6_locator_detail, return CMD_SUCCESS; } +static void do_show_srv6_sid_line(struct ttable *tt, struct zebra_srv6_sid *sid) +{ + struct listnode *node; + struct zserv *client; + char clients[256]; + char ctx[256] = {}; + char behavior[256] = {}; + struct vrf *vrf; + struct interface *ifp; + int ret; + + /* Zclients */ + if (listcount(sid->client_list)) { + 
bool first = true;
+ int i = 0;
+ for (ALL_LIST_ELEMENTS_RO(sid->client_list, node, client)) {
+ if (first)
+ ret = snprintf(clients + i, sizeof(clients) - i,
+ "%s(%d)",
+ zebra_route_string(client->proto),
+ client->instance);
+ else
+ ret = snprintf(clients + i, sizeof(clients) - i,
+ ", %s(%d)",
+ zebra_route_string(client->proto),
+ client->instance);
+ first = false;
+ if (ret > 0)
+ i += ret;
+ }
+ }
+
+ /* Behavior */
+ if (sid->locator) {
+ if ((sid->locator->sid_format &&
+ sid->locator->sid_format->type ==
+ SRV6_SID_FORMAT_TYPE_USID) ||
+ (!sid->locator->sid_format &&
+ CHECK_FLAG(sid->locator->flags, SRV6_LOCATOR_USID))) {
+ switch (sid->ctx->ctx.behavior) {
+ case ZEBRA_SEG6_LOCAL_ACTION_END:
+ snprintf(behavior, sizeof(behavior), "uN");
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_X:
+ snprintf(behavior, sizeof(behavior), "uA");
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DX6:
+ snprintf(behavior, sizeof(behavior), "uDX6");
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DX4:
+ snprintf(behavior, sizeof(behavior), "uDX4");
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DT6:
+ snprintf(behavior, sizeof(behavior), "uDT6");
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DT4:
+ snprintf(behavior, sizeof(behavior), "uDT4");
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DT46:
+ snprintf(behavior, sizeof(behavior), "uDT46");
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_UNSPEC:
+ snprintf(behavior, sizeof(behavior), "unspec");
+ break;
+ case ZEBRA_SEG6_LOCAL_ACTION_END_T:
+ case ZEBRA_SEG6_LOCAL_ACTION_END_DX2:
+ case ZEBRA_SEG6_LOCAL_ACTION_END_B6:
+ case ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP:
+ case ZEBRA_SEG6_LOCAL_ACTION_END_BM:
+ case ZEBRA_SEG6_LOCAL_ACTION_END_S:
+ case ZEBRA_SEG6_LOCAL_ACTION_END_AS:
+ case ZEBRA_SEG6_LOCAL_ACTION_END_AM:
+ case ZEBRA_SEG6_LOCAL_ACTION_END_BPF:
+ default:
+ snprintf(behavior, sizeof(behavior), "unknown");
+ }
+ } else {
+ snprintf(behavior, sizeof(behavior), "%s",
+ seg6local_action2str(sid->ctx->ctx.behavior));
+ }
+ }
+
+ /* SID context */
+ 
switch (sid->ctx->ctx.behavior) { + case ZEBRA_SEG6_LOCAL_ACTION_END: + break; + case ZEBRA_SEG6_LOCAL_ACTION_END_X: + case ZEBRA_SEG6_LOCAL_ACTION_END_DX6: + case ZEBRA_SEG6_LOCAL_ACTION_END_DX4: + RB_FOREACH (vrf, vrf_id_head, &vrfs_by_id) { + ifp = if_lookup_by_index(sid->ctx->ctx.ifindex, + vrf->vrf_id); + if (ifp) + snprintf(ctx, sizeof(ctx), "Interface '%s'", + ifp->name); + } + break; + case ZEBRA_SEG6_LOCAL_ACTION_END_T: + case ZEBRA_SEG6_LOCAL_ACTION_END_DT6: + case ZEBRA_SEG6_LOCAL_ACTION_END_DT4: + case ZEBRA_SEG6_LOCAL_ACTION_END_DT46: + vrf = vrf_lookup_by_id(sid->ctx->ctx.vrf_id); + snprintf(ctx, sizeof(ctx), "VRF '%s'", + vrf ? vrf->name : ""); + break; + case ZEBRA_SEG6_LOCAL_ACTION_END_DX2: + case ZEBRA_SEG6_LOCAL_ACTION_END_B6: + case ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP: + case ZEBRA_SEG6_LOCAL_ACTION_END_BM: + case ZEBRA_SEG6_LOCAL_ACTION_END_S: + case ZEBRA_SEG6_LOCAL_ACTION_END_AS: + case ZEBRA_SEG6_LOCAL_ACTION_END_AM: + case ZEBRA_SEG6_LOCAL_ACTION_END_BPF: + case ZEBRA_SEG6_LOCAL_ACTION_UNSPEC: + default: + break; + } + + if (strlen(ctx) == 0) + snprintf(ctx, sizeof(ctx), "-"); + + ttable_add_row(tt, "%pI6|%s|%s|%s", &sid->value, behavior, ctx, clients); +} + +static void do_show_srv6_sid_detail(struct vty *vty, json_object **json, + struct srv6_locator *locator, + struct zebra_srv6_sid_ctx *sid_ctx) +{ + struct ttable *tt; + + /* Prepare table. 
*/
+ tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]);
+ ttable_add_row(tt, "SID|Behavior|Context|Daemon/Instance");
+ tt->style.cell.rpad = 2;
+ tt->style.corner = ' ';
+ ttable_restyle(tt);
+ ttable_rowseps(tt, 0, BOTTOM, true, '-');
+
+ if (!sid_ctx || !sid_ctx->sid ||
+ (locator && sid_ctx->sid->locator != locator)) {
+ ttable_del(tt);
+ return;
+ }
+
+ do_show_srv6_sid_line(tt, sid_ctx->sid);
+
+ ttable_colseps(tt, 0, RIGHT, true, ' ');
+ ttable_colseps(tt, 1, LEFT, true, ' ');
+ ttable_colseps(tt, 1, RIGHT, true, ' ');
+ ttable_colseps(tt, 2, LEFT, true, ' ');
+ ttable_colseps(tt, 2, RIGHT, true, ' ');
+ ttable_colseps(tt, 3, LEFT, true, ' ');
+
+ /* Dump the generated table. */
+ if (tt->nrows > 1) {
+ if (json) {
+ *json = ttable_json_with_json_text(tt, "ssss",
+ "sid|behavior|context|daemons");
+ } else {
+ char *table;
+
+ table = ttable_dump(tt, "\n");
+ vty_out(vty, "%s\n", table);
+ XFREE(MTYPE_TMP, table);
+ }
+ }
+ ttable_del(tt);
+
+ if (!json) {
+ vty_out(vty, " Locator: %s\n",
+ sid_ctx->sid->locator ? sid_ctx->sid->locator->name
+ : "-");
+ vty_out(vty, " Allocation type: %s\n",
+ srv6_sid_alloc_mode2str(sid_ctx->sid->alloc_mode));
+ }
+}
+
+static void do_show_srv6_sid(struct vty *vty, json_object **json,
+ struct srv6_locator *locator,
+ struct zebra_srv6_sid_ctx *sid_ctx)
+{
+ struct zebra_srv6 *srv6 = zebra_srv6_get_default();
+ struct zebra_srv6_sid_ctx *ctx;
+ struct listnode *node;
+ struct ttable *tt;
+
+ /* Prepare table. 
*/ + tt = ttable_new(&ttable_styles[TTSTYLE_BLANK]); + ttable_add_row(tt, "SID|Behavior|Context|Daemon/Instance"); + tt->style.cell.rpad = 2; + tt->style.corner = ' '; + ttable_restyle(tt); + ttable_rowseps(tt, 0, BOTTOM, true, '-'); + + for (ALL_LIST_ELEMENTS_RO(srv6->sids, node, ctx)) { + /* Skip contexts not associated with any SID */ + if (!ctx->sid) + continue; + + /* Skip SIDs from locators we are not interested in */ + if (locator && ctx->sid->locator != locator) + continue; + + /* Skip SIDs we are not interested in */ + if (sid_ctx && sid_ctx != ctx) + continue; + + do_show_srv6_sid_line(tt, ctx->sid); + } + + ttable_colseps(tt, 0, RIGHT, true, ' '); + ttable_colseps(tt, 1, LEFT, true, ' '); + ttable_colseps(tt, 1, RIGHT, true, ' '); + ttable_colseps(tt, 2, LEFT, true, ' '); + ttable_colseps(tt, 2, RIGHT, true, ' '); + ttable_colseps(tt, 3, LEFT, true, ' '); + + /* Dump the generated table. */ + if (tt->nrows > 1) { + if (json) { + *json = ttable_json_with_json_text(tt, "ssss", + "sid|behavior|context|daemons"); + } else { + char *table; + + table = ttable_dump(tt, "\n"); + vty_out(vty, "%s\n", table); + XFREE(MTYPE_TMP, table); + } + } + ttable_del(tt); +} + +DEFPY (show_srv6_sid, + show_srv6_sid_cmd, + "show segment-routing srv6 [locator NAME$locator_name] sid [X:X::X:X$sid_value [detail$detail]] [json]", + SHOW_STR + "Segment Routing\n" + "Segment Routing SRv6\n" + "Locator Information\n" + "Locator Name\n" + "SID\n" + "SID value\n" + "Detailed information\n" + JSON_STR) +{ + bool uj = use_json(argc, argv); + struct zebra_srv6 *srv6 = zebra_srv6_get_default(); + struct srv6_locator *locator = NULL; + struct zebra_srv6_sid_ctx *sid_ctx = NULL, *c; + struct listnode *node; + json_object *json = NULL; + + + if (locator_name) { + locator = zebra_srv6_locator_lookup(locator_name); + if (!locator) { + vty_out(vty, "%% Can't find the SRv6 locator\n"); + return CMD_WARNING; + } + } + + if (!IPV6_ADDR_SAME(&sid_value, &in6addr_any)) { + for 
(ALL_LIST_ELEMENTS_RO(srv6->sids, node, c)) { + if (c->sid && + IPV6_ADDR_SAME(&c->sid->value, &sid_value)) { + sid_ctx = c; + break; + } + } + + if (!sid_ctx) { + vty_out(vty, "%% Can't find the SRv6 SID\n"); + return CMD_WARNING; + } + } + + if (locator && sid_ctx) + if (!sid_ctx->sid || sid_ctx->sid->locator != locator) { + vty_out(vty, + "%% Can't find the SRv6 SID in the provided locator\n"); + return CMD_WARNING; + } + + if (uj) + json = json_object_new_object(); + + if (detail) + do_show_srv6_sid_detail(vty, uj ? &json : NULL, locator, + sid_ctx); + else + do_show_srv6_sid(vty, uj ? &json : NULL, locator, sid_ctx); + + if (uj) + vty_json(vty, json); + + return CMD_SUCCESS; +} + DEFUN_NOSH (segment_routing, segment_routing_cmd, "segment-routing", @@ -361,6 +661,7 @@ DEFUN (no_srv6_locator, struct listnode *node, *nnode; struct zebra_srv6_sid_ctx *ctx; struct srv6_locator *locator = zebra_srv6_locator_lookup(argv[2]->arg); + if (!locator) { vty_out(vty, "%% Can't find SRv6 locator\n"); return CMD_WARNING_CONFIG_FAILED; @@ -1124,4 +1425,5 @@ void zebra_srv6_vty_init(void) install_element(VIEW_NODE, &show_srv6_locator_cmd); install_element(VIEW_NODE, &show_srv6_locator_detail_cmd); install_element(VIEW_NODE, &show_srv6_manager_cmd); + install_element(VIEW_NODE, &show_srv6_sid_cmd); } From be33f8cc928d6c12109f942e2f10380fb9193bef Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Fri, 7 Jun 2024 13:33:05 +0200 Subject: [PATCH 28/45] pathd: add 'debug pathd zebra' command Some debug traces are visible by default whereas it should not: > ... Registering nexthop(2001:db8::1/128) for candidate srte_ipv6 pref 10 Add a 'debug pathd zebra' command. By default, traces are not visible. 
Signed-off-by: Philippe Guibert
Acked-by: Dmytro Shytyi
---
 pathd/path_cli.c | 20 +++++++++++++++++++-
 pathd/path_zebra.c | 8 ++++----
 pathd/pathd.c | 18 ++++++++++++++----
 pathd/pathd.h | 16 ++++++++++++++++
 4 files changed, 53 insertions(+), 9 deletions(-)

diff --git a/pathd/path_cli.c b/pathd/path_cli.c
index 0d68926e0202..45092e38e810 100644
--- a/pathd/path_cli.c
+++ b/pathd/path_cli.c
@@ -1105,7 +1105,10 @@ DEFPY_NOSH(show_debugging_pathd, show_debugging_pathd_cmd,
 vty_out(vty, "Path debugging status:\n");
 cmd_show_lib_debugs(vty);
-
+ /* nothing to do here */
+ path_ted_show_debugging(vty);
+ path_policy_show_debugging(vty);
+ path_zebra_show_debugging(vty);
 return CMD_SUCCESS;
 }
@@ -1120,6 +1123,19 @@ DEFPY(debug_path_policy, debug_path_policy_cmd, "[no] debug pathd policy",
 return CMD_SUCCESS;
 }
+DEFPY(debug_path_zebra, debug_path_zebra_cmd, "[no] debug pathd zebra",
+ NO_STR DEBUG_STR
+ "path debugging\n"
+ "zebra debugging\n")
+{
+ uint32_t mode = DEBUG_NODE2MODE(vty->node);
+ bool no_debug = no;
+
+ DEBUG_MODE_SET(&path_zebra_debug, mode, !no);
+ DEBUG_FLAGS_SET(&path_zebra_debug, PATH_ZEBRA_DEBUG_BASIC, !no_debug);
+ return CMD_SUCCESS;
+}
+
 static const char *metric_type_name(enum srte_candidate_metric_type type)
 {
 switch (type) {
@@ -1340,6 +1356,8 @@ void path_cli_init(void)
 install_element(ENABLE_NODE, &debug_path_policy_cmd);
 install_element(CONFIG_NODE, &debug_path_policy_cmd);
+ install_element(ENABLE_NODE, &debug_path_zebra_cmd);
+ install_element(CONFIG_NODE, &debug_path_zebra_cmd);
 install_element(CONFIG_NODE, &segment_routing_cmd);
 install_element(SEGMENT_ROUTING_NODE, &sr_traffic_eng_cmd);
diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c
index 5dcae859da96..cb0af431e565 100644
--- a/pathd/path_zebra.c
+++ b/pathd/path_zebra.c
@@ -257,8 +257,8 @@ static void path_zebra_add_srv6_policy_internal(struct srte_policy *policy)
 return;
 cmd = ZEBRA_NEXTHOP_REGISTER;
- zlog_debug("Registering nexthop(%pFX) for candidate %s pref %u",
- 
&lookup.nh, candidate->name, candidate->preference); + PATH_ZEBRA_DEBUG("Registering nexthop(%pFX) for candidate %s pref %u", + &lookup.nh, candidate->name, candidate->preference); if (zclient_send_rnh(zclient, cmd, &lookup.nh, SAFI_UNICAST, false, false, VRF_DEFAULT) == ZCLIENT_SEND_FAILURE) @@ -393,8 +393,8 @@ void path_nht_removed(struct srte_candidate *candidate) if (!was_zebra_registered) return; - zlog_debug("Unregistering nexthop(%pFX) for candidate %s pref %u", - &lookup.nh, candidate->name, candidate->preference); + PATH_ZEBRA_DEBUG("Unregistering nexthop(%pFX) for candidate %s pref %u", + &lookup.nh, candidate->name, candidate->preference); if (zclient_send_rnh(zclient, ZEBRA_NEXTHOP_UNREGISTER, &lookup.nh, SAFI_UNICAST, false, false, diff --git a/pathd/pathd.c b/pathd/pathd.c index bfd9b35c3f53..c6ed48aeac8e 100644 --- a/pathd/pathd.c +++ b/pathd/pathd.c @@ -33,10 +33,8 @@ DEFINE_HOOK(pathd_candidate_updated, (struct srte_candidate * candidate), DEFINE_HOOK(pathd_candidate_removed, (struct srte_candidate * candidate), (candidate)); -struct debug path_policy_debug = { - .conf = "debug pathd policy", - .desc = "Pathd policy", -}; +struct debug path_policy_debug; +struct debug path_zebra_debug; #define PATH_POLICY_DEBUG(fmt, ...) 
\
 DEBUGD(&path_policy_debug, "policy: " fmt, ##__VA_ARGS__)
@@ -1279,6 +1277,18 @@ const char *srte_origin2str(enum srte_protocol_origin origin)
 assert(!"Reached end of function we should never hit");
 }
+void path_policy_show_debugging(struct vty *vty)
+{
+ if (DEBUG_FLAGS_CHECK(&path_policy_debug, PATH_POLICY_DEBUG_BASIC))
+ vty_out(vty, " Path policy debugging is on\n");
+}
+
+void path_zebra_show_debugging(struct vty *vty)
+{
+ if (DEBUG_FLAGS_CHECK(&path_zebra_debug, PATH_ZEBRA_DEBUG_BASIC))
+ vty_out(vty, " Path zebra debugging is on\n");
+}
+
 void pathd_shutdown(void)
 {
 path_ted_teardown();
diff --git a/pathd/pathd.h b/pathd/pathd.h
index e7b840a88490..1a328937dd87 100644
--- a/pathd/pathd.h
+++ b/pathd/pathd.h
@@ -32,6 +32,20 @@ enum srte_protocol_origin {
 extern struct debug path_policy_debug;
+#define PATH_POLICY_DEBUG_BASIC 0x01
+
+extern struct debug path_zebra_debug;
+
+#define PATH_ZEBRA_DEBUG_BASIC 0x01
+
+#define PATH_ZEBRA_DEBUG(fmt, ...) \
+ do { \
+ if (DEBUG_FLAGS_CHECK(&path_zebra_debug, \
+ PATH_ZEBRA_DEBUG_BASIC)) \
+ DEBUGD(&path_zebra_debug, "zebra: " fmt, \
+ ##__VA_ARGS__); \
+ } while (0)
+
 enum srte_policy_status {
 SRTE_POLICY_STATUS_UNKNOWN = 0,
 SRTE_POLICY_STATUS_DOWN = 1,
@@ -439,6 +453,8 @@ void srte_candidate_status_update(struct srte_candidate *candidate, int status);
 void srte_candidate_unset_segment_list(const char *originator, bool force);
 const char *srte_origin2str(enum srte_protocol_origin origin);
 void pathd_shutdown(void);
+void path_zebra_show_debugging(struct vty *vty);
+void path_policy_show_debugging(struct vty *vty);
 /* path_cli.c */
 void path_cli_init(void);
From a60009d7167c1c34271da51296bd43adee536031 Mon Sep 17 00:00:00 2001
From: Philippe Guibert
Date: Fri, 7 Jun 2024 14:28:57 +0200
Subject: [PATCH 29/45] lib: add srv6 context support for End.B6.Encap SID

The SID manager should be able to provide SID to SRTE policies. 
An explicit SID value should be proposed by the pathd daemon to ZEBRA, based on an usage context. The usage context is defined by the srv6_sid_ctx structure, and should help an operator to know the reason why a SID has been allocated. For SRTE policies, the usage is a binding SID IPv6 attached to a SRTE policy. The sid context should be based on the color and the nexthop of the policy. Propose to extend the srv6_sid_ctx structure by adding the 'color' attribute, as the ipv6 nexthop is already present. Signed-off-by: Philippe Guibert Acked-by: Dmytro Shytyi --- lib/srv6.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/srv6.h b/lib/srv6.h index c6b2a1853711..0281c791d70e 100644 --- a/lib/srv6.h +++ b/lib/srv6.h @@ -254,6 +254,7 @@ struct srv6_sid_ctx { /* Behavior-specific attributes */ struct in_addr nh4; struct in6_addr nh6; + uint32_t color; vrf_id_t vrf_id; ifindex_t ifindex; }; @@ -355,9 +356,13 @@ static inline const char *srv6_sid_ctx2str(char *str, size_t size, snprintf(str + len, size - len, " vrf_id %u", ctx->vrf_id); break; + case ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP: + len += snprintf(str + len, size - len, " nh6 %pI6 color %u", + &ctx->nh6, ctx->color); + break; + case ZEBRA_SEG6_LOCAL_ACTION_END_DX2: case ZEBRA_SEG6_LOCAL_ACTION_END_B6: - case ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP: case ZEBRA_SEG6_LOCAL_ACTION_END_BM: case ZEBRA_SEG6_LOCAL_ACTION_END_S: case ZEBRA_SEG6_LOCAL_ACTION_END_AS: From 1d908c6fac69677293cd7b9108934d9394647936 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 29 Dec 2023 15:07:39 +0100 Subject: [PATCH 30/45] doc: add srv6 te policy bsid Add srv6-binding-sid X:X::X:X to Traffic Engineering Policy. End.B6.Encaps - Endpoint bound to an SRv6 Policy with encapsulation SRv6 instantiation of a Binding SID. 
Signed-off-by: Dmytro Shytyi --- doc/user/pathd.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/doc/user/pathd.rst b/doc/user/pathd.rst index e98bbfaa9a9b..9248407a9783 100644 --- a/doc/user/pathd.rst +++ b/doc/user/pathd.rst @@ -307,6 +307,9 @@ Configuration Commands Specify the policy SID. +.. clicmd:: srv6-binding-sid X:X::X:X + + Specify the policy SRv6 SID .. clicmd:: candidate-path preference PREFERENCE name NAME explicit segment-list SEGMENT-LIST-NAME From a30deff21bb8f881ce625854abc8381e2679cda9 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 29 Dec 2023 14:12:53 +0100 Subject: [PATCH 31/45] lib, pathd, yang: add srv6_bsid, cli and nb handlers Add SRv6 Binding SID to YANG model, pathd and zclient headers. Provide CLI and NB handlers for SRv6 Binding SID. Signed-off-by: Dmytro Shytyi Signed-off-by: Philippe Guibert --- lib/srv6.c | 3 ++- pathd/path_cli.c | 38 ++++++++++++++++++++++++++++++++++++-- pathd/path_nb.c | 8 ++++++++ pathd/path_nb.h | 5 +++++ pathd/path_nb_config.c | 41 +++++++++++++++++++++++++++++++++++++++++ pathd/pathd.c | 25 +++++++++++++++++++++++++ pathd/pathd.h | 5 +++++ yang/frr-pathd.yang | 5 +++++ 8 files changed, 127 insertions(+), 3 deletions(-) diff --git a/lib/srv6.c b/lib/srv6.c index e6fc375fbb11..0248e792059e 100644 --- a/lib/srv6.c +++ b/lib/srv6.c @@ -135,11 +135,12 @@ const char *seg6local_context2str(char *str, size_t size, snprintf(str, size, "table %u", ctx->table); return str; + case ZEBRA_SEG6_LOCAL_ACTION_END_DX2: + return str; case ZEBRA_SEG6_LOCAL_ACTION_END_B6: case ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP: snprintfrr(str, size, "nh6 %pI6", &ctx->nh6); return str; - case ZEBRA_SEG6_LOCAL_ACTION_END_DX2: case ZEBRA_SEG6_LOCAL_ACTION_END_BM: case ZEBRA_SEG6_LOCAL_ACTION_END_S: case ZEBRA_SEG6_LOCAL_ACTION_END_AS: diff --git a/pathd/path_cli.c b/pathd/path_cli.c index 45092e38e810..bf2c76848111 100644 --- a/pathd/path_cli.c +++ b/pathd/path_cli.c @@ -114,9 +114,13 @@ DEFPY(show_srte_policy, RB_FOREACH (policy, 
srte_policy_head, &srte_policies) { char endpoint[ENDPOINT_STR_LENGTH]; - char binding_sid[16] = "-"; + char binding_sid[ENDPOINT_STR_LENGTH] = "-"; + struct in6_addr ipv6_zero = {}; ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + if (!IPV6_ADDR_SAME(&policy->srv6_binding_sid, &ipv6_zero)) + sid2str(&policy->srv6_binding_sid, binding_sid, + ENDPOINT_STR_LENGTH); if (policy->binding_sid != MPLS_LABEL_NONE) snprintf(binding_sid, sizeof(binding_sid), "%u", policy->binding_sid); @@ -161,13 +165,15 @@ DEFPY(show_srte_policy_detail, RB_FOREACH (policy, srte_policy_head, &srte_policies) { struct srte_candidate *candidate; char endpoint[ENDPOINT_STR_LENGTH]; - char binding_sid[16] = "-"; + char binding_sid[ENDPOINT_STR_LENGTH] = "-"; char *segment_list_info; static char undefined_info[] = "(undefined)"; static char created_by_pce_info[] = "(created by PCE)"; ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + if (!sid_zero_ipv6(&policy->srv6_binding_sid)) + sid2str(&policy->srv6_binding_sid, binding_sid, 128); if (policy->binding_sid != MPLS_LABEL_NONE) snprintf(binding_sid, sizeof(binding_sid), "%u", policy->binding_sid); @@ -749,6 +755,32 @@ void cli_show_srte_policy_binding_sid(struct vty *vty, vty_out(vty, " binding-sid %s\n", yang_dnode_get_string(dnode, NULL)); } +/* + * XPath: /frr-pathd:pathd/srte/policy/srv6-binding-sid + */ +DEFPY(srte_policy_srv6_binding_sid, srte_policy_srv6_binding_sid_cmd, + "[no] srv6-binding-sid X:X::X:X$ipv6address", + NO_STR "Segment Routing Policy SRv6-Binding-SID\n" + "SR Policy SRv6-Binding-SID ipv6-address\n") +{ + if (!no) + nb_cli_enqueue_change(vty, "./srv6-binding-sid", NB_OP_CREATE, + ipv6address_str); + else + nb_cli_enqueue_change(vty, "./srv6-binding-sid", NB_OP_DESTROY, + NULL); + + return nb_cli_apply_changes(vty, NULL); +} + +void cli_show_srte_policy_srv6_binding_sid(struct vty *vty, + const struct lyd_node *dnode, + bool show_defaults) +{ + vty_out(vty, " srv6-binding-sid %s\n", + 
yang_dnode_get_string(dnode, NULL)); +} + /* * XPath: /frr-pathd:pathd/srte/policy/candidate-path */ @@ -1365,6 +1397,7 @@ void path_cli_init(void) install_element(SR_TRAFFIC_ENG_NODE, &srte_no_segment_list_cmd); install_element(SR_SEGMENT_LIST_NODE, &srte_segment_list_segment_cmd); + install_element(SR_SEGMENT_LIST_NODE, &srte_segment_list_no_segment_cmd); install_element(SR_TRAFFIC_ENG_NODE, &srte_policy_cmd); @@ -1373,6 +1406,7 @@ void path_cli_init(void) install_element(SR_POLICY_NODE, &srte_policy_no_name_cmd); install_element(SR_POLICY_NODE, &srte_policy_binding_sid_cmd); install_element(SR_POLICY_NODE, &srte_policy_no_binding_sid_cmd); + install_element(SR_POLICY_NODE, &srte_policy_srv6_binding_sid_cmd); install_element(SR_POLICY_NODE, &srte_policy_candidate_exp_cmd); install_element(SR_POLICY_NODE, &srte_policy_candidate_dyn_cmd); install_element(SR_POLICY_NODE, &srte_policy_no_candidate_cmd); diff --git a/pathd/path_nb.c b/pathd/path_nb.c index 29a143a3e1af..bbd3efc375c0 100644 --- a/pathd/path_nb.c +++ b/pathd/path_nb.c @@ -155,6 +155,14 @@ const struct frr_yang_module_info frr_pathd_info = { .destroy = pathd_srte_policy_binding_sid_destroy, } }, + { + .xpath = "/frr-pathd:pathd/srte/policy/srv6-binding-sid", + .cbs = { + .modify = pathd_srte_policy_srv6_binding_sid_modify, + .cli_show = cli_show_srte_policy_srv6_binding_sid, + .destroy = pathd_srte_policy_srv6_binding_sid_destroy, + } + }, { .xpath = "/frr-pathd:pathd/srte/policy/is-operational", .cbs = { diff --git a/pathd/path_nb.h b/pathd/path_nb.h index b89851e73929..113354047614 100644 --- a/pathd/path_nb.h +++ b/pathd/path_nb.h @@ -46,6 +46,8 @@ int pathd_srte_policy_name_modify(struct nb_cb_modify_args *args); int pathd_srte_policy_name_destroy(struct nb_cb_destroy_args *args); int pathd_srte_policy_binding_sid_modify(struct nb_cb_modify_args *args); int pathd_srte_policy_binding_sid_destroy(struct nb_cb_destroy_args *args); +int pathd_srte_policy_srv6_binding_sid_modify(struct nb_cb_modify_args 
*args); +int pathd_srte_policy_srv6_binding_sid_destroy(struct nb_cb_destroy_args *args); struct yang_data * pathd_srte_policy_is_operational_get_elem(struct nb_cb_get_elem_args *args); int pathd_srte_policy_candidate_path_create(struct nb_cb_create_args *args); @@ -116,6 +118,9 @@ void cli_show_srte_policy_name(struct vty *vty, const struct lyd_node *dnode, void cli_show_srte_policy_binding_sid(struct vty *vty, const struct lyd_node *dnode, bool show_defaults); +void cli_show_srte_policy_srv6_binding_sid(struct vty *vty, + const struct lyd_node *dnode, + bool show_defaults); void cli_show_srte_policy_candidate_path(struct vty *vty, const struct lyd_node *dnode, bool show_defaults); diff --git a/pathd/path_nb_config.c b/pathd/path_nb_config.c index f9dc82b58f9b..000836fc8971 100644 --- a/pathd/path_nb_config.c +++ b/pathd/path_nb_config.c @@ -396,6 +396,47 @@ int pathd_srte_policy_binding_sid_destroy(struct nb_cb_destroy_args *args) return NB_OK; } +/* + * XPath: /frr-pathd:pathd/srte/policy/srv6-binding-sid + */ +int pathd_srte_policy_srv6_binding_sid_modify(struct nb_cb_modify_args *args) +{ + struct srte_policy *policy; + struct in6_addr srv6_binding_sid; + + yang_dnode_get_ipv6(&srv6_binding_sid, args->dnode, NULL); + + switch (args->event) { + case NB_EV_VALIDATE: + break; + case NB_EV_PREPARE: + break; + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + policy = nb_running_get_entry(args->dnode, NULL, true); + srte_policy_update_srv6_binding_sid(policy, &srv6_binding_sid); + SET_FLAG(policy->flags, F_POLICY_MODIFIED); + break; + } + + return NB_OK; +} + +int pathd_srte_policy_srv6_binding_sid_destroy(struct nb_cb_destroy_args *args) +{ + struct srte_policy *policy; + + if (args->event != NB_EV_APPLY) + return NB_OK; + + policy = nb_running_get_entry(args->dnode, NULL, true); + srte_policy_update_srv6_binding_sid(policy, NULL); + SET_FLAG(policy->flags, F_POLICY_MODIFIED); + + return NB_OK; +} + /* * XPath: /frr-pathd:pathd/srte/policy/candidate-path */ diff 
--git a/pathd/pathd.c b/pathd/pathd.c index c6ed48aeac8e..39a984fbc43a 100644 --- a/pathd/pathd.c +++ b/pathd/pathd.c @@ -17,6 +17,7 @@ #include "pathd/path_zebra.h" #include "pathd/path_debug.h" #include "pathd/path_ted.h" +#include "srv6.h" #define HOOK_DELAY 3 @@ -505,6 +506,30 @@ void srte_policy_update_binding_sid(struct srte_policy *policy, policy, policy->best_candidate->lsp->segment_list); } +/** + * Update a policy SRv6 binding SID. + * + * @param policy The policy for which the SID should be updated + * @param srv6_binding_sid The new binding SID for the given policy + */ +void srte_policy_update_srv6_binding_sid(struct srte_policy *policy, + struct in6_addr *srv6_binding_sid) +{ + struct in6_addr srv6_binding_sid_zero = {}; + + + if (srv6_binding_sid) + IPV6_ADDR_COPY(&policy->srv6_binding_sid, srv6_binding_sid); + else + IPV6_ADDR_COPY(&policy->srv6_binding_sid, + &srv6_binding_sid_zero); + + /* Reinstall the Binding-SID if necessary. */ + if (policy->best_candidate) + path_zebra_add_sr_policy(policy, policy->best_candidate->lsp + ->segment_list); +} + /** * Gives the policy best candidate path. * diff --git a/pathd/pathd.h b/pathd/pathd.h index 1a328937dd87..0746175f6f24 100644 --- a/pathd/pathd.h +++ b/pathd/pathd.h @@ -350,6 +350,9 @@ struct srte_policy { /* Binding SID */ mpls_label_t binding_sid; + /* SRv6 Binding SID */ + struct in6_addr srv6_binding_sid; + /* The Protocol-Origin. 
*/ enum srte_protocol_origin protocol_origin; @@ -412,6 +415,8 @@ struct srte_policy *srte_policy_find(uint32_t color, struct ipaddr *endpoint); int srte_policy_update_ted_sid(void); void srte_policy_update_binding_sid(struct srte_policy *policy, uint32_t binding_sid); +void srte_policy_update_srv6_binding_sid(struct srte_policy *policy, + struct in6_addr *srv6_binding_sid); void srte_apply_changes(void); void srte_clean_zebra(void); void srte_policy_apply_changes(struct srte_policy *policy); diff --git a/yang/frr-pathd.yang b/yang/frr-pathd.yang index 7aca37beb36f..2225b0de998c 100644 --- a/yang/frr-pathd.yang +++ b/yang/frr-pathd.yang @@ -197,6 +197,11 @@ module frr-pathd { description "BSID of the SR Policy."; } + leaf srv6-binding-sid { + type inet:ipv6-address; + description + "SRv6 BSID of the SR policy."; + } leaf is-operational { type boolean; config false; From b27d011c001ca54e91d3d4ac129376c1b4ac4b82 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Sun, 3 Dec 2023 22:33:55 +0100 Subject: [PATCH 32/45] fix: lib: process nexthop_resolved in policy_encode Encode the resolved nexthops number when zapi_sr_policy_encode is called. Fixes: 223d488 ("pathd, lib: add srv6 policy with resolved nexthop info") Signed-off-by: Dmytro Shytyi --- lib/zclient.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/lib/zclient.c b/lib/zclient.c index ee6a7fdee044..dd4e3e4583f4 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -3905,6 +3905,15 @@ int zapi_sr_policy_encode(struct stream *s, int cmd, struct zapi_sr_policy *zp) stream_putl(s, zt->metric); stream_putc(s, zt->distance); + stream_putw(s, zt->nexthop_resolved_num); + + for (i = 0; i < zt->nexthop_resolved_num; i++) { + znh = &zt->nexthop_resolved[i]; + + if (zapi_nexthop_encode(s, znh, 0, 0) < 0) + return -1; + } + /* Put length at the first point of the stream. 
*/ stream_putw_at(s, 0, stream_get_endp(s)); From 93ac7702a8b0a60e32dcaad56452f69b7b558641 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 29 Dec 2023 14:13:56 +0100 Subject: [PATCH 33/45] lib: add zebra_route_srte symbol Change symbol for ZEBRA_ROUTE_SRTE from "-" to "p". It will be used by the further commits to display installed SRv6 Binding SID. Signed-off-by: Dmytro Shytyi --- lib/route_types.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lib/route_types.txt b/lib/route_types.txt index 93cbc36e97bd..a8c34ac0ef85 100644 --- a/lib/route_types.txt +++ b/lib/route_types.txt @@ -86,7 +86,7 @@ ZEBRA_ROUTE_BFD, bfd, bfdd, '-', 0, 0, 0, "BFD", bf ZEBRA_ROUTE_OPENFABRIC, openfabric, fabricd, 'f', 1, 1, 1, "OpenFabric", fabricd ZEBRA_ROUTE_VRRP, vrrp, vrrpd, '-', 0, 0, 0, "VRRP", vrrpd ZEBRA_ROUTE_NHG, zebra, none, '-', 0, 0, 0, "Nexthop Group", none -ZEBRA_ROUTE_SRTE, srte, none, '-', 0, 0, 0, "SR-TE", none +ZEBRA_ROUTE_SRTE, srte, none, 'p', 0, 0, 0, "SR-TE", none ZEBRA_ROUTE_TABLE_DIRECT, table-direct, zebra, 't', 1, 1, 1, "Table-Direct", zebra ZEBRA_ROUTE_ALL, wildcard, none, '-', 0, 0, 0, "-", none From 66eb89fba479aa2b5f28354c397e240c4f570269 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Sat, 23 Mar 2024 03:52:02 +0100 Subject: [PATCH 34/45] lib: add missing SR-TE to ipv6 route show command Add "p - SR-TE" field to the list. 
rt2# show ipv6 route Codes: K - kernel route, C - connected, L - local, S - static, R - RIPng, O - OSPFv3, I - IS-IS, B - BGP, N - NHRP, T - Table, v - VNC, V - VNC-Direct, A - Babel, D - SHARP, F - PBR, f - OpenFabric, t - Table-Direct, p - SR-TE > - selected route, * - FIB route, q - queued, r - rejected, b - backup t - trapped, o - offload failure Signed-off-by: Dmytro Shytyi --- lib/route_types.pl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/route_types.pl b/lib/route_types.pl index c75a86696471..7d6d28c8283c 100755 --- a/lib/route_types.pl +++ b/lib/route_types.pl @@ -113,7 +113,8 @@ sub codelist { $str .= $s; } $str =~ s/ $//; - push @lines, $str . "\\n\" \\\n"; + push @lines, $str . " p - SR-TE,"; + push @lines, "\\n\" \\\n"; push @lines, " \" > - selected route, * - FIB route, q - queued, r - rejected, b - backup\\n\""; push @lines, " \" t - trapped, o - offload failure\\n\\n\""; @@ -131,7 +132,7 @@ sub collect { my (@names, @help) = ((), ()); for my $p (@protos) { next if ($protodetail{$p}->{"daemon"} eq $daemon && $daemon ne "zebra"); - next if ($protodetail{$p}->{"restrict2"} ne "" && + next if ($protodetail{$p}->{"restrict2"} ne "" && $protodetail{$p}->{"restrict2"} ne $daemon); next if ($protodetail{$p}->{"redist"} eq 0); next unless (grep $_ eq $protodetail{$p}->{"enabled"}, @enabled); @@ -210,4 +211,3 @@ sub collect { #endif /* _FRR_ROUTE_TYPES_H */ EOF - From 2ac34a9de6837add436018d0fb90ef274f398f9b Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 29 Dec 2023 14:30:59 +0100 Subject: [PATCH 35/45] pathd: add path_zebra_send_bsid() We use ZAPI_ROUTE_ADD/DELETE, as SRv6 treatment is based on the routes and not MPLS entries. Set SRv6 binding SID in zebra sr policy from srte_policy. 
Signed-off-by: Dmytro Shytyi --- pathd/path_zebra.c | 81 ++++++++++++++++++++++++++++++++++++++++++++-- pathd/path_zebra.h | 5 +++ pathd/pathd.c | 6 ++++ 3 files changed, 90 insertions(+), 2 deletions(-) diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c index cb0af431e565..d63adad18974 100644 --- a/pathd/path_zebra.c +++ b/pathd/path_zebra.c @@ -404,6 +404,75 @@ void path_nht_removed(struct srte_candidate *candidate) candidate->preference); } +/** + * Send SRv6 SID to ZEBRA for installation or deletion. + * + * @param cmd ZEBRA_ROUTE_ADD or ZEBRA_ROUTE_DELETE + * @param sid SRv6 BSID to install or delete + * @param prefixlen Prefix length + * @param oif Outgoing interface + * @param action SID action + * @param context SID context + */ +void path_zebra_send_bsid(const struct in6_addr *bsid, ifindex_t oif, + enum seg6local_action_t action, + struct in6_addr *srv6_segs, int num_segs) +{ + struct prefix_ipv6 p = {}; + struct zapi_route api = {}; + struct zapi_nexthop *znh; + uint16_t prefixlen = IPV6_MAX_BITLEN; + char opaque[1024]; + + if (prefixlen > IPV6_MAX_BITLEN) { + flog_warn(EC_LIB_DEVELOPMENT, "%s: wrong prefixlen %u", + __func__, prefixlen); + return; + } + + opaque[0] = '\0'; + p.family = AF_INET6; + p.prefixlen = prefixlen; + memcpy(&p.prefix, bsid, IPV6_MAX_BYTELEN); + + api.vrf_id = VRF_DEFAULT; + api.type = ZEBRA_ROUTE_SRTE; + api.distance = 50; + + api.safi = SAFI_UNICAST; + memcpy(&api.prefix, &p, sizeof(p)); + + if (num_segs == 0) + return (void)zclient_route_send(ZEBRA_ROUTE_DELETE, zclient, + &api); + + SET_FLAG(api.flags, ZEBRA_FLAG_ALLOW_RECURSION); + SET_FLAG(api.message, ZAPI_MESSAGE_NEXTHOP); + SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE); + + if (strlen(opaque)) { + SET_FLAG(api.message, ZAPI_MESSAGE_OPAQUE); + api.opaque.length = strlen(opaque) + 1; + assert(api.opaque.length <= ZAPI_MESSAGE_OPAQUE_LENGTH); + memcpy(api.opaque.data, opaque, api.opaque.length); + } + + znh = &api.nexthops[0]; + + memset(znh, 0, sizeof(*znh)); + + znh->type 
= NEXTHOP_TYPE_IFINDEX; + znh->ifindex = oif; + SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL); + znh->seg6local_action = action; + znh->seg_num = num_segs; + memcpy(znh->seg6_segs, srv6_segs, sizeof(struct in6_addr) * num_segs); + memcpy(&znh->seg6local_ctx.nh6, bsid, sizeof(struct in6_addr)); + api.nexthop_num = 1; + + zclient_route_send(ZEBRA_ROUTE_ADD, zclient, &api); +} + /** * Adds a segment routing policy to Zebra. * @@ -416,8 +485,8 @@ path_zebra_add_sr_policy_internal(struct srte_policy *policy, struct path_nht_data *nhtd) { struct zapi_sr_policy zp = {}; - struct srte_segment_entry *segment; - struct zapi_nexthop *znh; + struct srte_segment_entry *segment = NULL; + struct zapi_nexthop *znh = NULL; struct nexthop *nexthop; int num = 0; @@ -455,6 +524,14 @@ path_zebra_add_sr_policy_internal(struct srte_policy *policy, } zp.segment_list.nexthop_resolved_num = nhtd->nh_num; } + if (znh && !sid_zero_ipv6(&policy->srv6_binding_sid) && segment_list && + zp.segment_list.nexthop_resolved_num) + (void)path_zebra_send_bsid(&policy->srv6_binding_sid, + znh->ifindex, + ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP, + &zp.segment_list.srv6_segs.segs[0], + zp.segment_list.srv6_segs.num_segs); + (void)zebra_send_sr_policy(zclient, ZEBRA_SR_POLICY_SET, &zp); } diff --git a/pathd/path_zebra.h b/pathd/path_zebra.h index 25995b8304f7..243c48fecce2 100644 --- a/pathd/path_zebra.h +++ b/pathd/path_zebra.h @@ -8,6 +8,7 @@ #include #include "pathd/pathd.h" +#include "srv6.h" bool get_ipv4_router_id(struct in_addr *router_id); bool get_ipv6_router_id(struct in6_addr *router_id); @@ -19,5 +20,9 @@ void path_zebra_release_label(mpls_label_t label); void path_zebra_init(struct event_loop *master); void path_zebra_stop(void); void path_nht_removed(struct srte_candidate *candidate); +void path_zebra_send_bsid(const struct in6_addr *bsid, ifindex_t oif, + enum seg6local_action_t action, + struct in6_addr *srv6_segs, int num_segs); + #endif /* _FRR_PATH_MPLS_H_ */ diff --git a/pathd/pathd.c 
b/pathd/pathd.c index 39a984fbc43a..466d56fb07ed 100644 --- a/pathd/pathd.c +++ b/pathd/pathd.c @@ -517,6 +517,12 @@ void srte_policy_update_srv6_binding_sid(struct srte_policy *policy, { struct in6_addr srv6_binding_sid_zero = {}; + if (!srv6_binding_sid || + (srv6_binding_sid && + !IPV6_ADDR_SAME(&policy->srv6_binding_sid, srv6_binding_sid))) + (void)path_zebra_send_bsid(&policy->srv6_binding_sid, 0, + ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP, + NULL, 0); if (srv6_binding_sid) IPV6_ADDR_COPY(&policy->srv6_binding_sid, srv6_binding_sid); From acc857e6725e20bef17025a79537fc0e9d8fccd5 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 29 Dec 2023 14:20:52 +0100 Subject: [PATCH 36/45] lib, zebra, sharpd: add end.b6.encap support Add SRv6 SIDs and their number to the nexthop structure for end.b6.encaps seg6local. Extend zapi_nexthop_from_nexthop end.b6.encap support and en_de_code srv6_bsid. Signed-off-by: Dmytro Shytyi --- lib/nexthop.c | 58 +++++++++++++++++++++++++++++++++++++++------- lib/nexthop.h | 3 ++- lib/zclient.c | 34 ++++++++++++++++++++------- sharpd/sharp_vty.c | 2 +- zebra/rt_netlink.c | 4 ++-- zebra/zapi_msg.c | 16 +++++++++---- zebra/zebra_nhg.c | 3 ++- 7 files changed, 95 insertions(+), 25 deletions(-) diff --git a/lib/nexthop.c b/lib/nexthop.c index ac22e7ec8451..a8848aecf69c 100644 --- a/lib/nexthop.c +++ b/lib/nexthop.c @@ -582,8 +582,11 @@ void nexthop_del_labels(struct nexthop *nexthop) } void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action, - const struct seg6local_context *ctx) + const struct seg6local_context *ctx, + struct in6_addr *segs, int num_segs) { + int i; + if (action == ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) return; @@ -593,6 +596,29 @@ void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action, nexthop->nh_srv6->seg6local_action = action; nexthop->nh_srv6->seg6local_ctx = *ctx; + + if (!segs) + return; + + if (num_segs > 0) { + /* Enforce limit on srv6 seg stack size */ + if (num_segs > SRV6_MAX_SIDS) + 
num_segs = SRV6_MAX_SIDS; + + if (!nexthop->nh_srv6->seg6_segs) { + nexthop->nh_srv6->seg6_segs = + XCALLOC(MTYPE_NH_SRV6, + sizeof(struct seg6_seg_stack) + + num_segs * + sizeof(struct in6_addr)); + } + + nexthop->nh_srv6->seg6_segs->num_segs = num_segs; + + for (i = 0; i < num_segs; i++) + memcpy(&nexthop->nh_srv6->seg6_segs->seg[i], &segs[i], + sizeof(struct in6_addr)); + } } void nexthop_del_srv6_seg6local(struct nexthop *nexthop) @@ -885,14 +911,30 @@ void nexthop_copy_no_recurse(struct nexthop *copy, &nexthop->nh_label->label[0]); if (nexthop->nh_srv6) { - if (nexthop->nh_srv6->seg6local_action != - ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) + if (nexthop->nh_srv6->seg6local_action == + ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP && + nexthop->nh_srv6->seg6_segs->num_segs > 1) nexthop_add_srv6_seg6local(copy, - nexthop->nh_srv6->seg6local_action, - &nexthop->nh_srv6->seg6local_ctx); - if (nexthop->nh_srv6->seg6_segs && - nexthop->nh_srv6->seg6_segs->num_segs && - !sid_zero(nexthop->nh_srv6->seg6_segs)) + nexthop->nh_srv6 + ->seg6local_action, + &nexthop->nh_srv6 + ->seg6local_ctx, + &nexthop->nh_srv6->seg6_segs + ->seg[0], + nexthop->nh_srv6->seg6_segs + ->num_segs); + else if (nexthop->nh_srv6->seg6local_action != + ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) + nexthop_add_srv6_seg6local(copy, + nexthop->nh_srv6 + ->seg6local_action, + &nexthop->nh_srv6 + ->seg6local_ctx, + NULL, 0); + + else if (nexthop->nh_srv6->seg6_segs && + nexthop->nh_srv6->seg6_segs->num_segs && + !sid_zero(nexthop->nh_srv6->seg6_segs)) nexthop_add_srv6_seg6(copy, &nexthop->nh_srv6->seg6_segs->seg[0], nexthop->nh_srv6->seg6_segs diff --git a/lib/nexthop.h b/lib/nexthop.h index 15cfb26d8206..377851f363ea 100644 --- a/lib/nexthop.h +++ b/lib/nexthop.h @@ -155,7 +155,8 @@ void nexthop_add_labels(struct nexthop *nexthop, enum lsp_types_t ltype, uint8_t num_labels, const mpls_label_t *labels); void nexthop_del_labels(struct nexthop *); void nexthop_add_srv6_seg6local(struct nexthop *nexthop, uint32_t action, - const 
struct seg6local_context *ctx); + const struct seg6local_context *ctx, + struct in6_addr *segs, int num_segs); void nexthop_del_srv6_seg6local(struct nexthop *nexthop); void nexthop_add_srv6_seg6(struct nexthop *nexthop, const struct in6_addr *seg, int num_segs); diff --git a/lib/zclient.c b/lib/zclient.c index dd4e3e4583f4..c266ed760dfc 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -1070,7 +1070,8 @@ int zapi_nexthop_encode(struct stream *s, const struct zapi_nexthop *api_nh, sizeof(struct seg6local_context)); } - if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_SEG6)) { + if (CHECK_FLAG(nh_flags, ZAPI_NEXTHOP_FLAG_SEG6) || + api_nh->seg6local_action == ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP) { stream_putc(s, api_nh->seg_num); stream_put(s, &api_nh->seg6_segs[0], api_nh->seg_num * sizeof(struct in6_addr)); @@ -1450,7 +1451,8 @@ int zapi_nexthop_decode(struct stream *s, struct zapi_nexthop *api_nh, sizeof(struct seg6local_context)); } - if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6)) { + if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6) || + api_nh->seg6local_action == ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP) { STREAM_GETC(s, api_nh->seg_num); if (api_nh->seg_num > SRV6_MAX_SIDS) { flog_err(EC_LIB_ZAPI_ENCODE, @@ -2200,7 +2202,7 @@ struct nexthop *nexthop_from_zapi_nexthop(const struct zapi_nexthop *znh) if (znh->seg6local_action != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) nexthop_add_srv6_seg6local(n, znh->seg6local_action, - &znh->seg6local_ctx); + &znh->seg6local_ctx, NULL, 0); if (znh->seg_num && !sid_zero_ipv6(znh->seg6_segs)) nexthop_add_srv6_seg6(n, &znh->seg6_segs[0], znh->seg_num); @@ -2256,16 +2258,32 @@ int zapi_nexthop_from_nexthop(struct zapi_nexthop *znh, if (nh->nh_srv6) { if (nh->nh_srv6->seg6local_action != - ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) { + ZEBRA_SEG6_LOCAL_ACTION_UNSPEC && + nh->nh_srv6->seg6local_action != + ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP) { SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL); znh->seg6local_action = 
nh->nh_srv6->seg6local_action; memcpy(&znh->seg6local_ctx, &nh->nh_srv6->seg6local_ctx, sizeof(struct seg6local_context)); - } - - if (nh->nh_srv6->seg6_segs && nh->nh_srv6->seg6_segs->num_segs && - !sid_zero(nh->nh_srv6->seg6_segs)) { + } else if (nh->nh_srv6->seg6local_action == + ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP && + nh->nh_srv6->seg6_segs && + nh->nh_srv6->seg6_segs->num_segs > 0) { + SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6LOCAL); + znh->seg6local_action = nh->nh_srv6->seg6local_action; + memcpy(&znh->seg6local_ctx, &nh->nh_srv6->seg6local_ctx, + sizeof(struct seg6local_context)); + znh->seg_num = nh->nh_srv6->seg6_segs->num_segs; + for (i = 0; i < nh->nh_srv6->seg6_segs->num_segs; i++) + memcpy(&znh->seg6_segs[i], + &nh->nh_srv6->seg6_segs->seg[i], + sizeof(struct in6_addr)); + } else if ((nh->nh_srv6->seg6local_action == + ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) && + nh->nh_srv6->seg6_segs && + nh->nh_srv6->seg6_segs->num_segs && + !sid_zero(nh->nh_srv6->seg6_segs)) { SET_FLAG(znh->flags, ZAPI_NEXTHOP_FLAG_SEG6); znh->seg_num = nh->nh_srv6->seg6_segs->num_segs; for (i = 0; i < nh->nh_srv6->seg6_segs->num_segs; i++) diff --git a/sharpd/sharp_vty.c b/sharpd/sharp_vty.c index 21c596bf7124..79f45db456cb 100644 --- a/sharpd/sharp_vty.c +++ b/sharpd/sharp_vty.c @@ -545,7 +545,7 @@ DEFPY (install_seg6local_routes, sg.r.nhop.ifindex = ifname2ifindex(seg6l_oif, vrf->vrf_id); sg.r.nhop.vrf_id = vrf->vrf_id; sg.r.nhop_group.nexthop = &sg.r.nhop; - nexthop_add_srv6_seg6local(&sg.r.nhop, action, &ctx); + nexthop_add_srv6_seg6local(&sg.r.nhop, action, &ctx, NULL, 0); sg.r.vrf_id = vrf->vrf_id; sharp_install_routes_helper(&sg.r.orig_prefix, sg.r.vrf_id, sg.r.inst, diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index 900c999fe8d9..3c182728abe6 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -584,7 +584,7 @@ parse_nexthop_unicast(ns_id_t ns_id, struct rtmsg *rtm, struct rtattr **tb, } if (seg6l_act != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) - 
nexthop_add_srv6_seg6local(&nh, seg6l_act, &seg6l_ctx); + nexthop_add_srv6_seg6local(&nh, seg6l_act, &seg6l_ctx, NULL, 0); if (num_segs) nexthop_add_srv6_seg6(&nh, segs, num_segs); @@ -703,7 +703,7 @@ static uint8_t parse_multipath_nexthops_unicast(ns_id_t ns_id, if (seg6l_act != ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) nexthop_add_srv6_seg6local(nh, seg6l_act, - &seg6l_ctx); + &seg6l_ctx, NULL, 0); if (num_segs) nexthop_add_srv6_seg6(nh, segs, num_segs); diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index 936429a10094..2e07dd06643a 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -1868,10 +1868,18 @@ static bool zapi_read_nexthops(struct zserv *client, struct prefix *p, __func__, seg6local_action2str( api_nh->seg6local_action)); - - nexthop_add_srv6_seg6local(nexthop, - api_nh->seg6local_action, - &api_nh->seg6local_ctx); + if (api_nh->seg6local_action == + ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP) + nexthop_add_srv6_seg6local(nexthop, + api_nh->seg6local_action, + &api_nh->seg6local_ctx, + &api_nh->seg6_segs[0], + api_nh->seg_num); + else + nexthop_add_srv6_seg6local(nexthop, + api_nh->seg6local_action, + &api_nh->seg6local_ctx, + NULL, 0); } if (CHECK_FLAG(api_nh->flags, ZAPI_NEXTHOP_FLAG_SEG6) diff --git a/zebra/zebra_nhg.c b/zebra/zebra_nhg.c index 1fe1b7cdb9e0..08589db09b45 100644 --- a/zebra/zebra_nhg.c +++ b/zebra/zebra_nhg.c @@ -1925,7 +1925,8 @@ static struct nexthop *nexthop_set_resolved(afi_t afi, nexthop->nh_srv6 ->seg6local_action, &nexthop->nh_srv6 - ->seg6local_ctx); + ->seg6local_ctx, + NULL, 0); if (nexthop->nh_srv6->seg6_segs) nexthop_add_srv6_seg6(resolved_hop, &nexthop->nh_srv6->seg6_segs->seg[0], From 26546d087d4c097c7cea4fb1374b45d211115d76 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 29 Dec 2023 14:40:48 +0100 Subject: [PATCH 37/45] zebra: add end.b6.encaps to rt_netlink Introduce the End.B6.Encaps in the rt_netlink code. 
Signed-off-by: Dmytro Shytyi Signed-off-by: Philippe Guibert --- lib/nexthop.c | 1 + zebra/rt_netlink.c | 77 +++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 73 insertions(+), 5 deletions(-) diff --git a/lib/nexthop.c b/lib/nexthop.c index a8848aecf69c..74cdcb2c264e 100644 --- a/lib/nexthop.c +++ b/lib/nexthop.c @@ -913,6 +913,7 @@ void nexthop_copy_no_recurse(struct nexthop *copy, if (nexthop->nh_srv6) { if (nexthop->nh_srv6->seg6local_action == ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP && + nexthop->nh_srv6->seg6_segs && nexthop->nh_srv6->seg6_segs->num_segs > 1) nexthop_add_srv6_seg6local(copy, nexthop->nh_srv6 diff --git a/zebra/rt_netlink.c b/zebra/rt_netlink.c index 3c182728abe6..63ed087fb0b3 100644 --- a/zebra/rt_netlink.c +++ b/zebra/rt_netlink.c @@ -80,7 +80,7 @@ DEFINE_MTYPE_STATIC(LIB, NH_SRV6, "Nexthop srv6"); -static vlanid_t filter_vlan = 0; +static vlanid_t filter_vlan; /* We capture whether the current kernel supports nexthop ids; by * default, we'll use them if possible. 
There's also a configuration @@ -94,6 +94,12 @@ struct gw_family_t { union g_addr gate; }; +struct buf_req { + struct nlmsghdr n; + struct nhmsg nhm; + char buf[]; +}; + static const char ipv4_ll_buf[16] = "169.254.0.1"; static struct in_addr ipv4_ll; @@ -970,6 +976,7 @@ int netlink_route_change_read_unicast_internal(struct nlmsghdr *h, } afi_t afi = AFI_IP; + if (rtm->rtm_family == AF_INET6) afi = AFI_IP6; @@ -1083,7 +1090,7 @@ static int netlink_route_change_read_unicast(struct nlmsghdr *h, ns_id_t ns_id, NULL); } -static struct mcast_route_data *mroute = NULL; +static struct mcast_route_data *mroute; static int netlink_route_change_read_multicast(struct nlmsghdr *h, ns_id_t ns_id, int startup) @@ -2705,6 +2712,58 @@ static bool _netlink_nexthop_build_group(struct nlmsghdr *n, size_t req_size, return true; } +static ssize_t fill_srh_end_b6_encaps(char *buffer, size_t buflen, + struct seg6_seg_stack *segs) +{ + struct ipv6_sr_hdr *srh; + size_t srhlen; + int i; + + if (!segs || segs->num_segs > SRV6_MAX_SEGS) { + /* Exceeding maximum supported SIDs */ + return -1; + } + + srhlen = SRH_BASE_HEADER_LENGTH + SRH_SEGMENT_LENGTH * segs->num_segs; + + if (buflen < srhlen) + return -1; + + memset(buffer, 0, buflen); + + srh = (struct ipv6_sr_hdr *)buffer; + srh->hdrlen = (srhlen >> 3) - 1; + srh->type = 4; + srh->segments_left = segs->num_segs - 1; + srh->first_segment = segs->num_segs - 1; + + for (i = 0; i < segs->num_segs; i++) { + memcpy(&srh->segments[segs->num_segs - i - 1], &segs->seg[i], + sizeof(struct in6_addr)); + } + + return srhlen; +} + +static int netlink_nexthop_msg_encode_end_b6_encaps(struct buf_req *req, + const struct nexthop *nh, + size_t buflen) +{ + int srh_len; + char srh_buf[4096]; + + if (!nl_attr_put32(&req->n, buflen, SEG6_LOCAL_ACTION, + SEG6_LOCAL_ACTION_END_B6_ENCAP)) + return 0; + srh_len = fill_srh_end_b6_encaps(srh_buf, sizeof(srh_buf), + nh->nh_srv6->seg6_segs); + if (srh_len < 0) + return 0; + if (!nl_attr_put(&req->n, buflen, 
SEG6_LOCAL_SRH, srh_buf, srh_len)) + return 0; + return 1; +} + /** * Next hop packet encoding helper function. * @@ -3019,6 +3078,11 @@ ssize_t netlink_nexthop_msg_encode(uint16_t cmd, ctx->table)) return 0; break; + case ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP: + netlink_nexthop_msg_encode_end_b6_encaps( + (struct buf_req *)req, + nh, buflen); + break; default: zlog_err("%s: unsupport seg6local behaviour action=%u", __func__, action); @@ -3034,7 +3098,9 @@ ssize_t netlink_nexthop_msg_encode(uint16_t cmd, if (nh->nh_srv6->seg6_segs && nh->nh_srv6->seg6_segs->num_segs && - !sid_zero(nh->nh_srv6->seg6_segs)) { + !sid_zero(nh->nh_srv6->seg6_segs) && + nh->nh_srv6->seg6local_action == + ZEBRA_SEG6_LOCAL_ACTION_UNSPEC) { char tun_buf[4096]; ssize_t tun_len; struct rtattr *nest; @@ -3811,7 +3877,8 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id) return 0; zif = (struct zebra_if *)ifp->info; - if ((br_if = zif->brslave_info.br_if) == NULL) { + br_if = zif->brslave_info.br_if; + if (br_if == NULL) { if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( "%s AF_BRIDGE IF %s(%u) brIF %u - no bridge master", @@ -3849,7 +3916,7 @@ static int netlink_macfdb_change(struct nlmsghdr *h, int len, ns_id_t ns_id) * so perform an implicit delete of any local entry (if it exists). */ if (h->nlmsg_type == RTM_NEWNEIGH) { - /* Drop "permanent" entries. */ + /* Drop "permanent" entries. */ if (!vni_mcast_grp && (ndm->ndm_state & NUD_PERMANENT)) { if (IS_ZEBRA_DEBUG_KERNEL) zlog_debug( From be3f341c4dc5e786ab0d2be59b91ff64f75db66c Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 29 Dec 2023 14:41:25 +0100 Subject: [PATCH 38/45] tests: add end.b6.encaps topotest The topotest for End.B6.Encaps binding sid is provided. At rt4 out the effect of End.B6.Encaps: 1. decrement the counter of the current segment list 2. 
add the SRH [0]fc00:0:5::, [1]fc00:0:4:: Signed-off-by: Dmytro Shytyi --- .../isis_srv6_te_topo1/rt2/pathd.conf | 0 .../rt2/step6/show_ipv6_route.ref | 25 +++ .../test_isis_srv6_te_topo1.py | 208 +++++++++++++++++- 3 files changed, 229 insertions(+), 4 deletions(-) create mode 100644 tests/topotests/isis_srv6_te_topo1/rt2/pathd.conf create mode 100755 tests/topotests/isis_srv6_te_topo1/rt2/step6/show_ipv6_route.ref mode change 100644 => 100755 tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py diff --git a/tests/topotests/isis_srv6_te_topo1/rt2/pathd.conf b/tests/topotests/isis_srv6_te_topo1/rt2/pathd.conf new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/isis_srv6_te_topo1/rt2/step6/show_ipv6_route.ref b/tests/topotests/isis_srv6_te_topo1/rt2/step6/show_ipv6_route.ref new file mode 100755 index 000000000000..2e8ff6d8f94d --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt2/step6/show_ipv6_route.ref @@ -0,0 +1,25 @@ +{ + "fc00:0:2::/128":[ + { + "prefix":"fc00:0:2::/128", + "prefixLen":128, + "protocol":"isis", + "vrfId":0, + "vrfName":"default", + "selected":true, + "installed":true, + "table":254, + "nexthops":[ + { + "flags":3, + "fib":true, + "directlyConnected":true, + "weight":1, + "seg6local":{ + "action":"End" + } + } + ] + } + ] +} diff --git a/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py b/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py old mode 100644 new mode 100755 index 5af35a5c4256..3ed574b5ed7c --- a/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py +++ b/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py @@ -76,6 +76,7 @@ import json import functools import pytest +import subprocess CWD = os.path.dirname(os.path.realpath(__file__)) sys.path.append(os.path.join(CWD, "../")) @@ -89,6 +90,7 @@ required_linux_kernel_version, create_interface_in_kernel, ) +from multiprocessing import Process pytestmark = [pytest.mark.isisd, pytest.mark.bgpd] @@ -431,7 
+433,7 @@ def test_srv6_te_policy_additional_route(): def test_srv6_te_policy_removed(): - logger.info("Test (step 3): verify SRv6 TE policy removed") + logger.info("Test (step 4): verify SRv6 TE policy removed") tgen = get_topogen() # Skip if previous fatal error condition is raised @@ -440,6 +442,7 @@ def test_srv6_te_policy_removed(): tgen.gears["rt1"].vtysh_cmd( "configure \n \ + no ipv6 route fc00:0:6b::/48 fc00:0:6:: color 1 \n \ segment-routing \n \ traffic-eng \n \ no policy color 1 endpoint fc00:0:6:: \n \ @@ -449,11 +452,208 @@ def test_srv6_te_policy_removed(): !" ) - for rname in ["rt1"]: + # for rname in ["rt1"]: + # router_compare_json_output( + # rname, + # "show ipv6 route static json", + # "step4/show_ipv6_route.ref", + # ) + + +# +# Step 5 +# +# Test SRv6 End.B6.Encaps +# + + +def test_srv6_end_b6_encaps(): + logger.info("Test (step 5): verify SRv6 END.B6.Encaps") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["rt1"].vtysh_cmd( + "configure \n \ + ipv6 route fc00:0:6b::/48 fc00:0:6:: color 1 \n \ + segment-routing \n \ + traffic-eng \n \ + segment-list srv6 \n \ + index 1 ipv6-address fc00:0:2:: \n \ + index 2 ipv6-address fc00:0:6:: \n \ + exit \n \ + policy color 1 endpoint fc00:0:6:: \n \ + candidate-path preference 1 name srv6 explicit segment-list srv6 \n \ + exit \n \ + exit \n \ + !" + ) + + tgen.gears["rt2"].vtysh_cmd( + "configure \n \ + segment-routing \n \ + traffic-eng \n \ + segment-list srv6-header \n \ + index 1 ipv6-address fc00:0:4:: \n \ + index 2 ipv6-address fc00:0:5:: \n \ + exit \n \ + policy color 1 endpoint fc00:0:6:: \n \ + srv6-binding-sid fc00:0:2:: \n \ + candidate-path preference 1 name srv6 explicit segment-list srv6-header \n \ + exit \n \ + exit \n \ + !" 
+ ) + + tgen.gears["rt5"].vtysh_cmd( + "configure \n \ + interface sr0 \n \ + ipv6 address fc00:0:5::/48 \n \ + exit" + ) + + tgen.gears["rt5"].run("sysctl -w net.ipv6.conf.all.seg6_enabled=1") + tgen.gears["rt5"].run("sysctl -w net.ipv6.conf.default.seg6_enabled=1") + tgen.gears["rt5"].run("sysctl -w net.ipv6.conf.eth-rt4.seg6_enabled=1") + + subprocess.check_call(["apt", "install", "-y", "tcpdump"]) + if not os.path.isdir("/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/scapy-latest"): + subprocess.check_call( + ["git", "clone", "https://github.com/secdev/scapy.git", "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/scapy-latest"] + ) + if not os.path.isdir("/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages"): + subprocess.check_call( + ["pip", "download", "--dest", "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages/", "setuptools==62.0.0"] + ) + subprocess.check_call( + ["pip", "download", "--dest", "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages/", "wheel"] + ) + tgen.gears["rt2"].run( + "python3 -m venv /tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/; source /tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/venv/bin/activate; pip3 install --no-index --force-reinstall --find-links=/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages/ setuptools==62.0.0; pip3 install --no-index --find-links=/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages /tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/scapy-latest/" + ) + + def ping_func(): + tgen = get_topogen() + tgen.gears["src"].run("ping 10.0.10.2 -c 128 -i 0.1") + + p1 = Process(target=ping_func) + p1.start() + # tgen.gears["rt2"].run("tcpdump -i any -q -w ./rt2-dump.pcap & ping 10.0.10.2 -c 5; killall tcpdump") + tgen.gears["rt2"].run("touch ./rt2-dump.pcap && chmod oug+rwx ./rt2-dump.pcap") + 
tgen.gears["rt2"].run("tcpdump -i any -w ./rt2-dump.pcap -c 64") + + # tgen.gears["rt2"].run("tshark -ni any -w /tmp/rt2-dump.pcap -c 64") + p1.join() + + file_content = """\ +from scapy.all import rdpcap, IPv6, IPv6ExtHdrSegmentRouting +scapy_cap = rdpcap( + "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/rt2-dump.pcap" +) +# scapy_cap = rdpcap('/tmp/rt2-dump.pcap') +output_pkt_flag = False +outer_srh = -1 +outer_srhl = -1 +inner_srh = -1 +inner_srhl = -1 +inner_most_srh = -1 +inner_most_srhl = -1 +for packet in scapy_cap: + if "echo-request" in str(packet): + print(packet) + if packet.haslayer(IPv6) and packet.haslayer(IPv6ExtHdrSegmentRouting): + outer_srh = packet[IPv6][IPv6ExtHdrSegmentRouting].addresses + outer_srhl = packet[IPv6][IPv6ExtHdrSegmentRouting].segleft + encap = packet[IPv6][IPv6ExtHdrSegmentRouting] + if encap.haslayer(IPv6) and encap.haslayer(IPv6ExtHdrSegmentRouting): + inner_srh = encap[IPv6][IPv6ExtHdrSegmentRouting].addresses + inner_srhl = encap[IPv6][IPv6ExtHdrSegmentRouting].segleft + bgp_encap = encap[IPv6][IPv6ExtHdrSegmentRouting] + if bgp_encap.haslayer(IPv6) and bgp_encap.haslayer( + IPv6ExtHdrSegmentRouting + ): + inner_most_srh = bgp_encap[IPv6][ + IPv6ExtHdrSegmentRouting + ].addresses + inner_most_srhl = bgp_encap[IPv6][ + IPv6ExtHdrSegmentRouting + ].segleft + + if ( + ("fc00:0:6::" in inner_srh) + and ("fc00:0:2::" in inner_srh) + and (inner_srhl == 0) + and ("fc00:0:5::" in outer_srh) + and ("fc00:0:4::" in outer_srh) + and (outer_srhl == 1) + and ("fc00:0:6b:1::" in inner_most_srh) + and (inner_most_srhl == 0) + ): + output_pkt_flag = True + else: + print( + "Observed: outer srh: {}, segleft={}, inner srh: {}, segleft={}, inner_most_srh: {}, segleft={}".format( + outer_srh, + outer_srhl, + inner_srh, + inner_srhl, + inner_most_srh, + inner_most_srhl, + ) + ) + +if output_pkt_flag == False: + assertmsg = ('Unexpected SRH in the captured packet in rt2' + 'Expected outer srh: [fc00:0:5::, fc00:0:4::], segleft=1, 
inner srh: [fc00:0:6::, fc00:0:2::], segleft=0, inner_most_srh: [fc00:0:6b:1::], segleft=0') + assert False, assertmsg + """ + + file_path = ( + "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/scapy_test_srv6.py" + ) + with open(file_path, "w") as file: + file.write(file_content) + + activate_cmd = f"source {os.path.join('/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/venv', 'bin', 'activate')}" + subprocess.run( + [activate_cmd, ";", "python3", file_path], shell=True, executable="/bin/bash" + ) + + + +# +# Step 6 +# +# Test SRv6 End.B6.Encaps removal +# + + +def test_srv6_end_b6_encaps_removal(): + logger.info("Test (step 6): verify SRv6 END.B6.Encaps removal") + tgen = get_topogen() + + # Skip if previous fatal error condition is raised + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["rt2"].vtysh_cmd( + "configure \n \ + segment-routing \n \ + traffic-eng \n \ + policy color 1 endpoint fc00:0:6:: \n \ + no srv6-binding-sid fc00:0:2:: \n \ + exit \n \ + exit \n \ + !" + ) + + for rname in ["rt2"]: router_compare_json_output( rname, - "show ipv6 route static json", - "step4/show_ipv6_route.ref", + "show ipv6 route fc00:0:2:: json", + "step6/show_ipv6_route.ref", ) From 130188a4e6ce879bcedf6cf8572cadd38973d32f Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Tue, 11 Jun 2024 10:19:19 +0200 Subject: [PATCH 39/45] pathd: send ROUTE_DEL message to ZEBRA when BSID IPv6 is not installed The update of the srv6-binding-sid value of a policy triggers the emission of a message to ZEBRA configured: > segment-routing > traffic-eng > policy color 1 endpoint 1::1 > srv6-binding-sid 1001::1 > [..] > srv6-binding-sid 1001::40 output on logs: > 2024/06/11 10:09:35 ZEBRA: [YXG8K-BCYMV] zebra message[ZEBRA_ROUTE_DELETE:0:40] comes from socket [34] The previous BSID is attempted to be flushed, whereas it has never been installed: the SR policy is not up and does not have any candidate paths. 
Fix this by using a BSID_IPV6_INSTALLED flag on srte policy to tell whether the BSID is installed or not. Signed-off-by: Philippe Guibert Acked-by: Dmytro Shytyi --- pathd/path_zebra.c | 4 +++- pathd/pathd.c | 10 ++++++---- pathd/pathd.h | 1 + 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c index d63adad18974..8c2000287810 100644 --- a/pathd/path_zebra.c +++ b/pathd/path_zebra.c @@ -525,13 +525,15 @@ path_zebra_add_sr_policy_internal(struct srte_policy *policy, zp.segment_list.nexthop_resolved_num = nhtd->nh_num; } if (znh && !sid_zero_ipv6(&policy->srv6_binding_sid) && segment_list && - zp.segment_list.nexthop_resolved_num) + zp.segment_list.nexthop_resolved_num) { (void)path_zebra_send_bsid(&policy->srv6_binding_sid, znh->ifindex, ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP, &zp.segment_list.srv6_segs.segs[0], zp.segment_list.srv6_segs.num_segs); + SET_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED); + } (void)zebra_send_sr_policy(zclient, ZEBRA_SR_POLICY_SET, &zp); } diff --git a/pathd/pathd.c b/pathd/pathd.c index 466d56fb07ed..a2bfe808051a 100644 --- a/pathd/pathd.c +++ b/pathd/pathd.c @@ -517,13 +517,15 @@ void srte_policy_update_srv6_binding_sid(struct srte_policy *policy, { struct in6_addr srv6_binding_sid_zero = {}; - if (!srv6_binding_sid || - (srv6_binding_sid && - !IPV6_ADDR_SAME(&policy->srv6_binding_sid, srv6_binding_sid))) + if (CHECK_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED) && + (!srv6_binding_sid || + (srv6_binding_sid && + !IPV6_ADDR_SAME(&policy->srv6_binding_sid, srv6_binding_sid)))) { (void)path_zebra_send_bsid(&policy->srv6_binding_sid, 0, ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP, NULL, 0); - + UNSET_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED); + } if (srv6_binding_sid) IPV6_ADDR_COPY(&policy->srv6_binding_sid, srv6_binding_sid); else diff --git a/pathd/pathd.h b/pathd/pathd.h index 0746175f6f24..d5223a5c765f 100644 --- a/pathd/pathd.h +++ b/pathd/pathd.h @@ -372,6 +372,7 @@ struct 
srte_policy { #define F_POLICY_NEW 0x0002 #define F_POLICY_MODIFIED 0x0004 #define F_POLICY_DELETED 0x0008 +#define F_POLICY_BSID_IPV6_INSTALLED 0x0040 /* SRP id for PcInitiated support */ int srp_id; }; From 31a08065e2c002e3d4737486d8c032a1df3ec39e Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Fri, 7 Jun 2024 13:22:10 +0200 Subject: [PATCH 40/45] pathd: add srv6 manager control over bsid-ipv6 of sr-te policies A policy can be defined with whatever binding ipv6 sid address, whereas this value should be uniquely used by the SR-TE service, and should own a defined srv6 locator. Add the ability for the pathd daemon to request SRv6 SID values to the zebra srv6 SID manager. Compared with the previous behaviour, the user will have to configure a locator, and the used binding sid should be in this locator and be freed (not used by another CP daemon). If not, the binding SID will be configured but not usable. Signed-off-by: Philippe Guibert Acked-by: Dmytro Shytyi --- pathd/path_zebra.c | 91 ++++++++++++++++++++++++++++++++++++++++++++++ pathd/path_zebra.h | 3 ++ pathd/pathd.c | 35 ++++++++++++------ pathd/pathd.h | 1 + 4 files changed, 119 insertions(+), 11 deletions(-) diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c index 8c2000287810..0e0bc1d20e20 100644 --- a/pathd/path_zebra.c +++ b/pathd/path_zebra.c @@ -525,6 +525,7 @@ path_zebra_add_sr_policy_internal(struct srte_policy *policy, zp.segment_list.nexthop_resolved_num = nhtd->nh_num; } if (znh && !sid_zero_ipv6(&policy->srv6_binding_sid) && segment_list && + CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED) && zp.segment_list.nexthop_resolved_num) { (void)path_zebra_send_bsid(&policy->srv6_binding_sid, znh->ifindex, @@ -824,10 +825,78 @@ static int path_zebra_opaque_msg_handler(ZAPI_CALLBACK_ARGS) return ret; } +static int path_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) +{ + struct srv6_sid_ctx ctx; + struct in6_addr sid_addr; + enum zapi_srv6_sid_notify note; + uint32_t sid_func; + struct srte_policy 
*policy, *safe_pol; + char buf[256]; + char *locator_name = NULL; + char **p_locator_name = &locator_name; + + /* Decode the received notification message */ + if (!zapi_srv6_sid_notify_decode(zclient->ibuf, &ctx, &sid_addr, + &sid_func, NULL, ¬e, + p_locator_name)) { + zlog_err("%s : error in msg decode", __func__); + return -1; + } + + PATH_ZEBRA_DEBUG("%s: received SRv6 SID notify: ctx %s sid_value %pI6 %s", + __func__, srv6_sid_ctx2str(buf, sizeof(buf), &ctx), + &sid_addr, zapi_srv6_sid_notify2str(note)); + + /* Get the SR-TE policy which has requested that SID */ + RB_FOREACH_SAFE (policy, srte_policy_head, &srte_policies, safe_pol) { + if (!IPV6_ADDR_SAME(&policy->srv6_binding_sid, &sid_addr)) + continue; + + switch (note) { + case ZAPI_SRV6_SID_ALLOCATED: + PATH_ZEBRA_DEBUG("SRv6 SID %pI6 %s : ALLOCATED", + &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), + &ctx)); + SET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); + if (policy->best_candidate) + path_zebra_add_sr_policy(policy, + policy->best_candidate + ->lsp + ->segment_list); + break; + case ZAPI_SRV6_SID_RELEASED: + PATH_ZEBRA_DEBUG("SRv6 SID %pI6 %s: RELEASED", &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), + &ctx)); + UNSET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); + break; + case ZAPI_SRV6_SID_FAIL_ALLOC: + PATH_ZEBRA_DEBUG("SRv6 SID %pI6 %s: Failed to allocate", + &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), + &ctx)); + + /* Error will be logged by zebra module */ + break; + case ZAPI_SRV6_SID_FAIL_RELEASE: + zlog_warn("%s: SRv6 SID %pI6 %s failure to release", + __func__, &sid_addr, + srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); + + /* Error will be logged by zebra module */ + break; + } + } + return 0; +} + static zclient_handler *const path_handlers[] = { [ZEBRA_SR_POLICY_NOTIFY_STATUS] = path_zebra_sr_policy_notify_status, [ZEBRA_ROUTER_ID_UPDATE] = path_zebra_router_id_update, [ZEBRA_OPAQUE_MESSAGE] = path_zebra_opaque_msg_handler, + [ZEBRA_SRV6_SID_NOTIFY] = path_zebra_srv6_sid_notify, 
}; /** @@ -871,3 +940,25 @@ void path_zebra_stop(void) zclient_stop(zclient_sync); zclient_free(zclient_sync); } + +void path_zebra_srv6_manager_get_sid(struct srv6_sid_ctx *ctx, + struct in6_addr *sid_addr) +{ + int ret; + uint32_t sid_func; + + ret = srv6_manager_get_sid(zclient, ctx, sid_addr, NULL, &sid_func); + if (ret < 0) + /* TODO: need to re-send later GET_SID message */ + zlog_warn("%s: error getting SRv6 SID!", __func__); +} + +void path_zebra_srv6_manager_release_sid(struct srv6_sid_ctx *ctx) +{ + int ret; + + ret = srv6_manager_release_sid(zclient, ctx); + if (ret < 0) + /* TODO: need to re-send later GET_SID message */ + zlog_warn("%s: error releasing SRv6 SID!", __func__); +} diff --git a/pathd/path_zebra.h b/pathd/path_zebra.h index 243c48fecce2..4ce8212f284d 100644 --- a/pathd/path_zebra.h +++ b/pathd/path_zebra.h @@ -23,6 +23,9 @@ void path_nht_removed(struct srte_candidate *candidate); void path_zebra_send_bsid(const struct in6_addr *bsid, ifindex_t oif, enum seg6local_action_t action, struct in6_addr *srv6_segs, int num_segs); +void path_zebra_srv6_manager_release_sid(struct srv6_sid_ctx *ctx); +void path_zebra_srv6_manager_get_sid(struct srv6_sid_ctx *ctx, + struct in6_addr *sid_addr); #endif /* _FRR_PATH_MPLS_H_ */ diff --git a/pathd/pathd.c b/pathd/pathd.c index a2bfe808051a..e98ee9c042a3 100644 --- a/pathd/pathd.c +++ b/pathd/pathd.c @@ -516,19 +516,32 @@ void srte_policy_update_srv6_binding_sid(struct srte_policy *policy, struct in6_addr *srv6_binding_sid) { struct in6_addr srv6_binding_sid_zero = {}; - - if (CHECK_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED) && - (!srv6_binding_sid || - (srv6_binding_sid && - !IPV6_ADDR_SAME(&policy->srv6_binding_sid, srv6_binding_sid)))) { - (void)path_zebra_send_bsid(&policy->srv6_binding_sid, 0, - ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP, - NULL, 0); - UNSET_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED); + struct srv6_sid_ctx ctx = {}; + + ctx.vrf_id = VRF_DEFAULT; + ctx.behavior = 
ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP; + memcpy(&ctx.nh6, &policy->endpoint.ip._v6_addr, sizeof(struct in6_addr)); + ctx.color = policy->color; + + if (!srv6_binding_sid || + (srv6_binding_sid && + !IPV6_ADDR_SAME(&policy->srv6_binding_sid, srv6_binding_sid))) { + if (CHECK_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED)) { + (void)path_zebra_send_bsid(&policy->srv6_binding_sid, 0, + ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP, + NULL, 0); + UNSET_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED); + } + if (CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED)) + path_zebra_srv6_manager_release_sid(&ctx); + UNSET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); } - if (srv6_binding_sid) + + if (srv6_binding_sid) { IPV6_ADDR_COPY(&policy->srv6_binding_sid, srv6_binding_sid); - else + if (!CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED)) + path_zebra_srv6_manager_get_sid(&ctx, srv6_binding_sid); + } else IPV6_ADDR_COPY(&policy->srv6_binding_sid, &srv6_binding_sid_zero); diff --git a/pathd/pathd.h b/pathd/pathd.h index d5223a5c765f..7c8118ba154a 100644 --- a/pathd/pathd.h +++ b/pathd/pathd.h @@ -372,6 +372,7 @@ struct srte_policy { #define F_POLICY_NEW 0x0002 #define F_POLICY_MODIFIED 0x0004 #define F_POLICY_DELETED 0x0008 +#define F_POLICY_BSID_ALLOCATED 0x0010 #define F_POLICY_BSID_IPV6_INSTALLED 0x0040 /* SRP id for PcInitiated support */ int srp_id; From 1a21776eec3bd8ca610c94541d6ebcd6627e285f Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Tue, 16 Jul 2024 14:56:53 +0200 Subject: [PATCH 41/45] pathd: add srv6 locator register/unregister handling Let pathd be aware of the list of locators. Upon the presence of a locator, the policies are parsed to re-send srv6 sid requests that previously failed. Upon the deletion of a locator, release the sids that previously used it. 
Signed-off-by: Philippe Guibert Acked-by: Dmytro Shytyi --- pathd/path_zebra.c | 118 +++++++++++++++++++++++++++++++++++++++++++++ pathd/path_zebra.h | 2 + pathd/pathd.h | 3 ++ 3 files changed, 123 insertions(+) diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c index 0e0bc1d20e20..0522ba1ba7bf 100644 --- a/pathd/path_zebra.c +++ b/pathd/path_zebra.c @@ -49,6 +49,7 @@ struct in6_addr g_router_id_v6; pthread_mutex_t g_router_id_v4_mtx = PTHREAD_MUTEX_INITIALIZER; pthread_mutex_t g_router_id_v6_mtx = PTHREAD_MUTEX_INITIALIZER; +DEFINE_MTYPE_STATIC(PATHD, LOCATOR_LIST_CTX, "Locator List context"); DEFINE_MTYPE_STATIC(PATHD, PATH_NHT_DATA, "Pathd Nexthop tracking data"); PREDECL_HASH(path_nht_hash); @@ -825,6 +826,97 @@ static int path_zebra_opaque_msg_handler(ZAPI_CALLBACK_ARGS) return ret; } +PREDECL_DLIST(path_locator_list); +struct path_locator_list_head path_srv6_locator_list; +struct path_locator_ctx { + struct srv6_locator *locator; + struct path_locator_list_item pll_entries; +}; +DECLARE_DLIST(path_locator_list, struct path_locator_ctx, pll_entries); + + +static int path_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) +{ + struct srv6_locator locator = {}; + struct path_locator_ctx *ctx; + struct srte_policy *policy; + struct srv6_sid_ctx sid_ctx = {}; + + if (zapi_srv6_locator_decode(zclient->ibuf, &locator) < 0) + return -1; + + if (!path_zebra_locator_ctx_lookup_by_name((const char *)&locator.name)) { + ctx = XCALLOC(MTYPE_LOCATOR_LIST_CTX, + sizeof(struct path_locator_ctx)); + ctx->locator = srv6_locator_alloc((const char *)&locator.name); + srv6_locator_copy(ctx->locator, &locator); + path_locator_list_add_tail(&path_srv6_locator_list, ctx); + } + + RB_FOREACH (policy, srte_policy_head, &srte_policies) { + if (IPV6_ADDR_SAME(&policy->srv6_binding_sid, &in6addr_any)) + continue; + sid_ctx.vrf_id = VRF_DEFAULT; + sid_ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP; + memcpy(&sid_ctx.nh6, &policy->endpoint.ip._v6_addr, + sizeof(struct in6_addr)); + 
sid_ctx.color = policy->color; + if (!CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED)) + path_zebra_srv6_manager_get_sid(&sid_ctx, + &policy->srv6_binding_sid); + } + return 0; +} + +static int path_zebra_process_srv6_locator_delete(ZAPI_CALLBACK_ARGS) +{ + struct srv6_locator loc = {}; + struct srv6_locator *locator; + struct srte_policy *policy; + struct srv6_sid_ctx ctx = {}; + struct path_locator_ctx *context; + + if (zapi_srv6_locator_decode(zclient->ibuf, &loc) < 0) + return -1; + + context = path_zebra_locator_ctx_lookup_by_name((const char *)&loc.name); + if (!context || !context->locator) + return -1; + + locator = context->locator; + + PATH_ZEBRA_DEBUG("%s: Received SRv6 locator %s %pFX, loc-block-len=%u, loc-node-len=%u func-len=%u, arg-len=%u", + __func__, locator->name, &locator->prefix, + locator->block_bits_length, locator->node_bits_length, + locator->function_bits_length, + locator->argument_bits_length); + + RB_FOREACH (policy, srte_policy_head, &srte_policies) { + if (IPV6_ADDR_SAME(&policy->srv6_binding_sid, &in6addr_any)) + continue; + if (policy->srv6_locator == locator) { + ctx.vrf_id = VRF_DEFAULT; + ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP; + memcpy(&ctx.nh6, &policy->endpoint.ip._v6_addr, + sizeof(struct in6_addr)); + ctx.color = policy->color; + (void)path_zebra_send_bsid(&policy->srv6_binding_sid, 0, + ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP, + NULL, 0); + if (CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED)) + path_zebra_srv6_manager_release_sid(&ctx); + UNSET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); + policy->srv6_locator = NULL; + } + } + + srv6_locator_free(context->locator); + path_locator_list_del(&path_srv6_locator_list, context); + XFREE(MTYPE_LOCATOR_LIST_CTX, context); + + return 0; +} + static int path_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) { struct srv6_sid_ctx ctx; @@ -835,6 +927,7 @@ static int path_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) char buf[256]; char *locator_name = NULL; char **p_locator_name 
= &locator_name; + struct path_locator_ctx *context; /* Decode the received notification message */ if (!zapi_srv6_sid_notify_decode(zclient->ibuf, &ctx, &sid_addr, @@ -843,6 +936,8 @@ static int path_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) zlog_err("%s : error in msg decode", __func__); return -1; } + if (p_locator_name) + locator_name = *p_locator_name; PATH_ZEBRA_DEBUG("%s: received SRv6 SID notify: ctx %s sid_value %pI6 %s", __func__, srv6_sid_ctx2str(buf, sizeof(buf), &ctx), @@ -860,6 +955,11 @@ static int path_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); SET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); + + context = path_zebra_locator_ctx_lookup_by_name( + (const char *)locator_name); + if (context) + policy->srv6_locator = context->locator; if (policy->best_candidate) path_zebra_add_sr_policy(policy, policy->best_candidate @@ -871,6 +971,7 @@ static int path_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) srv6_sid_ctx2str(buf, sizeof(buf), &ctx)); UNSET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); + policy->srv6_locator = NULL; break; case ZAPI_SRV6_SID_FAIL_ALLOC: PATH_ZEBRA_DEBUG("SRv6 SID %pI6 %s: Failed to allocate", @@ -896,9 +997,25 @@ static zclient_handler *const path_handlers[] = { [ZEBRA_SR_POLICY_NOTIFY_STATUS] = path_zebra_sr_policy_notify_status, [ZEBRA_ROUTER_ID_UPDATE] = path_zebra_router_id_update, [ZEBRA_OPAQUE_MESSAGE] = path_zebra_opaque_msg_handler, + [ZEBRA_SRV6_LOCATOR_ADD] = path_zebra_process_srv6_locator_add, + [ZEBRA_SRV6_LOCATOR_DELETE] = path_zebra_process_srv6_locator_delete, [ZEBRA_SRV6_SID_NOTIFY] = path_zebra_srv6_sid_notify, }; +struct path_locator_ctx * +path_zebra_locator_ctx_lookup_by_name(const char *locator_name) +{ + struct path_locator_ctx *ctx; + + frr_each_safe (path_locator_list, &path_srv6_locator_list, ctx) { + if (ctx->locator && + strcmp(ctx->locator->name, locator_name) == 0) { + return ctx; + } + } + return NULL; +} + /** * Initializes Zebra asynchronous connection. 
* @@ -927,6 +1044,7 @@ void path_zebra_init(struct event_loop *master) /* Pathd nht init */ path_nht_hash_init(path_nht_hash); + path_locator_list_init(&path_srv6_locator_list); } void path_zebra_stop(void) diff --git a/pathd/path_zebra.h b/pathd/path_zebra.h index 4ce8212f284d..adc0c94c1770 100644 --- a/pathd/path_zebra.h +++ b/pathd/path_zebra.h @@ -26,6 +26,8 @@ void path_zebra_send_bsid(const struct in6_addr *bsid, ifindex_t oif, void path_zebra_srv6_manager_release_sid(struct srv6_sid_ctx *ctx); void path_zebra_srv6_manager_get_sid(struct srv6_sid_ctx *ctx, struct in6_addr *sid_addr); +struct path_locator_ctx * +path_zebra_locator_ctx_lookup_by_name(const char *locator_name); #endif /* _FRR_PATH_MPLS_H_ */ diff --git a/pathd/pathd.h b/pathd/pathd.h index 7c8118ba154a..3c2af86aa017 100644 --- a/pathd/pathd.h +++ b/pathd/pathd.h @@ -353,6 +353,9 @@ struct srte_policy { /* SRv6 Binding SID */ struct in6_addr srv6_binding_sid; + /* SRv6 locator attached to SID */ + struct srv6_locator *srv6_locator; + /* The Protocol-Origin. */ enum srte_protocol_origin protocol_origin; From 7eb1f79a90aeee3e87c929c851b0f771bf409ae6 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Thu, 22 Aug 2024 16:36:32 +0200 Subject: [PATCH 42/45] pathd: add "use-srv6-sid-manager" leaf command Introduce the command use-srv6-sid-manager within srv6 te policy that allows to specify the srv6 bsid outside of the srv6 locator. 
Signed-off-by: Dmytro Shytyi Signed-off-by: Philippe Guibert --- pathd/path_cli.c | 23 +++++++++++++++++++++++ pathd/path_nb.c | 6 ++++++ pathd/path_nb.h | 1 + pathd/path_nb_config.c | 20 ++++++++++++++++++++ yang/frr-pathd.yang | 5 +++++ 5 files changed, 55 insertions(+) diff --git a/pathd/path_cli.c b/pathd/path_cli.c index bf2c76848111..89ce78fe7bd5 100644 --- a/pathd/path_cli.c +++ b/pathd/path_cli.c @@ -17,6 +17,7 @@ #include "termtable.h" #include "pathd/pathd.h" +#include "pathd/path_zebra.h" #include "pathd/path_nb.h" #include "pathd/path_cli_clippy.c" #include "pathd/path_ted.h" @@ -626,6 +627,27 @@ void cli_show_srte_segment_list_segment(struct vty *vty, vty_out(vty, "\n"); } +/* + * XPath: /frr-pathd:pathd/srte/srv6-use-sid-manager + */ +DEFPY( + srte_use_srv6_sid_manager, + srte_use_srv6_sid_manager_cmd, + "[no] use-srv6-sid-manager", + NO_STR + "Use segment routing ipv6 sid manager\n") +{ + char xpath[XPATH_MAXLEN]; + + if (!no == srv6_use_sid_manager) + return CMD_SUCCESS; + + snprintf(xpath, sizeof(xpath), + "/frr-pathd:pathd/srte/use-srv6-sid-manager"); + + nb_cli_enqueue_change(vty, xpath, NB_OP_MODIFY, no ? 
"false" : "true"); + return nb_cli_apply_changes(vty, NULL); +} /* * XPath: /frr-pathd:pathd/policy */ @@ -1381,6 +1403,7 @@ void path_cli_init(void) install_default(SR_SEGMENT_LIST_NODE); install_default(SR_POLICY_NODE); install_default(SR_CANDIDATE_DYN_NODE); + install_element(SR_TRAFFIC_ENG_NODE, &srte_use_srv6_sid_manager_cmd); install_element(ENABLE_NODE, &show_debugging_pathd_cmd); install_element(ENABLE_NODE, &show_srte_policy_cmd); diff --git a/pathd/path_nb.c b/pathd/path_nb.c index bbd3efc375c0..75666c28f8cb 100644 --- a/pathd/path_nb.c +++ b/pathd/path_nb.c @@ -293,6 +293,12 @@ const struct frr_yang_module_info frr_pathd_info = { .apply_finish = pathd_srte_policy_candidate_path_objfun_apply_finish } }, + { + .xpath = "/frr-pathd:pathd/srte/use-srv6-sid-manager", + .cbs = { + .modify = pathd_srv6_use_sid_manager_modify + } + }, { .xpath = "/frr-pathd:pathd/srte/policy/candidate-path/constraints/objective-function/required", .cbs = {.modify = dummy_modify} diff --git a/pathd/path_nb.h b/pathd/path_nb.h index 113354047614..41d2709ffbbc 100644 --- a/pathd/path_nb.h +++ b/pathd/path_nb.h @@ -98,6 +98,7 @@ int pathd_srte_policy_candidate_path_segment_list_name_modify( struct nb_cb_modify_args *args); int pathd_srte_policy_candidate_path_segment_list_name_destroy( struct nb_cb_destroy_args *args); +int pathd_srv6_use_sid_manager_modify(struct nb_cb_modify_args *args); /* Optional 'apply_finish' callbacks. 
*/ void pathd_apply_finish(struct nb_cb_apply_finish_args *args); diff --git a/pathd/path_nb_config.c b/pathd/path_nb_config.c index 000836fc8971..c05354938c58 100644 --- a/pathd/path_nb_config.c +++ b/pathd/path_nb_config.c @@ -831,3 +831,23 @@ int pathd_srte_policy_candidate_path_bandwidth_destroy( srte_candidate_unset_bandwidth(candidate); return NB_OK; } + +/* + * XPath: /frr-pathd:pathd/srte/srv6-use-sid-manager + */ +int pathd_srv6_use_sid_manager_modify(struct nb_cb_modify_args *args) +{ + switch (args->event) { + case NB_EV_VALIDATE: + break; + case NB_EV_PREPARE: + break; + case NB_EV_ABORT: + break; + case NB_EV_APPLY: + srv6_use_sid_manager = yang_dnode_get_bool(args->dnode, NULL); + break; + } + + return NB_OK; +} diff --git a/yang/frr-pathd.yang b/yang/frr-pathd.yang index 2225b0de998c..0885c48390db 100644 --- a/yang/frr-pathd.yang +++ b/yang/frr-pathd.yang @@ -58,6 +58,11 @@ module frr-pathd { container pathd { container srte { + leaf use-srv6-sid-manager { + type boolean; + default false; + description "Set it to true if SID manager should be used."; + } list segment-list { key "name"; description "Segment-list properties"; From ef4e3cdecaaa8b0cd6a49871ccbafe41a91386e0 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Thu, 22 Aug 2024 16:42:56 +0200 Subject: [PATCH 43/45] pathd: use "use-srv6-sid-manager" to sync with sid-manager Add the backend for SRTE 'use-srv6-sid-manager' command to tell the SRTE service to use or not BSIDs allocated from the SRv6 SIDs pool of the zebra locators. - Handle the sync between pathd and zebra. 
- Handle the parameter configuration change impact on SID allocation Signed-off-by: Philippe Guibert Signed-off-by: Dmytro Shytyi --- pathd/path_main.c | 1 + pathd/path_nb_config.c | 1 + pathd/path_zebra.c | 48 +++++++++++++++++++++++++++++------------- pathd/path_zebra.h | 1 + pathd/pathd.c | 30 +++++++++++++++++++++++--- pathd/pathd.h | 5 +++++ 6 files changed, 68 insertions(+), 18 deletions(-) diff --git a/pathd/path_main.c b/pathd/path_main.c index 23cbb9ccedc6..ded306e1a9c3 100644 --- a/pathd/path_main.c +++ b/pathd/path_main.c @@ -137,6 +137,7 @@ int main(int argc, char **argv, char **envp) path_error_init(); path_zebra_init(master); + path_srv6_init(); path_cli_init(); path_ted_init(master); diff --git a/pathd/path_nb_config.c b/pathd/path_nb_config.c index c05354938c58..f861db17018f 100644 --- a/pathd/path_nb_config.c +++ b/pathd/path_nb_config.c @@ -846,6 +846,7 @@ int pathd_srv6_use_sid_manager_modify(struct nb_cb_modify_args *args) break; case NB_EV_APPLY: srv6_use_sid_manager = yang_dnode_get_bool(args->dnode, NULL); + path_zebra_process_srv6_bsid(srv6_use_sid_manager); break; } diff --git a/pathd/path_zebra.c b/pathd/path_zebra.c index 0522ba1ba7bf..d75a900d0f98 100644 --- a/pathd/path_zebra.c +++ b/pathd/path_zebra.c @@ -526,7 +526,8 @@ path_zebra_add_sr_policy_internal(struct srte_policy *policy, zp.segment_list.nexthop_resolved_num = nhtd->nh_num; } if (znh && !sid_zero_ipv6(&policy->srv6_binding_sid) && segment_list && - CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED) && + (CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED) || + !srv6_use_sid_manager) && zp.segment_list.nexthop_resolved_num) { (void)path_zebra_send_bsid(&policy->srv6_binding_sid, znh->ifindex, @@ -834,13 +835,37 @@ struct path_locator_ctx { }; DECLARE_DLIST(path_locator_list, struct path_locator_ctx, pll_entries); +void path_zebra_process_srv6_bsid(bool allocate) +{ + struct srte_policy *policy; + struct srv6_sid_ctx sid_ctx = {}; + + RB_FOREACH (policy, srte_policy_head, 
&srte_policies) { + if (IPV6_ADDR_SAME(&policy->srv6_binding_sid, &in6addr_any)) + continue; + sid_ctx.vrf_id = VRF_DEFAULT; + sid_ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP; + memcpy(&sid_ctx.nh6, &policy->endpoint.ip._v6_addr, + sizeof(struct in6_addr)); + sid_ctx.color = policy->color; + if (allocate && + !CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED) && + srv6_use_sid_manager) + path_zebra_srv6_manager_get_sid(&sid_ctx, + &policy->srv6_binding_sid); + else if (!allocate && + CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED) && + !srv6_use_sid_manager) { + path_zebra_srv6_manager_release_sid(&sid_ctx); + UNSET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); + } + } +} static int path_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) { struct srv6_locator locator = {}; struct path_locator_ctx *ctx; - struct srte_policy *policy; - struct srv6_sid_ctx sid_ctx = {}; if (zapi_srv6_locator_decode(zclient->ibuf, &locator) < 0) return -1; @@ -853,18 +878,7 @@ static int path_zebra_process_srv6_locator_add(ZAPI_CALLBACK_ARGS) path_locator_list_add_tail(&path_srv6_locator_list, ctx); } - RB_FOREACH (policy, srte_policy_head, &srte_policies) { - if (IPV6_ADDR_SAME(&policy->srv6_binding_sid, &in6addr_any)) - continue; - sid_ctx.vrf_id = VRF_DEFAULT; - sid_ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP; - memcpy(&sid_ctx.nh6, &policy->endpoint.ip._v6_addr, - sizeof(struct in6_addr)); - sid_ctx.color = policy->color; - if (!CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED)) - path_zebra_srv6_manager_get_sid(&sid_ctx, - &policy->srv6_binding_sid); - } + path_zebra_process_srv6_bsid(true); return 0; } @@ -956,6 +970,10 @@ static int path_zebra_srv6_sid_notify(ZAPI_CALLBACK_ARGS) &ctx)); SET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); + if (!srv6_use_sid_manager) + /* as SID allocation is asynchronous, release it */ + path_zebra_srv6_manager_release_sid(&ctx); + context = path_zebra_locator_ctx_lookup_by_name( (const char *)locator_name); if (context) diff 
--git a/pathd/path_zebra.h b/pathd/path_zebra.h index adc0c94c1770..82038ea958b1 100644 --- a/pathd/path_zebra.h +++ b/pathd/path_zebra.h @@ -28,6 +28,7 @@ void path_zebra_srv6_manager_get_sid(struct srv6_sid_ctx *ctx, struct in6_addr *sid_addr); struct path_locator_ctx * path_zebra_locator_ctx_lookup_by_name(const char *locator_name); +void path_zebra_process_srv6_bsid(bool allocate); #endif /* _FRR_PATH_MPLS_H_ */ diff --git a/pathd/pathd.c b/pathd/pathd.c index e98ee9c042a3..2c0e3cecbb21 100644 --- a/pathd/pathd.c +++ b/pathd/pathd.c @@ -36,6 +36,7 @@ DEFINE_HOOK(pathd_candidate_removed, (struct srte_candidate * candidate), struct debug path_policy_debug; struct debug path_zebra_debug; +bool srv6_use_sid_manager; #define PATH_POLICY_DEBUG(fmt, ...) \ DEBUGD(&path_policy_debug, "policy: " fmt, ##__VA_ARGS__) @@ -517,38 +518,56 @@ void srte_policy_update_srv6_binding_sid(struct srte_policy *policy, { struct in6_addr srv6_binding_sid_zero = {}; struct srv6_sid_ctx ctx = {}; + char endpoint[ENDPOINT_STR_LENGTH]; ctx.vrf_id = VRF_DEFAULT; ctx.behavior = ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP; memcpy(&ctx.nh6, &policy->endpoint.ip._v6_addr, sizeof(struct in6_addr)); ctx.color = policy->color; + ipaddr2str(&policy->endpoint, endpoint, sizeof(endpoint)); + + PATH_POLICY_DEBUG("SR-TE(%s, %u): srte policy, update srv6 bsid", + endpoint, policy->color); + if (!srv6_binding_sid || (srv6_binding_sid && !IPV6_ADDR_SAME(&policy->srv6_binding_sid, srv6_binding_sid))) { if (CHECK_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED)) { + PATH_POLICY_DEBUG("SR-TE(%s, %u): srte policy, remove srv6 bsid", + endpoint, policy->color); (void)path_zebra_send_bsid(&policy->srv6_binding_sid, 0, ZEBRA_SEG6_LOCAL_ACTION_END_B6_ENCAP, NULL, 0); UNSET_FLAG(policy->flags, F_POLICY_BSID_IPV6_INSTALLED); } - if (CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED)) + if (CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED)) { path_zebra_srv6_manager_release_sid(&ctx); + PATH_POLICY_DEBUG("SR-TE(%s, %u): 
srte policy, deallocate srv6 bsid", + endpoint, policy->color); + } UNSET_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED); } if (srv6_binding_sid) { IPV6_ADDR_COPY(&policy->srv6_binding_sid, srv6_binding_sid); - if (!CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED)) + if (!CHECK_FLAG(policy->flags, F_POLICY_BSID_ALLOCATED) && + srv6_use_sid_manager) { path_zebra_srv6_manager_get_sid(&ctx, srv6_binding_sid); + PATH_POLICY_DEBUG("SR-TE(%s, %u): srte policy, manager get srv6 bsid", + endpoint, policy->color); + } } else IPV6_ADDR_COPY(&policy->srv6_binding_sid, &srv6_binding_sid_zero); /* Reinstall the Binding-SID if necessary. */ - if (policy->best_candidate) + if (policy->best_candidate) { path_zebra_add_sr_policy(policy, policy->best_candidate->lsp ->segment_list); + PATH_POLICY_DEBUG("SR-TE(%s, %u): srte policy, reinstall srv6 bsid", + endpoint, policy->color); + } } /** @@ -1537,3 +1556,8 @@ int32_t srte_ted_do_query_type_f(struct srte_segment_entry *entry, ted_sid); return status; } + +void path_srv6_init(void) +{ + srv6_use_sid_manager = false; +} diff --git a/pathd/pathd.h b/pathd/pathd.h index 3c2af86aa017..d4c6a4ca6bc8 100644 --- a/pathd/pathd.h +++ b/pathd/pathd.h @@ -397,6 +397,8 @@ extern struct zebra_privs_t pathd_privs; /* master thread, defined in path_main.c */ extern struct event_loop *master; +extern bool srv6_use_sid_manager; + /* pathd.c */ struct srte_segment_list *srte_segment_list_add(const char *name); void srte_segment_list_del(struct srte_segment_list *segment_list); @@ -469,6 +471,9 @@ void path_policy_show_debugging(struct vty *vty); /* path_cli.c */ void path_cli_init(void); +/* srv6 */ +void path_srv6_init(void); + /** * Search for sid based in prefix and algorithm From f53618d1a70a3c37eafffb6ed76fff9d56c5bd96 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Thu, 18 Jul 2024 12:11:47 +0200 Subject: [PATCH 44/45] tests: topotests for bsid allocation modes Check the presence of srv6 bsid route (based/not based on srv6 locator) when
allocated with or without srv6-sid-manager. Signed-off-by: Dmytro Shytyi --- .../rt2/step4/show_bsid_route.ref | 23 ++ .../rt2/step5/show_bsid_route.ref | 23 ++ .../rt2/step6/show_ipv6_route.ref | 25 -- .../test_isis_srv6_te_topo1.py | 233 ++++-------------- 4 files changed, 88 insertions(+), 216 deletions(-) create mode 100644 tests/topotests/isis_srv6_te_topo1/rt2/step4/show_bsid_route.ref create mode 100644 tests/topotests/isis_srv6_te_topo1/rt2/step5/show_bsid_route.ref delete mode 100755 tests/topotests/isis_srv6_te_topo1/rt2/step6/show_ipv6_route.ref diff --git a/tests/topotests/isis_srv6_te_topo1/rt2/step4/show_bsid_route.ref b/tests/topotests/isis_srv6_te_topo1/rt2/step4/show_bsid_route.ref new file mode 100644 index 000000000000..2aaebbdf5f1e --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt2/step4/show_bsid_route.ref @@ -0,0 +1,23 @@ +{ + "fc00:0:2::128/128":[ + { + "prefix":"fc00:0:2::128/128", + "prefixLen":128, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.B6.Encap" + }, + "seg6":[ + "fc00:0:4::", + "fc00:0:5::" + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/topotests/isis_srv6_te_topo1/rt2/step5/show_bsid_route.ref b/tests/topotests/isis_srv6_te_topo1/rt2/step5/show_bsid_route.ref new file mode 100644 index 000000000000..76767dc07b36 --- /dev/null +++ b/tests/topotests/isis_srv6_te_topo1/rt2/step5/show_bsid_route.ref @@ -0,0 +1,23 @@ +{ + "fc10:0:2::128/128":[ + { + "prefix":"fc10:0:2::128/128", + "prefixLen":128, + "installed":true, + "nexthops":[ + { + "fib":true, + "directlyConnected":true, + "active":true, + "seg6local":{ + "action":"End.B6.Encap" + }, + "seg6":[ + "fc00:0:4::", + "fc00:0:5::" + ] + } + ] + } + ] +} \ No newline at end of file diff --git a/tests/topotests/isis_srv6_te_topo1/rt2/step6/show_ipv6_route.ref b/tests/topotests/isis_srv6_te_topo1/rt2/step6/show_ipv6_route.ref deleted file mode 100755 index 
2e8ff6d8f94d..000000000000 --- a/tests/topotests/isis_srv6_te_topo1/rt2/step6/show_ipv6_route.ref +++ /dev/null @@ -1,25 +0,0 @@ -{ - "fc00:0:2::/128":[ - { - "prefix":"fc00:0:2::/128", - "prefixLen":128, - "protocol":"isis", - "vrfId":0, - "vrfName":"default", - "selected":true, - "installed":true, - "table":254, - "nexthops":[ - { - "flags":3, - "fib":true, - "directlyConnected":true, - "weight":1, - "seg6local":{ - "action":"End" - } - } - ] - } - ] -} diff --git a/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py b/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py index 3ed574b5ed7c..76bf0d014ce9 100755 --- a/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py +++ b/tests/topotests/isis_srv6_te_topo1/test_isis_srv6_te_topo1.py @@ -425,50 +425,10 @@ def test_srv6_te_policy_additional_route(): ) -# -# Step 4 -# -# Test SRv6 TE Policy removed -# - - -def test_srv6_te_policy_removed(): - logger.info("Test (step 4): verify SRv6 TE policy removed") - tgen = get_topogen() - - # Skip if previous fatal error condition is raised - if tgen.routers_have_failure(): - pytest.skip(tgen.errors) - - tgen.gears["rt1"].vtysh_cmd( - "configure \n \ - no ipv6 route fc00:0:6b::/48 fc00:0:6:: color 1 \n \ - segment-routing \n \ - traffic-eng \n \ - no policy color 1 endpoint fc00:0:6:: \n \ - exit \ - exit \ - exit \ - !" 
+def test_srv6_end_b6_encaps_within_locator_srv6_sid_manager(): + logger.info( + "Test (step 4): verify SRv6 Locator-based END.B6.Encaps with srv6-sid-manager" ) - - # for rname in ["rt1"]: - # router_compare_json_output( - # rname, - # "show ipv6 route static json", - # "step4/show_ipv6_route.ref", - # ) - - -# -# Step 5 -# -# Test SRv6 End.B6.Encaps -# - - -def test_srv6_end_b6_encaps(): - logger.info("Test (step 5): verify SRv6 END.B6.Encaps") tgen = get_topogen() # Skip if previous fatal error condition is raised @@ -478,160 +438,47 @@ def test_srv6_end_b6_encaps(): tgen.gears["rt1"].vtysh_cmd( "configure \n \ ipv6 route fc00:0:6b::/48 fc00:0:6:: color 1 \n \ - segment-routing \n \ - traffic-eng \n \ - segment-list srv6 \n \ - index 1 ipv6-address fc00:0:2:: \n \ - index 2 ipv6-address fc00:0:6:: \n \ - exit \n \ - policy color 1 endpoint fc00:0:6:: \n \ - candidate-path preference 1 name srv6 explicit segment-list srv6 \n \ - exit \n \ - exit \n \ - !" + segment-routing \n \ + traffic-eng \n \ + segment-list srv6 \n \ + index 1 ipv6-address fc00:0:2:: \n \ + index 2 ipv6-address fc00:0:6:: \n \ + exit \n \ + policy color 1 endpoint fc00:0:6:: \n \ + candidate-path preference 1 name srv6 explicit segment-list srv6 \n \ + exit \n \ + exit \n \ + !" ) tgen.gears["rt2"].vtysh_cmd( "configure \n \ - segment-routing \n \ - traffic-eng \n \ - segment-list srv6-header \n \ - index 1 ipv6-address fc00:0:4:: \n \ - index 2 ipv6-address fc00:0:5:: \n \ - exit \n \ - policy color 1 endpoint fc00:0:6:: \n \ - srv6-binding-sid fc00:0:2:: \n \ - candidate-path preference 1 name srv6 explicit segment-list srv6-header \n \ - exit \n \ - exit \n \ - !" 
- ) - - tgen.gears["rt5"].vtysh_cmd( - "configure \n \ - interface sr0 \n \ - ipv6 address fc00:0:5::/48 \n \ - exit" + segment-routing \n \ + traffic-eng \n \ + segment-list srv6-header \n \ + index 1 ipv6-address fc00:0:4:: \n \ + index 2 ipv6-address fc00:0:5:: \n \ + exit \n \ + policy color 1 endpoint fc00:0:6:: \n \ + srv6-binding-sid fc00:0:2::128 \n \ + candidate-path preference 1 name srv6 explicit segment-list srv6-header \n \ + exit \n \ + exit \n \ + !" ) - tgen.gears["rt5"].run("sysctl -w net.ipv6.conf.all.seg6_enabled=1") - tgen.gears["rt5"].run("sysctl -w net.ipv6.conf.default.seg6_enabled=1") - tgen.gears["rt5"].run("sysctl -w net.ipv6.conf.eth-rt4.seg6_enabled=1") - - subprocess.check_call(["apt", "install", "-y", "tcpdump"]) - if not os.path.isdir("/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/scapy-latest"): - subprocess.check_call( - ["git", "clone", "https://github.com/secdev/scapy.git", "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/scapy-latest"] - ) - if not os.path.isdir("/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages"): - subprocess.check_call( - ["pip", "download", "--dest", "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages/", "setuptools==62.0.0"] - ) - subprocess.check_call( - ["pip", "download", "--dest", "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages/", "wheel"] + for rname in ["rt2"]: + router_compare_json_output( + rname, + "do show ipv6 route fc00:0:2::128 json", + "step4/show_bsid_route.ref", ) - tgen.gears["rt2"].run( - "python3 -m venv /tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/; source /tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/venv/bin/activate; pip3 install --no-index --force-reinstall --find-links=/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages/ setuptools==62.0.0; pip3 install --no-index 
--find-links=/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/python_packages /tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/scapy-latest/" - ) - - def ping_func(): - tgen = get_topogen() - tgen.gears["src"].run("ping 10.0.10.2 -c 128 -i 0.1") - - p1 = Process(target=ping_func) - p1.start() - # tgen.gears["rt2"].run("tcpdump -i any -q -w ./rt2-dump.pcap & ping 10.0.10.2 -c 5; killall tcpdump") - tgen.gears["rt2"].run("touch ./rt2-dump.pcap && chmod oug+rwx ./rt2-dump.pcap") - tgen.gears["rt2"].run("tcpdump -i any -w ./rt2-dump.pcap -c 64") - # tgen.gears["rt2"].run("tshark -ni any -w /tmp/rt2-dump.pcap -c 64") - p1.join() - file_content = """\ -from scapy.all import rdpcap, IPv6, IPv6ExtHdrSegmentRouting -scapy_cap = rdpcap( - "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/rt2-dump.pcap" -) -# scapy_cap = rdpcap('/tmp/rt2-dump.pcap') -output_pkt_flag = False -outer_srh = -1 -outer_srhl = -1 -inner_srh = -1 -inner_srhl = -1 -inner_most_srh = -1 -inner_most_srhl = -1 -for packet in scapy_cap: - if "echo-request" in str(packet): - print(packet) - if packet.haslayer(IPv6) and packet.haslayer(IPv6ExtHdrSegmentRouting): - outer_srh = packet[IPv6][IPv6ExtHdrSegmentRouting].addresses - outer_srhl = packet[IPv6][IPv6ExtHdrSegmentRouting].segleft - encap = packet[IPv6][IPv6ExtHdrSegmentRouting] - if encap.haslayer(IPv6) and encap.haslayer(IPv6ExtHdrSegmentRouting): - inner_srh = encap[IPv6][IPv6ExtHdrSegmentRouting].addresses - inner_srhl = encap[IPv6][IPv6ExtHdrSegmentRouting].segleft - bgp_encap = encap[IPv6][IPv6ExtHdrSegmentRouting] - if bgp_encap.haslayer(IPv6) and bgp_encap.haslayer( - IPv6ExtHdrSegmentRouting - ): - inner_most_srh = bgp_encap[IPv6][ - IPv6ExtHdrSegmentRouting - ].addresses - inner_most_srhl = bgp_encap[IPv6][ - IPv6ExtHdrSegmentRouting - ].segleft - - if ( - ("fc00:0:6::" in inner_srh) - and ("fc00:0:2::" in inner_srh) - and (inner_srhl == 0) - and ("fc00:0:5::" in outer_srh) - and ("fc00:0:4::" in 
outer_srh) - and (outer_srhl == 1) - and ("fc00:0:6b:1::" in inner_most_srh) - and (inner_most_srhl == 0) - ): - output_pkt_flag = True - else: - print( - "Observed: outer srh: {}, segleft={}, inner srh: {}, segleft={}, inner_most_srh: {}, segleft={}".format( - outer_srh, - outer_srhl, - inner_srh, - inner_srhl, - inner_most_srh, - inner_most_srhl, - ) - ) - -if output_pkt_flag == False: - assertmsg = ('Unexpected SRH in the captured packet in rt2' - 'Expected outer srh: [fc00:0:5::, fc00:0:4::], segleft=1, inner srh: [fc00:0:6::, fc00:0:2::], segleft=0, inner_most_srh: [fc00:0:6b:1::], segleft=0') - assert False, assertmsg - """ - - file_path = ( - "/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/scapy_test_srv6.py" - ) - with open(file_path, "w") as file: - file.write(file_content) - - activate_cmd = f"source {os.path.join('/tmp/topotests/isis_srv6_te_topo1.test_isis_srv6_te_topo1/rt2/venv', 'bin', 'activate')}" - subprocess.run( - [activate_cmd, ";", "python3", file_path], shell=True, executable="/bin/bash" +def test_srv6_end_b6_encaps_outside_locator_no_srv6_sid_manager(): + logger.info( + "Test (step 5): verify SRv6 out-of-locator END.B6.Encaps without srv6-sid-manager" ) - - - -# -# Step 6 -# -# Test SRv6 End.B6.Encaps removal -# - - -def test_srv6_end_b6_encaps_removal(): - logger.info("Test (step 6): verify SRv6 END.B6.Encaps removal") tgen = get_topogen() # Skip if previous fatal error condition is raised @@ -642,8 +489,12 @@ def test_srv6_end_b6_encaps_removal(): "configure \n \ segment-routing \n \ traffic-eng \n \ - policy color 1 endpoint fc00:0:6:: \n \ - no srv6-binding-sid fc00:0:2:: \n \ + policy color 1 endpoint fc00:0:6:: \n \ + no srv6-binding-sid fc00:0:2::128 \n \ + exit \n \ + no use-srv6-sid-manager \n \ + policy color 1 endpoint fc00:0:6:: \n \ + srv6-binding-sid fc10:0:2::128 \n \ exit \n \ exit \n \ !" 
@@ -652,8 +503,8 @@ def test_srv6_end_b6_encaps_removal(): for rname in ["rt2"]: router_compare_json_output( rname, - "show ipv6 route fc00:0:2:: json", - "step6/show_ipv6_route.ref", + "do show ipv6 route fc10:0:2::128 json", + "step5/show_bsid_route.ref", ) From f79c287e46582c42475c059ff4cb3d1cf13f87b0 Mon Sep 17 00:00:00 2001 From: Dmytro Shytyi Date: Fri, 23 Aug 2024 13:29:08 +0200 Subject: [PATCH 45/45] doc: add use-srv6-sid-manager command SRTE 'use-srv6-sid-manager' command tells the SRTE service whether or not to use BSIDs allocated from the SRv6 SIDs pool of the zebra locators. Signed-off-by: Dmytro Shytyi --- doc/user/pathd.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/doc/user/pathd.rst b/doc/user/pathd.rst index 9248407a9783..e67031444c74 100644 --- a/doc/user/pathd.rst +++ b/doc/user/pathd.rst @@ -311,6 +311,14 @@ Configuration Commands Specify the policy SRv6 SID + +.. clicmd:: use-srv6-sid-manager + + SRTE 'use-srv6-sid-manager' command tells the SRTE service + whether or not to use BSIDs allocated from the SRv6 SIDs pool of the zebra + locators. + + .. clicmd:: candidate-path preference PREFERENCE name NAME explicit segment-list SEGMENT-LIST-NAME Delete or define an explicit candidate path.