From b75ed232aab91034309139936dc2cdab189cd36c Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Wed, 28 Apr 2021 12:51:43 -0300 Subject: [PATCH 1/7] lib,zebra: API for route lookups Implement a new zebra API to request route lookup results. This is similar to what `ZEBRA_IPV4_NEXTHOP_LOOKUP_MRIB` does, but it gets route information and all next hop information (uses dynamically allocated memory instead of expecting static array). Signed-off-by: Rafael Zalamena --- lib/log.c | 3 +- lib/zclient.c | 161 +++++++++++++++++++++++++++++++++++++++++++++++ lib/zclient.h | 70 +++++++++++++++++++++ zebra/zapi_msg.c | 114 +++++++++++++++++++++++++++++++++ 4 files changed, 347 insertions(+), 1 deletion(-) diff --git a/lib/log.c b/lib/log.c index bc1ed5c5ccae..94053486d392 100644 --- a/lib/log.c +++ b/lib/log.c @@ -464,7 +464,8 @@ static const struct zebra_desc_table command_types[] = { DESC_ENTRY(ZEBRA_TC_FILTER_ADD), DESC_ENTRY(ZEBRA_TC_FILTER_DELETE), DESC_ENTRY(ZEBRA_OPAQUE_NOTIFY), - DESC_ENTRY(ZEBRA_SRV6_SID_NOTIFY) + DESC_ENTRY(ZEBRA_SRV6_SID_NOTIFY), + DESC_ENTRY(ZEBRA_ROUTE_LOOKUP), }; #undef DESC_ENTRY diff --git a/lib/zclient.c b/lib/zclient.c index 063944fd3b23..7ee0c8aa2c48 100644 --- a/lib/zclient.c +++ b/lib/zclient.c @@ -5131,3 +5131,164 @@ void zclient_register_neigh(struct zclient *zclient, vrf_id_t vrf_id, afi_t afi, stream_putw_at(s, 0, stream_get_endp(s)); zclient_send_message(zclient); } + +static struct zroute_nh_info *zapi_route_nh_decode(struct stream *s) +{ + struct zroute_nh_info *rni = XCALLOC(MTYPE_TMP, sizeof(*rni)); + + STREAM_GETL(s, rni->rni_vrf_id); + STREAM_GETC(s, rni->rni_type); + + switch (rni->rni_type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + STREAM_GET(&rni->rni_addr, s, sizeof(struct in_addr)); + STREAM_GETL(s, rni->rni_ifindex); + break; + case NEXTHOP_TYPE_IPV6: + STREAM_GET(&rni->rni_addr, s, sizeof(struct in6_addr)); + break; + case NEXTHOP_TYPE_IPV6_IFINDEX: + STREAM_GET(&rni->rni_addr, s, sizeof(struct 
in6_addr)); + STREAM_GETL(s, rni->rni_ifindex); + break; + case NEXTHOP_TYPE_IFINDEX: + STREAM_GETL(s, rni->rni_ifindex); + break; + } + + return rni; + +stream_failure: + zlog_warn("%s: failed to parse next hop", __func__); + XFREE(MTYPE_TMP, rni); + return NULL; +} + +struct zroute_info *zapi_route_lookup(struct zclient *zc, vrf_id_t vrf_id, int family, + const void *addr) +{ + struct zroute_info *ri; + struct zroute_nh_info *rni; + struct stream *s; + int err; + size_t addr_size; + uint32_t idx; + uint16_t length; + uint16_t command; + uint8_t marker; + uint8_t version; + struct ipaddr ip; + union { + struct in_addr ia; + struct in6_addr i6a; + } const *address = addr; + + /* Initial checks and logging. */ + switch (family) { + case AF_INET: + /* `0.0.0.0` is unroutable. */ + if (address->ia.s_addr == INADDR_NONE) + return NULL; + + addr_size = sizeof(struct in_addr); + break; + + default: + zlog_warn("%s: unsupported address family %d", __func__, family); + return NULL; + } + + /* Get output stream and reset it. */ + s = zc->obuf; + stream_reset(s); + + /* Create and send lookup request. */ + zclient_create_header(s, ZEBRA_ROUTE_LOOKUP, vrf_id); + stream_putl(s, family); + stream_put(s, addr, addr_size); + stream_putw_at(s, 0, stream_get_endp(s)); + err = writen(zc->sock, s->data, stream_get_endp(s)); + if (err < 0) { + flog_err(EC_LIB_SOCKET, "%s: writen() failure: %d writing to zclient lookup socket", + __func__, errno); + return NULL; + } + if (err == 0) { + flog_err_sys(EC_LIB_SOCKET, "%s: connection closed on zclient lookup socket", + __func__); + return NULL; + } + + /* Read and handle unwanted messages. */ + s = zc->ibuf; + do { + stream_reset(s); + err = zclient_read_header(s, zc->sock, &length, &marker, &version, &vrf_id, + &command); + if (err != 0) { + flog_err(EC_LIB_ZAPI_MISSMATCH, "%s: zclient_read_header() failed", + __func__); + return NULL; + } + } while (command != ZEBRA_ROUTE_LOOKUP); + + /* Read the response. 
*/ + STREAM_GETC(s, err); + + /* No route found case. */ + if (err == 0) + return NULL; + + ri = XCALLOC(MTYPE_TMP, sizeof(*ri)); + stream_get_ipaddr(s, &ip); + ri->ri_p.family = ip.ipa_type; + switch (ri->ri_p.family) { + case AF_INET: + ri->ri_p.u.prefix4 = ip.ip._v4_addr; + break; + case AF_INET6: + ri->ri_p.u.prefix6 = ip.ip._v6_addr; + break; + default: + XFREE(MTYPE_TMP, ri); + zlog_warn("%s: invalid IP type %d", __func__, ri->ri_p.family); + return NULL; + } + STREAM_GETW(s, ri->ri_p.prefixlen); + STREAM_GETL(s, ri->ri_distance); + STREAM_GETL(s, ri->ri_metric); + STREAM_GETL(s, ri->ri_type); + STREAM_GETL(s, ri->ri_nexthop_num); + + for (idx = 0; idx < ri->ri_nexthop_num; idx++) { + rni = zapi_route_nh_decode(s); + SLIST_INSERT_HEAD(&ri->ri_nhlist, rni, rni_entry); + } + + STREAM_GETL(s, ri->ri_opaque_size); + STREAM_GET(ri->ri_opaque, s, ri->ri_opaque_size); + + return ri; + +stream_failure: + zlog_warn("%s: invalid message format", __func__); + return NULL; +} + +void zroute_info_free(struct zroute_info **ri) +{ + struct zroute_nh_info *rni; + + /* Handle `NULL` pointers */ + if (*ri == NULL) + return; + + /* Free all allocated next hop information. */ + while ((rni = SLIST_FIRST(&(*ri)->ri_nhlist)) != NULL) { + SLIST_REMOVE(&(*ri)->ri_nhlist, rni, zroute_nh_info, rni_entry); + XFREE(MTYPE_TMP, rni); + } + + XFREE(MTYPE_TMP, (*ri)); +} diff --git a/lib/zclient.h b/lib/zclient.h index 2385a8a2197b..e650662d9a47 100644 --- a/lib/zclient.h +++ b/lib/zclient.h @@ -30,6 +30,8 @@ struct zclient; #include "srte.h" #include "srv6.h" +#include "openbsd-queue.h" + #ifdef __cplusplus extern "C" { #endif @@ -238,6 +240,7 @@ typedef enum { ZEBRA_TC_FILTER_DELETE, ZEBRA_OPAQUE_NOTIFY, ZEBRA_SRV6_SID_NOTIFY, + ZEBRA_ROUTE_LOOKUP, } zebra_message_types_t; /* Zebra message types. Please update the corresponding * command_types array with any changes! 
@@ -1375,6 +1378,73 @@ extern int zapi_client_close_notify_decode(struct stream *s, extern int zclient_send_zebra_gre_request(struct zclient *client, struct interface *ifp); + +/* + * Route lookup zebra API. + */ + +/** + * Zebra route next hop information. + */ +struct zroute_nh_info { + /** Next hop pointer to next. */ + SLIST_ENTRY(zroute_nh_info) rni_entry; + + /** Next hop VRF. */ + vrf_id_t rni_vrf_id; + /** Next hop type (see `enum nexthop_types_t`). */ + uint8_t rni_type; + /** Next hop address. */ + union { + struct in_addr v4; + struct in6_addr v6; + } rni_addr; + /** Next hop interface index. */ + ifindex_t rni_ifindex; +}; + +/** + * Zebra route information. + */ +struct zroute_info { + /** Route prefix. */ + struct prefix ri_p; + /** Route distance */ + int ri_distance; + /** Route metric */ + int ri_metric; + /** Route type. */ + int ri_type; + /** Amount of next hops. */ + uint32_t ri_nexthop_num; + + /** List of next hops. */ + SLIST_HEAD(, zroute_nh_info) ri_nhlist; + + /** Opaque data size. */ + uint32_t ri_opaque_size; + /** Protocol specific opaque information. */ + uint8_t ri_opaque[ZAPI_MESSAGE_OPAQUE_LENGTH]; +}; + +/** + * Request zebra to lookup a route for us matching those specifications. + * + * \param zc Zebra client context pointer. + * \param vrf_id the VRF identification. + * \param family Address type (e.g. `AF_INET`). + * \param addr Address (usually `struct in_addr *`). + * \returns a pointer to the result allocated in the heap. Memory should + * be returned using `zroute_info_free()`. + */ +struct zroute_info *zapi_route_lookup(struct zclient *zc, vrf_id_t vrf_id, int family, + const void *addr); + +/** + * Unallocates all memory allocated by `zapi_route_lookup`. 
+ */ +void zroute_info_free(struct zroute_info **ri); + #ifdef __cplusplus } #endif diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index ab55998af046..6464d2bbcac3 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -4027,6 +4027,119 @@ static void zserv_error_invalid_msg_type(ZAPI_HANDLER_ARGS) zsend_error_msg(client, ZEBRA_INVALID_MSG_TYPE, hdr); } +static void zsend_route_result(struct zserv *zc, struct zebra_vrf *zvrf, struct prefix *p, + struct route_entry *re) +{ + struct nexthop *nexthop; + struct stream *s; + uint32_t nexthop_num = 0; + struct ipaddr ip = {}; + + s = stream_new(ZEBRA_MAX_PACKET_SIZ); + zclient_create_header(s, ZEBRA_ROUTE_LOOKUP, zvrf_id(zvrf)); + + /** + * Format: + * - 1 byte: search success status + * - X bytes: route prefix + * - 4 bytes: distance + * - 4 bytes: metric + * - 4 bytes: type + * - 4 bytes: next hop number. + * - X bytes: next hop(s) information. + * - 4 bytes: opaque data size + * - X bytes: opaque data + * + * When first byte (search result status) is zero no more data + * will be appended. + */ + + /* No routes found case. */ + if (re == NULL) { + stream_putc(s, 0); + goto finish_and_send; + } + + /* Convert prefix to ipaddr. */ + ip.ipa_type = p->family; + switch (ip.ipa_type) { + case IPADDR_V4: + ip.ip._v4_addr = p->u.prefix4; + break; + case IPADDR_V6: + ip.ip._v6_addr = p->u.prefix6; + break; + + case IPADDR_NONE: + default: + zlog_warn("%s: unsupported prefix type %d", __func__, ip.ipa_type); + break; + } + + stream_putc(s, 1); + stream_put_ipaddr(s, &ip); + stream_putw(s, p->prefixlen); + stream_putl(s, re->distance); + stream_putl(s, re->metric); + stream_putl(s, re->type); + + /* Calculate total amount of next hops. */ + for (nexthop = re->nhe->nhg.nexthop; nexthop; nexthop = nexthop->next) + nexthop_num++; + + stream_putl(s, nexthop_num); + + /* Encode the next hops. */ + for (nexthop = re->nhe->nhg.nexthop; nexthop; nexthop = nexthop->next) + zserv_encode_nexthop(s, nexthop); + + /* Encode opaque data. 
*/ + if (re->opaque) { + stream_putl(s, re->opaque->length); + if (re->opaque->length > 0) + stream_put(s, re->opaque->data, re->opaque->length); + } else + stream_putl(s, 0); + +finish_and_send: + stream_putw_at(s, 0, stream_get_endp(s)); + zserv_send_message(zc, s); +} + +static void zread_route_lookup(ZAPI_HANDLER_ARGS) +{ + struct route_entry *re = NULL; + struct route_node *rn = NULL; + union g_addr addr; + uint32_t af; + + /* + * Request format: + * - 4 bytes: address family (AF_INET, AF_INET6) + * - X bytes: address. + */ + STREAM_GETL(msg, af); + switch (af) { + case AF_INET: + STREAM_GET(&addr.ipv4, msg, sizeof(addr.ipv4)); + break; + case AF_INET6: + STREAM_GET(&addr.ipv6, msg, sizeof(addr.ipv6)); + break; + default: + zlog_warn("%s: unsupported address family %d", __func__, af); + return; + } + + /* Look up route in RIB. */ + re = rib_match(af == AF_INET ? AFI_IP : AFI_IP6, SAFI_UNICAST, zvrf_id(zvrf), &addr, &rn); + /* Send result whether it found or not. */ + zsend_route_result(client, zvrf, &rn->p, re); + +stream_failure: + zlog_warn("%s: invalid stream format", __func__); +} + void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = { [ZEBRA_ROUTER_ID_ADD] = zread_router_id_add, [ZEBRA_ROUTER_ID_DELETE] = zread_router_id_delete, @@ -4123,6 +4236,7 @@ void (*const zserv_handlers[])(ZAPI_HANDLER_ARGS) = { [ZEBRA_TC_CLASS_DELETE] = zread_tc_class, [ZEBRA_TC_FILTER_ADD] = zread_tc_filter, [ZEBRA_TC_FILTER_DELETE] = zread_tc_filter, + [ZEBRA_ROUTE_LOOKUP] = zread_route_lookup, }; /* From 0287e3de5351d19b6eb13ba35bec5411926639f2 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Wed, 28 Apr 2021 14:43:16 -0300 Subject: [PATCH 2/7] yang: MSDP eBGP loop detection configuration Implement the 'RFC 4611 Section 2.1. Peering between PIM Border Routers' knob to configure the expected eBGP remote AS number for detecting loops. 
Signed-off-by: Rafael Zalamena --- yang/frr-pim.yang | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/yang/frr-pim.yang b/yang/frr-pim.yang index 8dadf4fd7cff..ce1a5055e8bb 100644 --- a/yang/frr-pim.yang +++ b/yang/frr-pim.yang @@ -403,6 +403,12 @@ module frr-pim { description "Peer SA maximum limit."; } + + leaf as { + type inet:as-number; + description + "BGP Autonomous System number."; + } } container mlag { From b317f2096f2129e1431569ef64068756c15996d4 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Wed, 28 Apr 2021 12:54:07 -0300 Subject: [PATCH 3/7] pimd: MSDP SA loop check using AS Path Implement the 'RFC 4611 Section 2.1. Peering between PIM Border Routers' recommendation of using the peer AS number to check for loops in the topology when using direct inter-domains connections. Signed-off-by: Rafael Zalamena --- pimd/pim_cmd.c | 31 ++++++++-- pimd/pim_msdp.c | 18 +++++- pimd/pim_msdp.h | 4 ++ pimd/pim_nb.c | 7 +++ pimd/pim_nb.h | 2 + pimd/pim_nb_config.c | 43 ++++++++++++++ pimd/pim_rpf.c | 131 +++++++++++++++++++++++++++++++++++++++++++ pimd/pim_rpf.h | 1 + pimd/pim_zlookup.c | 21 +++++++ pimd/pim_zlookup.h | 11 ++++ 10 files changed, 261 insertions(+), 8 deletions(-) diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index a34fb344feb5..f30fa6d379bd 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -6778,12 +6778,14 @@ ALIAS(no_ip_pim_bfd, no_ip_pim_bfd_param_cmd, #endif /* !HAVE_BFDD */ DEFPY(pim_msdp_peer, pim_msdp_peer_cmd, - "msdp peer A.B.C.D$peer source A.B.C.D$source", + "msdp peer A.B.C.D$peer source A.B.C.D$source [as (1-4294967295)$asn]", CFG_MSDP_STR "Configure MSDP peer\n" "Peer IP address\n" "Source address for TCP connection\n" - "Local IP address\n") + "Local IP address\n" + "BGP Autonomous System peer information\n" + "BGP Autonomous System peer number\n") { char msdp_peer_source_xpath[XPATH_MAXLEN]; @@ -6791,6 +6793,11 @@ DEFPY(pim_msdp_peer, pim_msdp_peer_cmd, "./msdp-peer[peer-ip='%s']/source-ip", peer_str); 
nb_cli_enqueue_change(vty, msdp_peer_source_xpath, NB_OP_MODIFY, source_str); + if (asn_str) { + snprintf(msdp_peer_source_xpath, sizeof(msdp_peer_source_xpath), + "./msdp-peer[peer-ip='%s']/as", peer_str); + nb_cli_enqueue_change(vty, msdp_peer_source_xpath, NB_OP_MODIFY, asn_str); + } return nb_cli_apply_changes(vty, NULL); } @@ -7795,7 +7802,7 @@ static void ip_msdp_show_peers(struct pim_instance *pim, struct vty *vty, json = json_object_new_object(); } else { vty_out(vty, - "Peer Local State Uptime SaCnt\n"); + "Peer Local State Uptime SaCnt AS\n"); } for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, mpnode, mp)) { @@ -7817,10 +7824,17 @@ static void ip_msdp_show_peers(struct pim_instance *pim, struct vty *vty, json_object_string_add(json_row, "state", state_str); json_object_string_add(json_row, "upTime", timebuf); json_object_int_add(json_row, "saCount", mp->sa_cnt); + if (mp->asn) + json_object_int_add(json_row, "asn", mp->asn); + json_object_object_add(json, peer_str, json_row); } else { - vty_out(vty, "%-15s %15s %11s %8s %6d\n", peer_str, - local_str, state_str, timebuf, mp->sa_cnt); + vty_out(vty, "%-15s %15s %11s %8s %6d", peer_str, local_str, state_str, + timebuf, mp->sa_cnt); + if (mp->asn) + vty_out(vty, " %5d\n", mp->asn); + else + vty_out(vty, " %5s\n", "-"); } } @@ -8093,6 +8107,7 @@ static void ip_msdp_show_sa_entry_detail(struct pim_msdp_sa *sa, char spt_str[8]; char local_str[8]; char statetimer[PIM_MSDP_TIMER_STRLEN]; + uint32_t asn = pim_msdp_sa_asn(sa); int64_t now; json_object *json_group = NULL; json_object *json_row = NULL; @@ -8135,6 +8150,9 @@ static void ip_msdp_show_sa_entry_detail(struct pim_msdp_sa *sa, json_object_string_add(json_row, "sptSetup", spt_str); json_object_string_add(json_row, "upTime", timebuf); json_object_string_add(json_row, "stateTimer", statetimer); + if (asn) + json_object_int_add(json_row, "asn", asn); + json_object_object_add(json_group, src_str, json_row); } else { vty_out(vty, "SA : %s\n", sa->sg_str); @@ 
-8144,6 +8162,9 @@ static void ip_msdp_show_sa_entry_detail(struct pim_msdp_sa *sa, vty_out(vty, " SPT Setup : %s\n", spt_str); vty_out(vty, " Uptime : %s\n", timebuf); vty_out(vty, " State Timer : %s\n", statetimer); + if (asn) + vty_out(vty, " BGP/AS : %u\n", asn); + vty_out(vty, "\n"); } } diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index 5e5ee5e91f27..67bcab8eeda2 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -65,6 +65,16 @@ void pim_msdp_originator_id(struct pim_instance *pim, const struct prefix *group } } +uint32_t pim_msdp_sa_asn(const struct pim_msdp_sa *sa) +{ + struct pim_msdp_peer *peer = pim_msdp_peer_find(sa->pim, sa->peer); + + if (peer == NULL) + return 0; + + return peer->asn; +} + /************************ SA cache management ******************************/ /* RFC-3618:Sec-5.1 - global active source advertisement timer */ static void pim_msdp_sa_adv_timer_cb(struct event *t) @@ -706,7 +716,7 @@ bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp) } /* check if the MSDP peer is the nexthop for the RP */ - if (pim_nht_lookup(mp->pim, &nexthop, rp, 0) && + if (pim_route_lookup(mp->pim, rp, mp->asn, &nexthop) && nexthop.mrib_nexthop_addr.s_addr == mp->peer.s_addr) { return true; } @@ -1332,8 +1342,10 @@ bool pim_msdp_peer_config_write(struct vty *vty, struct pim_instance *pim) if (mp->flags & PIM_MSDP_PEERF_IN_GROUP) continue; - vty_out(vty, " msdp peer %pI4 source %pI4\n", &mp->peer, - &mp->local); + vty_out(vty, " msdp peer %pI4 source %pI4", &mp->peer, &mp->local); + if (mp->asn) + vty_out(vty, " as %u", mp->asn); + vty_out(vty, "\n"); if (mp->auth_type == MSDP_AUTH_MD5) vty_out(vty, " msdp peer %pI4 password %s\n", &mp->peer, diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h index 4edb6e6166ec..33af2d0b6be8 100644 --- a/pimd/pim_msdp.h +++ b/pimd/pim_msdp.h @@ -155,6 +155,9 @@ struct pim_msdp_peer { /** SA maximum amount. */ uint32_t sa_limit; + + /** BGP AS number for RPF check. 
*/ + uint32_t asn; }; struct pim_msdp_mg_mbr { @@ -253,6 +256,7 @@ void pim_msdp_sa_ref(struct pim_instance *pim, struct pim_msdp_peer *mp, pim_sgaddr *sg, struct in_addr rp); void pim_msdp_sa_local_update(struct pim_upstream *up); void pim_msdp_sa_local_del(struct pim_instance *pim, pim_sgaddr *sg); +uint32_t pim_msdp_sa_asn(const struct pim_msdp_sa *sa); void pim_msdp_i_am_rp_changed(struct pim_instance *pim); bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp); void pim_msdp_up_join_state_changed(struct pim_instance *pim, diff --git a/pimd/pim_nb.c b/pimd/pim_nb.c index b55541b81026..ab002cceb3b3 100644 --- a/pimd/pim_nb.c +++ b/pimd/pim_nb.c @@ -222,6 +222,13 @@ const struct frr_yang_module_info frr_pim_info = { .destroy = pim_msdp_peer_sa_limit_destroy, } }, + { + .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/as", + .cbs = { + .modify = pim_msdp_peer_as_modify, + .destroy = pim_msdp_peer_as_destroy, + } + }, { .xpath = "/frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/mlag", .cbs = { diff --git a/pimd/pim_nb.h b/pimd/pim_nb.h index a5ef6ad60a8f..660ece67fa93 100644 --- a/pimd/pim_nb.h +++ b/pimd/pim_nb.h @@ -80,6 +80,8 @@ int pim_msdp_peer_authentication_key_modify(struct nb_cb_modify_args *args); int pim_msdp_peer_authentication_key_destroy(struct nb_cb_destroy_args *args); int pim_msdp_peer_sa_limit_modify(struct nb_cb_modify_args *args); int pim_msdp_peer_sa_limit_destroy(struct nb_cb_destroy_args *args); +int pim_msdp_peer_as_modify(struct nb_cb_modify_args *args); +int pim_msdp_peer_as_destroy(struct nb_cb_destroy_args *args); int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_create( struct nb_cb_create_args *args); int routing_control_plane_protocols_control_plane_protocol_pim_address_family_mlag_destroy( diff --git a/pimd/pim_nb_config.c b/pimd/pim_nb_config.c index 
b55d08bab9c9..d6de39a81bbb 100644 --- a/pimd/pim_nb_config.c +++ b/pimd/pim_nb_config.c @@ -1023,6 +1023,8 @@ pim6_msdp_err(pim_msdp_peer_sa_filter_out_modify, nb_cb_modify_args); pim6_msdp_err(pim_msdp_peer_sa_filter_out_destroy, nb_cb_destroy_args); pim6_msdp_err(pim_msdp_peer_sa_limit_modify, nb_cb_modify_args); pim6_msdp_err(pim_msdp_peer_sa_limit_destroy, nb_cb_destroy_args); +pim6_msdp_err(pim_msdp_peer_as_modify, nb_cb_modify_args); +pim6_msdp_err(pim_msdp_peer_as_destroy, nb_cb_destroy_args); pim6_msdp_err( routing_control_plane_protocols_control_plane_protocol_pim_address_family_msdp_peer_source_ip_modify, nb_cb_modify_args); @@ -1677,6 +1679,47 @@ int pim_msdp_peer_sa_limit_destroy(struct nb_cb_destroy_args *args) return NB_OK; } + +/* + * XPath: /frr-routing:routing/control-plane-protocols/control-plane-protocol/frr-pim:pim/address-family/msdp-peer/as + */ +int pim_msdp_peer_as_modify(struct nb_cb_modify_args *args) +{ + struct pim_msdp_peer *peer; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + /* NOTHING */ + break; + case NB_EV_APPLY: + peer = nb_running_get_entry(args->dnode, NULL, true); + peer->asn = yang_dnode_get_uint32(args->dnode, NULL); + break; + } + + return NB_OK; +} + +int pim_msdp_peer_as_destroy(struct nb_cb_destroy_args *args) +{ + struct pim_msdp_peer *peer; + + switch (args->event) { + case NB_EV_VALIDATE: + case NB_EV_PREPARE: + case NB_EV_ABORT: + /* NOTHING */ + break; + case NB_EV_APPLY: + peer = nb_running_get_entry(args->dnode, NULL, true); + peer->asn = 0; + break; + } + + return NB_OK; +} #endif /* PIM_IPV != 6 */ /* diff --git a/pimd/pim_rpf.c b/pimd/pim_rpf.c index 75e921382549..6a829d7908b5 100644 --- a/pimd/pim_rpf.c +++ b/pimd/pim_rpf.c @@ -257,3 +257,134 @@ int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2) return 0; } + +bool pim_route_lookup(struct pim_instance *pim, pim_addr addr, uint32_t asn, struct pim_nexthop *pn) +{ + struct interface *ifp; + struct 
zroute_info *ri; + struct zroute_nh_info *rni; + bool found = false; + + /* Zero out pim next hop response.*/ + memset(pn, 0, sizeof(*pn)); + + /* Search route information. */ + ri = zclient_route_lookup(pim, &addr); + if (ri == NULL) { + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: could not find route for address %pPA", __func__, &addr); + return false; + } + + /* + * AS check (in case AS number was provided). + * + * This code path checks for the following MSDP rule: + * + * > For a direct peering inter-domain environment to be successful, the + * > first AS in the MBGP best path to the originating RP should be the + * > same as the AS of the MSDP peer. + * + * RFC 4611 Section 2.1. Peering between PIM Border Routers. + */ + if (asn) { + long long first_asn; + + /* + * We expect a BGP route so if no AS Path information is + * available it means this is not a BGP route. + */ + if (ri->ri_type != ZEBRA_ROUTE_BGP) { + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: expected BGP route for %pPA (got %d)", __func__, + &addr, ri->ri_type); + goto free_and_exit; + } + + errno = 0; + first_asn = strtoll((char *)ri->ri_opaque, NULL, 10); + /* Check for number conversion failures. */ + if (first_asn == LLONG_MIN || first_asn == LLONG_MAX) { + zlog_warn("%s: AS number overflow/underflow %s", __func__, ri->ri_opaque); + goto free_and_exit; + } + if (first_asn == 0 && errno != 0) { + zlog_warn("%s: AS number conversion failed: %s", __func__, strerror(errno)); + goto free_and_exit; + } + + /* AS did not match. */ + if (first_asn != asn) { + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: next hop AS did not match for address %pPA (%u != %lld)", + __func__, &addr, asn, first_asn); + goto free_and_exit; + } + + /* Proceed to validate next hop. 
*/ + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: next hop AS matched for address %pPA (AS %u)", __func__, + &addr, asn); + } + + SLIST_FOREACH (rni, &ri->ri_nhlist, rni_entry) { + ifp = if_lookup_by_index(rni->rni_ifindex, pim->vrf->vrf_id); + if (ifp == NULL) { + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: could not find interface for ifindex %d (address %pPA)", + __func__, rni->rni_ifindex, &addr); + continue; + } + if (ifp->info == NULL) { + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: multicast not enabled on input interface %s (ifindex=%d, RPF for source %pPA)", + __func__, ifp->name, rni->rni_ifindex, &addr); + continue; + } + + /* Fill next hop parameter and return. */ + switch (rni->rni_type) { + case NEXTHOP_TYPE_IPV4: + case NEXTHOP_TYPE_IPV4_IFINDEX: + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: found nexthop %pI4 for address %pPA: interface %s ifindex=%d metric=%d pref=%d", + __func__, &rni->rni_addr.v4, &addr, ifp->name, + rni->rni_ifindex, ri->ri_metric, ri->ri_distance); + +#if PIM_IPV == 4 + pn->mrib_nexthop_addr = rni->rni_addr.v4; +#endif + break; + case NEXTHOP_TYPE_IPV6: + case NEXTHOP_TYPE_IPV6_IFINDEX: + if (PIM_DEBUG_ZEBRA) + zlog_debug("%s: found nexthop %pI6 for address %pPA: interface %s ifindex=%d metric=%d pref=%d", + __func__, &rni->rni_addr.v6, &addr, ifp->name, + rni->rni_ifindex, ri->ri_metric, ri->ri_distance); + +#if PIM_IPV == 6 + pn->mrib_nexthop_addr = rni->rni_addr.v6; +#endif + break; + case NEXTHOP_TYPE_IFINDEX: + break; + + default: + zlog_warn("%s: invalid next hop address type %d", __func__, rni->rni_type); + continue; + } + + pn->interface = ifp; + pn->mrib_metric_preference = ri->ri_distance; + pn->mrib_route_metric = ri->ri_metric; + pn->last_lookup = addr; + pn->last_lookup_time = pim_time_monotonic_usec(); + + found = true; + break; + } + +free_and_exit: + zroute_info_free(&ri); + return found; +} diff --git a/pimd/pim_rpf.h b/pimd/pim_rpf.h index 84d6b7f6c278..c1bada9d34f0 100644 --- a/pimd/pim_rpf.h +++ b/pimd/pim_rpf.h @@ -62,4 +62,5 
@@ int pim_rpf_addr_is_inaddr_any(struct pim_rpf *rpf); int pim_rpf_is_same(struct pim_rpf *rpf1, struct pim_rpf *rpf2); void pim_rpf_set_refresh_time(struct pim_instance *pim); +bool pim_route_lookup(struct pim_instance *pim, pim_addr addr, uint32_t asn, struct pim_nexthop *pn); #endif /* PIM_RPF_H */ diff --git a/pimd/pim_zlookup.c b/pimd/pim_zlookup.c index febc595ad4c4..ef2cc3ff1b1a 100644 --- a/pimd/pim_zlookup.c +++ b/pimd/pim_zlookup.c @@ -559,6 +559,27 @@ int zclient_lookup_nexthop(struct pim_instance *pim, return -2; } +struct zroute_info *zclient_route_lookup(struct pim_instance *pim, const pim_addr *addr) +{ + if (zlookup->sock < 0) { + flog_err(EC_LIB_ZAPI_SOCKET, "%s: zclient lookup socket is not connected", __func__); + zclient_lookup_failed(zlookup); + return NULL; + } + + if (pim->vrf->vrf_id == VRF_UNKNOWN) { + zlog_notice("%s: VRF: %s does not fully exist yet, delaying lookup", __func__, + pim->vrf->name); + return NULL; + } + +#if PIM_IPV == 4 + return zapi_route_lookup(zlookup, pim->vrf->vrf_id, AF_INET, addr); +#else + return zapi_route_lookup(zlookup, pim->vrf->vrf_id, AF_INET6, addr); +#endif +} + void pim_zlookup_show_ip_multicast(struct vty *vty) { vty_out(vty, "Zclient lookup socket: "); diff --git a/pimd/pim_zlookup.h b/pimd/pim_zlookup.h index c9461eb7e3df..1bf7db60eede 100644 --- a/pimd/pim_zlookup.h +++ b/pimd/pim_zlookup.h @@ -35,4 +35,15 @@ int zclient_lookup_nexthop(struct pim_instance *pim, void pim_zlookup_show_ip_multicast(struct vty *vty); int pim_zlookup_sg_statistics(struct channel_oil *c_oil); + +/** + * Asks zebra to lookup for a route and return its information. + * + * \param pim PIM instance information. + * \param addr route address. + * \param type route type (e.g. if none specific then `ZEBRA_ROUTE_ALL`). + * \note call `zroute_info_free` after using the results. 
+ */ +struct zroute_info *zclient_route_lookup(struct pim_instance *pim, const pim_addr *addr); + #endif /* PIM_ZLOOKUP_H */ From 298a31706d0026135d6505ef9e6cb036ba9386bd Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Fri, 20 Dec 2024 15:32:04 -0300 Subject: [PATCH 4/7] doc: document MSDP peer eBGP AS integration Let users know that they can utilize the BGP AS integration to detect loops in SAs using the network topology. Signed-off-by: Rafael Zalamena --- doc/user/pim.rst | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/doc/user/pim.rst b/doc/user/pim.rst index ff45f21b5663..d01fd149d6e8 100644 --- a/doc/user/pim.rst +++ b/doc/user/pim.rst @@ -546,10 +546,25 @@ Commands available for MSDP Create or update a mesh group to set the source address used to connect to peers. -.. clicmd:: msdp peer A.B.C.D source A.B.C.D +.. clicmd:: msdp peer A.B.C.D source A.B.C.D [as AS_NUMBER] Create a regular MSDP session with peer using the specified source address. + Optionally the Autonomous Number (AS) can be provided for eBGP assisted + loop detection (see RFC 4611 Section 2.1. Peering between PIM Border + Routers). + + .. note:: + + The BGP configuration must be enabled in order for this feature to work: + + :: + + bgp send-extra-data zebra + + This knob causes BGP to send the AS Path information to ``zebra`` so + MSDP can use that information. + .. clicmd:: msdp peer A.B.C.D sa-filter ACL_NAME Configure incoming or outgoing SA filtering rule. 
From d937cbf00a2f8a584812ffef854185c65f3c88d5 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Tue, 10 May 2022 08:20:47 -0300 Subject: [PATCH 5/7] pimd: add MSDP statistic counters Allow MSDP peers to hold the following new information: - Amount of RPF look up failures - Amount of incoming SAs filtered - Amount of outgoing SAs filtered Signed-off-by: Rafael Zalamena --- pimd/pim_msdp.c | 2 ++ pimd/pim_msdp.h | 8 ++++++++ pimd/pim_msdp_packet.c | 5 +++++ 3 files changed, 15 insertions(+) diff --git a/pimd/pim_msdp.c b/pimd/pim_msdp.c index 67bcab8eeda2..074230737fd6 100644 --- a/pimd/pim_msdp.c +++ b/pimd/pim_msdp.c @@ -724,6 +724,8 @@ bool pim_msdp_peer_rpf_check(struct pim_msdp_peer *mp, struct in_addr rp) if (pim_msdp_log_sa_events(mp->pim)) zlog_info("MSDP peer %pI4 RPF failure for %pI4", &mp->peer, &rp); + mp->rpf_lookup_failure_count++; + return false; } diff --git a/pimd/pim_msdp.h b/pimd/pim_msdp.h index 33af2d0b6be8..6bfa041fc337 100644 --- a/pimd/pim_msdp.h +++ b/pimd/pim_msdp.h @@ -148,10 +148,18 @@ struct pim_msdp_peer { /* timestamps */ int64_t uptime; + /** RPF lookup failures count. */ + uint32_t rpf_lookup_failure_count; + /** SA input access list name. */ char *acl_in; + /** Number of input filtered SAs. */ + uint32_t acl_in_count; + /** SA output access list name. */ char *acl_out; + /** Number of output filtered SAs. */ + uint32_t acl_out_count; /** SA maximum amount. 
*/ uint32_t sa_limit; diff --git a/pimd/pim_msdp_packet.c b/pimd/pim_msdp_packet.c index 8c821cb5e548..17c71010862c 100644 --- a/pimd/pim_msdp_packet.c +++ b/pimd/pim_msdp_packet.c @@ -436,6 +436,7 @@ static void pim_msdp_pkt_sa_gen(struct pim_instance *pim, if (pim_msdp_log_sa_events(pim)) zlog_info("MSDP peer %pI4 filter SA out %s", &mp->peer, sa->sg_str); + mp->acl_out_count++; continue; } @@ -498,6 +499,7 @@ void pim_msdp_pkt_sa_tx_one(struct pim_msdp_sa *sa) if (pim_msdp_log_sa_events(sa->pim)) zlog_info("MSDP peer %pI4 filter SA out %s", &mp->peer, sa->sg_str); + mp->acl_out_count++; continue; } @@ -531,6 +533,7 @@ void pim_msdp_pkt_sa_tx_one_to_one_peer(struct pim_msdp_peer *mp, zlog_info("MSDP peer %pI4 filter SA out (%pI4, %pI4)", &mp->peer, &sa.sg.src, &sa.sg.grp); + mp->acl_out_count++; return; } @@ -590,6 +593,8 @@ static void pim_msdp_pkt_sa_rx_one(struct pim_msdp_peer *mp, struct in_addr rp) if (pim_msdp_log_sa_events(mp->pim)) zlog_info("MSDP peer %pI4 filter SA in (%pI4, %pI4)", &mp->peer, &sg.src, &sg.grp); + + mp->acl_in_count++; return; } } From 367785f69d345e0b5e319ac50419ee7e2ec444d0 Mon Sep 17 00:00:00 2001 From: Rafael Zalamena Date: Tue, 10 May 2022 08:20:54 -0300 Subject: [PATCH 6/7] pimd: handle new MSDP peers counters Add new version of `show msdp peer` command with extra details and a new command to clear MSDP peer counters. 
Signed-off-by: Rafael Zalamena --- pimd/pim_cmd.c | 252 ++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 195 insertions(+), 57 deletions(-) diff --git a/pimd/pim_cmd.c b/pimd/pim_cmd.c index f30fa6d379bd..2489f4d8ad49 100644 --- a/pimd/pim_cmd.c +++ b/pimd/pim_cmd.c @@ -7842,6 +7842,91 @@ static void ip_msdp_show_peers(struct pim_instance *pim, struct vty *vty, vty_json(vty, json); } +static void msdp_peer_details_add_json(struct json_object *peer_json, + const struct pim_msdp_peer *peer) +{ + char state_str[PIM_MSDP_STATE_STRLEN]; + char holdtimer[PIM_MSDP_TIMER_STRLEN]; + char timebuf[PIM_MSDP_UPTIME_STRLEN]; + char katimer[PIM_MSDP_TIMER_STRLEN]; + char crtimer[PIM_MSDP_TIMER_STRLEN]; + time_t now; + + if (peer->state == PIM_MSDP_ESTABLISHED) { + now = pim_time_monotonic_sec(); + pim_time_uptime(timebuf, sizeof(timebuf), now - peer->uptime); + } else + strlcpy(timebuf, "-", sizeof(timebuf)); + + pim_msdp_state_dump(peer->state, state_str, sizeof(state_str)); + pim_time_timer_to_hhmmss(katimer, sizeof(katimer), peer->ka_timer); + pim_time_timer_to_hhmmss(crtimer, sizeof(crtimer), peer->cr_timer); + pim_time_timer_to_hhmmss(holdtimer, sizeof(holdtimer), peer->hold_timer); + + if (peer->state == PIM_MSDP_ESTABLISHED) { + union { + struct sockaddr sa; + struct sockaddr_in sin; + struct sockaddr_in6 sin6; + } sock_address; + socklen_t sock_address_size = sizeof(sock_address); + char address_string[INET6_ADDRSTRLEN]; + + memset(&sock_address, 0, sizeof(sock_address)); + + if (getsockname(peer->fd, &sock_address.sa, &sock_address_size) == -1) { + zlog_warn("MSDP peer failed to get socket local address: (%d) %s", errno, + strerror(errno)); + return; + } + + inet_ntop(AF_INET, &sock_address.sin.sin_addr, address_string, + sizeof(address_string)); + json_object_string_add(peer_json, "local", address_string); + json_object_int_add(peer_json, "localPort", ntohs(sock_address.sin.sin_port)); + + if (getpeername(peer->fd, &sock_address.sa, &sock_address_size) 
== -1) { + zlog_warn("MSDP peer failed to get socket peer address: (%d) %s", errno, + strerror(errno)); + return; + } + + inet_ntop(AF_INET, &sock_address.sin.sin_addr, address_string, + sizeof(address_string)); + json_object_string_add(peer_json, "peer", address_string); + json_object_int_add(peer_json, "peerPort", ntohs(sock_address.sin.sin_port)); + } + + if (peer->flags & PIM_MSDP_PEERF_IN_GROUP) + json_object_string_add(peer_json, "meshGroupName", peer->mesh_group_name); + + json_object_string_add(peer_json, "state", state_str); + json_object_string_add(peer_json, "upTime", timebuf); + json_object_string_add(peer_json, "keepAliveTimer", katimer); + json_object_string_add(peer_json, "connRetryTimer", crtimer); + json_object_string_add(peer_json, "holdTimer", holdtimer); + json_object_string_add(peer_json, "lastReset", peer->last_reset); + json_object_int_add(peer_json, "connAttempts", peer->conn_attempts); + json_object_int_add(peer_json, "establishedChanges", peer->est_flaps); + json_object_int_add(peer_json, "saCount", peer->sa_cnt); + json_object_int_add(peer_json, "kaSent", peer->ka_tx_cnt); + json_object_int_add(peer_json, "kaRcvd", peer->ka_rx_cnt); + json_object_int_add(peer_json, "saSent", peer->sa_tx_cnt); + json_object_int_add(peer_json, "saRcvd", peer->sa_rx_cnt); + if (peer->asn != 0) + json_object_int_add(peer_json, "asn", peer->asn); + + json_object_int_add(peer_json, "saFilteredIn", peer->acl_in_count); + if (peer->acl_in) + json_object_string_add(peer_json, "saFilterIn", peer->acl_in); + + json_object_int_add(peer_json, "saFilteredOut", peer->acl_out_count); + if (peer->acl_out) + json_object_string_add(peer_json, "saFilterout", peer->acl_out); + + json_object_int_add(peer_json, "rpfLookupFailures", peer->rpf_lookup_failure_count); +} + static void ip_msdp_show_peers_detail(struct pim_instance *pim, struct vty *vty, const char *peer, bool uj) { @@ -7867,6 +7952,13 @@ static void ip_msdp_show_peers_detail(struct pim_instance *pim, struct vty 
*vty, if (strcmp(peer, "detail") && strcmp(peer, peer_str)) continue; + if (uj) { + json_row = json_object_new_object(); + msdp_peer_details_add_json(json_row, mp); + json_object_object_add(json, peer_str, json_row); + continue; + } + if (mp->state == PIM_MSDP_ESTABLISHED) { now = pim_time_monotonic_sec(); pim_time_uptime(timebuf, sizeof(timebuf), @@ -7884,63 +7976,29 @@ static void ip_msdp_show_peers_detail(struct pim_instance *pim, struct vty *vty, pim_time_timer_to_hhmmss(holdtimer, sizeof(holdtimer), mp->hold_timer); - if (uj) { - json_row = json_object_new_object(); - json_object_string_add(json_row, "peer", peer_str); - json_object_string_add(json_row, "local", local_str); - if (mp->flags & PIM_MSDP_PEERF_IN_GROUP) - json_object_string_add(json_row, - "meshGroupName", - mp->mesh_group_name); - json_object_string_add(json_row, "state", state_str); - json_object_string_add(json_row, "upTime", timebuf); - json_object_string_add(json_row, "keepAliveTimer", - katimer); - json_object_string_add(json_row, "connRetryTimer", - crtimer); - json_object_string_add(json_row, "holdTimer", - holdtimer); - json_object_string_add(json_row, "lastReset", - mp->last_reset); - json_object_int_add(json_row, "connAttempts", - mp->conn_attempts); - json_object_int_add(json_row, "establishedChanges", - mp->est_flaps); - json_object_int_add(json_row, "saCount", mp->sa_cnt); - json_object_int_add(json_row, "kaSent", mp->ka_tx_cnt); - json_object_int_add(json_row, "kaRcvd", mp->ka_rx_cnt); - json_object_int_add(json_row, "saSent", mp->sa_tx_cnt); - json_object_int_add(json_row, "saRcvd", mp->sa_rx_cnt); - json_object_object_add(json, peer_str, json_row); - } else { - vty_out(vty, "Peer : %s\n", peer_str); - vty_out(vty, " Local : %s\n", local_str); - if (mp->flags & PIM_MSDP_PEERF_IN_GROUP) - vty_out(vty, " Mesh Group : %s\n", - mp->mesh_group_name); - vty_out(vty, " State : %s\n", state_str); - vty_out(vty, " Uptime : %s\n", timebuf); - - vty_out(vty, " Keepalive Timer : %s\n", 
katimer); - vty_out(vty, " Conn Retry Timer : %s\n", crtimer); - vty_out(vty, " Hold Timer : %s\n", holdtimer); - vty_out(vty, " Last Reset : %s\n", - mp->last_reset); - vty_out(vty, " Conn Attempts : %d\n", - mp->conn_attempts); - vty_out(vty, " Established Changes : %d\n", - mp->est_flaps); - vty_out(vty, " SA Count : %d\n", - mp->sa_cnt); - vty_out(vty, " Statistics :\n"); - vty_out(vty, - " Sent Rcvd\n"); - vty_out(vty, " Keepalives : %10d %10d\n", - mp->ka_tx_cnt, mp->ka_rx_cnt); - vty_out(vty, " SAs : %10d %10d\n", - mp->sa_tx_cnt, mp->sa_rx_cnt); - vty_out(vty, "\n"); - } + vty_out(vty, "Peer : %s\n", peer_str); + vty_out(vty, " Local : %s\n", local_str); + if (mp->flags & PIM_MSDP_PEERF_IN_GROUP) + vty_out(vty, " Mesh Group : %s\n", mp->mesh_group_name); + if (mp->asn != 0) + vty_out(vty, " BGP/AS : %u\n", mp->asn); + else + vty_out(vty, " BGP/AS : -\n"); + vty_out(vty, " State : %s\n", state_str); + vty_out(vty, " Uptime : %s\n", timebuf); + + vty_out(vty, " Keepalive Timer : %s\n", katimer); + vty_out(vty, " Conn Retry Timer : %s\n", crtimer); + vty_out(vty, " Hold Timer : %s\n", holdtimer); + vty_out(vty, " Last Reset : %s\n", mp->last_reset); + vty_out(vty, " Conn Attempts : %d\n", mp->conn_attempts); + vty_out(vty, " Established Changes : %d\n", mp->est_flaps); + vty_out(vty, " SA Count : %d\n", mp->sa_cnt); + vty_out(vty, " Statistics :\n"); + vty_out(vty, " Sent Rcvd\n"); + vty_out(vty, " Keepalives : %10d %10d\n", mp->ka_tx_cnt, mp->ka_rx_cnt); + vty_out(vty, " SAs : %10d %10d\n", mp->sa_tx_cnt, mp->sa_rx_cnt); + vty_out(vty, "\n"); } if (uj) @@ -8021,6 +8079,85 @@ DEFUN (show_ip_msdp_peer_detail_vrf_all, return CMD_SUCCESS; } +static void clear_msdp_peer_counters(struct pim_msdp_peer *peer) +{ + peer->ka_rx_cnt = 0; + peer->ka_tx_cnt = 0; + peer->sa_rx_cnt = 0; + peer->sa_tx_cnt = 0; + peer->est_flaps = 0; + peer->conn_attempts = 0; + peer->acl_in_count = 0; + peer->acl_out_count = 0; + peer->rpf_lookup_failure_count = 0; +} + 
+DEFPY(clear_ip_msdp_peer_counters, clear_ip_msdp_peer_counters_cmd, + "clear ip msdp [<vrf NAME$vrf_name|vrf all$vrf_all>] peer [A.B.C.D$peer] counters", + CLEAR_STR + IP_STR + MSDP_STR + VRF_CMD_HELP_STR + VRF_CMD_HELP_STR + "MSDP peer information\n" + "Peer IP address\n" + "MSDP peer counters\n") +{ + const struct pim_instance *pim; + struct pim_msdp_peer *msdp_peer = NULL; + struct vrf *vrf; + + if (vrf_name) { + vrf = vrf_lookup_by_name(vrf_name); + if (vrf == NULL) { + vty_out(vty, "VRF %s does not exist\n", vrf_name); + return CMD_WARNING; + } + } else if (vrf_all) { + vrf = NULL; + } else { + vrf = vrf_lookup_by_id(VRF_DEFAULT); + if (vrf == NULL) { + vty_out(vty, "Default VRF does not exist\n"); + return CMD_WARNING; + } + } + + if (vrf) { + pim = vrf->info; + + if (peer_str) { + msdp_peer = pim_msdp_peer_find(pim, peer); + if (msdp_peer) + clear_msdp_peer_counters(msdp_peer); + } else { + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, node, msdp_peer)) + clear_msdp_peer_counters(msdp_peer); + } + + return CMD_SUCCESS; + } + + RB_FOREACH (vrf, vrf_name_head, &vrfs_by_name) { + pim = vrf->info; + + if (peer_str) { + msdp_peer = pim_msdp_peer_find(pim, peer); + if (msdp_peer) + clear_msdp_peer_counters(msdp_peer); + } else { + struct listnode *node; + + for (ALL_LIST_ELEMENTS_RO(pim->msdp.peer_list, node, msdp_peer)) + clear_msdp_peer_counters(msdp_peer); + } + } + + return CMD_SUCCESS; +} + static void ip_msdp_show_sa(struct pim_instance *pim, struct vty *vty, bool uj) { struct listnode *sanode; @@ -9234,6 +9371,7 @@ void pim_cmd_init(void) install_element(ENABLE_NODE, &clear_ip_mroute_count_cmd); install_element(ENABLE_NODE, &clear_ip_msdp_peer_cmd); + install_element(ENABLE_NODE, &clear_ip_msdp_peer_counters_cmd); install_element(ENABLE_NODE, &clear_ip_interfaces_cmd); install_element(ENABLE_NODE, &clear_ip_igmp_interfaces_cmd); install_element(ENABLE_NODE, &clear_ip_mroute_cmd); From 718e421d77ec33c6c2c8a559a6dd8e4f91c4a098 Mon Sep 17 00:00:00 2001 From: Rafael
Zalamena Date: Fri, 20 Dec 2024 14:15:27 -0300 Subject: [PATCH 7/7] topotests: add tests for MSDP eBGP integration Add topology for testing MSDP eBGP integration. Signed-off-by: Rafael Zalamena --- tests/topotests/msdp_topo4/__init__.py | 0 tests/topotests/msdp_topo4/r1/frr.conf | 36 ++++ tests/topotests/msdp_topo4/r2/frr.conf | 39 ++++ tests/topotests/msdp_topo4/r3/frr.conf | 31 +++ tests/topotests/msdp_topo4/r4/frr.conf | 31 +++ tests/topotests/msdp_topo4/test_msdp_topo4.py | 184 ++++++++++++++++++ 6 files changed, 321 insertions(+) create mode 100644 tests/topotests/msdp_topo4/__init__.py create mode 100644 tests/topotests/msdp_topo4/r1/frr.conf create mode 100644 tests/topotests/msdp_topo4/r2/frr.conf create mode 100644 tests/topotests/msdp_topo4/r3/frr.conf create mode 100644 tests/topotests/msdp_topo4/r4/frr.conf create mode 100755 tests/topotests/msdp_topo4/test_msdp_topo4.py diff --git a/tests/topotests/msdp_topo4/__init__.py b/tests/topotests/msdp_topo4/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/msdp_topo4/r1/frr.conf b/tests/topotests/msdp_topo4/r1/frr.conf new file mode 100644 index 000000000000..213981e38b52 --- /dev/null +++ b/tests/topotests/msdp_topo4/r1/frr.conf @@ -0,0 +1,36 @@ +ip forwarding +! +interface r1-eth0 + ip address 192.168.0.1/24 + ip pim +! +interface r1-eth1 + ip address 192.168.1.1/24 + ip pim +! +interface r1-eth2 + ip address 192.168.10.1/24 + ip pim + ip igmp +! +interface lo + ip address 10.254.254.1/32 + ip pim + ip pim use-source 10.254.254.1 +! +router pim + msdp timers 10 20 3 + msdp peer 192.168.0.2 source 192.168.0.1 + msdp peer 192.168.1.2 source 192.168.1.1 + rp 10.254.254.1 + join-prune-interval 5 +! +router bgp 65001 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.0.2 remote-as 65002 + neighbor 192.168.1.2 remote-as 65003 + address-family ipv4 unicast + redistribute connected + exit-address-family +! 
\ No newline at end of file diff --git a/tests/topotests/msdp_topo4/r2/frr.conf b/tests/topotests/msdp_topo4/r2/frr.conf new file mode 100644 index 000000000000..6042f39cc34f --- /dev/null +++ b/tests/topotests/msdp_topo4/r2/frr.conf @@ -0,0 +1,39 @@ +ip forwarding +! +bgp send-extra-data zebra +! +interface r2-eth0 + ip address 192.168.0.2/24 + ip pim +! +interface r2-eth1 + ip address 192.168.2.1/24 + ip pim +! +interface r2-eth2 + ip address 192.168.3.2/24 + ip pim +! +interface lo + ip address 10.254.254.2/32 + ip pim + ip pim use-source 10.254.254.2 +! +router pim + msdp timers 10 20 3 + msdp peer 192.168.0.1 source 192.168.0.2 as 65001 + msdp peer 192.168.2.2 source 192.168.2.1 as 65003 + msdp peer 192.168.3.1 source 192.168.3.2 as 65004 + rp 10.254.254.2 + join-prune-interval 5 +! +router bgp 65002 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.0.1 remote-as 65001 + neighbor 192.168.2.2 remote-as 65003 + neighbor 192.168.3.1 remote-as 65004 + address-family ipv4 unicast + redistribute connected + exit-address-family +! \ No newline at end of file diff --git a/tests/topotests/msdp_topo4/r3/frr.conf b/tests/topotests/msdp_topo4/r3/frr.conf new file mode 100644 index 000000000000..f799925b87a6 --- /dev/null +++ b/tests/topotests/msdp_topo4/r3/frr.conf @@ -0,0 +1,31 @@ +ip forwarding +! +interface r3-eth0 + ip address 192.168.1.2/24 + ip pim +! +interface r3-eth1 + ip address 192.168.2.2/24 + ip pim +! +interface lo + ip address 10.254.254.3/32 + ip pim + ip pim use-source 10.254.254.3 +! +router pim + msdp timers 10 20 3 + msdp peer 192.168.1.1 source 192.168.1.2 + msdp peer 192.168.2.1 source 192.168.2.2 + rp 10.254.254.3 + join-prune-interval 5 +! +router bgp 65003 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.1.1 remote-as 65001 + neighbor 192.168.2.1 remote-as 65002 + address-family ipv4 unicast + redistribute connected + exit-address-family +! 
\ No newline at end of file diff --git a/tests/topotests/msdp_topo4/r4/frr.conf b/tests/topotests/msdp_topo4/r4/frr.conf new file mode 100644 index 000000000000..a7ed2b711477 --- /dev/null +++ b/tests/topotests/msdp_topo4/r4/frr.conf @@ -0,0 +1,31 @@ +ip forwarding +! +debug pim zebra +! +interface r4-eth0 + ip address 192.168.3.1/24 + ip pim +! +interface r4-eth1 + ip address 192.168.20.1/24 + ip pim +! +interface lo + ip address 10.254.254.4/32 + ip pim + ip pim use-source 10.254.254.4 +! +router pim + msdp timers 10 20 3 + msdp peer 192.168.3.2 source 192.168.3.1 + rp 10.254.254.4 + join-prune-interval 5 +! +router bgp 65004 + no bgp ebgp-requires-policy + no bgp network import-check + neighbor 192.168.3.2 remote-as 65002 + address-family ipv4 unicast + redistribute connected + exit-address-family +! \ No newline at end of file diff --git a/tests/topotests/msdp_topo4/test_msdp_topo4.py b/tests/topotests/msdp_topo4/test_msdp_topo4.py new file mode 100755 index 000000000000..c0f99f72d543 --- /dev/null +++ b/tests/topotests/msdp_topo4/test_msdp_topo4.py @@ -0,0 +1,184 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_msdp_topo4.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2024 by +# Network Device Education Foundation, Inc. ("NetDEF") +# + +""" +test_msdp_topo4.py: Test the FRR PIM MSDP peer. +""" + +import os +import sys +import json +from functools import partial +import re +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest + +# Required to instantiate the topology builder class. 
+from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +from lib.pim import McastTesterHelper + +pytestmark = [pytest.mark.bgpd, pytest.mark.pimd] + +app_helper = McastTesterHelper() + + +def build_topo(tgen): + """ + h1----r1----r2----r4----h2 + | / + | / + | / + r3 + """ + + # Create 4 routers + for routern in range(1, 5): + tgen.add_router(f"r{routern}") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r3"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r3"]) + + switch = tgen.add_switch("s4") + switch.add_link(tgen.gears["r2"]) + switch.add_link(tgen.gears["r4"]) + + switch = tgen.add_switch("s5") + tgen.add_host("h1", "192.168.10.100/24", "via 192.168.10.1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["h1"]) + + switch = tgen.add_switch("s6") + tgen.add_host("h2", "192.168.20.100/24", "via 192.168.20.1") + switch.add_link(tgen.gears["r4"]) + switch.add_link(tgen.gears["h2"]) + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + for _, router in router_list.items(): + file = f"{CWD}/{router.name}/frr.conf" + router.load_frr_config(file) + + # Initialize all routers. + tgen.start_router() + + app_helper.init(tgen) + + +def teardown_module(): + "Teardown the pytest environment" + tgen = get_topogen() + app_helper.cleanup() + tgen.stop_topology() + + +def test_bgp_convergence(): + "Wait for BGP protocol convergence" + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("waiting for protocols to converge") + + def expect_loopback_route(router, iptype, route, proto): + "Wait until route is present on RIB for protocol." 
+ logger.info(f"waiting route {route} in {router}") + test_func = partial( + topotest.router_json_cmp, + tgen.gears[router], + f"show {iptype} route json", + {route: [{"protocol": proto}]}, + ) + _, result = topotest.run_and_expect(test_func, None, count=130, wait=1) + assertmsg = f'"{router}" convergence failure' + assert result is None, assertmsg + + # Wait for R1 + expect_loopback_route("r1", "ip", "10.254.254.2/32", "bgp") + expect_loopback_route("r1", "ip", "10.254.254.3/32", "bgp") + expect_loopback_route("r1", "ip", "10.254.254.4/32", "bgp") + + # Wait for R2 + expect_loopback_route("r2", "ip", "10.254.254.1/32", "bgp") + expect_loopback_route("r2", "ip", "10.254.254.3/32", "bgp") + expect_loopback_route("r2", "ip", "10.254.254.4/32", "bgp") + + # Wait for R3 + expect_loopback_route("r3", "ip", "10.254.254.1/32", "bgp") + expect_loopback_route("r3", "ip", "10.254.254.2/32", "bgp") + expect_loopback_route("r3", "ip", "10.254.254.4/32", "bgp") + + # Wait for R4 + expect_loopback_route("r4", "ip", "10.254.254.1/32", "bgp") + expect_loopback_route("r4", "ip", "10.254.254.2/32", "bgp") + expect_loopback_route("r4", "ip", "10.254.254.3/32", "bgp") + + +def test_msdp_sa_check(): + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + MCAST_ADDRESS = "229.1.2.3" + app_helper.run("h1", ["--send=0.7", MCAST_ADDRESS, "h1-eth0"]) + app_helper.run("h2", [MCAST_ADDRESS, "h2-eth0"]) + + def test_r2_mroute(): + r2_expect = { + "229.1.2.3": { + "192.168.10.100": { + "rp": "10.254.254.1", + "local": "no", + } + } + } + out = tgen.gears["r2"].vtysh_cmd("show ip msdp sa json", isjson=True) + return topotest.json_cmp(out, r2_expect) + + logger.info("Waiting for R2 multicast routes") + _, val = topotest.run_and_expect(test_r2_mroute, None, count=55, wait=2) + assert val is None, "multicast route convergence failure" + + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args))