From 07a459ac25e2b17464ccabf9df6ca07ae6887531 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Wed, 17 Jun 2020 14:11:35 +0200 Subject: [PATCH 1/6] lib: add support for new redistribute table-direct element Add a new kind of redistributed route that helps to import entries that are not copied in the default routing table. Contrary to the 'redistribute table' feature, the entries are directly obtained from the zebra appropriate routing table. The 'table-direct' naming expresses the direct redistribution of the routes, without having to copy the route entries in the default routing table. The distance value for this route is 14. Such route entries will be prioritary compared to 'table' and 'ebgp' route entries type. Signed-off-by: Philippe Guibert --- lib/log.c | 4 ++++ lib/route_types.txt | 2 ++ lib/zebra.h | 1 + 3 files changed, 7 insertions(+) diff --git a/lib/log.c b/lib/log.c index ac31faa7951c..306c995c15e5 100644 --- a/lib/log.c +++ b/lib/log.c @@ -573,6 +573,8 @@ int proto_redistnum(int afi, const char *s) return ZEBRA_ROUTE_SHARP; else if (strmatch(s, "openfabric")) return ZEBRA_ROUTE_OPENFABRIC; + else if (strmatch(s, "table-direct")) + return ZEBRA_ROUTE_TABLE_DIRECT; } if (afi == AFI_IP6) { if (strmatch(s, "kernel")) @@ -603,6 +605,8 @@ int proto_redistnum(int afi, const char *s) return ZEBRA_ROUTE_SHARP; else if (strmatch(s, "openfabric")) return ZEBRA_ROUTE_OPENFABRIC; + else if (strmatch(s, "table-direct")) + return ZEBRA_ROUTE_TABLE_DIRECT; } return -1; } diff --git a/lib/route_types.txt b/lib/route_types.txt index a82273a6dcb0..07289e1e0815 100644 --- a/lib/route_types.txt +++ b/lib/route_types.txt @@ -86,6 +86,7 @@ ZEBRA_ROUTE_OPENFABRIC, openfabric, fabricd, 'f', 1, 1, 1, "OpenFabric", fa ZEBRA_ROUTE_VRRP, vrrp, vrrpd, '-', 0, 0, 0, "VRRP", vrrpd ZEBRA_ROUTE_NHG, zebra, none, '-', 0, 0, 0, "Nexthop Group", none ZEBRA_ROUTE_SRTE, srte, none, '-', 0, 0, 0, "SR-TE", none +ZEBRA_ROUTE_TABLE_DIRECT, table-direct, zebra, 't', 1, 1, 1, "Table-Direct", zebra ZEBRA_ROUTE_ALL, wildcard, none, '-', 0, 0, 0, "-", none @@ -116,3 +117,4 @@ ZEBRA_ROUTE_BFD, "Bidirectional Fowarding Detection (BFD)" ZEBRA_ROUTE_VRRP, "Virtual Router Redundancy Protocol (VRRP)" ZEBRA_ROUTE_OPENFABRIC, "OpenFabric Routing Protocol" ZEBRA_ROUTE_NHG, "Zebra Nexthop Groups (NHG)" +ZEBRA_ROUTE_TABLE_DIRECT, "Non-main Kernel Routing Table - Direct" diff --git a/lib/zebra.h b/lib/zebra.h index ecc87f58f10f..42e6a97ff812 100644 --- a/lib/zebra.h +++ b/lib/zebra.h @@ -370,6 +370,7 @@ typedef enum { #define ZEBRA_IBGP_DISTANCE_DEFAULT 200 #define ZEBRA_EBGP_DISTANCE_DEFAULT 20 #define ZEBRA_TABLE_DISTANCE_DEFAULT 15 +#define ZEBRA_TABLEDIRECT_DISTANCE_DEFAULT 14 #define ZEBRA_EIGRP_DISTANCE_DEFAULT 90 #define ZEBRA_NHRP_DISTANCE_DEFAULT 10 #define ZEBRA_LDP_DISTANCE_DEFAULT 150 From daad19071cc40e4ef6913e674cc41fa35121c399 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Wed, 17 Jun 2020 14:11:35 +0200 Subject: [PATCH 2/6] zebra: add redistribute table-direct support Redistributing routes from a specific routing table to a particular routing protocol necessitates copying route entries to the main routing table using the "ip import-table" command. Once copied, these routes are assigned a distinct "table" route type, which the "redistribute table" command of the routing protocol then picks up. For illustration, here is a configuration that showcases the use of "import-table" and "redistribute": > # show running-config > [..] 
> ip route 172.31.0.10/32 172.31.1.10 table 100 > router bgp 65500 > address-family ipv4 unicast > redistribute table 100 > exit-address-family > exit > ip import-table 100 > > # show ip route vrf default > [..] > T[100]>* 172.31.0.10/32 [15/0] via 172.31.1.10, r2-eth1, weight 1, 00:00:05 However, this method has inherent constraints: - The 'import-table' parameter only handles route table id up to 252. The 253/254/255 table ids are reserved in the linux system, and adding other table IDs above 255 leads to a design issue, where the size of some tables is directly related to the maximum number of table ids to support. - Duplicated route entries might interfere with original default table routes, leading to potential conflicts. There is no guarantee that the zebra RIB will favor these duplicated entries during redistribution. - There are cases where the table ID can be checked independently of the default routing table, as seen in Linux where the "ip rule" command is able to divert traffic to that routing table. In that case, there is no need to duplicate route entries in the default routing table. To overcome these issues, a new redistribution type is proposed to redistribute route entries directly from a specified routing table, eliminating the need for an initial import into the default table. Add a 'ZEBRA_ROUTE_TABLE_DIRECT' type to the 'REDISTRIBUTE' ZAPI messages. It allows sending routes from a given non default table ID from zebra to a routing daemon. The destination routing protocol table must be the default table. The redistributed route inherit from the default distance value of 14: this is the distance value reserved for routes redistributed via ROUTE_TABLE_DIRECT. Signed-off-by: Philippe Guibert --- zebra/redistribute.c | 80 +++++++++++++++++++++++++++++++++++--------- zebra/zapi_msg.c | 13 +++++-- zebra/zapi_msg.h | 3 +- zebra/zebra_rib.c | 1 + 4 files changed, 78 insertions(+), 19 deletions(-) diff --git a/zebra/redistribute.c b/zebra/redistribute.c index 4069f7dd7039..71e7956324b0 100644 --- a/zebra/redistribute.c +++ b/zebra/redistribute.c @@ -77,9 +77,8 @@ static void zebra_redistribute_default(struct zserv *client, vrf_id_t vrf_id) RNODE_FOREACH_RE (rn, newre) { if (CHECK_FLAG(newre->flags, ZEBRA_FLAG_SELECTED)) - zsend_redistribute_route( - ZEBRA_REDISTRIBUTE_ROUTE_ADD, client, - rn, newre); + zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, + client, rn, newre, false); } route_unlock_node(rn); @@ -88,14 +87,26 @@ static void zebra_redistribute_default(struct zserv *client, vrf_id_t vrf_id) /* Redistribute routes. 
*/ static void zebra_redistribute(struct zserv *client, int type, - unsigned short instance, vrf_id_t vrf_id, + unsigned short instance, struct zebra_vrf *zvrf, int afi) { struct route_entry *newre; struct route_table *table; struct route_node *rn; + bool is_table_direct = false; + vrf_id_t vrf_id = zvrf_id(zvrf); + + if (type == ZEBRA_ROUTE_TABLE_DIRECT) { + if (vrf_id == VRF_DEFAULT) { + table = zebra_router_find_table(zvrf, instance, afi, + SAFI_UNICAST); + type = ZEBRA_ROUTE_ALL; + is_table_direct = true; + } else + return; + } else + table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id); - table = zebra_vrf_table(afi, SAFI_UNICAST, vrf_id); if (!table) return; @@ -124,11 +135,30 @@ static void zebra_redistribute(struct zserv *client, int type, if (!zebra_check_addr(&rn->p)) continue; + if (type == ZEBRA_ROUTE_ADD && is_table_direct && + newre->vrf_id != VRF_DEFAULT) + continue; + zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, - client, rn, newre); + client, rn, newre, is_table_direct); } } +/* + * Function to return a valid table id value if table-direct is used + * return 0 otherwise + * This function can be called only if zebra_redistribute_check returns TRUE + */ +static bool zebra_redistribute_is_table_direct(const struct route_entry *re) +{ + struct zebra_vrf *zvrf; + + zvrf = zebra_vrf_lookup_by_id(re->vrf_id); + if (re->vrf_id == VRF_DEFAULT && zvrf->table_id != re->table) + return true; + return false; +} + /* * Function to check if prefix is candidate for * redistribute. @@ -146,8 +176,19 @@ static bool zebra_redistribute_check(const struct route_node *rn, afi = family2afi(rn->p.family); zvrf = zebra_vrf_lookup_by_id(re->vrf_id); - if (re->vrf_id == VRF_DEFAULT && zvrf->table_id != re->table) + if (re->vrf_id == VRF_DEFAULT && zvrf->table_id != re->table) { + if (re->table && + redist_check_instance(&client->mi_redist + [afi][ZEBRA_ROUTE_TABLE_DIRECT], + re->table)) { + /* table-direct redistribution only for route entries which + * are on the default vrf, and that have table id different + * from the default table. + */ + return true; + } return false; + } /* If default route and redistributed */ if (is_default_prefix(&rn->p) && @@ -185,6 +226,7 @@ void redistribute_update(const struct route_node *rn, { struct listnode *node, *nnode; struct zserv *client; + bool is_table_direct; if (IS_ZEBRA_DEBUG_RIB) zlog_debug( @@ -210,11 +252,16 @@ void redistribute_update(const struct route_node *rn, re->vrf_id, re->table, re->type, re->distance, re->metric); } + is_table_direct = zebra_redistribute_is_table_direct(re); zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_ADD, - client, rn, re); - } else if (zebra_redistribute_check(rn, prev_re, client)) + client, rn, re, + is_table_direct); + } else if (zebra_redistribute_check(rn, prev_re, client)) { + is_table_direct = zebra_redistribute_is_table_direct(prev_re); zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, - client, rn, prev_re); + client, rn, prev_re, + is_table_direct); + } } } @@ -233,6 +280,7 @@ void redistribute_delete(const struct route_node *rn, struct listnode *node, *nnode; struct zserv *client; vrf_id_t vrfid; + bool is_table_direct; if (old_re) vrfid = old_re->vrf_id; @@ -285,9 +333,12 @@ void redistribute_delete(const struct route_node *rn, continue; /* Send a delete for the 'old' re to any subscribed client. 
*/ - if (zebra_redistribute_check(rn, old_re, client)) + if (zebra_redistribute_check(rn, old_re, client)) { + is_table_direct = zebra_redistribute_is_table_direct(old_re); zsend_redistribute_route(ZEBRA_REDISTRIBUTE_ROUTE_DEL, - client, rn, old_re); + client, rn, old_re, + is_table_direct); + } } } @@ -326,8 +377,7 @@ void zebra_redistribute_add(ZAPI_HANDLER_ARGS) instance)) { redist_add_instance(&client->mi_redist[afi][type], instance); - zebra_redistribute(client, type, instance, - zvrf_id(zvrf), afi); + zebra_redistribute(client, type, instance, zvrf, afi); } } else { if (!vrf_bitmap_check(&client->redist[afi][type], @@ -339,7 +389,7 @@ void zebra_redistribute_add(ZAPI_HANDLER_ARGS) zvrf_id(zvrf)); vrf_bitmap_set(&client->redist[afi][type], zvrf_id(zvrf)); - zebra_redistribute(client, type, 0, zvrf_id(zvrf), afi); + zebra_redistribute(client, type, 0, zvrf, afi); } } diff --git a/zebra/zapi_msg.c b/zebra/zapi_msg.c index 92a81f682873..856c906bdcb2 100644 --- a/zebra/zapi_msg.c +++ b/zebra/zapi_msg.c @@ -510,7 +510,7 @@ int zsend_interface_update(int cmd, struct zserv *client, struct interface *ifp) int zsend_redistribute_route(int cmd, struct zserv *client, const struct route_node *rn, - const struct route_entry *re) + const struct route_entry *re, bool is_table_direct) { struct zapi_route api; struct zapi_nexthop *api_nh; @@ -526,7 +526,11 @@ int zsend_redistribute_route(int cmd, struct zserv *client, api.vrf_id = re->vrf_id; api.type = re->type; api.safi = SAFI_UNICAST; - api.instance = re->instance; + if (is_table_direct) { + api.instance = re->table; + api.type = ZEBRA_ROUTE_TABLE_DIRECT; + } else + api.instance = re->instance; api.flags = re->flags; afi = family2afi(p->family); @@ -593,7 +597,10 @@ int zsend_redistribute_route(int cmd, struct zserv *client, /* Attributes. 
*/ SET_FLAG(api.message, ZAPI_MESSAGE_DISTANCE); - api.distance = re->distance; + if (is_table_direct) + api.distance = ZEBRA_TABLEDIRECT_DISTANCE_DEFAULT; + else + api.distance = re->distance; SET_FLAG(api.message, ZAPI_MESSAGE_METRIC); api.metric = re->metric; if (re->tag) { diff --git a/zebra/zapi_msg.h b/zebra/zapi_msg.h index a01cbf675d51..def1e8a1bdff 100644 --- a/zebra/zapi_msg.h +++ b/zebra/zapi_msg.h @@ -53,7 +53,8 @@ extern int zsend_interface_update(int cmd, struct zserv *client, struct interface *ifp); extern int zsend_redistribute_route(int cmd, struct zserv *zclient, const struct route_node *rn, - const struct route_entry *re); + const struct route_entry *re, + bool is_table_direct); extern int zsend_router_id_update(struct zserv *zclient, afi_t afi, struct prefix *p, vrf_id_t vrf_id); diff --git a/zebra/zebra_rib.c b/zebra/zebra_rib.c index 97b6a7decfea..37c042c04482 100644 --- a/zebra/zebra_rib.c +++ b/zebra/zebra_rib.c @@ -130,6 +130,7 @@ static const struct { [ZEBRA_ROUTE_OLSR] = {ZEBRA_ROUTE_OLSR, ZEBRA_MAX_DISTANCE_DEFAULT, META_QUEUE_OTHER}, [ZEBRA_ROUTE_TABLE] = {ZEBRA_ROUTE_TABLE, ZEBRA_TABLE_DISTANCE_DEFAULT, META_QUEUE_STATIC}, + [ZEBRA_ROUTE_TABLE_DIRECT] = {ZEBRA_ROUTE_TABLE_DIRECT, ZEBRA_TABLEDIRECT_DISTANCE_DEFAULT, META_QUEUE_STATIC}, [ZEBRA_ROUTE_LDP] = {ZEBRA_ROUTE_LDP, ZEBRA_LDP_DISTANCE_DEFAULT, META_QUEUE_OTHER}, [ZEBRA_ROUTE_VNC] = {ZEBRA_ROUTE_VNC, ZEBRA_EBGP_DISTANCE_DEFAULT, From a61f49ab3607a829f2ef838536f496da15f75518 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Thu, 14 Sep 2023 13:52:25 +0200 Subject: [PATCH 3/6] lib: fix RT_TABLE_LOCAL for bsd builds The routing table numbers are specific to linux builds, and the RT_TABLE_xxx are usually defined in linux headers. The bsd builds do not benefit from this definition: some RT_TABLE_xxx defines are missing for those builds. Fix this by appending RT_TABLE_LOCAL define for bsd headers. Signed-off-by: Philippe Guibert --- lib/zebra.h | 1 + 1 file changed, 1 insertion(+) diff --git a/lib/zebra.h b/lib/zebra.h index 42e6a97ff812..b742e71848f5 100644 --- a/lib/zebra.h +++ b/lib/zebra.h @@ -121,6 +121,7 @@ #include #else #define RT_TABLE_MAIN 0 +#define RT_TABLE_LOCAL RT_TABLE_MAIN #endif /* HAVE_NETLINK */ #include From b6367f84604ba815975ac419b96d8a134bf5cee4 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Mon, 28 Aug 2023 09:33:38 +0200 Subject: [PATCH 4/6] bgpd: add redistribute table-direct support Add the 'redistribute table-direct' command under the bgp address-family node. Handle the table-direct support wherever needed in the BGP code. 
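
For illustration only, here is a minimal configuration sketch of the new command; the table ID 100 and AS number 65500 are reused from the 'import-table' example in the zebra commit and are not mandated by this change. Contrary to 'redistribute table', no 'ip import-table 100' statement is required beforehand:

> router bgp 65500
>  address-family ipv4 unicast
>   redistribute table-direct 100
>  exit-address-family
> exit
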
Signed-off-by: Philippe Guibert --- bgpd/bgp_vty.c | 129 ++++++++++++++++++++++++++++++++++++++--------- bgpd/bgp_zebra.c | 3 +- 2 files changed, 108 insertions(+), 24 deletions(-) diff --git a/bgpd/bgp_vty.c b/bgpd/bgp_vty.c index fca9e2ad8e3c..3ce90adb1976 100644 --- a/bgpd/bgp_vty.c +++ b/bgpd/bgp_vty.c @@ -16846,10 +16846,11 @@ ALIAS_HIDDEN( DEFUN (bgp_redistribute_ipv4_ospf, bgp_redistribute_ipv4_ospf_cmd, - "redistribute (1-65535)", + "redistribute (1-65535)", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n") { VTY_DECLVAR_CONTEXT(bgp, bgp); @@ -16869,7 +16870,18 @@ DEFUN (bgp_redistribute_ipv4_ospf, argv[idx_ospf_table]->arg); return CMD_WARNING_CONFIG_FAILED; } - protocol = ZEBRA_ROUTE_TABLE; + if (strncmp(argv[idx_ospf_table]->arg, "table-direct", + strlen("table-direct")) == 0) { + protocol = ZEBRA_ROUTE_TABLE_DIRECT; + if (instance == RT_TABLE_MAIN || + instance == RT_TABLE_LOCAL) { + vty_out(vty, + "%% 'table-direct', can not use %u routing table\n", + instance); + return CMD_WARNING_CONFIG_FAILED; + } + } else + protocol = ZEBRA_ROUTE_TABLE; } bgp_redist_add(bgp, AFI_IP, protocol, instance); @@ -16877,18 +16889,20 @@ DEFUN (bgp_redistribute_ipv4_ospf, } ALIAS_HIDDEN(bgp_redistribute_ipv4_ospf, bgp_redistribute_ipv4_ospf_hidden_cmd, - "redistribute (1-65535)", + "redistribute (1-65535)", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n") DEFUN (bgp_redistribute_ipv4_ospf_rmap, bgp_redistribute_ipv4_ospf_rmap_cmd, - "redistribute (1-65535) route-map RMAP_NAME", + "redistribute (1-65535) route-map RMAP_NAME", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Route map reference\n" "Pointer to route-map entries\n") @@ -16904,6 +16918,8 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap, struct route_map *route_map = route_map_lookup_warn_noexist(vty, argv[idx_word]->arg); + instance = strtoul(argv[idx_number]->arg, NULL, 10); + if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { @@ -16913,10 +16929,20 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap, argv[idx_ospf_table]->arg); return CMD_WARNING_CONFIG_FAILED; } - protocol = ZEBRA_ROUTE_TABLE; + if (strncmp(argv[idx_ospf_table]->arg, "table-direct", + strlen("table-direct")) == 0) { + protocol = ZEBRA_ROUTE_TABLE_DIRECT; + if (instance == RT_TABLE_MAIN || + instance == RT_TABLE_LOCAL) { + vty_out(vty, + "%% 'table-direct', can not use %u routing table\n", + instance); + return CMD_WARNING_CONFIG_FAILED; + } + } else + protocol = ZEBRA_ROUTE_TABLE; } - instance = strtoul(argv[idx_number]->arg, NULL, 10); red = bgp_redist_add(bgp, AFI_IP, protocol, instance); changed = bgp_redistribute_rmap_set(red, argv[idx_word]->arg, route_map); @@ -16925,20 +16951,22 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap, ALIAS_HIDDEN(bgp_redistribute_ipv4_ospf_rmap, bgp_redistribute_ipv4_ospf_rmap_hidden_cmd, - "redistribute (1-65535) route-map RMAP_NAME", + "redistribute (1-65535) route-map RMAP_NAME", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance 
ID/Table ID\n" "Route map reference\n" "Pointer to route-map entries\n") DEFUN (bgp_redistribute_ipv4_ospf_metric, bgp_redistribute_ipv4_ospf_metric_cmd, - "redistribute (1-65535) metric (0-4294967295)", + "redistribute (1-65535) metric (0-4294967295)", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Metric for redistributed routes\n" "Default metric\n") @@ -16953,6 +16981,8 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric, int protocol; bool changed; + instance = strtoul(argv[idx_number]->arg, NULL, 10); + if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { @@ -16962,10 +16992,20 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric, argv[idx_ospf_table]->arg); return CMD_WARNING_CONFIG_FAILED; } - protocol = ZEBRA_ROUTE_TABLE; + if (strncmp(argv[idx_ospf_table]->arg, "table-direct", + strlen("table-direct")) == 0) { + protocol = ZEBRA_ROUTE_TABLE_DIRECT; + if (instance == RT_TABLE_MAIN || + instance == RT_TABLE_LOCAL) { + vty_out(vty, + "%% 'table-direct', can not use %u routing table\n", + instance); + return CMD_WARNING_CONFIG_FAILED; + } + } else + protocol = ZEBRA_ROUTE_TABLE; } - instance = strtoul(argv[idx_number]->arg, NULL, 10); metric = strtoul(argv[idx_number_2]->arg, NULL, 10); red = bgp_redist_add(bgp, AFI_IP, protocol, instance); @@ -16976,20 +17016,22 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric, ALIAS_HIDDEN(bgp_redistribute_ipv4_ospf_metric, bgp_redistribute_ipv4_ospf_metric_hidden_cmd, - "redistribute (1-65535) metric (0-4294967295)", + "redistribute (1-65535) metric (0-4294967295)", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Metric for redistributed routes\n" "Default metric\n") DEFUN (bgp_redistribute_ipv4_ospf_rmap_metric, bgp_redistribute_ipv4_ospf_rmap_metric_cmd, - "redistribute (1-65535) route-map RMAP_NAME metric (0-4294967295)", + "redistribute (1-65535) route-map RMAP_NAME metric (0-4294967295)", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Route map reference\n" "Pointer to route-map entries\n" @@ -17009,6 +17051,8 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap_metric, struct route_map *route_map = route_map_lookup_warn_noexist(vty, argv[idx_word]->arg); + instance = strtoul(argv[idx_number]->arg, NULL, 10); + if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { @@ -17018,10 +17062,20 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap_metric, argv[idx_ospf_table]->arg); return CMD_WARNING_CONFIG_FAILED; } - protocol = ZEBRA_ROUTE_TABLE; + if (strncmp(argv[idx_ospf_table]->arg, "table-direct", + strlen("table-direct")) == 0) { + protocol = ZEBRA_ROUTE_TABLE_DIRECT; + if (instance == RT_TABLE_MAIN || + instance == RT_TABLE_LOCAL) { + vty_out(vty, + "%% 'table-direct', can not use %u routing table\n", + instance); + return CMD_WARNING_CONFIG_FAILED; + } + } else + protocol = ZEBRA_ROUTE_TABLE; } - instance = strtoul(argv[idx_number]->arg, NULL, 10); metric = strtoul(argv[idx_number_2]->arg, NULL, 10); red = bgp_redist_add(bgp, AFI_IP, protocol, instance); @@ -17035,10 +17089,11 @@ DEFUN (bgp_redistribute_ipv4_ospf_rmap_metric, ALIAS_HIDDEN( 
bgp_redistribute_ipv4_ospf_rmap_metric, bgp_redistribute_ipv4_ospf_rmap_metric_hidden_cmd, - "redistribute (1-65535) route-map RMAP_NAME metric (0-4294967295)", + "redistribute (1-65535) route-map RMAP_NAME metric (0-4294967295)", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Route map reference\n" "Pointer to route-map entries\n" @@ -17047,10 +17102,11 @@ ALIAS_HIDDEN( DEFUN (bgp_redistribute_ipv4_ospf_metric_rmap, bgp_redistribute_ipv4_ospf_metric_rmap_cmd, - "redistribute (1-65535) metric (0-4294967295) route-map RMAP_NAME", + "redistribute (1-65535) metric (0-4294967295) route-map RMAP_NAME", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Metric for redistributed routes\n" "Default metric\n" @@ -17070,6 +17126,8 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric_rmap, struct route_map *route_map = route_map_lookup_warn_noexist(vty, argv[idx_word]->arg); + instance = strtoul(argv[idx_number]->arg, NULL, 10); + if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { @@ -17078,8 +17136,18 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric_rmap, "%% Only default BGP instance can use '%s'\n", argv[idx_ospf_table]->arg); return CMD_WARNING_CONFIG_FAILED; - } - protocol = ZEBRA_ROUTE_TABLE; + } else if (strncmp(argv[idx_ospf_table]->arg, "table-direct", + strlen("table-direct")) == 0) { + protocol = ZEBRA_ROUTE_TABLE_DIRECT; + if (instance == RT_TABLE_MAIN || + instance == RT_TABLE_LOCAL) { + vty_out(vty, + "%% 'table-direct', can not use %u routing table\n", + instance); + return CMD_WARNING_CONFIG_FAILED; + } + } else + protocol = ZEBRA_ROUTE_TABLE; } instance = strtoul(argv[idx_number]->arg, NULL, 10); @@ -17096,10 +17164,11 @@ DEFUN (bgp_redistribute_ipv4_ospf_metric_rmap, ALIAS_HIDDEN( bgp_redistribute_ipv4_ospf_metric_rmap, bgp_redistribute_ipv4_ospf_metric_rmap_hidden_cmd, - "redistribute (1-65535) metric (0-4294967295) route-map RMAP_NAME", + "redistribute (1-65535) metric (0-4294967295) route-map RMAP_NAME", "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Metric for redistributed routes\n" "Default metric\n" @@ -17108,11 +17177,12 @@ ALIAS_HIDDEN( DEFUN (no_bgp_redistribute_ipv4_ospf, no_bgp_redistribute_ipv4_ospf_cmd, - "no redistribute (1-65535) [{metric (0-4294967295)|route-map RMAP_NAME}]", + "no redistribute (1-65535) [{metric (0-4294967295)|route-map RMAP_NAME}]", NO_STR "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Metric for redistributed routes\n" "Default metric\n" @@ -17125,6 +17195,8 @@ DEFUN (no_bgp_redistribute_ipv4_ospf, unsigned short instance; int protocol; + instance = strtoul(argv[idx_number]->arg, NULL, 10); + if (strncmp(argv[idx_ospf_table]->arg, "o", 1) == 0) protocol = ZEBRA_ROUTE_OSPF; else { @@ -17134,21 +17206,32 @@ DEFUN (no_bgp_redistribute_ipv4_ospf, argv[idx_ospf_table]->arg); return CMD_WARNING_CONFIG_FAILED; } - protocol = ZEBRA_ROUTE_TABLE; + if (strncmp(argv[idx_ospf_table]->arg, "table-direct", + strlen("table-direct")) 
== 0) { + protocol = ZEBRA_ROUTE_TABLE_DIRECT; + if (instance == RT_TABLE_MAIN || + instance == RT_TABLE_LOCAL) { + vty_out(vty, + "%% 'table-direct', can not use %u routing table\n", + instance); + return CMD_WARNING_CONFIG_FAILED; + } + } else + protocol = ZEBRA_ROUTE_TABLE; } - instance = strtoul(argv[idx_number]->arg, NULL, 10); bgp_redistribute_unset(bgp, AFI_IP, protocol, instance); return CMD_SUCCESS; } ALIAS_HIDDEN( no_bgp_redistribute_ipv4_ospf, no_bgp_redistribute_ipv4_ospf_hidden_cmd, - "no redistribute (1-65535) [{metric (0-4294967295)|route-map RMAP_NAME}]", + "no redistribute (1-65535) [{metric (0-4294967295)|route-map RMAP_NAME}]", NO_STR "Redistribute information from another routing protocol\n" "Open Shortest Path First (OSPFv2)\n" "Non-main Kernel Routing Table\n" + "Non-main Kernel Routing Table - Direct\n" "Instance ID/Table ID\n" "Metric for redistributed routes\n" "Default metric\n" diff --git a/bgpd/bgp_zebra.c b/bgpd/bgp_zebra.c index 212b7f398b4c..fc0911f0638b 100644 --- a/bgpd/bgp_zebra.c +++ b/bgpd/bgp_zebra.c @@ -2051,7 +2051,8 @@ void bgp_redistribute_unset(struct bgp *bgp, afi_t afi, int type, struct listnode *node, *nnode; struct bgp_redist *red; - if (type != ZEBRA_ROUTE_TABLE || instance != 0) + if ((type != ZEBRA_ROUTE_TABLE && type != ZEBRA_ROUTE_TABLE_DIRECT) || + instance != 0) return _bgp_redistribute_unset(bgp, afi, type, instance); /* walk over instance */ From 8937fe668dae8b7141626af6a7bf9788436d7dda Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Mon, 11 Sep 2023 13:50:23 +0200 Subject: [PATCH 5/6] doc: update redistribute table-direct command Add the redistribute table/table-direct command in the user guide. Signed-off-by: Philippe Guibert Signed-off-by: Louis Scalbert --- doc/user/bgp.rst | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/doc/user/bgp.rst b/doc/user/bgp.rst index 3e7efc12aa5e..5c7ddcfc3a68 100644 --- a/doc/user/bgp.rst +++ b/doc/user/bgp.rst @@ -1309,10 +1309,31 @@ section for the specific AF to redistribute into. Protocol availability for redistribution is determined by BGP AF; for example, you cannot redistribute OSPFv3 into ``address-family ipv4 unicast`` as OSPFv3 supports IPv6. -.. clicmd:: redistribute [metric (0-4294967295)] [route-map WORD] +.. clicmd:: redistribute [metric (0-4294967295)] [route-map WORD] Redistribute routes from other protocols into BGP. +.. clicmd:: redistribute (1-65535)] [metric (0-4294967295)] [route-map WORD] + + Redistribute routes from a routing table ID into BGP. There are two + techniques for redistribution: + + - Standard Table Redistribution ``table (1-65535)``: + - Routes from the specified routing table ID are imported into the + default routing table using the ``ip import-table ID`` command. + - These routes are identified by the protocol type "T[ID]" when + displayed with ``show (ip|ipv6) route``. + - The ``redistribute table ID`` command then integrates these routes + into BGP. + + - Direct Table Redistribution ``table-direct (1-65535)``: + - This method directly imports routes from the designated routing table + ID into BGP, omitting the step of adding to the default routing table. + - This method is especially relevant when the specified table ID is + checked against routing by appending the appropriate `ip rules`. + +Redistribute routes from a routing table number into BGP. + .. clicmd:: redistribute vnc-direct Redistribute VNC direct (not via zebra) routes to BGP process. 
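
To make the difference between the two techniques concrete, here is a hedged configuration sketch (table ID 100 is only an example, reused from the earlier zebra commit message). The standard method needs the extra 'ip import-table' step, while the direct method consumes table 100 as-is:

> ! Standard table redistribution: table 100 is first copied into the default table
> ip import-table 100
> router bgp 65500
>  address-family ipv4 unicast
>   redistribute table 100
>
> ! Direct table redistribution: no copy into the default table
> router bgp 65500
>  address-family ipv4 unicast
>   redistribute table-direct 100
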
From deac143c6eef3c4e8cc1c6d2ef29d830d1ae5b86 Mon Sep 17 00:00:00 2001 From: Philippe Guibert Date: Mon, 28 Aug 2023 11:12:20 +0200 Subject: [PATCH 6/6] topotests: add bgp_redistribute_table test There is no test that ensures the test of the 'redistribute table-direct' facility. Add a test that checks that routes created before and after BGP is started, is correctly imported. Signed-off-by: Philippe Guibert --- .../bgp_redistribute_table/__init__.py | 0 .../bgp_redistribute_table/r1/bgpd.conf | 8 + .../r1/ipv4_routes_with_all_redistribute.json | 71 ++++ .../r1/ipv4_routes_with_redistribute.json | 48 +++ .../r1/ipv4_routes_without_redistribute.json | 25 ++ .../bgp_redistribute_table/r1/zebra.conf | 7 + .../bgp_redistribute_table/r2/bgpd.conf | 10 + .../bgp_redistribute_table/r2/zebra.conf | 8 + .../test_bgp_redistribute_table.py | 306 ++++++++++++++++++ 9 files changed, 483 insertions(+) create mode 100644 tests/topotests/bgp_redistribute_table/__init__.py create mode 100644 tests/topotests/bgp_redistribute_table/r1/bgpd.conf create mode 100644 tests/topotests/bgp_redistribute_table/r1/ipv4_routes_with_all_redistribute.json create mode 100644 tests/topotests/bgp_redistribute_table/r1/ipv4_routes_with_redistribute.json create mode 100644 tests/topotests/bgp_redistribute_table/r1/ipv4_routes_without_redistribute.json create mode 100644 tests/topotests/bgp_redistribute_table/r1/zebra.conf create mode 100644 tests/topotests/bgp_redistribute_table/r2/bgpd.conf create mode 100644 tests/topotests/bgp_redistribute_table/r2/zebra.conf create mode 100644 tests/topotests/bgp_redistribute_table/test_bgp_redistribute_table.py diff --git a/tests/topotests/bgp_redistribute_table/__init__.py b/tests/topotests/bgp_redistribute_table/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/tests/topotests/bgp_redistribute_table/r1/bgpd.conf b/tests/topotests/bgp_redistribute_table/r1/bgpd.conf new file mode 100644 index 000000000000..c5e0fcd92bbf --- /dev/null +++ b/tests/topotests/bgp_redistribute_table/r1/bgpd.conf @@ -0,0 +1,8 @@ +router bgp 65500 + bgp router-id 192.0.2.1 + no bgp ebgp-requires-policy + neighbor 192.168.0.2 remote-as 65501 + address-family ipv4 unicast + neighbor 192.168.0.2 activate + exit-address-family +! 
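
The JSON files that follow are the zebra RIB views expected on r1: 172.31.0.2/32 is advertised through the 'network' statement on r2, while the other prefixes are kernel routes that the test injects on r2 into table 2200. As a preview of the test script further down, the injection commands are of this form:

> ip route add 172.31.0.10/32 via 172.31.1.10 table 2200
> ip route add 172.31.0.15/32 via 172.31.1.100 table 2200
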
diff --git a/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_with_all_redistribute.json b/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_with_all_redistribute.json new file mode 100644 index 000000000000..e5a27f32ea0e --- /dev/null +++ b/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_with_all_redistribute.json @@ -0,0 +1,71 @@ +{ + "172.31.0.2/32": [ + { + "prefix": "172.31.0.2/32", + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "nexthops": [ + { + "fib": true, + "ip": "192.168.0.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": true, + "weight": 1 + } + ] + } + ], + "172.31.0.10/32": [ + { + "prefix": "172.31.0.10/32", + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "nexthops": [ + { + "fib": true, + "ip": "192.168.0.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": true, + "weight": 1 + } + ] + } + ], + "172.31.0.15/32": [ + { + "prefix": "172.31.0.15/32", + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "nexthops": [ + { + "fib": true, + "ip": "192.168.0.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": true, + "weight": 1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_with_redistribute.json b/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_with_redistribute.json new file mode 100644 index 000000000000..1304edddca07 --- /dev/null +++ b/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_with_redistribute.json @@ -0,0 +1,48 @@ +{ + "172.31.0.2/32": [ + { + "prefix": "172.31.0.2/32", + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "nexthops": [ + { + "fib": true, + "ip": "192.168.0.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": true, + "weight": 1 + } + ] + } + ], + "172.31.0.10/32": [ + { + "prefix": "172.31.0.10/32", + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "nexthops": [ + { + "fib": true, + "ip": "192.168.0.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": true, + "weight": 1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_without_redistribute.json b/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_without_redistribute.json new file mode 100644 index 000000000000..74f594f9e840 --- /dev/null +++ b/tests/topotests/bgp_redistribute_table/r1/ipv4_routes_without_redistribute.json @@ -0,0 +1,25 @@ +{ + "172.31.0.2/32": [ + { + "prefix": "172.31.0.2/32", + "protocol": "bgp", + "vrfId": 0, + "vrfName": "default", + "selected": true, + "destSelected": true, + "distance": 20, + "metric": 0, + "installed": true, + "nexthops": [ + { + "fib": true, + "ip": "192.168.0.2", + "afi": "ipv4", + "interfaceName": "r1-eth0", + "active": true, + "weight": 1 + } + ] + } + ] +} diff --git a/tests/topotests/bgp_redistribute_table/r1/zebra.conf b/tests/topotests/bgp_redistribute_table/r1/zebra.conf new file mode 100644 index 000000000000..abe6d395a322 --- /dev/null +++ b/tests/topotests/bgp_redistribute_table/r1/zebra.conf @@ -0,0 +1,7 @@ +log stdout +interface r1-eth1 + ip address 
172.31.0.1/32 +! +interface r1-eth0 + ip address 192.168.0.1/24 +! diff --git a/tests/topotests/bgp_redistribute_table/r2/bgpd.conf b/tests/topotests/bgp_redistribute_table/r2/bgpd.conf new file mode 100644 index 000000000000..2ce704309756 --- /dev/null +++ b/tests/topotests/bgp_redistribute_table/r2/bgpd.conf @@ -0,0 +1,10 @@ +router bgp 65501 + bgp router-id 192.0.2.2 + no bgp ebgp-requires-policy + neighbor 192.168.0.1 remote-as 65500 + address-family ipv4 unicast + network 172.31.0.2/32 + neighbor 192.168.0.1 activate + redistribute table-direct 2200 + exit-address-family +! diff --git a/tests/topotests/bgp_redistribute_table/r2/zebra.conf b/tests/topotests/bgp_redistribute_table/r2/zebra.conf new file mode 100644 index 000000000000..89ad2ec74428 --- /dev/null +++ b/tests/topotests/bgp_redistribute_table/r2/zebra.conf @@ -0,0 +1,8 @@ +log stdout +interface r2-eth0 + ip address 192.168.0.2/24 +! +interface r2-eth1 + ip address 172.31.0.2/32 + ip address 172.31.1.2/24 +! diff --git a/tests/topotests/bgp_redistribute_table/test_bgp_redistribute_table.py b/tests/topotests/bgp_redistribute_table/test_bgp_redistribute_table.py new file mode 100644 index 000000000000..08b70ae0daee --- /dev/null +++ b/tests/topotests/bgp_redistribute_table/test_bgp_redistribute_table.py @@ -0,0 +1,306 @@ +#!/usr/bin/env python +# SPDX-License-Identifier: ISC + +# +# test_bgp_redistribute_table.py +# Part of NetDEF Topology Tests +# +# Copyright (c) 2023 by 6WIND +# + +""" + test_bgp_redistribute_table.py: Test the FRR BGP daemon with 'redistribute table-direct' +""" + +import os +import sys +import json +from functools import partial +import pytest + +# Save the Current Working Directory to find configuration files. +CWD = os.path.dirname(os.path.realpath(__file__)) +sys.path.append(os.path.join(CWD, "../")) + +# pylint: disable=C0413 +# Import topogen and topotest helpers +from lib import topotest +from lib.common_config import step +from lib.topogen import Topogen, TopoRouter, get_topogen +from lib.topolog import logger + +# Required to instantiate the topology builder class. + + +pytestmark = [pytest.mark.bgpd] + + +def build_topo(tgen): + "Build function" + + # Create 2 routers. + tgen.add_router("r1") + tgen.add_router("r2") + + switch = tgen.add_switch("s1") + switch.add_link(tgen.gears["r1"]) + switch.add_link(tgen.gears["r2"]) + + switch = tgen.add_switch("s2") + switch.add_link(tgen.gears["r1"]) + + switch = tgen.add_switch("s3") + switch.add_link(tgen.gears["r2"]) + + +def setup_module(mod): + "Sets up the pytest environment" + tgen = Topogen(build_topo, mod.__name__) + tgen.start_topology() + + router_list = tgen.routers() + + for rname, router in router_list.items(): + router.load_config( + TopoRouter.RD_ZEBRA, os.path.join(CWD, "{}/zebra.conf".format(rname)) + ) + router.load_config( + TopoRouter.RD_BGP, os.path.join(CWD, "{}/bgpd.conf".format(rname)) + ) + + # Initialize all routers. 
+ tgen.start_router() + + +def teardown_module(_mod): + "Teardown the pytest environment" + tgen = get_topogen() + + tgen.stop_topology() + + +def _router_json_cmp_exact_filter(router, cmd, expected): + output = router.vtysh_cmd(cmd) + logger.info("{}: {}\n{}".format(router.name, cmd, output)) + + json_output = json.loads(output) + + # filter out tableVersion, version, nhVrfId and vrfId + for route, attrs in json_output.items(): + for attr in attrs: + if "table" in attr: + attr.pop("table") + if "internalStatus" in attr: + attr.pop("internalStatus") + if "internalFlags" in attr: + attr.pop("internalFlags") + if "internalNextHopNum" in attr: + attr.pop("internalNextHopNum") + if "internalNextHopActiveNum" in attr: + attr.pop("internalNextHopActiveNum") + if "nexthopGroupId" in attr: + attr.pop("nexthopGroupId") + if "installedNexthopGroupId" in attr: + attr.pop("installedNexthopGroupId") + if "uptime" in attr: + attr.pop("uptime") + if "prefixLen" in attr: + attr.pop("prefixLen") + if "asPath" in attr: + attr.pop("asPath") + for nexthop in attr.get("nexthops", []): + if "flags" in nexthop: + nexthop.pop("flags") + if "interfaceIndex" in nexthop: + nexthop.pop("interfaceIndex") + + return topotest.json_cmp(json_output, expected, exact=True) + + +def _check_zebra_rib_r1(with_redistributed_route, with_second_route=False): + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + with_str = "" if with_redistributed_route else "out" + + router = tgen.gears["r1"] + if with_redistributed_route: + with_str = "" + if with_second_route: + json_file = "{}/{}/ipv4_routes_with_all_redistribute.json".format( + CWD, router.name + ) + else: + json_file = "{}/{}/ipv4_routes_with_redistribute.json".format( + CWD, router.name + ) + else: + with_str = "out" + json_file = "{}/{}/ipv4_routes_without_redistribute.json".format( + CWD, router.name + ) + + step(f"Checking IPv4 routes for convergence on r1 with{with_str} kernel route") + expected = json.loads(open(json_file).read()) + test_func = partial( + _router_json_cmp_exact_filter, + router, + "show ip route bgp json", + expected, + ) + _, result = topotest.run_and_expect(test_func, None, count=10, wait=0.5) + assertmsg = '"{}" JSON output mismatches'.format(router.name) + assert result is None, assertmsg + + +def _test_add_and_check_kernel_route_on_table_2200(): + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router_list = tgen.routers() + + step("r2, adding new kernel route 172.31.0.10/32 on table 2200") + cmd = "ip route add 172.31.0.10/32 via 172.31.1.10 table 2200" + tgen.net["r2"].cmd(cmd) + + _check_zebra_rib_r1(True) + + +def test_step1_protocols_convergence(): + """ + Assert that all protocols have converged + statuses as they depend on it. 
+ """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + logger.info("Checking IPv4 routes for convergence on r1") + _check_zebra_rib_r1(False) + + +def test_step2_add_kernel_route_on_table_2200(): + """ + On r2, create a kernel route on table 2200 + * Check that the kernel route is redistributed to r1 + """ + _test_add_and_check_kernel_route_on_table_2200() + + +def test_step3_remove_kernel_route_on_table_2200(): + """ + On r2, remove a kernel route on table 2200 + * Check that the kernel route is no more redistributed to r1 + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router_list = tgen.routers() + + step("r2, remove a kernel route on table 2200") + cmd = "ip route delete 172.31.0.10/32 via 172.31.1.10 table 2200" + tgen.net["r2"].cmd(cmd) + + _check_zebra_rib_r1(False) + + +def test_step4_add_kernel_route_on_table_2200(): + """ + On r2, add a kernel route on table 2200 + * Check that the kernel route is redistributed to r1 + """ + _test_add_and_check_kernel_route_on_table_2200() + + +def test_step5_no_redistribute_table_2200(): + """ + On r2, unconfigure the 'no redistribute' service + * Check that the 'redistribute' command is not configured + * Check that the kernel route is not redistributed to r1 + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r2"].vtysh_cmd( + "configure terminal\nrouter bgp 65501\naddress-family ipv4 unicast\nno redistribute table-direct\n" + ) + + step("r2, check that the 'redistribute' command is not configured") + out = tgen.net["r2"].cmd( + "vtysh -c 'show running-config' | grep 'redistribute table-direct'" + ) + + if "redistribute" in out: + assert True, "r2, redistribute command still present" + + _check_zebra_rib_r1(False) + + +def test_step6_redistribute_table_2200(): + """ + On r2, configure the 'redistribute' service + * Check that the 'redistribute' command is configured + * Check that the kernel route is redistributed to r1 + """ + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + tgen.gears["r2"].vtysh_cmd( + "configure terminal\nrouter bgp 65501\naddress-family ipv4 unicast\nredistribute table-direct 2200\n" + ) + + step("r2, check that the 'redistribute' command is configured") + out = tgen.net["r2"].cmd( + "vtysh -c 'show running-config' | grep 'redistribute table-direct'" + ) + if "redistribute" not in out: + assert True, "r2, redistribute command still present" + + _check_zebra_rib_r1(True) + + +def test_step7_reset_bgp_instance_add_kernel_route_and_add_bgp(): + """ + On r2, remove BGP configuration, create a kernel route on table 2200, + then restore BGP configuration + * Check that the kernel route is redistributed to r1 + """ + + tgen = get_topogen() + if tgen.routers_have_failure(): + pytest.skip(tgen.errors) + + router_list = tgen.routers() + + router = tgen.gears["r2"] + step("r2, removing r2 BGP configuration") + router.vtysh_cmd("configure terminal\nno router bgp 65501\n") + + step("r2, adding new kernel route 172.31.0.15/32 on table 2200") + cmd = "ip route add 172.31.0.15/32 via 172.31.1.100 table 2200" + tgen.net["r2"].cmd(cmd) + + router = tgen.gears["r2"] + step("r2, restoring r2 BGP configuration") + tgen.net["r2"].cmd("vtysh -f {}".format(os.path.join(CWD, "r2/bgpd.conf"))) + + _check_zebra_rib_r1(True, with_second_route=True) + + +def test_memory_leak(): + "Run the memory leak test and report results." 
+ tgen = get_topogen() + if not tgen.is_memleak_enabled(): + pytest.skip("Memory leak test/report is disabled") + + tgen.report_memory_leaks() + + +if __name__ == "__main__": + args = ["-s"] + sys.argv[1:] + sys.exit(pytest.main(args))
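
For reference, and assuming the usual FRR topotests environment (the framework needs root to create the virtual topology), the new test can be run on its own with an invocation along these lines:

> cd tests/topotests/bgp_redistribute_table
> sudo -E pytest -s test_bgp_redistribute_table.py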