X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Ffib%2Ffib_path.c;h=889d17def9cd4b5e755fb2591b263148df941b39;hb=6f6311560380d0e992f710558e213df1b098ef94;hp=f81f41706c520461d82a3cea410e996bc93564b6;hpb=0f26c5a0138ac86d7ebd197c31a09d8d624c35fe;p=vpp.git

diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c
index f81f41706c5..889d17def9c 100644
--- a/src/vnet/fib/fib_path.c
+++ b/src/vnet/fib/fib_path.c
@@ -21,8 +21,9 @@
 #include 
 #include 
 #include 
-#include 
+#include 
 #include 
+#include 
 #include 
 #include 
@@ -193,12 +194,19 @@ typedef struct fib_path_t_ {
     * next-hop's address. We can't derive this from the address itself
     * since the address can be all zeros
     */
-    fib_protocol_t fp_nh_proto;
+    dpo_proto_t fp_nh_proto;

    /**
-     * UCMP [unnormalised] weigt
+     * UCMP [unnormalised] weight
     */
-    u32 fp_weight;
+    u8 fp_weight;
+
+    /**
+     * A path preference. 0 is the best.
+     * Only paths of the best preference, that are 'up', are considered
+     * for forwarding.
+     */
+    u8 fp_preference;

    /**
     * per-type union of the data required to resolve the path
@@ -240,25 +248,6 @@ typedef struct fib_path_t_ {
        } fp_nh;
        /**
         * The FIB table index in which to find the next-hop.
-         * This needs to be fixed. We should lookup the adjacencies in
-         * a separate table of adjacencies, rather than from the FIB.
-         * Two reasons I can think of:
-         *   - consider:
-         *       int ip addr Gig0 10.0.0.1/24
-         *       ip route 10.0.0.2/32 via Gig1 192.168.1.2
-         *       ip route 1.1.1.1/32 via Gig0 10.0.0.2
-         *     this is perfectly valid.
-         *     Packets addressed to 10.0.0.2 should be sent via Gig1.
-         *     Packets address to 1.1.1.1 should be sent via Gig0.
-         *     when we perform the adj resolution from the FIB for the path
-         *     "via Gig0 10.0.0.2" the lookup will result in the route via Gig1
-         *     and so we will pick up the adj via Gig1 - which was not what the
-         *     operator wanted.
-         *   - we can only return link-type IPv4 and so not the link-type MPLS.
-         *     more on this in a later commit.
-         *
-         * The table ID should only belong to a recursive path and indicate
-         * which FIB should be used to resolve the next-hop.
         */
        fib_node_index_t fp_tbl_id;
    } recursive;
@@ -393,8 +382,9 @@ format_fib_path (u8 * s, va_list * args)
    s = format (s, " index:%d ", fib_path_get_index(path));
    s = format (s, "pl-index:%d ", path->fp_pl_index);
-    s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto);
+    s = format (s, "%U ", format_dpo_proto, path->fp_nh_proto);
    s = format (s, "weight=%d ", path->fp_weight);
+    s = format (s, "pref=%d ", path->fp_preference);
    s = format (s, "%s: ", fib_path_type_names[path->fp_type]);
    if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) {
        s = format(s, " oper-flags:");
@@ -465,7 +455,7 @@ format_fib_path (u8 * s, va_list * args)
        }
        break;
    case FIB_PATH_TYPE_RECURSIVE:
-        if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+        if (DPO_PROTO_MPLS == path->fp_nh_proto)
        {
            s = format (s, "via %U %U",
                        format_mpls_unicast_label,
@@ -563,14 +553,14 @@ fib_path_attached_next_hop_get_adj (fib_path_t *path,
         * the subnet address (the attached route) links to the
         * auto-adj (see below), we want that adj here too.
         */
-        return (adj_nbr_add_or_lock(path->fp_nh_proto,
+        return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
                                    link,
                                    &zero_addr,
                                    path->attached_next_hop.fp_interface));
    }
    else
    {
-        return (adj_nbr_add_or_lock(path->fp_nh_proto,
+        return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
                                    link,
                                    &path->attached_next_hop.fp_nh,
                                    path->attached_next_hop.fp_interface));
@@ -586,10 +576,10 @@ fib_path_attached_next_hop_set (fib_path_t *path)
     */
    dpo_set(&path->fp_dpo,
            DPO_ADJACENCY,
-            fib_proto_to_dpo(path->fp_nh_proto),
+            path->fp_nh_proto,
            fib_path_attached_next_hop_get_adj(
                path,
-                fib_proto_to_link(path->fp_nh_proto)));
+                dpo_proto_to_link(path->fp_nh_proto)));

    /*
     * become a child of the adjacency so we receive updates
@@ -607,6 +597,30 @@ fib_path_attached_next_hop_set (fib_path_t *path)
    }
}

+static const adj_index_t
+fib_path_attached_get_adj (fib_path_t *path,
+                           vnet_link_t link)
+{
+    if (vnet_sw_interface_is_p2p(vnet_get_main(),
+                                 path->attached.fp_interface))
+    {
+        /*
+         * point-2-point interfaces do not require a glean, since
+         * there is nothing to ARP. Install a rewrite/nbr adj instead
+         */
+        return (adj_nbr_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
+                                    link,
+                                    &zero_addr,
+                                    path->attached.fp_interface));
+    }
+    else
+    {
+        return (adj_glean_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
+                                      path->attached.fp_interface,
+                                      NULL));
+    }
+}
+
 /*
  * create of update the paths recursive adj
  */
@@ -637,7 +651,7 @@ fib_path_recursive_adj_update (fib_path_t *path,
    if (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP)
    {
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
-        dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+        dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));
    }
    else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)
    {
@@ -655,7 +669,7 @@ fib_path_recursive_adj_update (fib_path_t *path,
        if (fib_entry_get_best_source(path->fp_via_fib) >= FIB_SOURCE_RR)
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
-            dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+            dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));

            /*
             * PIC edge trigger. let the load-balance maps know
@@ -672,7 +686,7 @@ fib_path_recursive_adj_update (fib_path_t *path,
        if (!(FIB_ENTRY_FLAG_ATTACHED & fib_entry_get_flags(path->fp_via_fib)))
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
-            dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+            dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));

            /*
             * PIC edge trigger. let the load-balance maps know
@@ -686,7 +700,7 @@ fib_path_recursive_adj_update (fib_path_t *path,
        if (!fib_entry_is_resolved(path->fp_via_fib))
        {
            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
-            dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+            dpo_copy(&via_dpo, drop_dpo_get(path->fp_nh_proto));

            /*
             * PIC edge trigger. let the load-balance maps know
@@ -694,14 +708,20 @@ fib_path_recursive_adj_update (fib_path_t *path,
            load_balance_map_path_state_change(fib_path_get_index(path));
        }

+    /*
+     * If this path is contributing a drop, then it's not resolved
+     */
+    if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo))
+    {
+        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+    }
+
    /*
     * update the path's contributed DPO
     */
    dpo_copy(dpo, &via_dpo);

-    FIB_PATH_DBG(path, "recursive update: %U",
-                 fib_get_lookup_main(path->fp_nh_proto),
-                 &path->fp_dpo, 2);
+    FIB_PATH_DBG(path, "recursive update:");

    dpo_reset(&via_dpo);
}
@@ -752,11 +772,18 @@ fib_path_unresolve (fib_path_t *path)
        }
        break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
-    case FIB_PATH_TYPE_ATTACHED:
        adj_child_remove(path->fp_dpo.dpoi_index,
                         path->fp_sibling);
        adj_unlock(path->fp_dpo.dpoi_index);
        break;
+    case FIB_PATH_TYPE_ATTACHED:
+        if (DPO_PROTO_ETHERNET != path->fp_nh_proto)
+        {
+            adj_child_remove(path->fp_dpo.dpoi_index,
+                             path->fp_sibling);
+            adj_unlock(path->fp_dpo.dpoi_index);
+        }
+        break;
    case FIB_PATH_TYPE_EXCLUSIVE:
        dpo_reset(&path->exclusive.fp_ex_dpo);
        break;
@@ -783,13 +810,8 @@
 static fib_forward_chain_type_t
 fib_path_to_chain_type (const fib_path_t *path)
 {
-    switch (path->fp_nh_proto)
+    if (DPO_PROTO_MPLS == path->fp_nh_proto)
    {
-    case FIB_PROTOCOL_IP4:
-        return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
-    case FIB_PROTOCOL_IP6:
-        return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6);
-    case FIB_PROTOCOL_MPLS:
        if (FIB_PATH_TYPE_RECURSIVE == path->fp_type &&
            MPLS_EOS == path->recursive.fp_nh.fp_eos)
        {
@@ -797,10 +819,13 @@ fib_path_to_chain_type (const fib_path_t *path)
        }
        else
        {
-            return (FIB_FORW_CHAIN_TYPE_MPLS_EOS);
+            return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS);
        }
    }
-    return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4);
+    else
+    {
+        return (fib_forw_chain_type_from_dpo_proto(path->fp_nh_proto));
+    }
 }

 /*
@@ -906,7 +931,7 @@ FIXME comment
            ai = fib_path_attached_next_hop_get_adj(
                     path,
-                     fib_proto_to_link(path->fp_nh_proto));
+                     dpo_proto_to_link(path->fp_nh_proto));

            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
            if (if_is_up && adj_is_up(ai))
            {
@@ -914,9 +939,7 @@ FIXME comment
                path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED;
            }

-            dpo_set(&path->fp_dpo, DPO_ADJACENCY,
-                    fib_proto_to_dpo(path->fp_nh_proto),
-                    ai);
+            dpo_set(&path->fp_dpo, DPO_ADJACENCY, path->fp_nh_proto, ai);
            adj_unlock(ai);

            if (!if_is_up)
@@ -1066,6 +1089,7 @@ fib_path_create (fib_node_index_t pl_index,
         */
        path->fp_weight = 1;
    }
+    path->fp_preference = rpath->frp_preference;

    path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath);

    /*
@@ -1119,7 +1143,7 @@ fib_path_create (fib_node_index_t pl_index,
    else
    {
        path->fp_type = FIB_PATH_TYPE_RECURSIVE;
-        if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+        if (DPO_PROTO_MPLS == path->fp_nh_proto)
        {
            path->recursive.fp_nh.fp_local_label = rpath->frp_local_label;
            path->recursive.fp_nh.fp_eos = rpath->frp_eos;
@@ -1145,7 +1169,7 @@
  */
 fib_node_index_t
 fib_path_create_special (fib_node_index_t pl_index,
-                         fib_protocol_t nh_proto,
+                         dpo_proto_t nh_proto,
                         fib_path_cfg_flags_t flags,
                         const dpo_id_t *dpo)
 {
@@ -1160,6 +1184,7 @@ fib_path_create_special (fib_node_index_t pl_index,
    path->fp_pl_index = pl_index;
    path->fp_weight = 1;
+    path->fp_preference = 0;
    path->fp_nh_proto = nh_proto;
    path->fp_via_fib = FIB_NODE_INDEX_INVALID;
    path->fp_cfg_flags = flags;
@@ -1270,7 +1295,7 @@ fib_path_cmp_i (const fib_path_t *path1,
    /*
     * paths of different types and protocol are not equal.
-     * different weights only are the same path.
+     * different weights and/or preference only are the same path.
     */
    if (path1->fp_type != path2->fp_type)
    {
@@ -1345,6 +1370,15 @@ fib_path_cmp_for_sort (void * v1,
    path1 = fib_path_get(*pi1);
    path2 = fib_path_get(*pi2);

+    /*
+     * when sorting paths we want the highest preference paths
+     * first, so that the choices set built is in preference order
+     */
+    if (path1->fp_preference != path2->fp_preference)
+    {
+        return (path1->fp_preference - path2->fp_preference);
+    }
+
    return (fib_path_cmp_i(path1, path2));
 }
@@ -1401,7 +1435,7 @@ fib_path_cmp_w_route_path (fib_node_index_t path_index,
            res = (path->attached.fp_interface - rpath->frp_sw_if_index);
            break;
        case FIB_PATH_TYPE_RECURSIVE:
-            if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+            if (DPO_PROTO_MPLS == path->fp_nh_proto)
            {
                res = path->recursive.fp_nh.fp_local_label -
                    rpath->frp_local_label;
@@ -1503,8 +1537,7 @@ fib_path_recursive_loop_detect (fib_node_index_t path_index,
            FIB_PATH_DBG(path, "recursive loop formed");
            path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RECURSIVE_LOOP;

-            dpo_copy(&path->fp_dpo,
-                     drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+            dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
        }
        else
        {
@@ -1558,8 +1591,7 @@ fib_path_resolve (fib_node_index_t path_index)
     */
    if (fib_path_is_permanent_drop(path))
    {
-        dpo_copy(&path->fp_dpo,
-                 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+        dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
        path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
        return (fib_path_is_resolved(path_index));
    }
@@ -1570,47 +1602,35 @@ fib_path_resolve (fib_node_index_t path_index)
        fib_path_attached_next_hop_set(path);
        break;
    case FIB_PATH_TYPE_ATTACHED:
-        /*
-         * path->attached.fp_interface
-         */
-        if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
-                                           path->attached.fp_interface))
-        {
-            path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
-        }
-        if (vnet_sw_interface_is_p2p(vnet_get_main(),
-                                     path->attached.fp_interface))
-        {
-            /*
-             * point-2-point interfaces do not require a glean, since
-             * there is nothing to ARP. Install a rewrite/nbr adj instead
-             */
-            dpo_set(&path->fp_dpo,
-                    DPO_ADJACENCY,
-                    fib_proto_to_dpo(path->fp_nh_proto),
-                    adj_nbr_add_or_lock(
-                        path->fp_nh_proto,
-                        fib_proto_to_link(path->fp_nh_proto),
-                        &zero_addr,
-                        path->attached.fp_interface));
-        }
-        else
-        {
-            dpo_set(&path->fp_dpo,
-                    DPO_ADJACENCY_GLEAN,
-                    fib_proto_to_dpo(path->fp_nh_proto),
-                    adj_glean_add_or_lock(path->fp_nh_proto,
-                                          path->attached.fp_interface,
-                                          NULL));
-        }
-        /*
-         * become a child of the adjacency so we receive updates
-         * when the interface state changes
-         */
-        path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
-                                         FIB_NODE_TYPE_PATH,
-                                         fib_path_get_index(path));
+        if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
+        {
+            l2_bridge_dpo_add_or_lock(path->attached.fp_interface,
+                                      &path->fp_dpo);
+        }
+        else
+        {
+            /*
+             * path->attached.fp_interface
+             */
+            if (!vnet_sw_interface_is_admin_up(vnet_get_main(),
+                                               path->attached.fp_interface))
+            {
+                path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED;
+            }
+            dpo_set(&path->fp_dpo,
+                    DPO_ADJACENCY,
+                    path->fp_nh_proto,
+                    fib_path_attached_get_adj(path,
+                                              dpo_proto_to_link(path->fp_nh_proto)));
+            /*
+             * become a child of the adjacency so we receive updates
+             * when the interface state changes
+             */
+            path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index,
+                                             FIB_NODE_TYPE_PATH,
+                                             fib_path_get_index(path));
+        }
        break;
    case FIB_PATH_TYPE_RECURSIVE:
    {
@@ -1626,7 +1646,7 @@ fib_path_resolve (fib_node_index_t path_index)
        ASSERT(FIB_NODE_INDEX_INVALID == path->fp_via_fib);

-        if (FIB_PROTOCOL_MPLS == path->fp_nh_proto)
+        if (DPO_PROTO_MPLS == path->fp_nh_proto)
        {
            fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label,
                                       path->recursive.fp_nh.fp_eos,
@@ -1640,8 +1660,7 @@ fib_path_resolve (fib_node_index_t path_index)
        fei = fib_table_entry_special_add(path->recursive.fp_tbl_id,
                                          &pfx,
                                          FIB_SOURCE_RR,
-                                          FIB_ENTRY_FLAG_NONE,
-                                          ADJ_INDEX_INVALID);
+                                          FIB_ENTRY_FLAG_NONE);

        path = fib_path_get(path_index);
        path->fp_via_fib = fei;
@@ -1668,8 +1687,7 @@ fib_path_resolve (fib_node_index_t path_index)
        /*
         * Resolve via the drop
         */
-        dpo_copy(&path->fp_dpo,
-                 drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto)));
+        dpo_copy(&path->fp_dpo, drop_dpo_get(path->fp_nh_proto));
        break;
    case FIB_PATH_TYPE_DEAG:
    {
@@ -1684,7 +1702,7 @@ fib_path_resolve (fib_node_index_t path_index)
                                   LOOKUP_UNICAST);

        lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id,
-                                           fib_proto_to_dpo(path->fp_nh_proto),
+                                           path->fp_nh_proto,
                                           cast,
                                           LOOKUP_INPUT_DST_ADDR,
                                           LOOKUP_TABLE_FROM_CONFIG,
@@ -1695,7 +1713,7 @@ fib_path_resolve (fib_node_index_t path_index)
        /*
         * Resolve via a receive DPO.
         */
-        receive_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
+        receive_dpo_add_or_lock(path->fp_nh_proto,
                                path->receive.fp_interface,
                                &path->receive.fp_addr,
                                &path->fp_dpo);
@@ -1704,9 +1722,9 @@
        /*
         * Resolve via a receive DPO.
         */
-        interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto),
-                                  path->intf_rx.fp_interface,
-                                  &path->fp_dpo);
+        interface_rx_dpo_add_or_lock(path->fp_nh_proto,
+                                     path->intf_rx.fp_interface,
+                                     &path->fp_dpo);
        break;
    }
    case FIB_PATH_TYPE_EXCLUSIVE:
@@ -1736,7 +1754,11 @@ fib_path_get_resolving_interface (fib_node_index_t path_index)
    case FIB_PATH_TYPE_RECEIVE:
        return (path->receive.fp_interface);
    case FIB_PATH_TYPE_RECURSIVE:
-        return (fib_entry_get_resolving_interface(path->fp_via_fib));
+        if (fib_path_is_resolved(path_index))
+        {
+            return (fib_entry_get_resolving_interface(path->fp_via_fib));
+        }
+        break;
    case FIB_PATH_TYPE_INTF_RX:
    case FIB_PATH_TYPE_SPECIAL:
    case FIB_PATH_TYPE_DEAG:
@@ -1761,7 +1783,7 @@ fib_path_get_adj (fib_node_index_t path_index)
    return (ADJ_INDEX_INVALID);
 }

-int
+u16
 fib_path_get_weight (fib_node_index_t path_index)
 {
    fib_path_t *path;
@@ -1773,6 +1795,18 @@ fib_path_get_weight (fib_node_index_t path_index)
    return (path->fp_weight);
 }

+u16
+fib_path_get_preference (fib_node_index_t path_index)
+{
+    fib_path_t *path;
+
+    path = fib_path_get(path_index);
+
+    ASSERT(path);
+
+    return (path->fp_preference);
+}
+
 /**
  * @brief Contribute the path's adjacency to the list passed.
  * By calling this function over all paths, recursively, a child
@@ -1801,11 +1835,12 @@ fib_path_contribute_urpf (fib_node_index_t path_index,
        break;
    case FIB_PATH_TYPE_RECURSIVE:
-        if (FIB_NODE_INDEX_INVALID != path->fp_via_fib)
+        if (FIB_NODE_INDEX_INVALID != path->fp_via_fib &&
+            !fib_path_is_looped(path_index))
        {
            /*
             * there's unresolved due to constraints, and there's unresolved
-             * due to ain't go no via. can't do nowt w'out via.
+             * due to ain't got no via. can't do nowt w'out via.
             */
            fib_entry_contribute_urpf(path->fp_via_fib, urpf);
        }
@@ -1976,6 +2011,11 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
        dpo_copy(dpo, &path->exclusive.fp_ex_dpo);
        break;
    case FIB_PATH_TYPE_ATTACHED:
+        if (DPO_PROTO_ETHERNET == path->fp_nh_proto)
+        {
+            dpo_copy(dpo, &path->fp_dpo);
+            break;
+        }
        switch (fct)
        {
        case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
@@ -1984,7 +2024,20 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
        case FIB_FORW_CHAIN_TYPE_MPLS_EOS:
        case FIB_FORW_CHAIN_TYPE_ETHERNET:
        case FIB_FORW_CHAIN_TYPE_NSH:
-            break;
+        {
+            adj_index_t ai;
+
+            /*
+             * get an appropriate link type adj.
+             */
+            ai = fib_path_attached_get_adj(
+                     path,
+                     fib_forw_chain_type_to_link_type(fct));
+            dpo_set(dpo, DPO_ADJACENCY,
+                    fib_forw_chain_type_to_dpo_proto(fct), ai);
+            adj_unlock(ai);
+            break;
+        }
        case FIB_FORW_CHAIN_TYPE_MCAST_IP4:
        case FIB_FORW_CHAIN_TYPE_MCAST_IP6:
        {
            adj_index_t ai;

            /*
             * Create the adj needed for sending IP multicast traffic
             */
-            ai = adj_mcast_add_or_lock(path->fp_nh_proto,
+            ai = adj_mcast_add_or_lock(dpo_proto_to_fib(path->fp_nh_proto),
                                       fib_forw_chain_type_to_link_type(fct),
                                       path->attached.fp_interface);
            dpo_set(dpo, DPO_ADJACENCY,
@@ -2008,9 +2061,9 @@ fib_path_contribute_forwarding (fib_node_index_t path_index,
            /*
             * Create the adj needed for sending IP multicast traffic
             */
-            interface_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
-                                      path->attached.fp_interface,
-                                      dpo);
+            interface_rx_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct),
+                                         path->attached.fp_interface,
+                                         dpo);
            break;
        case FIB_PATH_TYPE_RECEIVE:
        case FIB_PATH_TYPE_SPECIAL:
@@ -2045,13 +2098,15 @@ fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index,
 }

 int
-fib_path_is_recursive (fib_node_index_t path_index)
+fib_path_is_recursive_constrained (fib_node_index_t path_index)
 {
    fib_path_t *path;

    path = fib_path_get(path_index);

-    return (FIB_PATH_TYPE_RECURSIVE == path->fp_type);
+    return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) &&
+            ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) ||
+             (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST)));
 }

 int
@@ -2097,7 +2152,7 @@ fib_path_is_looped (fib_node_index_t path_index)
    return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP);
 }

-int
+fib_path_list_walk_rc_t
 fib_path_encode (fib_node_index_t path_list_index,
                 fib_node_index_t path_index,
                 void *ctx)
@@ -2108,9 +2163,10 @@ fib_path_encode (fib_node_index_t path_list_index,
    path = fib_path_get(path_index);
    if (!path)
-      return (0);
+      return (FIB_PATH_LIST_WALK_CONTINUE);
    vec_add2(*api_rpaths, api_rpath, 1);
    api_rpath->rpath.frp_weight = path->fp_weight;
+    api_rpath->rpath.frp_preference = path->fp_preference;
    api_rpath->rpath.frp_proto = path->fp_nh_proto;
    api_rpath->rpath.frp_sw_if_index = ~0;
    api_rpath->dpo = path->exclusive.fp_ex_dpo;
@@ -2119,9 +2175,11 @@ fib_path_encode (fib_node_index_t path_list_index,
    case FIB_PATH_TYPE_RECEIVE:
        api_rpath->rpath.frp_addr = path->receive.fp_addr;
        api_rpath->rpath.frp_sw_if_index = path->receive.fp_interface;
+        api_rpath->dpo = path->fp_dpo;
        break;
    case FIB_PATH_TYPE_ATTACHED:
        api_rpath->rpath.frp_sw_if_index = path->attached.fp_interface;
+        api_rpath->dpo = path->fp_dpo;
        break;
    case FIB_PATH_TYPE_ATTACHED_NEXT_HOP:
        api_rpath->rpath.frp_sw_if_index = path->attached_next_hop.fp_interface;
@@ -2130,6 +2188,8 @@ fib_path_encode (fib_node_index_t path_list_index,
    case FIB_PATH_TYPE_SPECIAL:
        break;
    case FIB_PATH_TYPE_DEAG:
+        api_rpath->rpath.frp_fib_index = path->deag.fp_tbl_id;
+        api_rpath->dpo = path->fp_dpo;
        break;
    case FIB_PATH_TYPE_RECURSIVE:
        api_rpath->rpath.frp_addr = path->recursive.fp_nh.fp_ip;
@@ -2137,10 +2197,10 @@ fib_path_encode (fib_node_index_t path_list_index,
    default:
        break;
    }
-    return (1);
+    return (FIB_PATH_LIST_WALK_CONTINUE);
 }

-fib_protocol_t
+dpo_proto_t
 fib_path_get_proto (fib_node_index_t path_index)
 {
    fib_path_t *path;
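The preference check added to fib_path_cmp_for_sort() above orders paths by fp_preference before anything else (0 being the best), so that only the leading group of best-preference, resolved paths is used for forwarding. The following is a minimal standalone sketch of that ordering in plain C; it is not VPP code, and the demo_path_t struct and the weight tie-break are illustrative assumptions (the real comparator falls through to fib_path_cmp_i()).

/*
 * Standalone illustration of preference-first path sorting.
 * demo_path_t is a hypothetical stand-in for fib_path_t.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct demo_path_t_ {
    unsigned char fp_preference; /* 0 is the best preference */
    unsigned char fp_weight;     /* UCMP weight, only meaningful within one preference */
} demo_path_t;

static int
demo_path_cmp_for_sort (const void *v1, const void *v2)
{
    const demo_path_t *p1 = v1, *p2 = v2;

    /* best (numerically lowest) preference sorts first */
    if (p1->fp_preference != p2->fp_preference)
        return (p1->fp_preference - p2->fp_preference);

    /* within a preference, fall back to some stable key (here: weight) */
    return (p1->fp_weight - p2->fp_weight);
}

int
main (void)
{
    demo_path_t paths[] = {
        { .fp_preference = 1, .fp_weight = 5 },
        { .fp_preference = 0, .fp_weight = 1 },
        { .fp_preference = 0, .fp_weight = 3 },
    };
    int i, n = sizeof(paths) / sizeof(paths[0]);

    qsort(paths, n, sizeof(paths[0]), demo_path_cmp_for_sort);

    /* only the leading, best-preference entries would contribute to forwarding */
    for (i = 0; i < n; i++)
        printf("pref=%d weight=%d\n", paths[i].fp_preference, paths[i].fp_weight);

    return (0);
}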