X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Ffib%2Ffib_path.c;h=a32baf217f9b5821aaaa7553f1582e70ea816635;hb=57b5860f013953ce161d05302e05370db9cd6ee2;hp=928a9d432be38c380d697609da8a8322d993245c;hpb=a3af337e06a79f7d1dacf42a319f241c907122fc;p=vpp.git diff --git a/src/vnet/fib/fib_path.c b/src/vnet/fib/fib_path.c index 928a9d432be..a32baf217f9 100644 --- a/src/vnet/fib/fib_path.c +++ b/src/vnet/fib/fib_path.c @@ -21,6 +21,8 @@ #include #include #include +#include +#include #include #include @@ -66,6 +68,10 @@ typedef enum fib_path_type_t_ { * deag. Link to a lookup adj in the next table */ FIB_PATH_TYPE_DEAG, + /** + * interface receive. + */ + FIB_PATH_TYPE_INTF_RX, /** * receive. it's for-us. */ @@ -88,6 +94,7 @@ typedef enum fib_path_type_t_ { [FIB_PATH_TYPE_SPECIAL] = "special", \ [FIB_PATH_TYPE_EXCLUSIVE] = "exclusive", \ [FIB_PATH_TYPE_DEAG] = "deag", \ + [FIB_PATH_TYPE_INTF_RX] = "intf-rx", \ [FIB_PATH_TYPE_RECEIVE] = "receive", \ } @@ -189,9 +196,15 @@ typedef struct fib_path_t_ { fib_protocol_t fp_nh_proto; /** - * UCMP [unnormalised] weigt + * UCMP [unnormalised] weigth + */ + u16 fp_weight; + /** + * A path preference. 0 is the best. + * Only paths of the best preference, that are 'up', are considered + * for forwarding. */ - u32 fp_weight; + u16 fp_preference; /** * per-type union of the data required to resolve the path @@ -220,32 +233,19 @@ typedef struct fib_path_t_ { * The next-hop */ ip46_address_t fp_ip; - /** - * The local label to resolve through. - */ - mpls_label_t fp_local_label; + struct { + /** + * The local label to resolve through. + */ + mpls_label_t fp_local_label; + /** + * The EOS bit of the resolving label + */ + mpls_eos_bit_t fp_eos; + }; } fp_nh; /** * The FIB table index in which to find the next-hop. - * This needs to be fixed. We should lookup the adjacencies in - * a separate table of adjacencies, rather than from the FIB. - * Two reasons I can think of: - * - consider: - * int ip addr Gig0 10.0.0.1/24 - * ip route 10.0.0.2/32 via Gig1 192.168.1.2 - * ip route 1.1.1.1/32 via Gig0 10.0.0.2 - * this is perfectly valid. - * Packets addressed to 10.0.0.2 should be sent via Gig1. - * Packets address to 1.1.1.1 should be sent via Gig0. - * when we perform the adj resolution from the FIB for the path - * "via Gig0 10.0.0.2" the lookup will result in the route via Gig1 - * and so we will pick up the adj via Gig1 - which was not what the - * operator wanted. - * - we can only return link-type IPv4 and so not the link-type MPLS. - * more on this in a later commit. - * - * The table ID should only belong to a recursive path and indicate - * which FIB should be used to resolve the next-hop. */ fib_node_index_t fp_tbl_id; } recursive; @@ -254,6 +254,10 @@ typedef struct fib_path_t_ { * The FIB index in which to perfom the next lookup */ fib_node_index_t fp_tbl_id; + /** + * The RPF-ID to tag the packets with + */ + fib_rpf_id_t fp_rpf_id; } deag; struct { } special; @@ -273,6 +277,12 @@ typedef struct fib_path_t_ { */ ip46_address_t fp_addr; } receive; + struct { + /** + * The interface on which the packets will be input. 
+ */ + u32 fp_interface; + } intf_rx; }; STRUCT_MARK(path_hash_end); @@ -372,6 +382,7 @@ format_fib_path (u8 * s, va_list * args) s = format (s, "pl-index:%d ", path->fp_pl_index); s = format (s, "%U ", format_fib_protocol, path->fp_nh_proto); s = format (s, "weight=%d ", path->fp_weight); + s = format (s, "pref=%d ", path->fp_preference); s = format (s, "%s: ", fib_path_type_names[path->fp_type]); if (FIB_PATH_OPER_FLAG_NONE != path->fp_oper_flags) { s = format(s, " oper-flags:"); @@ -444,9 +455,11 @@ format_fib_path (u8 * s, va_list * args) case FIB_PATH_TYPE_RECURSIVE: if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) { - s = format (s, "via %U", + s = format (s, "via %U %U", format_mpls_unicast_label, - path->recursive.fp_nh.fp_local_label); + path->recursive.fp_nh.fp_local_label, + format_mpls_eos_bit, + path->recursive.fp_nh.fp_eos); } else { @@ -465,6 +478,7 @@ format_fib_path (u8 * s, va_list * args) break; case FIB_PATH_TYPE_RECEIVE: + case FIB_PATH_TYPE_INTF_RX: case FIB_PATH_TYPE_SPECIAL: case FIB_PATH_TYPE_DEAG: case FIB_PATH_TYPE_EXCLUSIVE: @@ -558,12 +572,6 @@ fib_path_attached_next_hop_set (fib_path_t *path) * resolve directly via the adjacnecy discribed by the * interface and next-hop */ - if (!vnet_sw_interface_is_admin_up(vnet_get_main(), - path->attached_next_hop.fp_interface)) - { - path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; - } - dpo_set(&path->fp_dpo, DPO_ADJACENCY, fib_proto_to_dpo(path->fp_nh_proto), @@ -578,6 +586,37 @@ fib_path_attached_next_hop_set (fib_path_t *path) path->fp_sibling = adj_child_add(path->fp_dpo.dpoi_index, FIB_NODE_TYPE_PATH, fib_path_get_index(path)); + + if (!vnet_sw_interface_is_admin_up(vnet_get_main(), + path->attached_next_hop.fp_interface) || + !adj_is_up(path->fp_dpo.dpoi_index)) + { + path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; + } +} + +static const adj_index_t +fib_path_attached_get_adj (fib_path_t *path, + vnet_link_t link) +{ + if (vnet_sw_interface_is_p2p(vnet_get_main(), + path->attached.fp_interface)) + { + /* + * point-2-point interfaces do not require a glean, since + * there is nothing to ARP. Install a rewrite/nbr adj instead + */ + return (adj_nbr_add_or_lock(path->fp_nh_proto, + link, + &zero_addr, + path->attached.fp_interface)); + } + else + { + return (adj_glean_add_or_lock(path->fp_nh_proto, + path->attached.fp_interface, + NULL)); + } } /* @@ -653,6 +692,27 @@ fib_path_recursive_adj_update (fib_path_t *path, load_balance_map_path_state_change(fib_path_get_index(path)); } } + /* + * check for over-riding factors on the FIB entry itself + */ + if (!fib_entry_is_resolved(path->fp_via_fib)) + { + path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; + dpo_copy(&via_dpo, drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); + + /* + * PIC edge trigger. let the load-balance maps know + */ + load_balance_map_path_state_change(fib_path_get_index(path)); + } + + /* + * If this path is contributing a drop, then it's not resolved + */ + if (dpo_is_drop(&via_dpo) || load_balance_is_drop(&via_dpo)) + { + path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; + } /* * update the path's contributed DPO @@ -722,6 +782,7 @@ fib_path_unresolve (fib_path_t *path) break; case FIB_PATH_TYPE_SPECIAL: case FIB_PATH_TYPE_RECEIVE: + case FIB_PATH_TYPE_INTF_RX: case FIB_PATH_TYPE_DEAG: /* * these hold only the path's DPO, which is reset below. 
@@ -740,16 +801,24 @@ fib_path_unresolve (fib_path_t *path) } static fib_forward_chain_type_t -fib_path_proto_to_chain_type (fib_protocol_t proto) +fib_path_to_chain_type (const fib_path_t *path) { - switch (proto) + switch (path->fp_nh_proto) { case FIB_PROTOCOL_IP4: return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4); case FIB_PROTOCOL_IP6: return (FIB_FORW_CHAIN_TYPE_UNICAST_IP6); case FIB_PROTOCOL_MPLS: - return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS); + if (FIB_PATH_TYPE_RECURSIVE == path->fp_type && + MPLS_EOS == path->recursive.fp_nh.fp_eos) + { + return (FIB_FORW_CHAIN_TYPE_MPLS_EOS); + } + else + { + return (FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS); + } } return (FIB_FORW_CHAIN_TYPE_UNICAST_IP4); } @@ -779,7 +848,7 @@ fib_path_back_walk_notify (fib_node_t *node, */ fib_path_recursive_adj_update( path, - fib_path_proto_to_chain_type(path->fp_nh_proto), + fib_path_to_chain_type(path), &path->fp_dpo); } if ((FIB_NODE_BW_REASON_FLAG_ADJ_UPDATE & ctx->fnbw_reason) || @@ -855,15 +924,16 @@ FIXME comment vnet_get_main(), path->attached_next_hop.fp_interface); - if (if_is_up) - { - path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED; - } - ai = fib_path_attached_next_hop_get_adj( path, fib_proto_to_link(path->fp_nh_proto)); + path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; + if (if_is_up && adj_is_up(ai)) + { + path->fp_oper_flags |= FIB_PATH_OPER_FLAG_RESOLVED; + } + dpo_set(&path->fp_dpo, DPO_ADJACENCY, fib_proto_to_dpo(path->fp_nh_proto), ai); @@ -916,6 +986,8 @@ FIXME comment path->fp_oper_flags |= FIB_PATH_OPER_FLAG_DROP; } break; + case FIB_PATH_TYPE_INTF_RX: + ASSERT(0); case FIB_PATH_TYPE_DEAG: /* * FIXME When VRF delete is allowed this will need a poke. @@ -971,6 +1043,14 @@ fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath) cfg_flags |= FIB_PATH_CFG_FLAG_LOCAL; if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED) cfg_flags |= FIB_PATH_CFG_FLAG_ATTACHED; + if (rpath->frp_flags & FIB_ROUTE_PATH_INTF_RX) + cfg_flags |= FIB_PATH_CFG_FLAG_INTF_RX; + if (rpath->frp_flags & FIB_ROUTE_PATH_RPF_ID) + cfg_flags |= FIB_PATH_CFG_FLAG_RPF_ID; + if (rpath->frp_flags & FIB_ROUTE_PATH_EXCLUSIVE) + cfg_flags |= FIB_PATH_CFG_FLAG_EXCLUSIVE; + if (rpath->frp_flags & FIB_ROUTE_PATH_DROP) + cfg_flags |= FIB_PATH_CFG_FLAG_DROP; return (cfg_flags); } @@ -983,8 +1063,6 @@ fib_path_route_flags_to_cfg_flags (const fib_route_path_t *rpath) */ fib_node_index_t fib_path_create (fib_node_index_t pl_index, - fib_protocol_t nh_proto, - fib_path_cfg_flags_t flags, const fib_route_path_t *rpath) { fib_path_t *path; @@ -997,7 +1075,7 @@ fib_path_create (fib_node_index_t pl_index, dpo_reset(&path->fp_dpo); path->fp_pl_index = pl_index; - path->fp_nh_proto = nh_proto; + path->fp_nh_proto = rpath->frp_proto; path->fp_via_fib = FIB_NODE_INDEX_INVALID; path->fp_weight = rpath->frp_weight; if (0 == path->fp_weight) @@ -1008,8 +1086,8 @@ fib_path_create (fib_node_index_t pl_index, */ path->fp_weight = 1; } - path->fp_cfg_flags = flags; - path->fp_cfg_flags |= fib_path_route_flags_to_cfg_flags(rpath); + path->fp_preference = rpath->frp_preference; + path->fp_cfg_flags = fib_path_route_flags_to_cfg_flags(rpath); /* * deduce the path's tpye from the parementers and save what is needed. 
@@ -1020,6 +1098,17 @@ fib_path_create (fib_node_index_t pl_index, path->receive.fp_interface = rpath->frp_sw_if_index; path->receive.fp_addr = rpath->frp_addr; } + else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_INTF_RX) + { + path->fp_type = FIB_PATH_TYPE_INTF_RX; + path->intf_rx.fp_interface = rpath->frp_sw_if_index; + } + else if (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID) + { + path->fp_type = FIB_PATH_TYPE_DEAG; + path->deag.fp_tbl_id = rpath->frp_fib_index; + path->deag.fp_rpf_id = rpath->frp_rpf_id; + } else if (~0 != rpath->frp_sw_if_index) { if (ip46_address_is_zero(&rpath->frp_addr)) @@ -1054,6 +1143,7 @@ fib_path_create (fib_node_index_t pl_index, if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) { path->recursive.fp_nh.fp_local_label = rpath->frp_local_label; + path->recursive.fp_nh.fp_eos = rpath->frp_eos; } else { @@ -1091,6 +1181,7 @@ fib_path_create_special (fib_node_index_t pl_index, path->fp_pl_index = pl_index; path->fp_weight = 1; + path->fp_preference = 0; path->fp_nh_proto = nh_proto; path->fp_via_fib = FIB_NODE_INDEX_INVALID; path->fp_cfg_flags = flags; @@ -1201,7 +1292,7 @@ fib_path_cmp_i (const fib_path_t *path1, /* * paths of different types and protocol are not equal. - * different weights only are the same path. + * different weights and/or preference only are the same path. */ if (path1->fp_type != path2->fp_type) { @@ -1223,17 +1314,13 @@ fib_path_cmp_i (const fib_path_t *path1, res = ip46_address_cmp(&path1->attached_next_hop.fp_nh, &path2->attached_next_hop.fp_nh); if (0 == res) { - res = vnet_sw_interface_compare( - vnet_get_main(), - path1->attached_next_hop.fp_interface, - path2->attached_next_hop.fp_interface); + res = (path1->attached_next_hop.fp_interface - + path2->attached_next_hop.fp_interface); } break; case FIB_PATH_TYPE_ATTACHED: - res = vnet_sw_interface_compare( - vnet_get_main(), - path1->attached.fp_interface, - path2->attached.fp_interface); + res = (path1->attached.fp_interface - + path2->attached.fp_interface); break; case FIB_PATH_TYPE_RECURSIVE: res = ip46_address_cmp(&path1->recursive.fp_nh, @@ -1246,6 +1333,13 @@ fib_path_cmp_i (const fib_path_t *path1, break; case FIB_PATH_TYPE_DEAG: res = (path1->deag.fp_tbl_id - path2->deag.fp_tbl_id); + if (0 == res) + { + res = (path1->deag.fp_rpf_id - path2->deag.fp_rpf_id); + } + break; + case FIB_PATH_TYPE_INTF_RX: + res = (path1->intf_rx.fp_interface - path2->intf_rx.fp_interface); break; case FIB_PATH_TYPE_SPECIAL: case FIB_PATH_TYPE_RECEIVE: @@ -1273,6 +1367,15 @@ fib_path_cmp_for_sort (void * v1, path1 = fib_path_get(*pi1); path2 = fib_path_get(*pi2); + /* + * when sorting paths we want the highest preference paths + * first, so that the choices set built is in prefernce order + */ + if (path1->fp_preference != path2->fp_preference) + { + return (path1->fp_preference - path2->fp_preference); + } + return (fib_path_cmp_i(path1, path2)); } @@ -1321,22 +1424,22 @@ fib_path_cmp_w_route_path (fib_node_index_t path_index, &rpath->frp_addr); if (0 == res) { - res = vnet_sw_interface_compare( - vnet_get_main(), - path->attached_next_hop.fp_interface, - rpath->frp_sw_if_index); + res = (path->attached_next_hop.fp_interface - + rpath->frp_sw_if_index); } break; case FIB_PATH_TYPE_ATTACHED: - res = vnet_sw_interface_compare( - vnet_get_main(), - path->attached.fp_interface, - rpath->frp_sw_if_index); + res = (path->attached.fp_interface - rpath->frp_sw_if_index); break; case FIB_PATH_TYPE_RECURSIVE: if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) { res = path->recursive.fp_nh.fp_local_label - 
rpath->frp_local_label; + + if (res == 0) + { + res = path->recursive.fp_nh.fp_eos - rpath->frp_eos; + } } else { @@ -1349,9 +1452,16 @@ fib_path_cmp_w_route_path (fib_node_index_t path_index, res = (path->recursive.fp_tbl_id - rpath->frp_fib_index); } break; + case FIB_PATH_TYPE_INTF_RX: + res = (path->intf_rx.fp_interface - rpath->frp_sw_if_index); + break; case FIB_PATH_TYPE_DEAG: res = (path->deag.fp_tbl_id - rpath->frp_fib_index); - break; + if (0 == res) + { + res = (path->deag.fp_rpf_id - rpath->frp_rpf_id); + } + break; case FIB_PATH_TYPE_SPECIAL: case FIB_PATH_TYPE_RECEIVE: case FIB_PATH_TYPE_EXCLUSIVE: @@ -1450,6 +1560,7 @@ fib_path_recursive_loop_detect (fib_node_index_t path_index, case FIB_PATH_TYPE_SPECIAL: case FIB_PATH_TYPE_DEAG: case FIB_PATH_TYPE_RECEIVE: + case FIB_PATH_TYPE_INTF_RX: case FIB_PATH_TYPE_EXCLUSIVE: /* * these path types cannot be part of a loop, since they are the leaves @@ -1498,31 +1609,12 @@ fib_path_resolve (fib_node_index_t path_index) { path->fp_oper_flags &= ~FIB_PATH_OPER_FLAG_RESOLVED; } - if (vnet_sw_interface_is_p2p(vnet_get_main(), - path->attached.fp_interface)) - { - /* - * point-2-point interfaces do not require a glean, since - * there is nothing to ARP. Install a rewrite/nbr adj instead - */ - dpo_set(&path->fp_dpo, - DPO_ADJACENCY, - fib_proto_to_dpo(path->fp_nh_proto), - adj_nbr_add_or_lock( - path->fp_nh_proto, - fib_proto_to_link(path->fp_nh_proto), - &zero_addr, - path->attached.fp_interface)); - } - else - { - dpo_set(&path->fp_dpo, - DPO_ADJACENCY_GLEAN, - fib_proto_to_dpo(path->fp_nh_proto), - adj_glean_add_or_lock(path->fp_nh_proto, - path->attached.fp_interface, - NULL)); - } + dpo_set(&path->fp_dpo, + DPO_ADJACENCY, + fib_proto_to_dpo(path->fp_nh_proto), + fib_path_attached_get_adj(path, + fib_proto_to_link(path->fp_nh_proto))); + /* * become a child of the adjacency so we receive updates * when the interface state changes @@ -1548,7 +1640,9 @@ fib_path_resolve (fib_node_index_t path_index) if (FIB_PROTOCOL_MPLS == path->fp_nh_proto) { - fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label, &pfx); + fib_prefix_from_mpls_label(path->recursive.fp_nh.fp_local_label, + path->recursive.fp_nh.fp_eos, + &pfx); } else { @@ -1558,8 +1652,7 @@ fib_path_resolve (fib_node_index_t path_index) fei = fib_table_entry_special_add(path->recursive.fp_tbl_id, &pfx, FIB_SOURCE_RR, - FIB_ENTRY_FLAG_NONE, - ADJ_INDEX_INVALID); + FIB_ENTRY_FLAG_NONE); path = fib_path_get(path_index); path->fp_via_fib = fei; @@ -1577,7 +1670,7 @@ fib_path_resolve (fib_node_index_t path_index) */ fib_path_recursive_adj_update( path, - fib_path_proto_to_chain_type(path->fp_nh_proto), + fib_path_to_chain_type(path), &path->fp_dpo); break; @@ -1590,16 +1683,25 @@ fib_path_resolve (fib_node_index_t path_index) drop_dpo_get(fib_proto_to_dpo(path->fp_nh_proto))); break; case FIB_PATH_TYPE_DEAG: + { /* * Resolve via a lookup DPO. * FIXME. control plane should add routes with a table ID */ - lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id, - fib_proto_to_dpo(path->fp_nh_proto), - LOOKUP_INPUT_DST_ADDR, - LOOKUP_TABLE_FROM_CONFIG, - &path->fp_dpo); + lookup_cast_t cast; + + cast = (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RPF_ID ? + LOOKUP_MULTICAST : + LOOKUP_UNICAST); + + lookup_dpo_add_or_lock_w_fib_index(path->deag.fp_tbl_id, + fib_proto_to_dpo(path->fp_nh_proto), + cast, + LOOKUP_INPUT_DST_ADDR, + LOOKUP_TABLE_FROM_CONFIG, + &path->fp_dpo); break; + } case FIB_PATH_TYPE_RECEIVE: /* * Resolve via a receive DPO. 
@@ -1609,6 +1711,15 @@ fib_path_resolve (fib_node_index_t path_index) &path->receive.fp_addr, &path->fp_dpo); break; + case FIB_PATH_TYPE_INTF_RX: { + /* + * Resolve via a receive DPO. + */ + interface_dpo_add_or_lock(fib_proto_to_dpo(path->fp_nh_proto), + path->intf_rx.fp_interface, + &path->fp_dpo); + break; + } case FIB_PATH_TYPE_EXCLUSIVE: /* * Resolve via the user provided DPO @@ -1636,7 +1747,12 @@ fib_path_get_resolving_interface (fib_node_index_t path_index) case FIB_PATH_TYPE_RECEIVE: return (path->receive.fp_interface); case FIB_PATH_TYPE_RECURSIVE: - return (fib_entry_get_resolving_interface(path->fp_via_fib)); + if (fib_path_is_resolved(path_index)) + { + return (fib_entry_get_resolving_interface(path->fp_via_fib)); + } + break; + case FIB_PATH_TYPE_INTF_RX: case FIB_PATH_TYPE_SPECIAL: case FIB_PATH_TYPE_DEAG: case FIB_PATH_TYPE_EXCLUSIVE: @@ -1660,7 +1776,7 @@ fib_path_get_adj (fib_node_index_t path_index) return (ADJ_INDEX_INVALID); } -int +u16 fib_path_get_weight (fib_node_index_t path_index) { fib_path_t *path; @@ -1672,6 +1788,18 @@ fib_path_get_weight (fib_node_index_t path_index) return (path->fp_weight); } +u16 +fib_path_get_preference (fib_node_index_t path_index) +{ + fib_path_t *path; + + path = fib_path_get(path_index); + + ASSERT(path); + + return (path->fp_preference); +} + /** * @brief Contribute the path's adjacency to the list passed. * By calling this function over all paths, recursively, a child @@ -1684,11 +1812,11 @@ fib_path_contribute_urpf (fib_node_index_t path_index, { fib_path_t *path; - if (!fib_path_is_resolved(path_index)) - return; - path = fib_path_get(path_index); + /* + * resolved and unresolved paths contribute to the RPF list. + */ switch (path->fp_type) { case FIB_PATH_TYPE_ATTACHED_NEXT_HOP: @@ -1700,7 +1828,15 @@ fib_path_contribute_urpf (fib_node_index_t path_index, break; case FIB_PATH_TYPE_RECURSIVE: - fib_entry_contribute_urpf(path->fp_via_fib, urpf); + if (FIB_NODE_INDEX_INVALID != path->fp_via_fib && + !fib_path_is_looped(path_index)) + { + /* + * there's unresolved due to constraints, and there's unresolved + * due to ain't got no via. can't do nowt w'out via. + */ + fib_entry_contribute_urpf(path->fp_via_fib, urpf); + } break; case FIB_PATH_TYPE_EXCLUSIVE: @@ -1721,6 +1857,7 @@ fib_path_contribute_urpf (fib_node_index_t path_index, case FIB_PATH_TYPE_DEAG: case FIB_PATH_TYPE_RECEIVE: + case FIB_PATH_TYPE_INTF_RX: /* * these path types don't link to an adj */ @@ -1728,6 +1865,44 @@ fib_path_contribute_urpf (fib_node_index_t path_index, } } +void +fib_path_stack_mpls_disp (fib_node_index_t path_index, + dpo_proto_t payload_proto, + dpo_id_t *dpo) +{ + fib_path_t *path; + + path = fib_path_get(path_index); + + ASSERT(path); + + switch (path->fp_type) + { + case FIB_PATH_TYPE_DEAG: + { + dpo_id_t tmp = DPO_INVALID; + + dpo_copy(&tmp, dpo); + dpo_set(dpo, + DPO_MPLS_DISPOSITION, + payload_proto, + mpls_disp_dpo_create(payload_proto, + path->deag.fp_rpf_id, + &tmp)); + dpo_reset(&tmp); + break; + } + case FIB_PATH_TYPE_RECEIVE: + case FIB_PATH_TYPE_ATTACHED: + case FIB_PATH_TYPE_ATTACHED_NEXT_HOP: + case FIB_PATH_TYPE_RECURSIVE: + case FIB_PATH_TYPE_INTF_RX: + case FIB_PATH_TYPE_EXCLUSIVE: + case FIB_PATH_TYPE_SPECIAL: + break; + } +} + void fib_path_contribute_forwarding (fib_node_index_t path_index, fib_forward_chain_type_t fct, @@ -1747,7 +1922,7 @@ fib_path_contribute_forwarding (fib_node_index_t path_index, * This then represents the path's 'native' protocol; IP. * For all others will need to go find something else. 
*/ - if (fib_path_proto_to_chain_type(path->fp_nh_proto) == fct) + if (fib_path_to_chain_type(path) == fct) { dpo_copy(dpo, &path->fp_dpo); } @@ -1791,10 +1966,10 @@ fib_path_contribute_forwarding (fib_node_index_t path_index, case FIB_FORW_CHAIN_TYPE_UNICAST_IP4: case FIB_FORW_CHAIN_TYPE_UNICAST_IP6: case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS: - fib_path_recursive_adj_update(path, fct, dpo); - break; case FIB_FORW_CHAIN_TYPE_MCAST_IP4: case FIB_FORW_CHAIN_TYPE_MCAST_IP6: + fib_path_recursive_adj_update(path, fct, dpo); + break; case FIB_FORW_CHAIN_TYPE_ETHERNET: case FIB_FORW_CHAIN_TYPE_NSH: ASSERT(0); @@ -1807,13 +1982,14 @@ fib_path_contribute_forwarding (fib_node_index_t path_index, case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS: lookup_dpo_add_or_lock_w_table_id(MPLS_FIB_DEFAULT_TABLE_ID, DPO_PROTO_MPLS, + LOOKUP_UNICAST, LOOKUP_INPUT_DST_ADDR, LOOKUP_TABLE_FROM_CONFIG, dpo); break; + case FIB_FORW_CHAIN_TYPE_MPLS_EOS: case FIB_FORW_CHAIN_TYPE_UNICAST_IP4: case FIB_FORW_CHAIN_TYPE_UNICAST_IP6: - case FIB_FORW_CHAIN_TYPE_MPLS_EOS: dpo_copy(dpo, &path->fp_dpo); break; case FIB_FORW_CHAIN_TYPE_MCAST_IP4: @@ -1836,7 +2012,20 @@ fib_path_contribute_forwarding (fib_node_index_t path_index, case FIB_FORW_CHAIN_TYPE_MPLS_EOS: case FIB_FORW_CHAIN_TYPE_ETHERNET: case FIB_FORW_CHAIN_TYPE_NSH: - break; + { + adj_index_t ai; + + /* + * get a appropriate link type adj. + */ + ai = fib_path_attached_get_adj( + path, + fib_forw_chain_type_to_link_type(fct)); + dpo_set(dpo, DPO_ADJACENCY, + fib_forw_chain_type_to_dpo_proto(fct), ai); + adj_unlock(ai); + break; + } case FIB_FORW_CHAIN_TYPE_MCAST_IP4: case FIB_FORW_CHAIN_TYPE_MCAST_IP6: { @@ -1848,7 +2037,7 @@ fib_path_contribute_forwarding (fib_node_index_t path_index, ai = adj_mcast_add_or_lock(path->fp_nh_proto, fib_forw_chain_type_to_link_type(fct), path->attached.fp_interface); - dpo_set(dpo, DPO_ADJACENCY_MCAST, + dpo_set(dpo, DPO_ADJACENCY, fib_forw_chain_type_to_dpo_proto(fct), ai); adj_unlock(ai); @@ -1856,6 +2045,14 @@ fib_path_contribute_forwarding (fib_node_index_t path_index, break; } break; + case FIB_PATH_TYPE_INTF_RX: + /* + * Create the adj needed for sending IP multicast traffic + */ + interface_dpo_add_or_lock(fib_forw_chain_type_to_dpo_proto(fct), + path->attached.fp_interface, + dpo); + break; case FIB_PATH_TYPE_RECEIVE: case FIB_PATH_TYPE_SPECIAL: dpo_copy(dpo, &path->fp_dpo); @@ -1889,13 +2086,15 @@ fib_path_append_nh_for_multipath_hash (fib_node_index_t path_index, } int -fib_path_is_recursive (fib_node_index_t path_index) +fib_path_is_recursive_constrained (fib_node_index_t path_index) { fib_path_t *path; path = fib_path_get(path_index); - return (FIB_PATH_TYPE_RECURSIVE == path->fp_type); + return ((FIB_PATH_TYPE_RECURSIVE == path->fp_type) && + ((path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_ATTACHED) || + (path->fp_cfg_flags & FIB_PATH_CFG_FLAG_RESOLVE_HOST))); } int @@ -1941,7 +2140,7 @@ fib_path_is_looped (fib_node_index_t path_index) return (path->fp_oper_flags & FIB_PATH_OPER_FLAG_RECURSIVE_LOOP); } -int +fib_path_list_walk_rc_t fib_path_encode (fib_node_index_t path_list_index, fib_node_index_t path_index, void *ctx) @@ -1952,9 +2151,10 @@ fib_path_encode (fib_node_index_t path_list_index, path = fib_path_get(path_index); if (!path) - return (0); + return (FIB_PATH_LIST_WALK_CONTINUE); vec_add2(*api_rpaths, api_rpath, 1); api_rpath->rpath.frp_weight = path->fp_weight; + api_rpath->rpath.frp_preference = path->fp_preference; api_rpath->rpath.frp_proto = path->fp_nh_proto; api_rpath->rpath.frp_sw_if_index = ~0; api_rpath->dpo = 
path->exclusive.fp_ex_dpo; @@ -1981,7 +2181,7 @@ fib_path_encode (fib_node_index_t path_list_index, default: break; } - return (1); + return (FIB_PATH_LIST_WALK_CONTINUE); } fib_protocol_t
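
The `fib_path_cmp_for_sort` change in the diff above orders paths by the new `fp_preference` field before anything else, so the numerically lowest (best) preference paths head the sorted choice set, and only best-preference paths that are up are used for forwarding; weight and preference never affect path identity. A minimal standalone sketch of that ordering follows — plain C, deliberately independent of VPP's types; `demo_path_t`, `demo_path_cmp_for_sort` and the sample values are illustrative only, not part of the patch or of VPP's API.

/*
 * demo_pref_sort.c - illustrative only; mirrors the preference-first
 * comparison added to fib_path_cmp_for_sort() in the diff above.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    unsigned short preference; /* mirrors fp_preference: 0 is the best */
    unsigned short weight;     /* mirrors fp_weight (UCMP); not part of path identity */
} demo_path_t;

/*
 * lower preference sorts first, as in:
 *   return (path1->fp_preference - path2->fp_preference);
 */
static int
demo_path_cmp_for_sort (const void *v1, const void *v2)
{
    const demo_path_t *p1 = v1, *p2 = v2;

    if (p1->preference != p2->preference)
        return ((int) p1->preference - (int) p2->preference);
    /* equal preference: relative order is irrelevant for this illustration */
    return (0);
}

int
main (void)
{
    demo_path_t paths[] = { { 2, 1 }, { 0, 5 }, { 1, 1 }, { 0, 1 } };
    size_t i, n = sizeof (paths) / sizeof (paths[0]);

    qsort (paths, n, sizeof (paths[0]), demo_path_cmp_for_sort);

    /* the preference-0 entries print first; only those would form the
     * forwarding choice set while they are 'up' */
    for (i = 0; i < n; i++)
        printf ("pref=%hu weight=%hu\n", paths[i].preference, paths[i].weight);
    return (0);
}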