X-Git-Url: https://gerrit.fd.io/r/gitweb?p=vpp.git;a=blobdiff_plain;f=src%2Fvnet%2Ffib%2Ffib_entry_src.c;h=c6c2a04bed76b29ff3f85a8eeb2ca5fbdcfbe4d8;hp=1fb040608b415ac1287e2bc33c3d37e5be471dd9;hb=8feeaff56;hpb=c0790cfef0bd1c56f4c75dc4f959584148386258 diff --git a/src/vnet/fib/fib_entry_src.c b/src/vnet/fib/fib_entry_src.c index 1fb040608b4..c6c2a04bed7 100644 --- a/src/vnet/fib/fib_entry_src.c +++ b/src/vnet/fib/fib_entry_src.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include @@ -28,10 +29,27 @@ */ static fib_entry_src_vft_t fib_entry_src_vft[FIB_SOURCE_MAX]; -static fib_protocol_t -fib_entry_get_proto (const fib_entry_t * fib_entry) +/** + * Get the VFT for a given source. This is a combination of the source + * enum and the interposer flags + */ +const fib_entry_src_vft_t* +fib_entry_src_get_vft (const fib_entry_src_t *esrc) { - return (fib_entry->fe_prefix.fp_proto); + if (esrc->fes_entry_flags & FIB_ENTRY_FLAG_INTERPOSE) + { + return (&fib_entry_src_vft[FIB_SOURCE_INTERPOSE]); + } + + return (&fib_entry_src_vft[esrc->fes_src]); +} + +static void +fib_entry_src_copy_default (const fib_entry_src_t *orig_src, + const fib_entry_t *fib_entry, + fib_entry_src_t *copy_src) +{ + clib_memcpy(©_src->u, &orig_src->u, sizeof(copy_src->u)); } void @@ -39,6 +57,11 @@ fib_entry_src_register (fib_source_t source, const fib_entry_src_vft_t *vft) { fib_entry_src_vft[source] = *vft; + + if (NULL == fib_entry_src_vft[source].fesv_copy) + { + fib_entry_src_vft[source].fesv_copy = fib_entry_src_copy_default; + } } static int @@ -50,21 +73,19 @@ fib_entry_src_cmp_for_sort (void * v1, return (esrc1->fes_src - esrc2->fes_src); } -void +static void fib_entry_src_action_init (fib_entry_t *fib_entry, - fib_source_t source) - + fib_source_t source, + fib_entry_flag_t flags) { fib_entry_src_t esrc = { .fes_pl = FIB_NODE_INDEX_INVALID, .fes_flags = FIB_ENTRY_SRC_FLAG_NONE, .fes_src = source, + .fes_entry_flags = flags, }; - if (NULL != fib_entry_src_vft[source].fesv_init) - { - fib_entry_src_vft[source].fesv_init(&esrc); - } + FIB_ENTRY_SRC_VFT_INVOKE(&esrc, fesv_init, (&esrc)); vec_add1(fib_entry->fe_srcs, esrc); vec_sort_with_function(fib_entry->fe_srcs, @@ -72,9 +93,9 @@ fib_entry_src_action_init (fib_entry_t *fib_entry, } static fib_entry_src_t * -fib_entry_src_find (const fib_entry_t *fib_entry, - fib_source_t source, - u32 *index) +fib_entry_src_find_i (const fib_entry_t *fib_entry, + fib_source_t source, + u32 *index) { fib_entry_src_t *esrc; @@ -100,6 +121,14 @@ fib_entry_src_find (const fib_entry_t *fib_entry, return (NULL); } +static fib_entry_src_t * +fib_entry_src_find (const fib_entry_t *fib_entry, + fib_source_t source) + +{ + return (fib_entry_src_find_i(fib_entry, source, NULL)); +} + int fib_entry_is_sourced (fib_node_index_t fib_entry_index, fib_source_t source) @@ -108,27 +137,27 @@ fib_entry_is_sourced (fib_node_index_t fib_entry_index, fib_entry = fib_entry_get(fib_entry_index); - return (NULL != fib_entry_src_find(fib_entry, source, NULL)); + return (NULL != fib_entry_src_find(fib_entry, source)); } static fib_entry_src_t * fib_entry_src_find_or_create (fib_entry_t *fib_entry, fib_source_t source, - u32 *index) + fib_entry_flag_t flags) { fib_entry_src_t *esrc; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); if (NULL == esrc) { - fib_entry_src_action_init(fib_entry, source); + fib_entry_src_action_init(fib_entry, source, flags); } - return (fib_entry_src_find(fib_entry, source, NULL)); + return 
(fib_entry_src_find(fib_entry, source)); } -void +static void fib_entry_src_action_deinit (fib_entry_t *fib_entry, fib_source_t source) @@ -136,29 +165,24 @@ fib_entry_src_action_deinit (fib_entry_t *fib_entry, fib_entry_src_t *esrc; u32 index = ~0; - esrc = fib_entry_src_find(fib_entry, source, &index); + esrc = fib_entry_src_find_i(fib_entry, source, &index); ASSERT(NULL != esrc); - if (NULL != fib_entry_src_vft[source].fesv_deinit) - { - fib_entry_src_vft[source].fesv_deinit(esrc); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_deinit, (esrc)); - vec_free(esrc->fes_path_exts); + fib_path_ext_list_flush(&esrc->fes_path_exts); vec_del1(fib_entry->fe_srcs, index); + vec_sort_with_function(fib_entry->fe_srcs, + fib_entry_src_cmp_for_sort); } fib_entry_src_cover_res_t fib_entry_src_action_cover_change (fib_entry_t *fib_entry, - fib_source_t source) + fib_entry_src_t *esrc) { - if (NULL != fib_entry_src_vft[source].fesv_cover_change) - { - return (fib_entry_src_vft[source].fesv_cover_change( - fib_entry_src_find(fib_entry, source, NULL), - fib_entry)); - } + FIB_ENTRY_SRC_VFT_INVOKE_AND_RETURN(esrc, fesv_cover_change, + (esrc, fib_entry)); fib_entry_src_cover_res_t res = { .install = !0, @@ -169,14 +193,10 @@ fib_entry_src_action_cover_change (fib_entry_t *fib_entry, fib_entry_src_cover_res_t fib_entry_src_action_cover_update (fib_entry_t *fib_entry, - fib_source_t source) + fib_entry_src_t *esrc) { - if (NULL != fib_entry_src_vft[source].fesv_cover_update) - { - return (fib_entry_src_vft[source].fesv_cover_update( - fib_entry_src_find(fib_entry, source, NULL), - fib_entry)); - } + FIB_ENTRY_SRC_VFT_INVOKE_AND_RETURN(esrc, fesv_cover_update, + (esrc, fib_entry)); fib_entry_src_cover_res_t res = { .install = !0, @@ -187,11 +207,12 @@ fib_entry_src_action_cover_update (fib_entry_t *fib_entry, typedef struct fib_entry_src_collect_forwarding_ctx_t_ { - load_balance_path_t * next_hops; + load_balance_path_t *next_hops; const fib_entry_t *fib_entry; const fib_entry_src_t *esrc; fib_forward_chain_type_t fct; - int is_recursive; + int n_recursive_constrained; + u16 preference; } fib_entry_src_collect_forwarding_ctx_t; /** @@ -202,10 +223,11 @@ load_balance_flags_t fib_entry_calc_lb_flags (fib_entry_src_collect_forwarding_ctx_t *ctx) { /** - * We'll use a LB map is the path-list has recursive paths. + * We'll use a LB map if the path-list has multiple recursive paths. * recursive paths implies BGP, and hence scale. 
*/ - if (ctx->is_recursive) + if (ctx->n_recursive_constrained > 1 && + fib_path_list_is_popular(ctx->esrc->fes_pl)) { return (LOAD_BALANCE_FLAG_USES_MAP); } @@ -216,6 +238,7 @@ static int fib_entry_src_valid_out_label (mpls_label_t label) { return ((MPLS_LABEL_IS_REAL(label) || + MPLS_LABEL_POP == label || MPLS_IETF_IPV4_EXPLICIT_NULL_LABEL == label || MPLS_IETF_IPV6_EXPLICIT_NULL_LABEL == label || MPLS_IETF_IMPLICIT_NULL_LABEL == label)); @@ -229,8 +252,6 @@ fib_forward_chain_type_t fib_entry_chain_type_fixup (const fib_entry_t *entry, fib_forward_chain_type_t fct) { - ASSERT(FIB_FORW_CHAIN_TYPE_MPLS_EOS == fct); - /* * The EOS chain is a tricky since one cannot know the adjacency * to link to without knowing what the packets payload protocol @@ -238,6 +259,11 @@ fib_entry_chain_type_fixup (const fib_entry_t *entry, */ fib_forward_chain_type_t dfct; + if (FIB_FORW_CHAIN_TYPE_MPLS_EOS != fct) + { + return (fct); + } + dfct = fib_entry_get_default_chain_type(entry); if (FIB_FORW_CHAIN_TYPE_MPLS_EOS == dfct) @@ -259,106 +285,215 @@ fib_entry_chain_type_fixup (const fib_entry_t *entry, return (dfct); } -static int +static dpo_proto_t +fib_prefix_get_payload_proto (const fib_prefix_t *pfx) +{ + switch (pfx->fp_proto) + { + case FIB_PROTOCOL_IP4: + return (DPO_PROTO_IP4); + case FIB_PROTOCOL_IP6: + return (DPO_PROTO_IP6); + case FIB_PROTOCOL_MPLS: + return (pfx->fp_payload_proto); + } + + ASSERT(0); + return (DPO_PROTO_IP4); +} + +static void +fib_entry_src_get_path_forwarding (fib_node_index_t path_index, + fib_entry_src_collect_forwarding_ctx_t *ctx) +{ + load_balance_path_t *nh; + + /* + * no extension => no out-going label for this path. that's OK + * in the case of an IP or EOS chain, but not for non-EOS + */ + switch (ctx->fct) + { + case FIB_FORW_CHAIN_TYPE_UNICAST_IP4: + case FIB_FORW_CHAIN_TYPE_UNICAST_IP6: + case FIB_FORW_CHAIN_TYPE_MCAST_IP4: + case FIB_FORW_CHAIN_TYPE_MCAST_IP6: + case FIB_FORW_CHAIN_TYPE_BIER: + /* + * EOS traffic with no label to stack, we need the IP Adj + */ + vec_add2(ctx->next_hops, nh, 1); + + nh->path_index = path_index; + nh->path_weight = fib_path_get_weight(path_index); + fib_path_contribute_forwarding(path_index, ctx->fct, &nh->path_dpo); + + break; + case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS: + if (fib_path_is_exclusive(path_index) || + fib_path_is_deag(path_index)) + { + vec_add2(ctx->next_hops, nh, 1); + + nh->path_index = path_index; + nh->path_weight = fib_path_get_weight(path_index); + fib_path_contribute_forwarding(path_index, + FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, + &nh->path_dpo); + } + break; + case FIB_FORW_CHAIN_TYPE_MPLS_EOS: + { + /* + * no label. we need a chain based on the payload. fixup. + */ + vec_add2(ctx->next_hops, nh, 1); + + nh->path_index = path_index; + nh->path_weight = fib_path_get_weight(path_index); + fib_path_contribute_forwarding(path_index, + fib_entry_chain_type_fixup(ctx->fib_entry, + ctx->fct), + &nh->path_dpo); + fib_path_stack_mpls_disp(path_index, + fib_prefix_get_payload_proto(&ctx->fib_entry->fe_prefix), + FIB_MPLS_LSP_MODE_PIPE, + &nh->path_dpo); + + break; + } + case FIB_FORW_CHAIN_TYPE_ETHERNET: + case FIB_FORW_CHAIN_TYPE_NSH: + ASSERT(0); + break; + } +} + +static fib_path_list_walk_rc_t fib_entry_src_collect_forwarding (fib_node_index_t pl_index, fib_node_index_t path_index, void *arg) { fib_entry_src_collect_forwarding_ctx_t *ctx; fib_path_ext_t *path_ext; + u32 n_nhs; ctx = arg; + n_nhs = vec_len(ctx->next_hops); /* * if the path is not resolved, don't include it. 
*/ if (!fib_path_is_resolved(path_index)) { - return (!0); + return (FIB_PATH_LIST_WALK_CONTINUE); } - if (fib_path_is_recursive(path_index)) + if (fib_path_is_recursive_constrained(path_index)) { - ctx->is_recursive = 1; + ctx->n_recursive_constrained += 1; + } + if (0xffff == ctx->preference) + { + /* + * not set a preference yet, so the first path we encounter + * sets the preference we are collecting. + */ + ctx->preference = fib_path_get_preference(path_index); + } + else if (ctx->preference != fib_path_get_preference(path_index)) + { + /* + * this path does not belong to the same preference as the + * previous paths encountered. we are done now. + */ + return (FIB_PATH_LIST_WALK_STOP); } /* * get the matching path-extension for the path being visited. */ - vec_foreach(path_ext, ctx->esrc->fes_path_exts) + path_ext = fib_path_ext_list_find_by_path_index(&ctx->esrc->fes_path_exts, + path_index); + + if (NULL != path_ext) { - if (path_ext->fpe_path_index == path_index) + switch (path_ext->fpe_type) + { + case FIB_PATH_EXT_MPLS: + if (fib_entry_src_valid_out_label(path_ext->fpe_label_stack[0].fml_value)) + { + /* + * found a matching extension. stack it to obtain the forwarding + * info for this path. + */ + ctx->next_hops = + fib_path_ext_stack(path_ext, + ctx->fct, + fib_entry_chain_type_fixup(ctx->fib_entry, + ctx->fct), + ctx->next_hops); + } + else + { + fib_entry_src_get_path_forwarding(path_index, ctx); + } break; - } - - if (NULL != path_ext && - path_ext->fpe_path_index == path_index && - fib_entry_src_valid_out_label(path_ext->fpe_label_stack[0])) - { - /* - * found a matching extension. stack it to obtain the forwarding - * info for this path. - */ - ctx->next_hops = fib_path_ext_stack(path_ext, ctx->fib_entry, ctx->fct, ctx->next_hops); + case FIB_PATH_EXT_ADJ: + if (FIB_PATH_EXT_ADJ_FLAG_REFINES_COVER & path_ext->fpe_adj_flags) + { + fib_entry_src_get_path_forwarding(path_index, ctx); + } + /* + * else + * the path does not refine the cover, meaning that + * the adjacency does/does not match the sub-net on the link. + * So this path does not contribute forwarding. + */ + break; + } } else { - load_balance_path_t *nh; + fib_entry_src_get_path_forwarding(path_index, ctx); + } + /* + * a this point 'ctx' has the DPO the path contributed, plus + * any labels from path extensions. + * check if there are any interpose sources that want to contribute + */ + if (n_nhs < vec_len(ctx->next_hops)) + { /* - * no extension => no out-going label for this path. that's OK - * in the case of an IP or EOS chain, but not for non-EOS + * the path contributed a new choice. */ - switch (ctx->fct) - { - case FIB_FORW_CHAIN_TYPE_UNICAST_IP4: - case FIB_FORW_CHAIN_TYPE_UNICAST_IP6: - /* - * EOS traffic with no label to stack, we need the IP Adj - */ - vec_add2(ctx->next_hops, nh, 1); + const fib_entry_src_vft_t *vft; - nh->path_index = path_index; - nh->path_weight = fib_path_get_weight(path_index); - fib_path_contribute_forwarding(path_index, ctx->fct, &nh->path_dpo); - - break; - case FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS: - if (fib_path_is_exclusive(path_index) || - fib_path_is_deag(path_index)) - { - vec_add2(ctx->next_hops, nh, 1); + vft = fib_entry_src_get_vft(ctx->esrc); - nh->path_index = path_index; - nh->path_weight = fib_path_get_weight(path_index); - fib_path_contribute_forwarding(path_index, - FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS, - &nh->path_dpo); - } - break; - case FIB_FORW_CHAIN_TYPE_MPLS_EOS: + if (NULL != vft->fesv_contribute_interpose) { - /* - * no label. 
we need a chain based on the payload. fixup. - */ - vec_add2(ctx->next_hops, nh, 1); + const dpo_id_t *interposer; - nh->path_index = path_index; - nh->path_weight = fib_path_get_weight(path_index); - fib_path_contribute_forwarding(path_index, - fib_entry_chain_type_fixup(ctx->fib_entry, - ctx->fct), - &nh->path_dpo); + interposer = vft->fesv_contribute_interpose(ctx->esrc, + ctx->fib_entry); - break; - } - case FIB_FORW_CHAIN_TYPE_ETHERNET: - ASSERT(0); - break; + if (NULL != interposer) + { + dpo_id_t clone = DPO_INVALID; + + dpo_mk_interpose(interposer, + &ctx->next_hops[n_nhs].path_dpo, + &clone); + + dpo_copy(&ctx->next_hops[n_nhs].path_dpo, &clone); + dpo_reset(&clone); + } } } - return (!0); + return (FIB_PATH_LIST_WALK_CONTINUE); } void @@ -378,19 +513,20 @@ fib_entry_src_mk_lb (fib_entry_t *fib_entry, .esrc = esrc, .fib_entry = fib_entry, .next_hops = NULL, - .is_recursive = 0, + .n_recursive_constrained = 0, .fct = fct, + .preference = 0xffff, }; /* * As an optimisation we allocate the vector of next-hops to be sized - * equal to the maximum nuber of paths we will need, which is also the + * equal to the maximum number of paths we will need, which is also the * most likely number we will need, since in most cases the paths are 'up'. */ vec_validate(ctx.next_hops, fib_path_list_get_n_paths(esrc->fes_pl)); vec_reset_length(ctx.next_hops); - lb_proto = fib_proto_to_dpo(fib_entry->fe_prefix.fp_proto); + lb_proto = fib_forw_chain_type_to_dpo_proto(fct); fib_path_list_walk(esrc->fes_pl, fib_entry_src_collect_forwarding, @@ -417,46 +553,85 @@ fib_entry_src_mk_lb (fib_entry_t *fib_entry, /* * first time create */ - flow_hash_config_t fhc; + if (esrc->fes_entry_flags & FIB_ENTRY_FLAG_MULTICAST) + { + dpo_set(dpo_lb, + DPO_REPLICATE, + lb_proto, + MPLS_IS_REPLICATE | replicate_create(0, lb_proto)); + } + else + { + fib_protocol_t flow_hash_proto; + flow_hash_config_t fhc; - fhc = fib_table_get_flow_hash_config(fib_entry->fe_fib_index, - dpo_proto_to_fib(lb_proto)); - dpo_set(dpo_lb, - DPO_LOAD_BALANCE, - lb_proto, - load_balance_create(0, lb_proto, fhc)); + /* + * if the protocol for the LB we are building does not match that + * of the fib_entry (i.e. we are build the [n]EOS LB for an IPv[46] + * then the fib_index is not an index that relates to the table + * type we need. So get the default flow-hash config instead. + */ + flow_hash_proto = dpo_proto_to_fib(lb_proto); + if (fib_entry->fe_prefix.fp_proto != flow_hash_proto) + { + fhc = fib_table_get_default_flow_hash_config(flow_hash_proto); + } + else + { + fhc = fib_table_get_flow_hash_config(fib_entry->fe_fib_index, + flow_hash_proto); + } + + dpo_set(dpo_lb, + DPO_LOAD_BALANCE, + lb_proto, + load_balance_create(0, lb_proto, fhc)); + } } - load_balance_multipath_update(dpo_lb, - ctx.next_hops, - fib_entry_calc_lb_flags(&ctx)); - vec_free(ctx.next_hops); - - /* - * if this entry is sourced by the uRPF-exempt source then we - * append the always present local0 interface (index 0) to the - * uRPF list so it is not empty. that way packets pass the loose check. - */ - index_t ui = fib_path_list_get_urpf(esrc->fes_pl); - - if (fib_entry_is_sourced(fib_entry_get_index(fib_entry), - FIB_SOURCE_URPF_EXEMPT) && - (0 == fib_urpf_check_size(ui))) + if (esrc->fes_entry_flags & FIB_ENTRY_FLAG_MULTICAST) { - /* - * The uRPF list we get from the path-list is shared by all - * other users of the list, but the uRPF exemption applies - * only to this prefix. So we need our own list. 
- */ - ui = fib_urpf_list_alloc_and_lock(); - fib_urpf_list_append(ui, 0); - fib_urpf_list_bake(ui); - load_balance_set_urpf(dpo_lb->dpoi_index, ui); - fib_urpf_list_unlock(ui); + /* + * MPLS multicast + */ + replicate_multipath_update(dpo_lb, ctx.next_hops); } else { - load_balance_set_urpf(dpo_lb->dpoi_index, ui); + load_balance_multipath_update(dpo_lb, + ctx.next_hops, + fib_entry_calc_lb_flags(&ctx)); + vec_free(ctx.next_hops); + + /* + * if this entry is sourced by the uRPF-exempt source then we + * append the always present local0 interface (index 0) to the + * uRPF list so it is not empty. that way packets pass the loose check. + */ + index_t ui = fib_path_list_get_urpf(esrc->fes_pl); + + if ((fib_entry_is_sourced(fib_entry_get_index(fib_entry), + FIB_SOURCE_URPF_EXEMPT) || + (esrc->fes_entry_flags & FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT))&& + (0 == fib_urpf_check_size(ui))) + { + /* + * The uRPF list we get from the path-list is shared by all + * other users of the list, but the uRPF exemption applies + * only to this prefix. So we need our own list. + */ + ui = fib_urpf_list_alloc_and_lock(); + fib_urpf_list_append(ui, 0); + fib_urpf_list_bake(ui); + load_balance_set_urpf(dpo_lb->dpoi_index, ui); + fib_urpf_list_unlock(ui); + } + else + { + load_balance_set_urpf(dpo_lb->dpoi_index, ui); + } + load_balance_set_fib_entry_flags(dpo_lb->dpoi_index, + fib_entry_get_flags_i(fib_entry)); } } @@ -473,7 +648,7 @@ fib_entry_src_action_install (fib_entry_t *fib_entry, int insert; fct = fib_entry_get_default_chain_type(fib_entry); - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); /* * Every entry has its own load-balance object. All changes to the entry's @@ -519,8 +694,7 @@ fib_entry_src_action_uninstall (fib_entry_t *fib_entry) /* * uninstall the forwarding chain from the forwarding tables */ - FIB_ENTRY_DBG(fib_entry, "uninstall: %d", - fib_entry->fe_adj_index); + FIB_ENTRY_DBG(fib_entry, "uninstall"); if (dpo_id_is_valid(&fib_entry->fe_lb)) { @@ -543,25 +717,288 @@ fib_entry_recursive_loop_detect_i (fib_node_index_t path_list_index) vec_free(entries); } +/* + * fib_entry_src_action_copy + * + * copy a source data from another entry to this one + */ +static fib_entry_t * +fib_entry_src_action_copy (fib_entry_t *fib_entry, + const fib_entry_src_t *orig_src) +{ + fib_entry_src_t *esrc; + + esrc = fib_entry_src_find_or_create(fib_entry, + orig_src->fes_src, + orig_src->fes_entry_flags); + + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_copy, + (orig_src, fib_entry, esrc)); + + fib_path_list_unlock(esrc->fes_pl); + + /* + * copy over all the data ... + */ + esrc->fes_flags = orig_src->fes_flags; + esrc->fes_pl = orig_src->fes_pl; + + /* + * ... 
then update + */ + esrc->fes_ref_count = 1; + esrc->fes_flags |= FIB_ENTRY_SRC_FLAG_INHERITED; + esrc->fes_flags &= ~(FIB_ENTRY_SRC_FLAG_ACTIVE | + FIB_ENTRY_SRC_FLAG_CONTRIBUTING); + esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_COVERED_INHERIT; + + /* + * the source owns a lock on the entry + */ + fib_path_list_lock(esrc->fes_pl); + fib_entry_lock(fib_entry_get_index(fib_entry)); + + return (fib_entry); +} + +/* + * fib_entry_src_action_update + * + * copy a source data from another entry to this one + */ +static fib_entry_src_t * +fib_entry_src_action_update_from_cover (fib_entry_t *fib_entry, + const fib_entry_src_t *orig_src) +{ + fib_entry_src_t *esrc; + + esrc = fib_entry_src_find_or_create(fib_entry, + orig_src->fes_src, + orig_src->fes_entry_flags); + + /* + * the source owns a lock on the entry + */ + fib_path_list_unlock(esrc->fes_pl); + esrc->fes_pl = orig_src->fes_pl; + fib_path_list_lock(esrc->fes_pl); + + return (esrc); +} + +static fib_table_walk_rc_t +fib_entry_src_covered_inherit_add_i (fib_entry_t *fib_entry, + const fib_entry_src_t *cover_src) +{ + fib_entry_src_t *esrc; + + esrc = fib_entry_src_find(fib_entry, cover_src->fes_src); + + if (cover_src == esrc) + { + return (FIB_TABLE_WALK_CONTINUE); + } + + if (NULL != esrc) + { + /* + * the covered entry already has this source. + */ + if (esrc->fes_entry_flags & FIB_ENTRY_FLAG_COVERED_INHERIT) + { + /* + * the covered source is itself a COVERED_INHERIT, i.e. + * it also pushes this source down the sub-tree. + * We consider this more specific covered to be the owner + * of the sub-tree from this point down. + */ + return (FIB_TABLE_WALK_SUB_TREE_STOP); + } + if (esrc->fes_flags & FIB_ENTRY_SRC_FLAG_INHERITED) + { + /* + * The covered's source data has been inherited, presumably + * from this cover, i.e. this is a modify. + */ + esrc = fib_entry_src_action_update_from_cover(fib_entry, cover_src); + fib_entry_source_change(fib_entry, esrc->fes_src, esrc->fes_src); + } + else + { + /* + * The covered's source was not inherited and it is also + * not inheriting. Nevertheless, it still owns the sub-tree from + * this point down. + */ + return (FIB_TABLE_WALK_SUB_TREE_STOP); + } + } + else + { + /* + * The covered does not have this source - add it. + */ + fib_source_t best_source; + + best_source = fib_entry_get_best_source( + fib_entry_get_index(fib_entry)); + + fib_entry_src_action_copy(fib_entry, cover_src); + fib_entry_source_change(fib_entry, best_source, cover_src->fes_src); + + } + return (FIB_TABLE_WALK_CONTINUE); +} + +static fib_table_walk_rc_t +fib_entry_src_covered_inherit_walk_add (fib_node_index_t fei, + void *ctx) +{ + return (fib_entry_src_covered_inherit_add_i(fib_entry_get(fei), ctx)); +} + +static fib_table_walk_rc_t +fib_entry_src_covered_inherit_walk_remove (fib_node_index_t fei, + void *ctx) +{ + fib_entry_src_t *cover_src, *esrc; + fib_entry_t *fib_entry; + + fib_entry = fib_entry_get(fei); + + cover_src = ctx; + esrc = fib_entry_src_find(fib_entry, cover_src->fes_src); + + if (cover_src == esrc) + { + return (FIB_TABLE_WALK_CONTINUE); + } + + if (NULL != esrc) + { + /* + * the covered entry already has this source. + */ + if (esrc->fes_entry_flags & FIB_ENTRY_FLAG_COVERED_INHERIT) + { + /* + * the covered source is itself a COVERED_INHERIT, i.e. + * it also pushes this source down the sub-tree. + * We consider this more specific covered to be the owner + * of the sub-tree from this point down. 
+ */ + return (FIB_TABLE_WALK_SUB_TREE_STOP); + } + if (esrc->fes_flags & FIB_ENTRY_SRC_FLAG_INHERITED) + { + /* + * The covered's source data has been inherited, presumably + * from this cover + */ + fib_entry_src_flag_t remaining; + + remaining = fib_entry_special_remove(fei, cover_src->fes_src); + + ASSERT(FIB_ENTRY_SRC_FLAG_ADDED == remaining); + } + else + { + /* + * The covered's source was not inherited and it is also + * not inheriting. Nevertheless, it still owns the sub-tree from + * this point down. + */ + return (FIB_TABLE_WALK_SUB_TREE_STOP); + } + } + else + { + /* + * The covered does not have this source - that's an error, + * since it should have inherited, but there is nothing we can do + * about it now. + */ + } + return (FIB_TABLE_WALK_CONTINUE); +} + +void +fib_entry_src_inherit (const fib_entry_t *cover, + fib_entry_t *covered) +{ + CLIB_UNUSED(fib_source_t source); + const fib_entry_src_t *src; + + FOR_EACH_SRC_ADDED(cover, src, source, + ({ + if ((src->fes_entry_flags & FIB_ENTRY_FLAG_COVERED_INHERIT) || + (src->fes_flags & FIB_ENTRY_SRC_FLAG_INHERITED)) + { + fib_entry_src_covered_inherit_add_i(covered, src); + } + })) +} + +static void +fib_entry_src_covered_inherit_add (fib_entry_t *fib_entry, + fib_source_t source) + +{ + fib_entry_src_t *esrc; + + esrc = fib_entry_src_find(fib_entry, source); + + ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE); + + if ((esrc->fes_entry_flags & FIB_ENTRY_FLAG_COVERED_INHERIT) || + (esrc->fes_flags & FIB_ENTRY_SRC_FLAG_INHERITED)) + { + fib_table_sub_tree_walk(fib_entry->fe_fib_index, + fib_entry->fe_prefix.fp_proto, + &fib_entry->fe_prefix, + fib_entry_src_covered_inherit_walk_add, + esrc); + } +} + +static void +fib_entry_src_covered_inherit_remove (fib_entry_t *fib_entry, + fib_entry_src_t *esrc) + +{ + ASSERT(!(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE)); + + if (esrc->fes_entry_flags & FIB_ENTRY_FLAG_COVERED_INHERIT) + { + fib_table_sub_tree_walk(fib_entry->fe_fib_index, + fib_entry->fe_prefix.fp_proto, + &fib_entry->fe_prefix, + fib_entry_src_covered_inherit_walk_remove, + esrc); + } +} + void fib_entry_src_action_activate (fib_entry_t *fib_entry, fib_source_t source) { int houston_we_are_go_for_install; + const fib_entry_src_vft_t *vft; fib_entry_src_t *esrc; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); ASSERT(!(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE)); ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ADDED); - esrc->fes_flags |= FIB_ENTRY_SRC_FLAG_ACTIVE; + esrc->fes_flags |= (FIB_ENTRY_SRC_FLAG_ACTIVE | + FIB_ENTRY_SRC_FLAG_CONTRIBUTING); + vft = fib_entry_src_get_vft(esrc); - if (NULL != fib_entry_src_vft[source].fesv_activate) + if (NULL != vft->fesv_activate) { - houston_we_are_go_for_install = - fib_entry_src_vft[source].fesv_activate(esrc, fib_entry); + houston_we_are_go_for_install = vft->fesv_activate(esrc, fib_entry); } else { @@ -587,6 +1024,11 @@ fib_entry_src_action_activate (fib_entry_t *fib_entry, FIB_ENTRY_DBG(fib_entry, "activate: %d", fib_entry->fe_parent); + /* + * If this source should push its state to covered prefixs, do that now. 
+ */ + fib_entry_src_covered_inherit_add(fib_entry, source); + if (0 != houston_we_are_go_for_install) { fib_entry_src_action_install(fib_entry, source); @@ -605,19 +1047,26 @@ fib_entry_src_action_deactivate (fib_entry_t *fib_entry, fib_node_index_t path_list_index; fib_entry_src_t *esrc; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE); - if (NULL != fib_entry_src_vft[source].fesv_deactivate) - { - fib_entry_src_vft[source].fesv_deactivate(esrc, fib_entry); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_deactivate, + (esrc, fib_entry)); - esrc->fes_flags &= ~FIB_ENTRY_SRC_FLAG_ACTIVE; + esrc->fes_flags &= ~(FIB_ENTRY_SRC_FLAG_ACTIVE | + FIB_ENTRY_SRC_FLAG_CONTRIBUTING); FIB_ENTRY_DBG(fib_entry, "deactivate: %d", fib_entry->fe_parent); + /* + * If this source should pull its state from covered prefixs, do that now. + * If this source also has the INHERITED flag set then it has a cover + * that wants to push down forwarding. We only want the covereds to see + * one update. + */ + fib_entry_src_covered_inherit_remove(fib_entry, esrc); + /* * un-link from an old path-list. Check for any loops this will clear */ @@ -641,12 +1090,8 @@ fib_entry_src_action_fwd_update (const fib_entry_t *fib_entry, vec_foreach(esrc, fib_entry->fe_srcs) { - if (NULL != fib_entry_src_vft[esrc->fes_src].fesv_fwd_update) - { - fib_entry_src_vft[esrc->fes_src].fesv_fwd_update(esrc, - fib_entry, - source); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_fwd_update, + (esrc, fib_entry, source)); } } @@ -655,9 +1100,11 @@ fib_entry_src_action_reactivate (fib_entry_t *fib_entry, fib_source_t source) { fib_node_index_t path_list_index; + const fib_entry_src_vft_t *vft; fib_entry_src_t *esrc; + int remain_installed; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE); @@ -665,6 +1112,20 @@ fib_entry_src_action_reactivate (fib_entry_t *fib_entry, fib_entry->fe_parent, esrc->fes_pl); + /* + * call the source to reactive and get the go/no-go to remain installed + */ + vft = fib_entry_src_get_vft(esrc); + + if (NULL != vft->fesv_reactivate) + { + remain_installed = vft->fesv_reactivate(esrc, fib_entry); + } + else + { + remain_installed = 1; + } + if (fib_entry->fe_parent != esrc->fes_pl) { /* @@ -700,8 +1161,21 @@ fib_entry_src_action_reactivate (fib_entry_t *fib_entry, fib_entry_recursive_loop_detect_i(fib_entry->fe_parent); fib_path_list_unlock(path_list_index); + + /* + * If this source should push its state to covered prefixs, do that now. 
+ */ + fib_entry_src_covered_inherit_add(fib_entry, source); + } + + if (!remain_installed) + { + fib_entry_src_action_uninstall(fib_entry); + } + else + { + fib_entry_src_action_install(fib_entry, source); } - fib_entry_src_action_install(fib_entry, source); fib_entry_src_action_fwd_update(fib_entry, source); } @@ -711,13 +1185,10 @@ fib_entry_src_action_installed (const fib_entry_t *fib_entry, { fib_entry_src_t *esrc; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); - if (NULL != fib_entry_src_vft[source].fesv_installed) - { - fib_entry_src_vft[source].fesv_installed(esrc, - fib_entry); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_installed, + (esrc, fib_entry)); fib_entry_src_action_fwd_update(fib_entry, source); } @@ -739,10 +1210,18 @@ fib_entry_src_action_add (fib_entry_t *fib_entry, fib_node_index_t fib_entry_index; fib_entry_src_t *esrc; - esrc = fib_entry_src_find_or_create(fib_entry, source, NULL); + esrc = fib_entry_src_find_or_create(fib_entry, source, flags); + ASSERT(esrc->fes_ref_count < 255); esrc->fes_ref_count++; + if (flags != esrc->fes_entry_flags) + { + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_flags_change, + (esrc, fib_entry, flags)); + } + esrc->fes_entry_flags = flags; + if (1 != esrc->fes_ref_count) { /* @@ -751,21 +1230,17 @@ fib_entry_src_action_add (fib_entry_t *fib_entry, return (fib_entry); } - esrc->fes_entry_flags = flags; - /* * save variable so we can recover from a fib_entry realloc. */ fib_entry_index = fib_entry_get_index(fib_entry); - if (NULL != fib_entry_src_vft[source].fesv_add) - { - fib_entry_src_vft[source].fesv_add(esrc, - fib_entry, - flags, - fib_entry_get_proto(fib_entry), - dpo); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_add, + (esrc, + fib_entry, + flags, + fib_entry_get_dpo_proto(fib_entry), + dpo)); fib_entry = fib_entry_get(fib_entry_index); @@ -798,10 +1273,12 @@ fib_entry_src_action_update (fib_entry_t *fib_entry, fib_node_index_t fib_entry_index, old_path_list_index; fib_entry_src_t *esrc; - esrc = fib_entry_src_find_or_create(fib_entry, source, NULL); + esrc = fib_entry_src_find_or_create(fib_entry, source, flags); if (NULL == esrc) + { return (fib_entry_src_action_add(fib_entry, source, flags, dpo)); + } old_path_list_index = esrc->fes_pl; esrc->fes_entry_flags = flags; @@ -811,14 +1288,12 @@ fib_entry_src_action_update (fib_entry_t *fib_entry, */ fib_entry_index = fib_entry_get_index(fib_entry); - if (NULL != fib_entry_src_vft[source].fesv_add) - { - fib_entry_src_vft[source].fesv_add(esrc, - fib_entry, - flags, - fib_entry_get_proto(fib_entry), - dpo); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_add, + (esrc, + fib_entry, + flags, + fib_entry_get_dpo_proto(fib_entry), + dpo)); fib_entry = fib_entry_get(fib_entry_index); @@ -830,6 +1305,60 @@ fib_entry_src_action_update (fib_entry_t *fib_entry, return (fib_entry); } +fib_entry_src_flag_t +fib_entry_src_action_remove_or_update_inherit (fib_entry_t *fib_entry, + fib_source_t source) +{ + fib_entry_src_t *esrc; + + esrc = fib_entry_src_find(fib_entry, source); + + if (NULL == esrc) + return (FIB_ENTRY_SRC_FLAG_ACTIVE); + + if ((esrc->fes_entry_flags & FIB_ENTRY_FLAG_COVERED_INHERIT) && + (esrc->fes_flags & FIB_ENTRY_SRC_FLAG_INHERITED)) + { + fib_entry_src_t *cover_src; + fib_node_index_t coveri; + fib_entry_t *cover; + + /* + * this source was pushing inherited state, but so is its + * cover. Now that this source is going away, we need to + * pull the covers forwarding and use it to update the covereds. 
+ * Go grab the path-list from the cover, rather than start a walk from + * the cover, so we don't recursively update this entry. + */ + coveri = fib_table_get_less_specific(fib_entry->fe_fib_index, + &fib_entry->fe_prefix); + + /* + * only the default route has itself as its own cover, but the + * default route cannot have inherited from something else. + */ + ASSERT(coveri != fib_entry_get_index(fib_entry)); + + cover = fib_entry_get(coveri); + cover_src = fib_entry_src_find(cover, source); + + ASSERT(NULL != cover_src); + + esrc = fib_entry_src_action_update_from_cover(fib_entry, cover_src); + esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_COVERED_INHERIT; + + /* + * Now push the new state from the cover down to the covereds + */ + fib_entry_src_covered_inherit_add(fib_entry, source); + + return (esrc->fes_flags); + } + else + { + return (fib_entry_src_action_remove(fib_entry, source)); + } +} fib_entry_src_flag_t fib_entry_src_action_remove (fib_entry_t *fib_entry, @@ -840,7 +1369,7 @@ fib_entry_src_action_remove (fib_entry_t *fib_entry, fib_entry_src_flag_t sflags; fib_entry_src_t *esrc; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); if (NULL == esrc) return (FIB_ENTRY_SRC_FLAG_ACTIVE); @@ -858,15 +1387,18 @@ fib_entry_src_action_remove (fib_entry_t *fib_entry, if (esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ACTIVE) { - fib_entry_src_action_deactivate(fib_entry, source); + fib_entry_src_action_deactivate(fib_entry, source); + } + else if (esrc->fes_flags & FIB_ENTRY_SRC_FLAG_CONTRIBUTING) + { + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_deactivate, + (esrc, fib_entry)); + esrc->fes_flags &= ~FIB_ENTRY_SRC_FLAG_CONTRIBUTING; } old_path_list = esrc->fes_pl; - if (NULL != fib_entry_src_vft[source].fesv_remove) - { - fib_entry_src_vft[source].fesv_remove(esrc); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_remove, (esrc)); fib_path_list_unlock(old_path_list); fib_entry_unlock(fib_entry_get_index(fib_entry)); @@ -877,21 +1409,6 @@ fib_entry_src_action_remove (fib_entry_t *fib_entry, return (sflags); } -static inline int -fib_route_recurses_via_self (const fib_prefix_t *prefix, - const fib_route_path_t *rpath) -{ - /* - * not all zeros next hop && - * is recursive path && - * nexthop is same as the route's address - */ - return ((!ip46_address_is_zero(&rpath->frp_addr)) && - (~0 == rpath->frp_sw_if_index) && - (0 == ip46_address_cmp(&rpath->frp_addr, &prefix->fp_addr))); - -} - /* * fib_route_attached_cross_table * @@ -909,6 +1426,7 @@ fib_route_attached_cross_table (const fib_entry_t *fib_entry, */ if (ip46_address_is_zero(&rpath->frp_addr) && (~0 != rpath->frp_sw_if_index) && + !(rpath->frp_flags & FIB_ROUTE_PATH_DVR) && (fib_entry->fe_fib_index != fib_table_get_index_for_sw_if_index(fib_entry_get_proto(fib_entry), rpath->frp_sw_if_index))) @@ -919,14 +1437,20 @@ fib_route_attached_cross_table (const fib_entry_t *fib_entry, } /* - * fib_route_attached_cross_table - * - * Return true the the route is attached via an interface that - * is not in the same table as the route + * Return true if the path is attached */ static inline int fib_path_is_attached (const fib_route_path_t *rpath) { + /* + * DVR paths are not attached, since we are not playing the + * L3 game with these + */ + if (rpath->frp_flags & FIB_ROUTE_PATH_DVR) + { + return (0); + } + /* * - All zeros next-hop * - a valid interface @@ -936,6 +1460,10 @@ fib_path_is_attached (const fib_route_path_t *rpath) { return (!0); } + else if (rpath->frp_flags & FIB_ROUTE_PATH_ATTACHED) + { + return (!0); + } 
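    /*
     * all other paths, e.g. those with an explicit next-hop address and
     * without the ATTACHED flag, are not considered attached.
     */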
return (0); } @@ -948,14 +1476,14 @@ fib_entry_src_flags_2_path_list_flags (fib_entry_flag_t eflags) { plf |= FIB_PATH_LIST_FLAG_DROP; } - if (eflags & FIB_ENTRY_FLAG_LOCAL) - { - plf |= FIB_PATH_LIST_FLAG_LOCAL; - } if (eflags & FIB_ENTRY_FLAG_EXCLUSIVE) { plf |= FIB_PATH_LIST_FLAG_EXCLUSIVE; } + if (eflags & FIB_ENTRY_FLAG_LOCAL) + { + plf |= FIB_PATH_LIST_FLAG_LOCAL; + } return (plf); } @@ -966,25 +1494,6 @@ fib_entry_flags_update (const fib_entry_t *fib_entry, fib_path_list_flags_t *pl_flags, fib_entry_src_t *esrc) { - /* - * don't allow the addition of a recursive looped path for prefix - * via itself. - */ - if (fib_route_recurses_via_self(&fib_entry->fe_prefix, rpath)) - { - /* - * force the install of a drop path-list. - * we want the entry to have some path-list, mainly so - * the dodgy path can be rmeoved when the source stops playing - * silly buggers. - */ - *pl_flags |= FIB_PATH_LIST_FLAG_DROP; - } - else - { - *pl_flags &= ~FIB_PATH_LIST_FLAG_DROP; - } - if ((esrc->fes_src == FIB_SOURCE_API) || (esrc->fes_src == FIB_SOURCE_CLI)) { @@ -996,8 +1505,13 @@ fib_entry_flags_update (const fib_entry_t *fib_entry, { esrc->fes_entry_flags &= ~FIB_ENTRY_FLAG_ATTACHED; } + if (rpath->frp_flags & FIB_ROUTE_PATH_DEAG) + { + esrc->fes_entry_flags |= FIB_ENTRY_FLAG_LOOSE_URPF_EXEMPT; + } } - if (fib_route_attached_cross_table(fib_entry, rpath)) + if (fib_route_attached_cross_table(fib_entry, rpath) && + !(esrc->fes_entry_flags & FIB_ENTRY_FLAG_NO_ATTACHED_EXPORT)) { esrc->fes_entry_flags |= FIB_ENTRY_FLAG_IMPORT; } @@ -1007,58 +1521,6 @@ fib_entry_flags_update (const fib_entry_t *fib_entry, } } -/* - * fib_entry_src_path_ext_add - * - * append a path extension to the entry's list - */ -static void -fib_entry_src_path_ext_append (fib_entry_src_t *esrc, - const fib_route_path_t *rpath) -{ - if (NULL != rpath->frp_label_stack) - { - fib_path_ext_t *path_ext; - - vec_add2(esrc->fes_path_exts, path_ext, 1); - - fib_path_ext_init(path_ext, esrc->fes_pl, rpath); - } -} - -/* - * fib_entry_src_path_ext_insert - * - * insert, sorted, a path extension to the entry's list. - * It's not strictly necessary in sort the path extensions, since each - * extension has the path index to which it resolves. However, by being - * sorted the load-balance produced has a deterministic order, not an order - * based on the sequence of extension additions. this is a considerable benefit. 
- */ -static void -fib_entry_src_path_ext_insert (fib_entry_src_t *esrc, - const fib_route_path_t *rpath) -{ - if (0 == vec_len(esrc->fes_path_exts)) - return (fib_entry_src_path_ext_append(esrc, rpath)); - - if (NULL != rpath->frp_label_stack) - { - fib_path_ext_t path_ext; - int i = 0; - - fib_path_ext_init(&path_ext, esrc->fes_pl, rpath); - - while (i < vec_len(esrc->fes_path_exts) && - (fib_path_ext_cmp(&esrc->fes_path_exts[i], rpath) < 0)) - { - i++; - } - - vec_insert_elts(esrc->fes_path_exts, &path_ext, 1, i); - } -} - /* * fib_entry_src_action_add * @@ -1075,7 +1537,6 @@ fib_entry_src_action_path_add (fib_entry_t *fib_entry, { fib_node_index_t old_path_list, fib_entry_index; fib_path_list_flags_t pl_flags; - fib_path_ext_t *path_ext; fib_entry_src_t *esrc; /* @@ -1083,17 +1544,23 @@ fib_entry_src_action_path_add (fib_entry_t *fib_entry, */ fib_entry_index = fib_entry_get_index(fib_entry); - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); if (NULL == esrc) { + const dpo_id_t *dpo; + + if (flags == FIB_ENTRY_FLAG_EXCLUSIVE) { + dpo = &rpath->dpo; + } else { + dpo = drop_dpo_get(fib_entry_get_dpo_proto(fib_entry)); + } + fib_entry = fib_entry_src_action_add(fib_entry, source, flags, - drop_dpo_get( - fib_proto_to_dpo( - fib_entry_get_proto(fib_entry)))); - esrc = fib_entry_src_find(fib_entry, source, NULL); + dpo); + esrc = fib_entry_src_find(fib_entry, source); } /* @@ -1104,26 +1571,15 @@ fib_entry_src_action_path_add (fib_entry_t *fib_entry, */ old_path_list = esrc->fes_pl; - ASSERT(NULL != fib_entry_src_vft[source].fesv_path_add); + ASSERT(FIB_ENTRY_SRC_VFT_EXISTS(esrc, fesv_path_add)); pl_flags = fib_entry_src_flags_2_path_list_flags(fib_entry_get_flags_i(fib_entry)); fib_entry_flags_update(fib_entry, rpath, &pl_flags, esrc); - fib_entry_src_vft[source].fesv_path_add(esrc, fib_entry, pl_flags, rpath); + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_path_add, + (esrc, fib_entry, pl_flags, rpath)); fib_entry = fib_entry_get(fib_entry_index); - /* - * re-resolve all the path-extensions with the new path-list - */ - vec_foreach(path_ext, esrc->fes_path_exts) - { - fib_path_ext_resolve(path_ext, esrc->fes_pl); - } - /* - * if the path has a label we need to add a path extension - */ - fib_entry_src_path_ext_insert(esrc, rpath); - fib_path_list_lock(esrc->fes_pl); fib_path_list_unlock(old_path_list); @@ -1142,16 +1598,15 @@ fib_entry_src_action_path_add (fib_entry_t *fib_entry, fib_entry_t* fib_entry_src_action_path_swap (fib_entry_t *fib_entry, fib_source_t source, - fib_entry_flag_t flags, + fib_entry_flag_t flags, const fib_route_path_t *rpaths) { fib_node_index_t old_path_list, fib_entry_index; fib_path_list_flags_t pl_flags; const fib_route_path_t *rpath; - fib_path_ext_t *path_ext; fib_entry_src_t *esrc; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); /* * save variable so we can recover from a fib_entry realloc. 
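Throughout this change the open-coded checks of the form "if (NULL != fib_entry_src_vft[source].fesv_xxx)" are replaced by the FIB_ENTRY_SRC_VFT_EXISTS / FIB_ENTRY_SRC_VFT_INVOKE / FIB_ENTRY_SRC_VFT_INVOKE_AND_RETURN macros. Their definitions belong to vnet/fib/fib_entry_src.h rather than this file; the sketch below shows the shape they are assumed to take (the authoritative definitions may differ in detail), with dispatch going through fib_entry_src_get_vft() so that sources carrying FIB_ENTRY_FLAG_INTERPOSE resolve to the interpose VFT:

    /*
     * Assumed shape of the per-source dispatch macros used in this file.
     * Callers pass a non-NULL esrc; a handler is only invoked if the
     * source's VFT actually provides it.
     */
    #define FIB_ENTRY_SRC_VFT_EXISTS(_esrc, _func) \
        (NULL != fib_entry_src_get_vft(_esrc)->_func)

    #define FIB_ENTRY_SRC_VFT_INVOKE(_esrc, _func, _args) \
    do { \
        const fib_entry_src_vft_t *_vft = fib_entry_src_get_vft(_esrc); \
        if (NULL != _vft->_func) \
            _vft->_func _args; \
    } while (0)

    /* as above, but return the handler's result when one is registered;
     * otherwise fall through to the caller's default */
    #define FIB_ENTRY_SRC_VFT_INVOKE_AND_RETURN(_esrc, _func, _args) \
    do { \
        const fib_entry_src_vft_t *_vft = fib_entry_src_get_vft(_esrc); \
        if (NULL != _vft->_func) \
            return (_vft->_func _args); \
    } while (0)

Under this assumption, fib_entry_src_action_copy() can rely on fesv_copy always doing useful work, because fib_entry_src_register() now installs fib_entry_src_copy_default for sources that do not provide their own copy handler.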
@@ -1160,13 +1615,28 @@ fib_entry_src_action_path_swap (fib_entry_t *fib_entry, if (NULL == esrc) { - fib_entry = fib_entry_src_action_add(fib_entry, + const dpo_id_t *dpo; + + if (flags == FIB_ENTRY_FLAG_EXCLUSIVE) { + dpo = &rpaths->dpo; + } else { + dpo = drop_dpo_get(fib_entry_get_dpo_proto(fib_entry)); + } + + fib_entry = fib_entry_src_action_add(fib_entry, source, flags, - drop_dpo_get( - fib_proto_to_dpo( - fib_entry_get_proto(fib_entry)))); - esrc = fib_entry_src_find(fib_entry, source, NULL); + dpo); + esrc = fib_entry_src_find(fib_entry, source); + } + else + { + if (flags != esrc->fes_entry_flags) + { + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_flags_change, + (esrc, fib_entry, flags)); + } + esrc->fes_entry_flags = flags; } /* @@ -1176,7 +1646,7 @@ fib_entry_src_action_path_swap (fib_entry_t *fib_entry, */ old_path_list = esrc->fes_pl; - ASSERT(NULL != fib_entry_src_vft[source].fesv_path_swap); + ASSERT(FIB_ENTRY_SRC_VFT_EXISTS(esrc, fesv_path_swap)); pl_flags = fib_entry_src_flags_2_path_list_flags(flags); @@ -1185,21 +1655,9 @@ fib_entry_src_action_path_swap (fib_entry_t *fib_entry, fib_entry_flags_update(fib_entry, rpath, &pl_flags, esrc); } - fib_entry_src_vft[source].fesv_path_swap(esrc, - fib_entry, - pl_flags, - rpaths); - - vec_foreach(path_ext, esrc->fes_path_exts) - { - vec_free(path_ext->fpe_label_stack); - } - vec_free(esrc->fes_path_exts); - - vec_foreach(rpath, rpaths) - { - fib_entry_src_path_ext_append(esrc, rpath); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_path_swap, + (esrc, fib_entry, + pl_flags, rpaths)); fib_entry = fib_entry_get(fib_entry_index); @@ -1216,10 +1674,9 @@ fib_entry_src_action_path_remove (fib_entry_t *fib_entry, { fib_path_list_flags_t pl_flags; fib_node_index_t old_path_list; - fib_path_ext_t *path_ext; fib_entry_src_t *esrc; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); ASSERT(NULL != esrc); ASSERT(esrc->fes_flags & FIB_ENTRY_SRC_FLAG_ADDED); @@ -1232,35 +1689,13 @@ fib_entry_src_action_path_remove (fib_entry_t *fib_entry, */ old_path_list = esrc->fes_pl; - ASSERT(NULL != fib_entry_src_vft[source].fesv_path_remove); + ASSERT(FIB_ENTRY_SRC_VFT_EXISTS(esrc, fesv_path_remove)); pl_flags = fib_entry_src_flags_2_path_list_flags(fib_entry_get_flags_i(fib_entry)); fib_entry_flags_update(fib_entry, rpath, &pl_flags, esrc); - fib_entry_src_vft[source].fesv_path_remove(esrc, pl_flags, rpath); - /* - * find the matching path extension and remove it - */ - vec_foreach(path_ext, esrc->fes_path_exts) - { - if (!fib_path_ext_cmp(path_ext, rpath)) - { - /* - * delete the element moving the remaining elements down 1 position. - * this preserves the sorted order. 
- */ - vec_free(path_ext->fpe_label_stack); - vec_delete(esrc->fes_path_exts, 1, (path_ext - esrc->fes_path_exts)); - break; - } - } - /* - * re-resolve all the path-extensions with the new path-list - */ - vec_foreach(path_ext, esrc->fes_path_exts) - { - fib_path_ext_resolve(path_ext, esrc->fes_pl); - } + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_path_remove, + (esrc, pl_flags, rpath)); /* * lock the new path-list, unlock the old if it had one @@ -1276,7 +1711,7 @@ fib_entry_src_action_path_remove (fib_entry_t *fib_entry, /* * no more paths left from this source */ - fib_entry_src_action_remove(fib_entry, source); + fib_entry_src_action_remove_or_update_inherit(fib_entry, source); return (FIB_ENTRY_SRC_FLAG_NONE); } } @@ -1288,12 +1723,10 @@ fib_entry_src_format (fib_entry_t *fib_entry, { fib_entry_src_t *esrc; - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); + + FIB_ENTRY_SRC_VFT_INVOKE_AND_RETURN(esrc, fesv_format, (esrc, s)); - if (NULL != fib_entry_src_vft[source].fesv_format) - { - return (fib_entry_src_vft[source].fesv_format(esrc, s)); - } return (s); } @@ -1308,7 +1741,7 @@ fib_entry_get_adj_for_source (fib_node_index_t fib_entry_index, return (ADJ_INDEX_INVALID); fib_entry = fib_entry_get(fib_entry_index); - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); if (NULL != esrc) { @@ -1334,7 +1767,7 @@ fib_entry_get_dpo_for_source (fib_node_index_t fib_entry_index, return (0); fib_entry = fib_entry_get(fib_entry_index); - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); if (NULL != esrc) { @@ -1343,6 +1776,7 @@ fib_entry_get_dpo_for_source (fib_node_index_t fib_entry_index, fib_path_list_contribute_forwarding( esrc->fes_pl, fib_entry_get_default_chain_type(fib_entry), + FIB_PATH_LIST_FWD_FLAG_NONE, dpo); return (dpo_id_is_valid(dpo)); @@ -1360,7 +1794,7 @@ fib_entry_get_resolving_interface_for_source (fib_node_index_t entry_index, fib_entry = fib_entry_get(entry_index); - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); if (NULL != esrc) { @@ -1381,7 +1815,7 @@ fib_entry_get_flags_for_source (fib_node_index_t entry_index, fib_entry = fib_entry_get(entry_index); - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); if (NULL != esrc) { @@ -1423,12 +1857,12 @@ fib_entry_set_source_data (fib_node_index_t fib_entry_index, fib_entry_src_t *esrc; fib_entry = fib_entry_get(fib_entry_index); - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); - if (NULL != esrc && - NULL != fib_entry_src_vft[source].fesv_set_data) + if (NULL != esrc) { - fib_entry_src_vft[source].fesv_set_data(esrc, fib_entry, data); + FIB_ENTRY_SRC_VFT_INVOKE(esrc, fesv_set_data, + (esrc, fib_entry, data)); } } @@ -1440,12 +1874,12 @@ fib_entry_get_source_data (fib_node_index_t fib_entry_index, fib_entry_src_t *esrc; fib_entry = fib_entry_get(fib_entry_index); - esrc = fib_entry_src_find(fib_entry, source, NULL); + esrc = fib_entry_src_find(fib_entry, source); - if (NULL != esrc && - NULL != fib_entry_src_vft[source].fesv_get_data) + if (NULL != esrc) { - return (fib_entry_src_vft[source].fesv_get_data(esrc, fib_entry)); + FIB_ENTRY_SRC_VFT_INVOKE_AND_RETURN(esrc, fesv_get_data, + (esrc, fib_entry)); } return (NULL); } @@ -1455,6 +1889,7 @@ fib_entry_src_module_init (void) { fib_entry_src_rr_register(); 
fib_entry_src_interface_register(); + fib_entry_src_interpose_register(); fib_entry_src_default_route_register(); fib_entry_src_special_register(); fib_entry_src_api_register();
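    /*
     * each of these *_register() calls hands a fib_entry_src_vft_t to
     * fib_entry_src_register(); sources that leave fesv_copy NULL are
     * given fib_entry_src_copy_default, which copies the source's union
     * data when entries inherit state from their cover.
     */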