2 * Copyright (c) 2016 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 #include <vnet/fib/fib_entry.h>
17 #include <vnet/fib/fib_table.h>
18 #include <vnet/fib/fib_walk.h>
20 #include <vnet/bier/bier_table.h>
21 #include <vnet/bier/bier_fmask.h>
22 #include <vnet/bier/bier_bit_string.h>
23 #include <vnet/bier/bier_disp_table.h>
25 #include <vnet/mpls/mpls.h>
26 #include <vnet/dpo/drop_dpo.h>
27 #include <vnet/dpo/load_balance.h>
/*
 * Per-attribute display names for the BIER_FMASK_ATTR_* flag bits;
 * indexed by attribute value in format_bier_fmask() below.
 */
30 * attributes names for formatting
32 static const char *const bier_fmask_attr_names[] = BIER_FMASK_ATTR_NAMES;
/*
 * Global pool from which all bier_fmask_t objects are allocated.
 * An fmask's index_t is its offset into this pool
 * (see bier_fmask_get_index()).
 */
35 * pool of BIER fmask objects
37 bier_fmask_t *bier_fmask_pool;
/*
 * Return the pool index of the given fmask: pointer arithmetic against
 * the pool base. NOTE(review): the return type and braces of this
 * function are elided in this listing — presumably returns index_t;
 * confirm against the full source.
 */
40 bier_fmask_get_index (const bier_fmask_t *bfm)
42 return (bfm - bier_fmask_pool);
/*
 * Initialise the bit-state of an fmask: allocate one reference counter
 * per BIER bit-position and the "input reset" bit-string, both sized
 * from the BIER header length id (hlid).
 * NOTE(review): this listing is elided — the fill-byte argument of both
 * memset() calls (presumably 0, i.e. zero the allocations) sits on
 * lines not visible here; confirm against the full source.
 */
46 bier_fmask_bits_init (bier_fmask_bits_t *bits,
47 bier_hdr_len_id_t hlid)
/* one per-bit-position reference count, used by link/unlink below */
49 bits->bfmb_refs = clib_mem_alloc(sizeof(bits->bfmb_refs[0]) *
50 bier_hdr_len_id_to_num_bits(hlid));
51 memset(bits->bfmb_refs,
53 (sizeof(bits->bfmb_refs[0]) *
54 bier_hdr_len_id_to_num_bits(hlid)));
56 bits->bfmb_input_reset_string.bbs_len =
57 bier_hdr_len_id_to_num_buckets(hlid);
/* cache-line aligned: these buckets are read per-packet in the data path */
60 * The buckets are accessed in the switch path
62 bits->bfmb_input_reset_string.bbs_buckets =
63 clib_mem_alloc_aligned(
64 sizeof(bits->bfmb_input_reset_string.bbs_buckets[0]) *
65 bier_hdr_len_id_to_num_buckets(hlid),
66 CLIB_CACHE_LINE_BYTES);
67 memset(bits->bfmb_input_reset_string.bbs_buckets,
69 sizeof(bits->bfmb_input_reset_string.bbs_buckets[0]) *
70 bier_hdr_len_id_to_num_buckets(hlid));
/*
 * (Re)stack this fmask on the entity through which it resolves:
 *  - disposition fmasks (BIER_FMASK_FLAG_DISP) stack on their BIER
 *    disposition table;
 *  - forwarding fmasks stack on the MPLS non-EOS forwarding chain of
 *    the via fib entry (bfm_fei).
 * If the via contributes only a drop, the FORWARDING flag is cleared so
 * children treat this fmask as unresolved and pick other ECMP options.
 * NOTE(review): listing elided — the trailing arguments of the two
 * contribute calls and of dpo_stack() (presumably &via_dpo and
 * &bfm->bfm_dpo) are not visible here.
 */
74 bier_fmask_stack (bier_fmask_t *bfm)
76 dpo_id_t via_dpo = DPO_INVALID;
78 if (bfm->bfm_flags & BIER_FMASK_FLAG_DISP)
80 bier_disp_table_contribute_forwarding(bfm->bfm_disp,
85 fib_entry_contribute_forwarding(bfm->bfm_fei,
86 FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS,
91 * If the via fib entry provides no forwarding (i.e. a drop)
92 * then niether does this fmask. That way children consider this fmask
93 * unresolved and other ECMP options are used instead.
95 if (dpo_is_drop(&via_dpo) ||
96 load_balance_is_drop(&via_dpo))
98 bfm->bfm_flags &= ~BIER_FMASK_FLAG_FORWARDING;
102 bfm->bfm_flags |= BIER_FMASK_FLAG_FORWARDING;
105 dpo_stack(DPO_BIER_FMASK,
/*
 * Contribute this fmask's forwarding to a child DPO: when the fmask is
 * in the FORWARDING state the stacked DPO is handed out (that branch is
 * elided in this listing); otherwise a BIER-proto drop is contributed.
 */
113 bier_fmask_contribute_forwarding (index_t bfmi,
118 bfm = bier_fmask_get(bfmi);
120 if (bfm->bfm_flags & BIER_FMASK_FLAG_FORWARDING)
/* not forwarding: hand the child a drop so it resolves elsewhere */
129 dpo_copy(dpo, drop_dpo_get(DPO_PROTO_BIER));
/*
 * Resolve the fmask:
 *  - disposition fmasks just take a lock on their disposition table;
 *  - forwarding fmasks source a recursive (RR) host route — /32 for an
 *    IPv4 next-hop, /128 for IPv6 — in the default FIB table, register
 *    as a child of that entry (so back-walks reach us on re-resolution),
 *    then stack on it.
 * NOTE(review): listing elided — the pfx declaration, the FIB_SOURCE_RR
 * argument of the special_add and the branch braces are not visible.
 */
134 bier_fmask_resolve (bier_fmask_t *bfm)
136 if (bfm->bfm_flags & BIER_FMASK_FLAG_DISP)
138 bier_disp_table_lock(bfm->bfm_disp);
143 * source a recursive route through which we resolve.
146 .fp_addr = bfm->bfm_id.bfmi_nh,
147 .fp_proto = (ip46_address_is_ip4(&(bfm->bfm_id.bfmi_nh)) ?
150 .fp_len = (ip46_address_is_ip4(&(bfm->bfm_id.bfmi_nh)) ? 32 : 128),
153 bfm->bfm_fei = fib_table_entry_special_add(0, // default table
156 FIB_ENTRY_FLAG_NONE);
/* become a child of the via entry for back-walk notifications */
158 bfm->bfm_sibling = fib_entry_child_add(bfm->bfm_fei,
159 FIB_NODE_TYPE_BIER_FMASK,
160 bier_fmask_get_index(bfm));
163 bier_fmask_stack(bfm);
/*
 * Mirror of bier_fmask_resolve(): disposition fmasks release their
 * disposition-table lock; forwarding fmasks deregister as a child of
 * the via fib entry and un-source the RR host route. Finally the
 * stacked DPO is reset.
 */
167 bier_fmask_unresolve (bier_fmask_t *bfm)
169 if (bfm->bfm_flags & BIER_FMASK_FLAG_DISP)
171 bier_disp_table_unlock(bfm->bfm_disp);
176 * un-source the recursive route through which we resolve.
179 .fp_addr = bfm->bfm_id.bfmi_nh,
180 .fp_proto = (ip46_address_is_ip4(&(bfm->bfm_id.bfmi_nh)) ?
183 .fp_len = (ip46_address_is_ip4(&(bfm->bfm_id.bfmi_nh)) ? 32 : 128),
/* remove the child link BEFORE un-sourcing, so no walk reaches us */
186 fib_entry_child_remove(bfm->bfm_fei, bfm->bfm_sibling);
187 fib_table_entry_special_remove(0, &pfx, FIB_SOURCE_RR);
189 dpo_reset(&bfm->bfm_dpo);
/*
 * Attach a child (e.g. a BIER table entry) to this fmask in the FIB
 * graph; returns the sibling index used later to remove it.
 * NOTE(review): the remaining fib_node_child_add() arguments (bfmi,
 * child_type, child_index, presumably) are elided in this listing.
 */
193 bier_fmask_child_add (fib_node_index_t bfmi,
194 fib_node_type_t child_type,
195 fib_node_index_t child_index)
197 return (fib_node_child_add(FIB_NODE_TYPE_BIER_FMASK,
/*
 * Detach a previously added child from this fmask (inverse of
 * bier_fmask_child_add); trailing arguments elided in this listing.
 */
204 bier_fmask_child_remove (fib_node_index_t bfmi,
207 fib_node_child_remove(FIB_NODE_TYPE_BIER_FMASK,
/*
 * Initialise a freshly allocated fmask from its id, owning BIER table
 * index (bti) and route path:
 *  - a zero next-hop address marks a disposition fmask (FLAG_DISP);
 *  - forwarding fmasks pre-build the outgoing MPLS label (BOS set,
 *    TTL 255, EXP 0) in network byte order so the data path can write
 *    it directly;
 *  - disposition fmasks record the disposition table from the path
 *    (the else branch is elided in this listing);
 * then the bit-state is sized from the table's header-length id and the
 * fmask is resolved.
 * NOTE(review): the copy of *fmid into bfm->bfm_id and the olabel
 * declaration are on elided lines — bfm_id.bfmi_nh is read below, so
 * the copy presumably precedes line 225; confirm upstream.
 */
213 bier_fmask_init (bier_fmask_t *bfm,
214 const bier_fmask_id_t *fmid,
216 const fib_route_path_t *rpath)
218 const bier_table_id_t *btid;
222 bfm->bfm_fib_index = bti;
223 dpo_reset(&bfm->bfm_dpo);
225 if (ip46_address_is_zero(&(bfm->bfm_id.bfmi_nh)))
227 bfm->bfm_flags |= BIER_FMASK_FLAG_DISP;
230 if (!(bfm->bfm_flags & BIER_FMASK_FLAG_DISP))
/* build the label once at init; stored ready-to-emit (network order) */
232 olabel = rpath->frp_label_stack[0];
233 vnet_mpls_uc_set_label(&bfm->bfm_label, olabel);
234 vnet_mpls_uc_set_exp(&bfm->bfm_label, 0);
235 vnet_mpls_uc_set_s(&bfm->bfm_label, 1);
236 vnet_mpls_uc_set_ttl(&bfm->bfm_label, 0xff);
237 bfm->bfm_label = clib_host_to_net_u32(bfm->bfm_label);
241 bfm->bfm_disp = rpath->frp_bier_fib_index;
244 btid = bier_table_get_id(bfm->bfm_fib_index);
245 bier_fmask_bits_init(&bfm->bfm_bits, btid->bti_hdr_len);
246 bier_fmask_resolve(bfm);
/*
 * Tear down an fmask once its last lock is gone: free the bit-state
 * allocations, remove it from the fmask DB (so no new lookups find it),
 * unresolve it from the FIB graph and return it to the pool.
 */
250 bier_fmask_destroy (bier_fmask_t *bfm)
252 clib_mem_free(bfm->bfm_bits.bfmb_refs);
253 clib_mem_free(bfm->bfm_bits.bfmb_input_reset_string.bbs_buckets);
255 bier_fmask_db_remove(bfm->bfm_fib_index, &(bfm->bfm_id));
256 bier_fmask_unresolve(bfm);
257 pool_put(bier_fmask_pool, bfm);
/*
 * Release one reference on the fmask's graph node; a no-op for
 * INDEX_INVALID (the early-return body is elided in this listing).
 * When the last lock goes, fib_node_unlock() triggers
 * bier_fmask_last_lock_gone() via the vft below.
 */
261 bier_fmask_unlock (index_t bfmi)
265 if (INDEX_INVALID == bfmi)
270 bfm = bier_fmask_get(bfmi);
272 fib_node_unlock(&bfm->bfm_node);
/*
 * Take one reference on the fmask's graph node; a no-op for
 * INDEX_INVALID (the early-return body is elided in this listing).
 */
276 bier_fmask_lock (index_t bfmi)
280 if (INDEX_INVALID == bfmi)
285 bfm = bier_fmask_get(bfmi);
287 fib_node_lock(&bfm->bfm_node);
/*
 * Allocate a new fmask from the pool (cache-line aligned, zeroed),
 * initialise it as a FIB graph node, take the caller's first lock and
 * return its pool index. Ownership: the caller must balance with
 * bier_fmask_unlock().
 */
291 bier_fmask_create_and_lock (const bier_fmask_id_t *fmid,
293 const fib_route_path_t *rpath)
297 pool_get_aligned(bier_fmask_pool, bfm, CLIB_CACHE_LINE_BYTES);
299 memset(bfm, 0, sizeof(*bfm));
301 fib_node_init(&bfm->bfm_node, FIB_NODE_TYPE_BIER_FMASK);
302 bier_fmask_init(bfm, fmid, bti, rpath);
304 bier_fmask_lock(bier_fmask_get_index(bfm));
306 return (bier_fmask_get_index(bfm));
/*
 * Reference-count bit-position bp on this fmask: on the 0 -> 1
 * transition the bit is set in the input reset string used by the
 * data path; the per-position refcount and total count always bump.
 */
310 bier_fmask_link (index_t bfmi,
315 bfm = bier_fmask_get(bfmi);
317 if (0 == bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)])
320 * 0 -> 1 transistion - set the bit in the string
322 bier_bit_string_set_bit(&bfm->bfm_bits.bfmb_input_reset_string, bp);
325 ++bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)];
326 ++bfm->bfm_bits.bfmb_count;
/*
 * Inverse of bier_fmask_link(): drop one reference on bit-position bp;
 * on the 1 -> 0 transition clear the bit from the input reset string.
 */
330 bier_fmask_unlink (index_t bfmi,
335 bfm = bier_fmask_get(bfmi);
337 --bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)];
338 --bfm->bfm_bits.bfmb_count;
340 if (0 == bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)])
343 * 1 -> 0 transistion - clear the bit in the string
345 bier_bit_string_clear_bit(&bfm->bfm_bits.bfmb_input_reset_string, bp);
/*
 * format() callback for an fmask. va_args: index_t bfmi, u32 indent.
 * Prints next-hop, bit-string, lock count, the names of any set
 * BIER_FMASK_ATTR_* flags, then the stacked DPO on a new indented line.
 * A pool-free index formats as "No BIER f-mask %d" rather than crashing.
 */
350 format_bier_fmask (u8 *s, va_list *ap)
352 index_t bfmi = va_arg(*ap, index_t);
353 u32 indent = va_arg(*ap, u32);
354 bier_fmask_attributes_t attr;
357 if (pool_is_free_index(bier_fmask_pool, bfmi))
359 return (format(s, "No BIER f-mask %d", bfmi));
362 bfm = bier_fmask_get(bfmi);
364 s = format(s, "fmask: nh:%U bs:%U locks:%d ",
365 format_ip46_address, &bfm->bfm_id.bfmi_nh, IP46_TYPE_ANY,
366 format_bier_bit_string, &bfm->bfm_bits.bfmb_input_reset_string,
367 bfm->bfm_node.fn_locks);
368 s = format(s, "flags:");
369 FOR_EACH_BIER_FMASK_ATTR(attr) {
370 if ((1<<attr) & bfm->bfm_flags) {
371 s = format (s, "%s,", bier_fmask_attr_names[attr]);
374 s = format(s, "\n%U%U",
375 format_white_space, indent,
376 format_dpo_id, &bfm->bfm_dpo, indent+2);
/*
 * fib_node_vft_t.fnv_get: map an fmask pool index to its embedded
 * fib_node_t for the FIB graph machinery.
 */
383 bier_fmask_get_node (fib_node_index_t index)
385 bier_fmask_t *bfm = bier_fmask_get(index);
386 return (&(bfm->bfm_node));
/*
 * container_of-style recovery of the bier_fmask_t from its embedded
 * fib_node_t (subtracts the member's struct offset). The member name
 * argument of STRUCT_OFFSET_OF is on an elided line — presumably
 * bfm_node, the field returned by bier_fmask_get_node() above.
 */
390 bier_fmask_get_from_node (fib_node_t *node)
392 return ((bier_fmask_t*)(((char*)node) -
393 STRUCT_OFFSET_OF(bier_fmask_t,
/*
 * fib_node_vft_t.fnv_last_lock: the final reference on the graph node
 * has been released, so destroy the fmask.
 */
398 * bier_fmask_last_lock_gone
401 bier_fmask_last_lock_gone (fib_node_t *node)
403 bier_fmask_destroy(bier_fmask_get_from_node(node));
/*
 * fib_node_vft_t.fnv_back_walk: the via entry changed, so re-stack this
 * fmask on its (possibly new) non-EOS forwarding and continue the walk
 * synchronously to our own children — safe because an fmask's fan-out
 * is small.
 */
407 * bier_fmask_back_walk_notify
409 * A back walk has reached this BIER fmask
411 static fib_node_back_walk_rc_t
412 bier_fmask_back_walk_notify (fib_node_t *node,
413 fib_node_back_walk_ctx_t *ctx)
416 * re-stack the fmask on the n-eos of the via
418 bier_fmask_t *bfm = bier_fmask_get_from_node(node);
420 bier_fmask_stack(bfm);
423 * propagate further up the graph.
424 * we can do this synchronously since the fan out is small.
426 fib_walk_sync(FIB_NODE_TYPE_BIER_FMASK, bier_fmask_get_index(bfm), ctx);
428 return (FIB_NODE_BACK_WALK_CONTINUE);
/*
 * Virtual function table registered for FIB_NODE_TYPE_BIER_FMASK in
 * bier_fmask_module_init(); wires the three graph-node callbacks above.
 */
432 * The BIER fmask's graph node virtual function table
434 static const fib_node_vft_t bier_fmask_vft = {
435 .fnv_get = bier_fmask_get_node,
436 .fnv_last_lock = bier_fmask_last_lock_gone,
437 .fnv_back_walk = bier_fmask_back_walk_notify,
/*
 * dpo_vft_t.dv_lock callback. Body elided in this listing — presumably
 * a no-op, since fmask lifetime is managed by fib_node locks rather
 * than per-DPO references; confirm upstream.
 */
441 bier_fmask_dpo_lock (dpo_id_t *dpo)
/*
 * dpo_vft_t.dv_unlock callback. Body elided in this listing —
 * presumably a no-op, mirroring bier_fmask_dpo_lock(); confirm upstream.
 */
446 bier_fmask_dpo_unlock (dpo_id_t *dpo)
/*
 * dpo_vft_t.dv_mem_show callback: report pool occupancy and element
 * size for "show fib memory"-style CLI output.
 */
451 bier_fmask_dpo_mem_show (void)
453 fib_show_memory_usage("BIER-fmask",
454 pool_elts(bier_fmask_pool),
455 pool_len(bier_fmask_pool),
456 sizeof(bier_fmask_t));
/*
 * DPO virtual function table for DPO_BIER_FMASK, registered in
 * bier_fmask_module_init(); formatting reuses format_bier_fmask().
 */
459 const static dpo_vft_t bier_fmask_dpo_vft = {
460 .dv_lock = bier_fmask_dpo_lock,
461 .dv_unlock = bier_fmask_dpo_unlock,
462 .dv_mem_show = bier_fmask_dpo_mem_show,
463 .dv_format = format_bier_fmask,
/*
 * Graph-node names that packets of each payload protocol jump to when
 * they hit a BIER-fmask DPO. The contents of bier_fmask_mpls_nodes are
 * on elided lines; the same node list serves both BIER and MPLS
 * payloads.
 */
466 const static char *const bier_fmask_mpls_nodes[] =
470 const static char * const * const bier_fmask_nodes[DPO_PROTO_NUM] =
472 [DPO_PROTO_BIER] = bier_fmask_mpls_nodes,
473 [DPO_PROTO_MPLS] = bier_fmask_mpls_nodes,
/*
 * VLIB init-time registration: hook the fmask into the FIB graph
 * (node type) and the DPO framework (vft + per-protocol next nodes).
 * The return statement (presumably NULL / no clib_error_t) is elided
 * in this listing.
 */
477 bier_fmask_module_init (vlib_main_t * vm)
479 fib_node_register_type (FIB_NODE_TYPE_BIER_FMASK, &bier_fmask_vft);
480 dpo_register(DPO_BIER_FMASK, &bier_fmask_dpo_vft, bier_fmask_nodes);
485 VLIB_INIT_FUNCTION (bier_fmask_module_init);
/*
 * CLI handler for "show bier fmask": with no argument, dump every fmask
 * in the pool; with a numeric argument, show just that index. Invalid
 * indices are handled by format_bier_fmask() itself ("No BIER f-mask").
 * NOTE(review): the unformat-failure branch and the final return are on
 * elided lines.
 */
487 static clib_error_t *
488 bier_fmask_show (vlib_main_t * vm,
489 unformat_input_t * input,
490 vlib_cli_command_t * cmd)
495 bfmi = INDEX_INVALID;
497 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
498 if (unformat (input, "%d", &bfmi))
507 if (INDEX_INVALID == bfmi)
509 pool_foreach(bfm, bier_fmask_pool,
511 vlib_cli_output (vm, "%U",
512 format_bier_fmask, bier_fmask_get_index(bfm), 0);
517 vlib_cli_output (vm, "%U", format_bier_fmask, bfmi, 0);
/*
 * CLI registration for the handler above; usage:
 *   show bier fmask [index]
 */
523 VLIB_CLI_COMMAND (show_bier_fmask, static) = {
524 .path = "show bier fmask",
525 .short_help = "show bier fmask",
526 .function = bier_fmask_show,