/*
 * Copyright (c) 2016 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 #include <vnet/fib/fib_entry.h>
17 #include <vnet/fib/fib_table.h>
18 #include <vnet/fib/fib_walk.h>
19 #include <vnet/fib/fib_path_list.h>
21 #include <vnet/bier/bier_table.h>
22 #include <vnet/bier/bier_fmask.h>
23 #include <vnet/bier/bier_bit_string.h>
24 #include <vnet/bier/bier_disp_table.h>
26 #include <vnet/mpls/mpls.h>
27 #include <vnet/dpo/drop_dpo.h>
28 #include <vnet/dpo/load_balance.h>
31 * attributes names for formatting
33 static const char *const bier_fmask_attr_names[] = BIER_FMASK_ATTR_NAMES;
36 * pool of BIER fmask objects
38 bier_fmask_t *bier_fmask_pool;
41 * Stats for each BIER fmask object
43 vlib_combined_counter_main_t bier_fmask_counters;
46 bier_fmask_get_index (const bier_fmask_t *bfm)
48 return (bfm - bier_fmask_pool);
52 bier_fmask_bits_init (bier_fmask_bits_t *bits,
53 bier_hdr_len_id_t hlid)
55 bits->bfmb_refs = clib_mem_alloc(sizeof(bits->bfmb_refs[0]) *
56 bier_hdr_len_id_to_num_bits(hlid));
57 clib_memset(bits->bfmb_refs,
59 (sizeof(bits->bfmb_refs[0]) *
60 bier_hdr_len_id_to_num_bits(hlid)));
62 bits->bfmb_input_reset_string.bbs_len =
63 bier_hdr_len_id_to_num_buckets(hlid);
66 * The buckets are accessed in the switch path
68 bits->bfmb_input_reset_string.bbs_buckets =
69 clib_mem_alloc_aligned(
70 sizeof(bits->bfmb_input_reset_string.bbs_buckets[0]) *
71 bier_hdr_len_id_to_num_buckets(hlid),
72 CLIB_CACHE_LINE_BYTES);
73 clib_memset(bits->bfmb_input_reset_string.bbs_buckets,
75 sizeof(bits->bfmb_input_reset_string.bbs_buckets[0]) *
76 bier_hdr_len_id_to_num_buckets(hlid));
80 bier_fmask_stack (bier_fmask_t *bfm)
82 dpo_id_t via_dpo = DPO_INVALID;
83 fib_forward_chain_type_t fct;
85 if (bfm->bfm_flags & BIER_FMASK_FLAG_MPLS)
87 fct = FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS;
91 fct = FIB_FORW_CHAIN_TYPE_BIER;
94 fib_path_list_contribute_forwarding(bfm->bfm_pl, fct,
95 FIB_PATH_LIST_FWD_FLAG_COLLAPSE,
99 * If the via PL entry provides no forwarding (i.e. a drop)
100 * then neither does this fmask. That way children consider this fmask
101 * unresolved and other ECMP options are used instead.
103 if (dpo_is_drop(&via_dpo))
105 bfm->bfm_flags &= ~BIER_FMASK_FLAG_FORWARDING;
109 bfm->bfm_flags |= BIER_FMASK_FLAG_FORWARDING;
112 dpo_stack(DPO_BIER_FMASK,
120 bier_fmask_contribute_forwarding (index_t bfmi,
125 bfm = bier_fmask_get(bfmi);
127 if (bfm->bfm_flags & BIER_FMASK_FLAG_FORWARDING)
136 dpo_copy(dpo, drop_dpo_get(DPO_PROTO_BIER));
141 bier_fmask_child_add (fib_node_index_t bfmi,
142 fib_node_type_t child_type,
143 fib_node_index_t child_index)
145 return (fib_node_child_add(FIB_NODE_TYPE_BIER_FMASK,
152 bier_fmask_child_remove (fib_node_index_t bfmi,
155 if (INDEX_INVALID == bfmi)
160 fib_node_child_remove(FIB_NODE_TYPE_BIER_FMASK,
166 bier_fmask_init (bier_fmask_t *bfm,
167 const bier_fmask_id_t *fmid,
168 const fib_route_path_t *rpath)
170 const bier_table_id_t *btid;
171 fib_route_path_t *rpaths;
174 clib_memset(bfm, 0, sizeof(*bfm));
176 bfm->bfm_id = clib_mem_alloc(sizeof(*bfm->bfm_id));
178 fib_node_init(&bfm->bfm_node, FIB_NODE_TYPE_BIER_FMASK);
179 *bfm->bfm_id = *fmid;
180 dpo_reset(&bfm->bfm_dpo);
181 btid = bier_table_get_id(bfm->bfm_id->bfmi_bti);
182 bier_fmask_bits_init(&bfm->bfm_bits, btid->bti_hdr_len);
184 if (rpath->frp_flags & FIB_ROUTE_PATH_UDP_ENCAP)
186 bfm->bfm_id->bfmi_nh_type = BIER_NH_UDP;
188 else if (ip46_address_is_zero(&(bfm->bfm_id->bfmi_nh)))
190 bfm->bfm_flags |= BIER_FMASK_FLAG_DISP;
193 if (!(bfm->bfm_flags & BIER_FMASK_FLAG_DISP))
195 if (NULL != rpath->frp_label_stack)
197 olabel = rpath->frp_label_stack[0].fml_value;
198 vnet_mpls_uc_set_label(&bfm->bfm_label, olabel);
199 vnet_mpls_uc_set_exp(&bfm->bfm_label, 0);
200 vnet_mpls_uc_set_s(&bfm->bfm_label, 1);
201 vnet_mpls_uc_set_ttl(&bfm->bfm_label, 64);
202 bfm->bfm_flags |= BIER_FMASK_FLAG_MPLS;
211 bfm->bfm_flags &= ~BIER_FMASK_FLAG_MPLS;
214 * use a label as encoded for BIFT value
216 id = bier_bift_id_encode(btid->bti_set,
217 btid->bti_sub_domain,
219 vnet_mpls_uc_set_label(&bfm->bfm_label, id);
220 vnet_mpls_uc_set_s(&bfm->bfm_label, 1);
221 vnet_mpls_uc_set_exp(&bfm->bfm_label, 0);
222 vnet_mpls_uc_set_ttl(&bfm->bfm_label, 64);
224 bfm->bfm_label = clib_host_to_net_u32(bfm->bfm_label);
228 vec_add1(rpaths, *rpath);
229 bfm->bfm_pl = fib_path_list_create((FIB_PATH_LIST_FLAG_SHARED |
230 FIB_PATH_LIST_FLAG_NO_URPF),
232 bfm->bfm_sibling = fib_path_list_child_add(bfm->bfm_pl,
233 FIB_NODE_TYPE_BIER_FMASK,
234 bier_fmask_get_index(bfm));
236 bier_fmask_stack(bfm);
240 bier_fmask_destroy (bier_fmask_t *bfm)
242 clib_mem_free(bfm->bfm_bits.bfmb_refs);
243 clib_mem_free(bfm->bfm_bits.bfmb_input_reset_string.bbs_buckets);
245 bier_fmask_db_remove(bfm->bfm_id);
246 fib_path_list_child_remove(bfm->bfm_pl,
248 dpo_reset(&bfm->bfm_dpo);
249 clib_mem_free(bfm->bfm_id);
250 pool_put(bier_fmask_pool, bfm);
254 bier_fmask_unlock (index_t bfmi)
258 if (INDEX_INVALID == bfmi)
263 bfm = bier_fmask_get(bfmi);
265 fib_node_unlock(&bfm->bfm_node);
269 bier_fmask_lock (index_t bfmi)
273 if (INDEX_INVALID == bfmi)
278 bfm = bier_fmask_get(bfmi);
280 fib_node_lock(&bfm->bfm_node);
284 bier_fmask_create_and_lock (const bier_fmask_id_t *fmid,
285 const fib_route_path_t *rpath)
290 pool_get_aligned(bier_fmask_pool, bfm, CLIB_CACHE_LINE_BYTES);
291 bfmi = bier_fmask_get_index(bfm);
293 vlib_validate_combined_counter (&(bier_fmask_counters), bfmi);
294 vlib_zero_combined_counter (&(bier_fmask_counters), bfmi);
296 bier_fmask_init(bfm, fmid, rpath);
298 bier_fmask_lock(bfmi);
304 bier_fmask_link (index_t bfmi,
309 bfm = bier_fmask_get(bfmi);
311 if (0 == bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)])
314 * 0 -> 1 transistion - set the bit in the string
316 bier_bit_string_set_bit(&bfm->bfm_bits.bfmb_input_reset_string, bp);
319 ++bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)];
320 ++bfm->bfm_bits.bfmb_count;
324 bier_fmask_unlink (index_t bfmi,
329 bfm = bier_fmask_get(bfmi);
331 --bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)];
332 --bfm->bfm_bits.bfmb_count;
334 if (0 == bfm->bfm_bits.bfmb_refs[BIER_BP_TO_INDEX(bp)])
337 * 1 -> 0 transistion - clear the bit in the string
339 bier_bit_string_clear_bit(&bfm->bfm_bits.bfmb_input_reset_string, bp);
344 format_bier_fmask (u8 *s, va_list *ap)
346 index_t bfmi = va_arg(*ap, index_t);
347 u32 indent = va_arg(*ap, u32);
348 bier_fmask_attributes_t attr;
352 if (pool_is_free_index(bier_fmask_pool, bfmi))
354 return (format(s, "No BIER f-mask %d", bfmi));
357 bfm = bier_fmask_get(bfmi);
359 s = format(s, "fmask: nh:%U bs:%U locks:%d ",
360 format_ip46_address, &bfm->bfm_id->bfmi_nh, IP46_TYPE_ANY,
361 format_bier_bit_string, &bfm->bfm_bits.bfmb_input_reset_string,
362 bfm->bfm_node.fn_locks);
363 s = format(s, "flags:");
364 FOR_EACH_BIER_FMASK_ATTR(attr) {
365 if ((1<<attr) & bfm->bfm_flags) {
366 s = format (s, "%s,", bier_fmask_attr_names[attr]);
369 vlib_get_combined_counter (&(bier_fmask_counters), bfmi, &to);
370 s = format (s, " to:[%Ld:%Ld]]", to.packets, to.bytes);
372 s = fib_path_list_format(bfm->bfm_pl, s);
374 if (bfm->bfm_flags & BIER_FMASK_FLAG_MPLS)
376 s = format(s, " output-label:%U",
377 format_mpls_unicast_label,
378 vnet_mpls_uc_get_label(clib_net_to_host_u32(bfm->bfm_label)));
382 s = format(s, " output-bfit:[%U]",
384 vnet_mpls_uc_get_label(clib_net_to_host_u32(bfm->bfm_label)));
386 s = format(s, "\n %U%U",
387 format_white_space, indent,
388 format_dpo_id, &bfm->bfm_dpo, indent+2);
394 bier_fmask_get_stats (index_t bfmi, u64 * packets, u64 * bytes)
398 vlib_get_combined_counter (&(bier_fmask_counters), bfmi, &to);
400 *packets = to.packets;
405 bier_fmask_encode (index_t bfmi,
406 bier_table_id_t *btid,
407 fib_route_path_encode_t *rpath)
411 bfm = bier_fmask_get(bfmi);
412 *btid = *bier_table_get_id(bfm->bfm_id->bfmi_bti);
414 clib_memset(rpath, 0, sizeof(*rpath));
416 rpath->rpath.frp_sw_if_index = ~0;
418 switch (bfm->bfm_id->bfmi_nh_type)
421 rpath->rpath.frp_flags = FIB_ROUTE_PATH_UDP_ENCAP;
422 rpath->rpath.frp_udp_encap_id = bfm->bfm_id->bfmi_id;
425 memcpy(&rpath->rpath.frp_addr, &bfm->bfm_id->bfmi_nh,
426 sizeof(rpath->rpath.frp_addr));
432 bier_fmask_get_node (fib_node_index_t index)
434 bier_fmask_t *bfm = bier_fmask_get(index);
435 return (&(bfm->bfm_node));
439 bier_fmask_get_from_node (fib_node_t *node)
441 return ((bier_fmask_t*)(((char*)node) -
442 STRUCT_OFFSET_OF(bier_fmask_t,
447 * bier_fmask_last_lock_gone
450 bier_fmask_last_lock_gone (fib_node_t *node)
452 bier_fmask_destroy(bier_fmask_get_from_node(node));
456 * bier_fmask_back_walk_notify
458 * A back walk has reached this BIER fmask
460 static fib_node_back_walk_rc_t
461 bier_fmask_back_walk_notify (fib_node_t *node,
462 fib_node_back_walk_ctx_t *ctx)
465 * re-stack the fmask on the n-eos of the via
467 bier_fmask_t *bfm = bier_fmask_get_from_node(node);
469 bier_fmask_stack(bfm);
472 * propagate further up the graph.
473 * we can do this synchronously since the fan out is small.
475 fib_walk_sync(FIB_NODE_TYPE_BIER_FMASK, bier_fmask_get_index(bfm), ctx);
477 return (FIB_NODE_BACK_WALK_CONTINUE);
481 * The BIER fmask's graph node virtual function table
483 static const fib_node_vft_t bier_fmask_vft = {
484 .fnv_get = bier_fmask_get_node,
485 .fnv_last_lock = bier_fmask_last_lock_gone,
486 .fnv_back_walk = bier_fmask_back_walk_notify,
490 bier_fmask_dpo_lock (dpo_id_t *dpo)
495 bier_fmask_dpo_unlock (dpo_id_t *dpo)
500 bier_fmask_dpo_mem_show (void)
502 fib_show_memory_usage("BIER-fmask",
503 pool_elts(bier_fmask_pool),
504 pool_len(bier_fmask_pool),
505 sizeof(bier_fmask_t));
508 const static dpo_vft_t bier_fmask_dpo_vft = {
509 .dv_lock = bier_fmask_dpo_lock,
510 .dv_unlock = bier_fmask_dpo_unlock,
511 .dv_mem_show = bier_fmask_dpo_mem_show,
512 .dv_format = format_bier_fmask,
515 const static char *const bier_fmask_mpls_nodes[] =
520 const static char * const * const bier_fmask_nodes[DPO_PROTO_NUM] =
522 [DPO_PROTO_BIER] = bier_fmask_mpls_nodes,
523 [DPO_PROTO_MPLS] = bier_fmask_mpls_nodes,
527 bier_fmask_module_init (vlib_main_t * vm)
529 fib_node_register_type (FIB_NODE_TYPE_BIER_FMASK, &bier_fmask_vft);
530 dpo_register(DPO_BIER_FMASK, &bier_fmask_dpo_vft, bier_fmask_nodes);
535 VLIB_INIT_FUNCTION (bier_fmask_module_init);
537 static clib_error_t *
538 bier_fmask_show (vlib_main_t * vm,
539 unformat_input_t * input,
540 vlib_cli_command_t * cmd)
545 bfmi = INDEX_INVALID;
547 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT) {
548 if (unformat (input, "%d", &bfmi))
557 if (INDEX_INVALID == bfmi)
559 pool_foreach(bfm, bier_fmask_pool,
561 vlib_cli_output (vm, "[@%d] %U",
562 bier_fmask_get_index(bfm),
563 format_bier_fmask, bier_fmask_get_index(bfm), 0);
568 vlib_cli_output (vm, "%U", format_bier_fmask, bfmi, 0);
574 VLIB_CLI_COMMAND (show_bier_fmask, static) = {
575 .path = "show bier fmask",
576 .short_help = "show bier fmask",
577 .function = bier_fmask_show,