2 * mpls_tunnel.c: MPLS tunnel interfaces (i.e. for RSVP-TE)
4 * Copyright (c) 2012 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/mpls/mpls_tunnel.h>
21 #include <vnet/mpls/mpls_types.h>
22 #include <vnet/ip/ip.h>
23 #include <vnet/fib/fib_path_list.h>
24 #include <vnet/adj/adj_midchain.h>
25 #include <vnet/adj/adj_mcast.h>
26 #include <vnet/dpo/replicate_dpo.h>
27 #include <vnet/fib/mpls_fib.h>
30 * @brief pool of tunnel instances
32 static mpls_tunnel_t *mpls_tunnel_pool;
35 * @brief Pool of free tunnel SW indices - i.e. recycled indices
37 static u32 * mpls_tunnel_free_hw_if_indices;
40 * @brief DB of SW index to tunnel index
42 static u32 *mpls_tunnel_db;
45 * @brief MPLS tunnel flags strings
47 static const char *mpls_tunnel_attribute_names[] = MPLS_TUNNEL_ATTRIBUTES;
50 * @brief Get a tunnel object from a SW interface index
53 mpls_tunnel_get_from_sw_if_index (u32 sw_if_index)
55 if ((vec_len(mpls_tunnel_db) < sw_if_index) ||
56 (~0 == mpls_tunnel_db[sw_if_index]))
59 return (pool_elt_at_index(mpls_tunnel_pool,
60 mpls_tunnel_db[sw_if_index]));
64 * @brief Build a rewrite string for the MPLS tunnel.
67 mpls_tunnel_build_rewrite_i (void)
70 * passing the adj code a NULL rewirte means 'i don't have one cos
71 * t'other end is unresolved'. That's not the case here. For the mpls
72 * tunnel there are just no bytes of encap to apply in the adj. We'll impose
73 * the label stack once we choose a path. So return a zero length rewrite.
77 vec_validate(rewrite, 0);
78 vec_reset_length(rewrite);
84 * @brief Build a rewrite string for the MPLS tunnel.
87 mpls_tunnel_build_rewrite (vnet_main_t * vnm,
89 vnet_link_t link_type,
90 const void *dst_address)
92 return (mpls_tunnel_build_rewrite_i());
95 typedef struct mpls_tunnel_collect_forwarding_ctx_t_
97 load_balance_path_t * next_hops;
98 const mpls_tunnel_t *mt;
99 fib_forward_chain_type_t fct;
100 } mpls_tunnel_collect_forwarding_ctx_t;
/**
 * fib_path_list walk callback used when building the tunnel's load-balance:
 * for each resolved path, find the matching path-extension and stack it
 * to collect that path's forwarding info into ctx->next_hops.
 *
 * NOTE(review): this chunk appears truncated - the return type, braces and
 * several statements are not visible here; comments describe only the
 * visible lines.
 */
103 mpls_tunnel_collect_forwarding (fib_node_index_t pl_index,
104 fib_node_index_t path_index,
/* walk context - presumably recovered from the opaque walk argument */
107 mpls_tunnel_collect_forwarding_ctx_t *ctx;
108 fib_path_ext_t *path_ext;
114 * if the path is not resolved, don't include it.
116 if (!fib_path_is_resolved(path_index))
122 * get the matching path-extension for the path being visited.
/* linear scan: path-extension vectors are small (one per tunnel path) */
125 vec_foreach(path_ext, ctx->mt->mt_path_exts)
127 if (path_ext->fpe_path_index == path_index)
137 * found a matching extension. stack it to obtain the forwarding
138 * info for this path.
140 ctx->next_hops = fib_path_ext_stack(path_ext,
149 * There should be a path-extension associated with each path
156 mpls_tunnel_mk_lb (mpls_tunnel_t *mt,
158 fib_forward_chain_type_t fct,
161 dpo_proto_t lb_proto;
164 * If the entry has path extensions then we construct a load-balance
165 * by stacking the extensions on the forwarding chains of the paths.
166 * Otherwise we use the load-balance of the path-list
168 mpls_tunnel_collect_forwarding_ctx_t ctx = {
175 * As an optimisation we allocate the vector of next-hops to be sized
176 * equal to the maximum nuber of paths we will need, which is also the
177 * most likely number we will need, since in most cases the paths are 'up'.
179 vec_validate(ctx.next_hops, fib_path_list_get_n_paths(mt->mt_path_list));
180 vec_reset_length(ctx.next_hops);
182 lb_proto = vnet_link_to_dpo_proto(linkt);
184 fib_path_list_walk(mt->mt_path_list,
185 mpls_tunnel_collect_forwarding,
188 if (!dpo_id_is_valid(dpo_lb))
193 if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
198 replicate_create(0, lb_proto));
202 flow_hash_config_t fhc;
207 fhc = MPLS_FLOW_HASH_DEFAULT;
211 fhc = IP_FLOW_HASH_DEFAULT;
221 load_balance_create(0, lb_proto, fhc));
225 if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
230 replicate_multipath_update(dpo_lb, ctx.next_hops);
234 load_balance_multipath_update(dpo_lb,
236 LOAD_BALANCE_FLAG_NONE);
237 vec_free(ctx.next_hops);
244 * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
247 mpls_tunnel_stack (adj_index_t ai)
254 sw_if_index = adj->rewrite_header.sw_if_index;
256 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
262 * while we're stacking the adj, remove the tunnel from the child list
263 * of the path list. this breaks a circular dependency of walk updates
264 * where the create of adjacencies in the children can lead to walks
265 * that get back here.
267 fib_path_list_lock(mt->mt_path_list);
269 fib_path_list_child_remove(mt->mt_path_list,
270 mt->mt_sibling_index);
273 * Construct the DPO (load-balance or replicate) that we can stack
274 * the tunnel's midchain on
276 if (vnet_hw_interface_get_flags(vnet_get_main(),
277 mt->mt_hw_if_index) &
278 VNET_HW_INTERFACE_FLAG_LINK_UP)
280 dpo_id_t dpo = DPO_INVALID;
282 mpls_tunnel_mk_lb(mt,
284 FIB_FORW_CHAIN_TYPE_MPLS_EOS,
287 adj_nbr_midchain_stack(ai, &dpo);
292 adj_nbr_midchain_unstack(ai);
295 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
296 FIB_NODE_TYPE_MPLS_TUNNEL,
297 mt - mpls_tunnel_pool);
299 fib_path_list_lock(mt->mt_path_list);
303 * @brief Call back when restacking all adjacencies on a MPLS interface
306 mpls_adj_walk_cb (adj_index_t ai,
309 mpls_tunnel_stack(ai);
311 return (ADJ_WALK_RC_CONTINUE);
315 mpls_tunnel_restack (mpls_tunnel_t *mt)
317 fib_protocol_t proto;
320 * walk all the adjacencies on the MPLS interface and restack them
322 FOR_EACH_FIB_PROTOCOL(proto)
324 adj_nbr_walk(mt->mt_sw_if_index,
331 static clib_error_t *
332 mpls_tunnel_admin_up_down (vnet_main_t * vnm,
336 vnet_hw_interface_t * hi;
339 hi = vnet_get_hw_interface (vnm, hw_if_index);
341 mt = mpls_tunnel_get_from_sw_if_index(hi->sw_if_index);
346 if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
347 vnet_hw_interface_set_flags (vnm, hw_if_index,
348 VNET_HW_INTERFACE_FLAG_LINK_UP);
350 vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
352 mpls_tunnel_restack(mt);
358 * @brief Fixup the adj rewrite post encap. This is a no-op since the
359 * rewrite is a stack of labels.
362 mpls_tunnel_fixup (vlib_main_t *vm,
367 * A no-op w.r.t. the header. but reset the 'have we pushed any
368 * MPLS labels onto the packet' flag. That way when we enter the
369 * tunnel we'll get a TTL set to 255
371 vnet_buffer(b0)->mpls.first = 0;
375 mpls_tunnel_update_adj (vnet_main_t * vnm,
381 ASSERT(ADJ_INDEX_INVALID != ai);
385 switch (adj->lookup_next_index)
387 case IP_LOOKUP_NEXT_ARP:
388 case IP_LOOKUP_NEXT_GLEAN:
389 adj_nbr_midchain_update_rewrite(ai, mpls_tunnel_fixup,
391 mpls_tunnel_build_rewrite_i());
393 case IP_LOOKUP_NEXT_MCAST:
395 * Construct a partial rewrite from the known ethernet mcast dest MAC
396 * There's no MAC fixup, so the last 2 parameters are 0
398 adj_mcast_midchain_update_rewrite(ai, mpls_tunnel_fixup,
400 mpls_tunnel_build_rewrite_i(),
404 case IP_LOOKUP_NEXT_DROP:
405 case IP_LOOKUP_NEXT_PUNT:
406 case IP_LOOKUP_NEXT_LOCAL:
407 case IP_LOOKUP_NEXT_REWRITE:
408 case IP_LOOKUP_NEXT_MIDCHAIN:
409 case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
410 case IP_LOOKUP_NEXT_ICMP_ERROR:
411 case IP_LOOKUP_N_NEXT:
416 mpls_tunnel_stack(ai);
420 format_mpls_tunnel_name (u8 * s, va_list * args)
422 u32 dev_instance = va_arg (*args, u32);
423 return format (s, "mpls-tunnel%d", dev_instance);
427 format_mpls_tunnel_device (u8 * s, va_list * args)
429 u32 dev_instance = va_arg (*args, u32);
430 CLIB_UNUSED (int verbose) = va_arg (*args, int);
432 return (format (s, "MPLS-tunnel: id %d\n", dev_instance));
436 * @brief Packet trace structure
438 typedef struct mpls_tunnel_trace_t_
441 * Tunnel-id / index in tunnel vector
444 } mpls_tunnel_trace_t;
447 format_mpls_tunnel_tx_trace (u8 * s,
450 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
451 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
452 mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *);
454 s = format (s, "MPLS: tunnel %d", t->tunnel_id);
459 * @brief TX function. Only called L2. L3 traffic uses the adj-midchains
462 mpls_tunnel_tx (vlib_main_t * vm,
463 vlib_node_runtime_t * node,
464 vlib_frame_t * frame)
467 u32 * from, * to_next, n_left_from, n_left_to_next;
468 vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
469 const mpls_tunnel_t *mt;
471 mt = pool_elt_at_index(mpls_tunnel_pool, rd->dev_instance);
473 /* Vector of buffer / pkt indices we're supposed to process */
474 from = vlib_frame_vector_args (frame);
476 /* Number of buffers / pkts */
477 n_left_from = frame->n_vectors;
479 /* Speculatively send the first buffer to the last disposition we used */
480 next_index = node->cached_next_index;
482 while (n_left_from > 0)
484 /* set up to enqueue to our disposition with index = next_index */
485 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
490 while (n_left_from > 0 && n_left_to_next > 0)
502 b0 = vlib_get_buffer(vm, bi0);
504 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_adj;
506 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
508 mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
510 tr->tunnel_id = rd->dev_instance;
513 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
514 to_next, n_left_to_next,
515 bi0, mt->mt_l2_tx_arc);
518 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
521 return frame->n_vectors;
524 VNET_DEVICE_CLASS (mpls_tunnel_class) = {
525 .name = "MPLS tunnel device",
526 .format_device_name = format_mpls_tunnel_name,
527 .format_device = format_mpls_tunnel_device,
528 .format_tx_trace = format_mpls_tunnel_tx_trace,
529 .tx_function = mpls_tunnel_tx,
530 .admin_up_down_function = mpls_tunnel_admin_up_down,
533 VNET_HW_INTERFACE_CLASS (mpls_tunnel_hw_interface_class) = {
534 .name = "MPLS-Tunnel",
535 // .format_header = format_mpls_eth_header_with_length,
536 // .unformat_header = unformat_mpls_eth_header,
537 .update_adjacency = mpls_tunnel_update_adj,
538 .build_rewrite = mpls_tunnel_build_rewrite,
539 .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
542 const mpls_tunnel_t *
543 mpls_tunnel_get (u32 mti)
545 return (pool_elt_at_index(mpls_tunnel_pool, mti));
549 * @brief Walk all the MPLS tunnels
552 mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb,
557 pool_foreach_index(mti, mpls_tunnel_pool,
564 vnet_mpls_tunnel_del (u32 sw_if_index)
568 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
573 if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
574 fib_path_list_child_remove(mt->mt_path_list,
575 mt->mt_sibling_index);
576 if (ADJ_INDEX_INVALID != mt->mt_l2_adj)
577 adj_unlock(mt->mt_l2_adj);
579 vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index);
580 pool_put(mpls_tunnel_pool, mt);
581 mpls_tunnel_db[sw_if_index] = ~0;
/**
 * Create a new MPLS tunnel: allocate the pool entry, create or recycle
 * the HW interface, register it in the SW-if-index DB and (per the
 * visible tail) set up the L2 adjacency and TX arc. Returns the new
 * tunnel's sw_if_index.
 *
 * NOTE(review): this chunk appears truncated - several argument lists
 * and braces are missing; comments describe only the visible lines.
 */
585 vnet_mpls_tunnel_create (u8 l2_only,
588 vnet_hw_interface_t * hi;
593 vnm = vnet_get_main();
/* allocate and zero a new tunnel instance */
594 pool_get(mpls_tunnel_pool, mt);
595 memset (mt, 0, sizeof (*mt));
596 mti = mt - mpls_tunnel_pool;
597 fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL);
598 mt->mt_l2_adj = ADJ_INDEX_INVALID;
599 mt->mt_path_list = FIB_NODE_INDEX_INVALID;
600 mt->mt_sibling_index = FIB_NODE_INDEX_INVALID;
/* presumably guarded by an is_multicast parameter - confirm */
603 mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST;
606 * Create a new, or re-use an old, tunnel HW interface
608 if (vec_len (mpls_tunnel_free_hw_if_indices) > 0)
/* pop a recycled HW interface index from the free vector */
611 mpls_tunnel_free_hw_if_indices[vec_len(mpls_tunnel_free_hw_if_indices)-1];
612 _vec_len (mpls_tunnel_free_hw_if_indices) -= 1;
613 hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);
/* rebind the recycled interface to this tunnel instance */
614 hi->hw_instance = mti;
615 hi->dev_instance = mti;
/* else: register a brand new HW interface for this tunnel */
619 mt->mt_hw_if_index = vnet_register_interface(
621 mpls_tunnel_class.index,
623 mpls_tunnel_hw_interface_class.index,
625 hi = vnet_get_hw_interface(vnm, mt->mt_hw_if_index);
629 * Add the new tunnel to the tunnel DB - key:SW if index
631 mt->mt_sw_if_index = hi->sw_if_index;
632 vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0);
633 mpls_tunnel_db[mt->mt_sw_if_index] = mti;
/* L2 adjacency for l2-only tunnels - full argument list not visible */
638 adj_nbr_add_or_lock(fib_path_list_get_proto(mt->mt_path_list),
/* arc from the tunnel TX node into the MPLS graph - args not visible */
643 mt->mt_l2_tx_arc = vlib_node_add_named_next(vlib_get_main(),
648 return (mt->mt_sw_if_index);
652 * mpls_tunnel_path_ext_add
654 * append a path extension to the entry's list
657 mpls_tunnel_path_ext_append (mpls_tunnel_t *mt,
658 const fib_route_path_t *rpath)
660 if (NULL != rpath->frp_label_stack)
662 fib_path_ext_t *path_ext;
664 vec_add2(mt->mt_path_exts, path_ext, 1);
666 fib_path_ext_init(path_ext, mt->mt_path_list, rpath);
671 * mpls_tunnel_path_ext_insert
673 * insert, sorted, a path extension to the entry's list.
674 * It's not strictly necessary in sort the path extensions, since each
675 * extension has the path index to which it resolves. However, by being
676 * sorted the load-balance produced has a deterministic order, not an order
677 * based on the sequence of extension additions. this is a considerable benefit.
680 mpls_tunnel_path_ext_insert (mpls_tunnel_t *mt,
681 const fib_route_path_t *rpath)
683 if (0 == vec_len(mt->mt_path_exts))
684 return (mpls_tunnel_path_ext_append(mt, rpath));
686 if (NULL != rpath->frp_label_stack)
688 fib_path_ext_t path_ext;
691 fib_path_ext_init(&path_ext, mt->mt_path_list, rpath);
693 while (i < vec_len(mt->mt_path_exts) &&
694 (fib_path_ext_cmp(&mt->mt_path_exts[i], rpath) < 0))
699 vec_insert_elts(mt->mt_path_exts, &path_ext, 1, i);
/**
 * Add a path to the tunnel: create the path-list on first use, otherwise
 * copy-and-add to the existing one, re-parent the tunnel as a child,
 * re-resolve all path-extensions, insert the new extension and restack.
 *
 * NOTE(review): this chunk appears truncated - return type, braces and
 * some argument lists are missing; comments describe visible lines only.
 */
704 vnet_mpls_tunnel_path_add (u32 sw_if_index,
705 fib_route_path_t *rpaths)
710 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
715 mti = mt - mpls_tunnel_pool;
718 * construct a path-list from the path provided
/* first path: create a fresh shared path-list and become its child */
720 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
722 mt->mt_path_list = fib_path_list_create(FIB_PATH_LIST_FLAG_SHARED, rpaths);
723 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
724 FIB_NODE_TYPE_MPLS_TUNNEL,
/* subsequent paths: copy the old list adding the new path */
729 fib_node_index_t old_pl_index;
730 fib_path_ext_t *path_ext;
732 old_pl_index = mt->mt_path_list;
735 fib_path_list_copy_and_path_add(old_pl_index,
736 FIB_PATH_LIST_FLAG_SHARED,
/* move the child linkage from the old list to the new one */
739 fib_path_list_child_remove(old_pl_index,
740 mt->mt_sibling_index);
741 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
742 FIB_NODE_TYPE_MPLS_TUNNEL,
745 * re-resolve all the path-extensions with the new path-list
747 vec_foreach(path_ext, mt->mt_path_exts)
749 fib_path_ext_resolve(path_ext, mt->mt_path_list);
/* record the new path's labels, then rebuild the forwarding */
752 mpls_tunnel_path_ext_insert(mt, rpaths);
753 mpls_tunnel_restack(mt);
/**
 * Remove a path from the tunnel: copy-and-remove from the path-list,
 * re-parent the tunnel, delete the matching path-extension (preserving
 * sort order), re-resolve the remaining extensions and restack.
 * Returns the number of paths remaining on the tunnel's path-list.
 *
 * NOTE(review): this chunk appears truncated - return type, braces and
 * some argument lists are missing; comments describe visible lines only.
 */
757 vnet_mpls_tunnel_path_remove (u32 sw_if_index,
758 fib_route_path_t *rpaths)
763 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
768 mti = mt - mpls_tunnel_pool;
771 * construct a path-list from the path provided
773 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
775 /* can't remove a path if we have none */
780 fib_node_index_t old_pl_index;
781 fib_path_ext_t *path_ext;
783 old_pl_index = mt->mt_path_list;
786 fib_path_list_copy_and_path_remove(old_pl_index,
787 FIB_PATH_LIST_FLAG_SHARED,
/* detach from the old path-list before re-attaching to the new */
790 fib_path_list_child_remove(old_pl_index,
791 mt->mt_sibling_index);
/* copy-and-remove may return an invalid list when the last path went */
793 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
800 mt->mt_sibling_index =
801 fib_path_list_child_add(mt->mt_path_list,
802 FIB_NODE_TYPE_MPLS_TUNNEL,
806 * find the matching path extension and remove it
808 vec_foreach(path_ext, mt->mt_path_exts)
810 if (!fib_path_ext_cmp(path_ext, rpaths))
813 * delete the element moving the remaining elements down 1 position.
814 * this preserves the sorted order.
816 vec_free(path_ext->fpe_label_stack);
817 vec_delete(mt->mt_path_exts, 1,
818 (path_ext - mt->mt_path_exts));
823 * re-resolve all the path-extensions with the new path-list
825 vec_foreach(path_ext, mt->mt_path_exts)
827 fib_path_ext_resolve(path_ext, mt->mt_path_list);
830 mpls_tunnel_restack(mt);
833 return (fib_path_list_get_n_paths(mt->mt_path_list));
/**
 * CLI handler for 'mpls tunnel': parses add/del, out-labels, via
 * next-hops (v4/v6, with or without an interface), l2-only and
 * multicast, then creates/deletes the tunnel accordingly.
 *
 * NOTE(review): this chunk appears truncated - braces and several
 * statements are missing; comments describe visible lines only.
 */
837 static clib_error_t *
838 vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm,
839 unformat_input_t * input,
840 vlib_cli_command_t * cmd)
842 unformat_input_t _line_input, * line_input = &_line_input;
843 vnet_main_t * vnm = vnet_get_main();
844 u8 is_del = 0, l2_only = 0, is_multicast =0;
845 fib_route_path_t rpath, *rpaths = NULL;
846 mpls_label_t out_label = MPLS_LABEL_INVALID;
848 clib_error_t *error = NULL;
850 memset(&rpath, 0, sizeof(rpath));
852 /* Get a line of input. */
853 if (! unformat_user (input, unformat_line_input, line_input))
856 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
/* 'del <interface>' - presumably sets is_del and sw_if_index */
858 if (unformat (line_input, "del %U",
859 unformat_vnet_sw_interface, vnm,
862 else if (unformat (line_input, "add"))
/* each 'out-label' appends to the single path's label stack */
864 else if (unformat (line_input, "out-label %U",
865 unformat_mpls_unicast_label, &out_label))
867 vec_add1(rpath.frp_label_stack, out_label);
/* via <v4-addr> <interface>: attached next-hop */
869 else if (unformat (line_input, "via %U %U",
870 unformat_ip4_address,
872 unformat_vnet_sw_interface, vnm,
873 &rpath.frp_sw_if_index))
875 rpath.frp_weight = 1;
876 rpath.frp_proto = FIB_PROTOCOL_IP4;
/* via <v6-addr> <interface>: attached next-hop */
879 else if (unformat (line_input, "via %U %U",
880 unformat_ip6_address,
882 unformat_vnet_sw_interface, vnm,
883 &rpath.frp_sw_if_index))
885 rpath.frp_weight = 1;
886 rpath.frp_proto = FIB_PROTOCOL_IP6;
/* via <v6-addr> only: recursive next-hop in fib 0 */
888 else if (unformat (line_input, "via %U",
889 unformat_ip6_address,
890 &rpath.frp_addr.ip6))
892 rpath.frp_fib_index = 0;
893 rpath.frp_weight = 1;
894 rpath.frp_sw_if_index = ~0;
895 rpath.frp_proto = FIB_PROTOCOL_IP6;
/* via <v4-addr> only: recursive next-hop in fib 0 */
897 else if (unformat (line_input, "via %U",
898 unformat_ip4_address,
899 &rpath.frp_addr.ip4))
901 rpath.frp_fib_index = 0;
902 rpath.frp_weight = 1;
903 rpath.frp_sw_if_index = ~0;
904 rpath.frp_proto = FIB_PROTOCOL_IP4;
906 else if (unformat (line_input, "l2-only"))
908 else if (unformat (line_input, "multicast"))
912 error = clib_error_return (0, "unknown input '%U'",
913 format_unformat_error, line_input);
/* delete branch */
920 vnet_mpls_tunnel_del(sw_if_index);
/* add branch: at least one out-label is required */
924 if (0 == vec_len(rpath.frp_label_stack))
926 error = clib_error_return (0, "No Output Labels '%U'",
927 format_unformat_error, line_input);
931 vec_add1(rpaths, rpath);
932 sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast);
933 vnet_mpls_tunnel_path_add(sw_if_index, rpaths);
938 unformat_free (line_input);
944 * This command create a uni-directional MPLS tunnel
947 * @cliexstart{create mpls tunnel}
948 * create mpls tunnel via 10.0.0.1 GigEthernet0/8/0 out-label 33 out-label 34
951 VLIB_CLI_COMMAND (create_mpls_tunnel_command, static) = {
952 .path = "mpls tunnel",
954 "mpls tunnel via [addr] [interface] [out-labels]",
955 .function = vnet_create_mpls_tunnel_command_fn,
959 format_mpls_tunnel (u8 * s, va_list * args)
961 mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *);
962 mpls_tunnel_attribute_t attr;
963 fib_path_ext_t *path_ext;
965 s = format(s, "mpls_tunnel%d: sw_if_index:%d hw_if_index:%d",
966 mt - mpls_tunnel_pool,
969 if (MPLS_TUNNEL_FLAG_NONE != mt->mt_flags) {
970 s = format(s, " \n flags:");
971 FOR_EACH_MPLS_TUNNEL_ATTRIBUTE(attr) {
972 if ((1<<attr) & mt->mt_flags) {
973 s = format (s, "%s,", mpls_tunnel_attribute_names[attr]);
977 s = format(s, "\n via:\n");
978 s = fib_path_list_format(mt->mt_path_list, s);
979 s = format(s, " Extensions:");
980 vec_foreach(path_ext, mt->mt_path_exts)
982 s = format(s, "\n %U", format_fib_path_ext, path_ext);
989 static clib_error_t *
990 show_mpls_tunnel_command_fn (vlib_main_t * vm,
991 unformat_input_t * input,
992 vlib_cli_command_t * cmd)
997 if (pool_elts (mpls_tunnel_pool) == 0)
998 vlib_cli_output (vm, "No MPLS tunnels configured...");
1000 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
1002 if (unformat (input, "%d", &mti))
1010 pool_foreach (mt, mpls_tunnel_pool,
1012 vlib_cli_output (vm, "[@%d] %U",
1013 mt - mpls_tunnel_pool,
1014 format_mpls_tunnel, mt);
1019 if (pool_is_free_index(mpls_tunnel_pool, mti))
1020 return clib_error_return (0, "Not atunnel index %d", mti);
1022 mt = pool_elt_at_index(mpls_tunnel_pool, mti);
1024 vlib_cli_output (vm, "[@%d] %U",
1025 mt - mpls_tunnel_pool,
1026 format_mpls_tunnel, mt);
1033 * This command to show MPLS tunnels
1036 * @cliexstart{sh mpls tunnel 2}
1037 * [@2] mpls_tunnel2: sw_if_index:5 hw_if_index:5
1041 * index:26 locks:1 proto:ipv4 uPRF-list:26 len:1 itfs:[2, ]
1042 * index:26 pl-index:26 ipv4 weight=1 attached-nexthop: oper-flags:resolved,
1044 * [@0]: ipv4 via 10.0.0.2 loop0: IP4: de:ad:00:00:00:00 -> 00:00:11:aa:bb:cc
1047 VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = {
1048 .path = "show mpls tunnel",
1049 .function = show_mpls_tunnel_command_fn,
1052 static mpls_tunnel_t *
1053 mpls_tunnel_from_fib_node (fib_node_t *node)
1055 #if (CLIB_DEBUG > 0)
1056 ASSERT(FIB_NODE_TYPE_MPLS_TUNNEL == node->fn_type);
1058 return ((mpls_tunnel_t*) (((char*)node) -
1059 STRUCT_OFFSET_OF(mpls_tunnel_t, mt_node)));
1063 * Function definition to backwalk a FIB node
1065 static fib_node_back_walk_rc_t
1066 mpls_tunnel_back_walk (fib_node_t *node,
1067 fib_node_back_walk_ctx_t *ctx)
1069 mpls_tunnel_restack(mpls_tunnel_from_fib_node(node));
1071 return (FIB_NODE_BACK_WALK_CONTINUE);
1075 * Function definition to get a FIB node from its index
1078 mpls_tunnel_fib_node_get (fib_node_index_t index)
1082 mt = pool_elt_at_index(mpls_tunnel_pool, index);
1084 return (&mt->mt_node);
1088 * Function definition to inform the FIB node that its last lock has gone.
1091 mpls_tunnel_last_lock_gone (fib_node_t *node)
1094 * The MPLS MPLS tunnel is a root of the graph. As such
1095 * it never has children and thus is never locked.
1101 * Virtual function table registered by MPLS MPLS tunnels
1102 * for participation in the FIB object graph.
1104 const static fib_node_vft_t mpls_vft = {
1105 .fnv_get = mpls_tunnel_fib_node_get,
1106 .fnv_last_lock = mpls_tunnel_last_lock_gone,
1107 .fnv_back_walk = mpls_tunnel_back_walk,
1110 static clib_error_t *
1111 mpls_tunnel_init (vlib_main_t *vm)
1113 fib_node_register_type(FIB_NODE_TYPE_MPLS_TUNNEL, &mpls_vft);
1117 VLIB_INIT_FUNCTION(mpls_tunnel_init);