2 * mpls_tunnel.c: MPLS tunnel interfaces (i.e. for RSVP-TE)
4 * Copyright (c) 2012 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/pg/pg.h>
20 #include <vnet/mpls/mpls_tunnel.h>
21 #include <vnet/mpls/mpls_types.h>
22 #include <vnet/ip/ip.h>
23 #include <vnet/fib/fib_path_list.h>
24 #include <vnet/adj/adj_midchain.h>
25 #include <vnet/adj/adj_mcast.h>
26 #include <vnet/dpo/replicate_dpo.h>
29 * @brief pool of tunnel instances
31 static mpls_tunnel_t *mpls_tunnel_pool;
34 * @brief Pool of free tunnel SW indices - i.e. recycled indices
36 static u32 * mpls_tunnel_free_hw_if_indices;
39 * @brief DB of SW index to tunnel index
41 static u32 *mpls_tunnel_db;
44 * @brief MPLS tunnel flags strings
46 static const char *mpls_tunnel_attribute_names[] = MPLS_TUNNEL_ATTRIBUTES;
49 * @brief Get a tunnel object from a SW interface index
52 mpls_tunnel_get_from_sw_if_index (u32 sw_if_index)
54 if ((vec_len(mpls_tunnel_db) < sw_if_index) ||
55 (~0 == mpls_tunnel_db[sw_if_index]))
58 return (pool_elt_at_index(mpls_tunnel_pool,
59 mpls_tunnel_db[sw_if_index]));
63 * @brief Build a rewrite string for the MPLS tunnel.
66 mpls_tunnel_build_rewrite_i (void)
69 * passing the adj code a NULL rewirte means 'i don't have one cos
70 * t'other end is unresolved'. That's not the case here. For the mpls
71 * tunnel there are just no bytes of encap to apply in the adj. We'll impose
72 * the label stack once we choose a path. So return a zero length rewrite.
76 vec_validate(rewrite, 0);
77 vec_reset_length(rewrite);
83 * @brief Build a rewrite string for the MPLS tunnel.
86 mpls_tunnel_build_rewrite (vnet_main_t * vnm,
88 vnet_link_t link_type,
89 const void *dst_address)
91 return (mpls_tunnel_build_rewrite_i());
94 typedef struct mpls_tunnel_collect_forwarding_ctx_t_
96 load_balance_path_t * next_hops;
97 const mpls_tunnel_t *mt;
98 fib_forward_chain_type_t fct;
99 } mpls_tunnel_collect_forwarding_ctx_t;
/*
 * Path-list walk visitor: for each resolved path, find its matching
 * path-extension and stack it to collect forwarding info into
 * ctx->next_hops.
 * NOTE(review): extraction fragment — signature tail, returns and
 * braces are not visible in this chunk.
 */
102 mpls_tunnel_collect_forwarding (fib_node_index_t pl_index,
103 fib_node_index_t path_index,
106 mpls_tunnel_collect_forwarding_ctx_t *ctx;
107 fib_path_ext_t *path_ext;
113 * if the path is not resolved, don't include it.
115 if (!fib_path_is_resolved(path_index))
121 * get the matching path-extension for the path being visited.
124 vec_foreach(path_ext, ctx->mt->mt_path_exts)
126 if (path_ext->fpe_path_index == path_index)
136 * found a matching extension. stack it to obtain the forwarding
137 * info for this path.
139 ctx->next_hops = fib_path_ext_stack(path_ext,
/* reached only when no extension matched the path */
148 * There should be a path-extension associated with each path
/*
 * Build the load-balance (unicast) or replicate (multicast) DPO for the
 * tunnel from the forwarding info of its path-list, then populate it
 * with the collected next-hops.
 * NOTE(review): extraction fragment — several lines (params, braces,
 * else-arms) are not visible in this chunk.
 */
155 mpls_tunnel_mk_lb (mpls_tunnel_t *mt,
157 fib_forward_chain_type_t fct,
160 dpo_proto_t lb_proto;
163 * If the entry has path extensions then we construct a load-balance
164 * by stacking the extensions on the forwarding chains of the paths.
165 * Otherwise we use the load-balance of the path-list
167 mpls_tunnel_collect_forwarding_ctx_t ctx = {
174 * As an optimisation we allocate the vector of next-hops to be sized
175 * equal to the maximum number of paths we will need, which is also the
176 * most likely number we will need, since in most cases the paths are 'up'.
178 vec_validate(ctx.next_hops, fib_path_list_get_n_paths(mt->mt_path_list));
179 vec_reset_length(ctx.next_hops);
181 lb_proto = vnet_link_to_dpo_proto(linkt);
183 fib_path_list_walk(mt->mt_path_list,
184 mpls_tunnel_collect_forwarding,
/* first time through: create the DPO of the appropriate type */
187 if (!dpo_id_is_valid(dpo_lb))
192 if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
197 replicate_create(0, lb_proto));
201 flow_hash_config_t fhc;
204 /* fhc = fib_table_get_flow_hash_config(fib_entry->fe_fib_index, */
205 /* dpo_proto_to_fib(lb_proto)); */
209 load_balance_create(0, lb_proto, fhc));
/* install the collected next-hops into the DPO */
213 if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
218 replicate_multipath_update(dpo_lb, ctx.next_hops);
222 load_balance_multipath_update(dpo_lb,
224 LOAD_BALANCE_FLAG_NONE);
225 vec_free(ctx.next_hops);
232 * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
235 mpls_tunnel_stack (adj_index_t ai)
242 sw_if_index = adj->rewrite_header.sw_if_index;
244 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
250 * while we're stacking the adj, remove the tunnel from the child list
251 * of the path list. this breaks a circular dependency of walk updates
252 * where the create of adjacencies in the children can lead to walks
253 * that get back here.
255 fib_path_list_lock(mt->mt_path_list);
257 fib_path_list_child_remove(mt->mt_path_list,
258 mt->mt_sibling_index);
261 * Construct the DPO (load-balance or replicate) that we can stack
262 * the tunnel's midchain on
264 if (vnet_hw_interface_get_flags(vnet_get_main(),
265 mt->mt_hw_if_index) &
266 VNET_HW_INTERFACE_FLAG_LINK_UP)
268 dpo_id_t dpo = DPO_INVALID;
270 mpls_tunnel_mk_lb(mt,
272 FIB_FORW_CHAIN_TYPE_MPLS_EOS,
275 adj_nbr_midchain_stack(ai, &dpo);
280 adj_nbr_midchain_unstack(ai);
283 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
284 FIB_NODE_TYPE_MPLS_TUNNEL,
285 mt - mpls_tunnel_pool);
287 fib_path_list_lock(mt->mt_path_list);
291 * @brief Call back when restacking all adjacencies on a MPLS interface
294 mpls_adj_walk_cb (adj_index_t ai,
297 mpls_tunnel_stack(ai);
299 return (ADJ_WALK_RC_CONTINUE);
303 mpls_tunnel_restack (mpls_tunnel_t *mt)
305 fib_protocol_t proto;
308 * walk all the adjacencies on the MPLS interface and restack them
310 FOR_EACH_FIB_PROTOCOL(proto)
312 adj_nbr_walk(mt->mt_sw_if_index,
319 static clib_error_t *
320 mpls_tunnel_admin_up_down (vnet_main_t * vnm,
324 vnet_hw_interface_t * hi;
327 hi = vnet_get_hw_interface (vnm, hw_if_index);
329 mt = mpls_tunnel_get_from_sw_if_index(hi->sw_if_index);
334 if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
335 vnet_hw_interface_set_flags (vnm, hw_if_index,
336 VNET_HW_INTERFACE_FLAG_LINK_UP);
338 vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
340 mpls_tunnel_restack(mt);
346 * @brief Fixup the adj rewrite post encap. This is a no-op since the
347 * rewrite is a stack of labels.
350 mpls_tunnel_fixup (vlib_main_t *vm,
355 * A no-op w.r.t. the header. but reset the 'have we pushed any
356 * MPLS labels onto the packet' flag. That way when we enter the
357 * tunnel we'll get a TTL set to 255
359 vnet_buffer(b0)->mpls.first = 0;
/*
 * Interface-class update_adjacency hook: convert the adj into a midchain
 * (unicast or mcast flavour) with a zero-length rewrite, then stack it
 * on the tunnel's forwarding.
 * NOTE(review): extraction fragment — break statements, braces and some
 * call arguments are not visible in this chunk.
 */
363 mpls_tunnel_update_adj (vnet_main_t * vnm,
369     ASSERT(ADJ_INDEX_INVALID != ai);
373     switch (adj->lookup_next_index)
375     case IP_LOOKUP_NEXT_ARP:
376     case IP_LOOKUP_NEXT_GLEAN:
377 	adj_nbr_midchain_update_rewrite(ai, mpls_tunnel_fixup,
379 					mpls_tunnel_build_rewrite_i());
381     case IP_LOOKUP_NEXT_MCAST:
383 	 * Construct a partial rewrite from the known ethernet mcast dest MAC
384 	 * There's no MAC fixup, so the last 2 parameters are 0
386 	adj_mcast_midchain_update_rewrite(ai, mpls_tunnel_fixup,
388 					  mpls_tunnel_build_rewrite_i(),
/* these adj types should never reach an MPLS tunnel's update hook */
392     case IP_LOOKUP_NEXT_DROP:
393     case IP_LOOKUP_NEXT_PUNT:
394     case IP_LOOKUP_NEXT_LOCAL:
395     case IP_LOOKUP_NEXT_REWRITE:
396     case IP_LOOKUP_NEXT_MIDCHAIN:
397     case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
398     case IP_LOOKUP_NEXT_ICMP_ERROR:
399     case IP_LOOKUP_N_NEXT:
/* resolve the recursion for the freshly-converted midchain */
404     mpls_tunnel_stack(ai);
408 format_mpls_tunnel_name (u8 * s, va_list * args)
410 u32 dev_instance = va_arg (*args, u32);
411 return format (s, "mpls-tunnel%d", dev_instance);
415 format_mpls_tunnel_device (u8 * s, va_list * args)
417 u32 dev_instance = va_arg (*args, u32);
418 CLIB_UNUSED (int verbose) = va_arg (*args, int);
420 return (format (s, "MPLS-tunnel: id %d\n", dev_instance));
424 * @brief Packet trace structure
426 typedef struct mpls_tunnel_trace_t_
429 * Tunnel-id / index in tunnel vector
432 } mpls_tunnel_trace_t;
435 format_mpls_tunnel_tx_trace (u8 * s,
438 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
439 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
440 mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *);
442 s = format (s, "MPLS: tunnel %d", t->tunnel_id);
/*
 * Device TX node: sets each buffer's TX adjacency to the tunnel's L2
 * adj and enqueues it on the tunnel's L2 TX arc.
 * NOTE(review): extraction fragment — buffer-index bookkeeping and
 * braces are not visible in this chunk.
 */
447 * @brief TX function. Only called L2. L3 traffic uses the adj-midchains
450 mpls_tunnel_tx (vlib_main_t * vm,
451 vlib_node_runtime_t * node,
452 vlib_frame_t * frame)
455 u32 * from, * to_next, n_left_from, n_left_to_next;
456 vnet_interface_output_runtime_t * rd = (void *) node->runtime_data;
457 const mpls_tunnel_t *mt;
459 mt = pool_elt_at_index(mpls_tunnel_pool, rd->dev_instance);
461 /* Vector of buffer / pkt indices we're supposed to process */
462 from = vlib_frame_vector_args (frame);
464 /* Number of buffers / pkts */
465 n_left_from = frame->n_vectors;
467 /* Speculatively send the first buffer to the last disposition we used */
468 next_index = node->cached_next_index;
470 while (n_left_from > 0)
472 /* set up to enqueue to our disposition with index = next_index */
473 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
478 while (n_left_from > 0 && n_left_to_next > 0)
490 b0 = vlib_get_buffer(vm, bi0);
/* forward via the tunnel's L2 adjacency */
492 vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_adj;
494 if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
496 mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
498 tr->tunnel_id = rd->dev_instance;
501 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
502 to_next, n_left_to_next,
503 bi0, mt->mt_l2_tx_arc);
506 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
509 return frame->n_vectors;
512 VNET_DEVICE_CLASS (mpls_tunnel_class) = {
513 .name = "MPLS tunnel device",
514 .format_device_name = format_mpls_tunnel_name,
515 .format_device = format_mpls_tunnel_device,
516 .format_tx_trace = format_mpls_tunnel_tx_trace,
517 .tx_function = mpls_tunnel_tx,
518 .admin_up_down_function = mpls_tunnel_admin_up_down,
521 VNET_HW_INTERFACE_CLASS (mpls_tunnel_hw_interface_class) = {
522 .name = "MPLS-Tunnel",
523 // .format_header = format_mpls_eth_header_with_length,
524 // .unformat_header = unformat_mpls_eth_header,
525 .update_adjacency = mpls_tunnel_update_adj,
526 .build_rewrite = mpls_tunnel_build_rewrite,
527 .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
530 const mpls_tunnel_t *
531 mpls_tunnel_get (u32 mti)
533 return (pool_elt_at_index(mpls_tunnel_pool, mti));
537 * @brief Walk all the MPLS tunnels
540 mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb,
545 pool_foreach_index(mti, mpls_tunnel_pool,
552 vnet_mpls_tunnel_del (u32 sw_if_index)
556 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
561 if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
562 fib_path_list_child_remove(mt->mt_path_list,
563 mt->mt_sibling_index);
564 if (ADJ_INDEX_INVALID != mt->mt_l2_adj)
565 adj_unlock(mt->mt_l2_adj);
567 vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index);
568 pool_put(mpls_tunnel_pool, mt);
569 mpls_tunnel_db[sw_if_index] = ~0;
/*
 * Create a new MPLS tunnel: allocate the pool element, create or recycle
 * the HW interface, register it in the SW-index DB and (unless l2_only)
 * hook up the L2 adjacency and TX arc. Returns the new SW if index.
 * NOTE(review): extraction fragment — several argument lines and braces
 * are not visible in this chunk.
 */
573 vnet_mpls_tunnel_create (u8 l2_only,
576 vnet_hw_interface_t * hi;
581 vnm = vnet_get_main();
582 pool_get(mpls_tunnel_pool, mt);
583 memset (mt, 0, sizeof (*mt));
584 mti = mt - mpls_tunnel_pool;
585 fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL);
586 mt->mt_l2_adj = ADJ_INDEX_INVALID;
587 mt->mt_path_list = FIB_NODE_INDEX_INVALID;
588 mt->mt_sibling_index = FIB_NODE_INDEX_INVALID;
591 mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST;
594 * Create a new, or re-use an old, tunnel HW interface
596 if (vec_len (mpls_tunnel_free_hw_if_indices) > 0)
599 mpls_tunnel_free_hw_if_indices[vec_len(mpls_tunnel_free_hw_if_indices)-1];
600 _vec_len (mpls_tunnel_free_hw_if_indices) -= 1;
601 hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);
/* point the recycled interface at this tunnel instance */
602 hi->hw_instance = mti;
603 hi->dev_instance = mti;
607 mt->mt_hw_if_index = vnet_register_interface(
609 mpls_tunnel_class.index,
611 mpls_tunnel_hw_interface_class.index,
613 hi = vnet_get_hw_interface(vnm, mt->mt_hw_if_index);
617 * Add the new tunnel to the tunnel DB - key:SW if index
619 mt->mt_sw_if_index = hi->sw_if_index;
620 vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0);
621 mpls_tunnel_db[mt->mt_sw_if_index] = mti;
626 adj_nbr_add_or_lock(fib_path_list_get_proto(mt->mt_path_list),
631 mt->mt_l2_tx_arc = vlib_node_add_named_next(vlib_get_main(),
636 return (mt->mt_sw_if_index);
640 * mpls_tunnel_path_ext_add
642 * append a path extension to the entry's list
645 mpls_tunnel_path_ext_append (mpls_tunnel_t *mt,
646 const fib_route_path_t *rpath)
648 if (NULL != rpath->frp_label_stack)
650 fib_path_ext_t *path_ext;
652 vec_add2(mt->mt_path_exts, path_ext, 1);
654 fib_path_ext_init(path_ext, mt->mt_path_list, rpath);
659 * mpls_tunnel_path_ext_insert
661 * insert, sorted, a path extension to the entry's list.
662 * It's not strictly necessary in sort the path extensions, since each
663 * extension has the path index to which it resolves. However, by being
664 * sorted the load-balance produced has a deterministic order, not an order
665 * based on the sequence of extension additions. this is a considerable benefit.
668 mpls_tunnel_path_ext_insert (mpls_tunnel_t *mt,
669 const fib_route_path_t *rpath)
671 if (0 == vec_len(mt->mt_path_exts))
672 return (mpls_tunnel_path_ext_append(mt, rpath));
674 if (NULL != rpath->frp_label_stack)
676 fib_path_ext_t path_ext;
679 fib_path_ext_init(&path_ext, mt->mt_path_list, rpath);
681 while (i < vec_len(mt->mt_path_exts) &&
682 (fib_path_ext_cmp(&mt->mt_path_exts[i], rpath) < 0))
687 vec_insert_elts(mt->mt_path_exts, &path_ext, 1, i);
/*
 * Add a path to the tunnel: create the path-list on first use, otherwise
 * copy-and-extend the existing one, re-parent the tunnel as a child of
 * the new list, re-resolve all path extensions, then restack.
 * NOTE(review): extraction fragment — braces, else-arms and some call
 * arguments are not visible in this chunk.
 */
692 vnet_mpls_tunnel_path_add (u32 sw_if_index,
693 fib_route_path_t *rpaths)
698 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
703 mti = mt - mpls_tunnel_pool;
706 * construct a path-list from the path provided
708 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
710 mt->mt_path_list = fib_path_list_create(FIB_PATH_LIST_FLAG_SHARED, rpaths);
711 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
712 FIB_NODE_TYPE_MPLS_TUNNEL,
717 fib_node_index_t old_pl_index;
718 fib_path_ext_t *path_ext;
720 old_pl_index = mt->mt_path_list;
723 fib_path_list_copy_and_path_add(old_pl_index,
724 FIB_PATH_LIST_FLAG_SHARED,
/* move the tunnel's child registration to the new path-list */
727 fib_path_list_child_remove(old_pl_index,
728 mt->mt_sibling_index);
729 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
730 FIB_NODE_TYPE_MPLS_TUNNEL,
733 * re-resolve all the path-extensions with the new path-list
735 vec_foreach(path_ext, mt->mt_path_exts)
737 fib_path_ext_resolve(path_ext, mt->mt_path_list);
740 mpls_tunnel_path_ext_insert(mt, rpaths);
741 mpls_tunnel_restack(mt);
/*
 * Remove a path from the tunnel: copy-and-shrink the path-list,
 * re-parent the tunnel, delete the matching path extension (preserving
 * sort order), re-resolve the rest and restack. Returns the number of
 * paths remaining.
 * NOTE(review): extraction fragment — braces, early returns and some
 * call arguments are not visible in this chunk.
 */
745 vnet_mpls_tunnel_path_remove (u32 sw_if_index,
746 fib_route_path_t *rpaths)
751 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
756 mti = mt - mpls_tunnel_pool;
759 * construct a path-list from the path provided
761 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
763 /* can't remove a path if we don't have one */
768 fib_node_index_t old_pl_index;
769 fib_path_ext_t *path_ext;
771 old_pl_index = mt->mt_path_list;
774 fib_path_list_copy_and_path_remove(old_pl_index,
775 FIB_PATH_LIST_FLAG_SHARED,
778 fib_path_list_child_remove(old_pl_index,
779 mt->mt_sibling_index);
/* removing the last path leaves no path-list to re-parent on */
781 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
788 mt->mt_sibling_index =
789 fib_path_list_child_add(mt->mt_path_list,
790 FIB_NODE_TYPE_MPLS_TUNNEL,
794 * find the matching path extension and remove it
796 vec_foreach(path_ext, mt->mt_path_exts)
798 if (!fib_path_ext_cmp(path_ext, rpaths))
801 * delete the element moving the remaining elements down 1 position.
802 * this preserves the sorted order.
804 vec_free(path_ext->fpe_label_stack);
805 vec_delete(mt->mt_path_exts, 1,
806 (path_ext - mt->mt_path_exts));
811 * re-resolve all the path-extensions with the new path-list
813 vec_foreach(path_ext, mt->mt_path_exts)
815 fib_path_ext_resolve(path_ext, mt->mt_path_list);
818 mpls_tunnel_restack(mt);
821 return (fib_path_list_get_n_paths(mt->mt_path_list));
/*
 * CLI handler for 'mpls tunnel': parses add/del, out-labels and one
 * 'via' next-hop (v4/v6, with or without interface), then creates the
 * tunnel and adds the path, or deletes an existing tunnel.
 * NOTE(review): extraction fragment — braces, several parse targets and
 * the done/cleanup label are not visible in this chunk.
 */
825 static clib_error_t *
826 vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm,
827 unformat_input_t * input,
828 vlib_cli_command_t * cmd)
830 unformat_input_t _line_input, * line_input = &_line_input;
831 vnet_main_t * vnm = vnet_get_main();
832 u8 is_del = 0, l2_only = 0, is_multicast =0;
833 fib_route_path_t rpath, *rpaths = NULL;
834 mpls_label_t out_label = MPLS_LABEL_INVALID;
836 clib_error_t *error = NULL;
838 memset(&rpath, 0, sizeof(rpath));
840 /* Get a line of input. */
841 if (! unformat_user (input, unformat_line_input, line_input))
844 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
846 if (unformat (line_input, "del %U",
847 unformat_vnet_sw_interface, vnm,
850 else if (unformat (line_input, "add"))
852 else if (unformat (line_input, "out-label %U",
853 unformat_mpls_unicast_label, &out_label))
855 vec_add1(rpath.frp_label_stack, out_label);
/* 'via <v4-addr> <interface>': attached next-hop */
857 else if (unformat (line_input, "via %U %U",
858 unformat_ip4_address,
860 unformat_vnet_sw_interface, vnm,
861 &rpath.frp_sw_if_index))
863 rpath.frp_weight = 1;
864 rpath.frp_proto = FIB_PROTOCOL_IP4;
867 else if (unformat (line_input, "via %U %U",
868 unformat_ip6_address,
870 unformat_vnet_sw_interface, vnm,
871 &rpath.frp_sw_if_index))
873 rpath.frp_weight = 1;
874 rpath.frp_proto = FIB_PROTOCOL_IP6;
/* 'via <addr>' without an interface: recursive next-hop */
876 else if (unformat (line_input, "via %U",
877 unformat_ip6_address,
878 &rpath.frp_addr.ip6))
880 rpath.frp_fib_index = 0;
881 rpath.frp_weight = 1;
882 rpath.frp_sw_if_index = ~0;
883 rpath.frp_proto = FIB_PROTOCOL_IP6;
885 else if (unformat (line_input, "via %U",
886 unformat_ip4_address,
887 &rpath.frp_addr.ip4))
889 rpath.frp_fib_index = 0;
890 rpath.frp_weight = 1;
891 rpath.frp_sw_if_index = ~0;
892 rpath.frp_proto = FIB_PROTOCOL_IP4;
894 else if (unformat (line_input, "l2-only"))
896 else if (unformat (line_input, "multicast"))
900 error = clib_error_return (0, "unknown input '%U'",
901 format_unformat_error, line_input);
908 vnet_mpls_tunnel_del(sw_if_index);
912 if (0 == vec_len(rpath.frp_label_stack))
914 error = clib_error_return (0, "No Output Labels '%U'",
915 format_unformat_error, line_input);
919 vec_add1(rpaths, rpath);
920 sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast);
921 vnet_mpls_tunnel_path_add(sw_if_index, rpaths);
926 unformat_free (line_input);
932 * This command create a uni-directional MPLS tunnel
935 * @cliexstart{create mpls tunnel}
936 * create mpls tunnel via 10.0.0.1 GigEthernet0/8/0 out-label 33 out-label 34
939 VLIB_CLI_COMMAND (create_mpls_tunnel_command, static) = {
940 .path = "mpls tunnel",
942 "mpls tunnel via [addr] [interface] [out-labels]",
943 .function = vnet_create_mpls_tunnel_command_fn,
/*
 * Format a tunnel for CLI output: indices, flag names, the path-list
 * 'via' section and the per-path extensions.
 * NOTE(review): extraction fragment — braces and some format arguments
 * are not visible in this chunk.
 */
947 format_mpls_tunnel (u8 * s, va_list * args)
949 mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *);
950 mpls_tunnel_attribute_t attr;
951 fib_path_ext_t *path_ext;
953 s = format(s, "mpls_tunnel%d: sw_if_index:%d hw_if_index:%d",
954 mt - mpls_tunnel_pool,
957 if (MPLS_TUNNEL_FLAG_NONE != mt->mt_flags) {
958 s = format(s, " \n flags:");
959 FOR_EACH_MPLS_TUNNEL_ATTRIBUTE(attr) {
960 if ((1<<attr) & mt->mt_flags) {
961 s = format (s, "%s,", mpls_tunnel_attribute_names[attr]);
965 s = format(s, "\n via:\n");
966 s = fib_path_list_format(mt->mt_path_list, s);
967 s = format(s, " Extensions:");
968 vec_foreach(path_ext, mt->mt_path_exts)
970 s = format(s, "\n %U", format_fib_path_ext, path_ext);
977 static clib_error_t *
978 show_mpls_tunnel_command_fn (vlib_main_t * vm,
979 unformat_input_t * input,
980 vlib_cli_command_t * cmd)
985 if (pool_elts (mpls_tunnel_pool) == 0)
986 vlib_cli_output (vm, "No MPLS tunnels configured...");
988 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
990 if (unformat (input, "%d", &mti))
998 pool_foreach (mt, mpls_tunnel_pool,
1000 vlib_cli_output (vm, "[@%d] %U",
1001 mt - mpls_tunnel_pool,
1002 format_mpls_tunnel, mt);
1007 if (pool_is_free_index(mpls_tunnel_pool, mti))
1008 return clib_error_return (0, "Not atunnel index %d", mti);
1010 mt = pool_elt_at_index(mpls_tunnel_pool, mti);
1012 vlib_cli_output (vm, "[@%d] %U",
1013 mt - mpls_tunnel_pool,
1014 format_mpls_tunnel, mt);
1021 * This command to show MPLS tunnels
1024 * @cliexstart{sh mpls tunnel 2}
1025 * [@2] mpls_tunnel2: sw_if_index:5 hw_if_index:5
1029 * index:26 locks:1 proto:ipv4 uPRF-list:26 len:1 itfs:[2, ]
1030 * index:26 pl-index:26 ipv4 weight=1 attached-nexthop: oper-flags:resolved,
1032 * [@0]: ipv4 via 10.0.0.2 loop0: IP4: de:ad:00:00:00:00 -> 00:00:11:aa:bb:cc
1035 VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = {
1036 .path = "show mpls tunnel",
1037 .function = show_mpls_tunnel_command_fn,
1040 static mpls_tunnel_t *
1041 mpls_tunnel_from_fib_node (fib_node_t *node)
1043 #if (CLIB_DEBUG > 0)
1044 ASSERT(FIB_NODE_TYPE_MPLS_TUNNEL == node->fn_type);
1046 return ((mpls_tunnel_t*) (((char*)node) -
1047 STRUCT_OFFSET_OF(mpls_tunnel_t, mt_node)));
1051 * Function definition to backwalk a FIB node
1053 static fib_node_back_walk_rc_t
1054 mpls_tunnel_back_walk (fib_node_t *node,
1055 fib_node_back_walk_ctx_t *ctx)
1057 mpls_tunnel_restack(mpls_tunnel_from_fib_node(node));
1059 return (FIB_NODE_BACK_WALK_CONTINUE);
1063 * Function definition to get a FIB node from its index
1066 mpls_tunnel_fib_node_get (fib_node_index_t index)
1070 mt = pool_elt_at_index(mpls_tunnel_pool, index);
1072 return (&mt->mt_node);
1076 * Function definition to inform the FIB node that its last lock has gone.
1079 mpls_tunnel_last_lock_gone (fib_node_t *node)
1082 * The MPLS MPLS tunnel is a root of the graph. As such
1083 * it never has children and thus is never locked.
1089 * Virtual function table registered by MPLS MPLS tunnels
1090 * for participation in the FIB object graph.
1092 const static fib_node_vft_t mpls_vft = {
1093 .fnv_get = mpls_tunnel_fib_node_get,
1094 .fnv_last_lock = mpls_tunnel_last_lock_gone,
1095 .fnv_back_walk = mpls_tunnel_back_walk,
1098 static clib_error_t *
1099 mpls_tunnel_init (vlib_main_t *vm)
1101 fib_node_register_type(FIB_NODE_TYPE_MPLS_TUNNEL, &mpls_vft);
1105 VLIB_INIT_FUNCTION(mpls_tunnel_init);