2 * mpls_tunnel.c: MPLS tunnel interfaces (i.e. for RSVP-TE)
4 * Copyright (c) 2012 Cisco and/or its affiliates.
5 * Licensed under the Apache License, Version 2.0 (the "License");
6 * you may not use this file except in compliance with the License.
7 * You may obtain a copy of the License at:
9 * http://www.apache.org/licenses/LICENSE-2.0
11 * Unless required by applicable law or agreed to in writing, software
12 * distributed under the License is distributed on an "AS IS" BASIS,
13 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14 * See the License for the specific language governing permissions and
15 * limitations under the License.
18 #include <vnet/vnet.h>
19 #include <vnet/mpls/mpls_tunnel.h>
20 #include <vnet/mpls/mpls_types.h>
21 #include <vnet/ip/ip.h>
22 #include <vnet/fib/fib_path_list.h>
23 #include <vnet/adj/adj_midchain.h>
24 #include <vnet/adj/adj_mcast.h>
25 #include <vnet/dpo/replicate_dpo.h>
26 #include <vnet/fib/mpls_fib.h>
/* NOTE(review): this extraction is damaged — original line numbers are embedded
 * as literal prefixes and are non-contiguous (interior lines are missing).
 * Code tokens are left byte-identical; only comments are added. */
/* Module-level state: tunnel pool, a sw_if_index -> pool-index reverse map,
 * attribute-name strings, and the per-packet trace record. */
29 * @brief pool of tunnel instances
31 static mpls_tunnel_t *mpls_tunnel_pool;
34 * @brief DB of SW index to tunnel index
/* vec indexed by sw_if_index; ~0 marks "no tunnel" (see the create/del paths). */
36 static u32 *mpls_tunnel_db;
39 * @brief MPLS tunnel flags strings
41 static const char *mpls_tunnel_attribute_names[] = MPLS_TUNNEL_ATTRIBUTES;
44 * @brief Packet trace structure
46 typedef struct mpls_tunnel_trace_t_
49 * Tunnel-id / index in tunnel vector
/* NOTE(review): the member declaration (presumably `u32 tunnel_id;`, used by
 * the trace formatter below) is missing from this extraction — confirm. */
52 } mpls_tunnel_trace_t;
/* Trace formatter for the mpls-tunnel-tx node: prints the tunnel id recorded
 * at enqueue time. (Extraction gap: the `static u8 *` return line, the va_list
 * argument, braces and `return s;` are missing — code left byte-identical.) */
55 format_mpls_tunnel_tx_trace (u8 * s,
58 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
59 CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
60 mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *);
62 s = format (s, "MPLS: tunnel %d", t->tunnel_id);
/* Next-node indices for the (currently commented-out) encap next_nodes table
 * in the node registration below. */
68 MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN,
69 MPLS_TUNNEL_ENCAP_N_NEXT,
70 } mpls_tunnel_encap_next_t;
73 * @brief TX function. Only called L2. L3 traffic uses the adj-midchains
/* L2 TX path for MPLS tunnels: for each buffer, look up the owning tunnel via
 * the sw_if_index -> tunnel DB, stack the packet on the tunnel's L2
 * load-balance DPO (mt_l2_lb), and enqueue to that DPO's next node.
 * Structure: a dual-buffer loop followed by a single-buffer cleanup loop —
 * the loop headers/footers and n_left updates are missing from this
 * extraction (embedded line numbers jump), so code is left byte-identical. */
75 VLIB_NODE_FN (mpls_tunnel_tx) (vlib_main_t * vm,
76 vlib_node_runtime_t * node,
79 u32 *from = vlib_frame_vector_args (frame);
80 vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
81 u16 nexts[VLIB_FRAME_SIZE], *next;
84 n_left = frame->n_vectors;
88 vlib_get_buffers (vm, from, bufs, n_left);
/* --- dual-buffer loop body --- */
92 const mpls_tunnel_t *mt0, *mt1;
93 u32 sw_if_index0, sw_if_index1;
95 sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX];
96 sw_if_index1 = vnet_buffer(b[1])->sw_if_index[VLIB_TX];
98 mt0 = pool_elt_at_index(mpls_tunnel_pool,
99 mpls_tunnel_db[sw_if_index0]);
100 mt1 = pool_elt_at_index(mpls_tunnel_pool,
101 mpls_tunnel_db[sw_if_index1]);
103 vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index;
104 vnet_buffer(b[1])->ip.adj_index[VLIB_TX] = mt1->mt_l2_lb.dpoi_index;
105 next[0] = mt0->mt_l2_lb.dpoi_next_node;
106 next[1] = mt1->mt_l2_lb.dpoi_next_node;
108 /* since we are coming out of the L2 world, where the vlib_buffer
109 * union is used for other things, make sure it is clean for
112 vnet_buffer(b[0])->mpls.first = 0;
113 vnet_buffer(b[1])->mpls.first = 0;
115 if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED))
117 mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
119 tr->tunnel_id = mpls_tunnel_db[sw_if_index0];
121 if (PREDICT_FALSE(b[1]->flags & VLIB_BUFFER_IS_TRACED))
123 mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
125 tr->tunnel_id = mpls_tunnel_db[sw_if_index1];
/* --- single-buffer leftover loop body --- */
134 const mpls_tunnel_t *mt0;
137 sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX];
138 mt0 = pool_elt_at_index(mpls_tunnel_pool,
139 mpls_tunnel_db[sw_if_index0]);
141 vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index;
142 next[0] = mt0->mt_l2_lb.dpoi_next_node;
144 /* since we are coming out of the L2 world, where the vlib_buffer
145 * union is used for other things, make sure it is clean for
148 vnet_buffer(b[0])->mpls.first = 0;
150 if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED))
152 mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node,
154 tr->tunnel_id = mpls_tunnel_db[sw_if_index0];
162 vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
164 return frame->n_vectors;
/* Graph-node registration for the L2 TX function above. The next_nodes table
 * is intentionally commented out: next indices are taken at runtime from the
 * stacked DPO (mt_l2_lb.dpoi_next_node), not from a static table. */
167 VLIB_REGISTER_NODE (mpls_tunnel_tx) =
169 .name = "mpls-tunnel-tx",
170 .vector_size = sizeof (u32),
171 .format_trace = format_mpls_tunnel_tx_trace,
172 .type = VLIB_NODE_TYPE_INTERNAL,
175 /* MPLS_TUNNEL_ENCAP_N_NEXT, */
176 /* .next_nodes = { */
177 /* [MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN] = "mpls-load-balance", */
182 * @brief Get a tunnel object from a SW interface index
/* Returns the tunnel for sw_if_index, or (per the guard below, whose
 * `return NULL;` line is missing from this extraction) NULL when the index is
 * out of range of the DB or maps to the ~0 "no tunnel" sentinel. */
184 static mpls_tunnel_t*
185 mpls_tunnel_get_from_sw_if_index (u32 sw_if_index)
187 if ((vec_len(mpls_tunnel_db) <= sw_if_index) ||
188 (~0 == mpls_tunnel_db[sw_if_index]))
191 return (pool_elt_at_index(mpls_tunnel_pool,
192 mpls_tunnel_db[sw_if_index]));
196 * @brief Build a rewrite string for the MPLS tunnel.
/* Returns a valid but zero-length rewrite vector: a non-NULL rewrite tells
 * the adj code "resolved, but no encap bytes here" — the label stack is
 * imposed later when a path is chosen. */
199 mpls_tunnel_build_rewrite_i (void)
202 * passing the adj code a NULL rewrite means 'i don't have one cos
203 * t'other end is unresolved'. That's not the case here. For the mpls
204 * tunnel there are just no bytes of encap to apply in the adj. We'll impose
205 * the label stack once we choose a path. So return a zero length rewrite.
209 vec_validate(rewrite, 0);
210 vec_reset_length(rewrite);
216 * @brief Build a rewrite string for the MPLS tunnel.
/* vnet_hw_interface_class build_rewrite callback — link type and destination
 * are irrelevant here, so it just delegates to the internal helper. */
219 mpls_tunnel_build_rewrite (vnet_main_t * vnm,
221 vnet_link_t link_type,
222 const void *dst_address)
224 return (mpls_tunnel_build_rewrite_i());
/* Walk context: accumulates one load_balance_path_t per resolved path,
 * built from the tunnel's path extensions for the requested chain type. */
227 typedef struct mpls_tunnel_collect_forwarding_ctx_t_
229 load_balance_path_t * next_hops;
230 const mpls_tunnel_t *mt;
231 fib_forward_chain_type_t fct;
232 } mpls_tunnel_collect_forwarding_ctx_t;
/* fib_path_list walk callback: skips unresolved paths, finds the path's
 * extension (label stack), and stacks it onto ctx->next_hops. Always
 * continues the walk. */
234 static fib_path_list_walk_rc_t
235 mpls_tunnel_collect_forwarding (fib_node_index_t pl_index,
236 fib_node_index_t path_index,
239 mpls_tunnel_collect_forwarding_ctx_t *ctx;
240 fib_path_ext_t *path_ext;
245 * if the path is not resolved, don't include it.
247 if (!fib_path_is_resolved(path_index))
249 return (FIB_PATH_LIST_WALK_CONTINUE);
253 * get the matching path-extension for the path being visited.
255 path_ext = fib_path_ext_list_find_by_path_index(&ctx->mt->mt_path_exts,
/* NOTE(review): the NULL-check on path_ext (if any) is missing from this
 * extraction — confirm against the full source before relying on it. */
259 * we don't want IP TTL decrements for packets hitting the MPLS labels
260 * we stack on, since the IP TTL decrement is done by the adj
262 path_ext->fpe_mpls_flags |= FIB_PATH_EXT_MPLS_FLAG_NO_IP_TTL_DECR;
265 * found a matching extension. stack it to obtain the forwarding
266 * info for this path.
268 ctx->next_hops = fib_path_ext_stack(path_ext,
273 return (FIB_PATH_LIST_WALK_CONTINUE);
/* Build the tunnel's forwarding DPO for chain type `fct`: collect the
 * per-path forwarding (via the walk above), then create/update either a
 * replicate DPO (multicast tunnels) or a load-balance DPO (unicast),
 * choosing MPLS vs IP flow-hash defaults by chain type. The output dpo_lb
 * parameter line and several control-flow lines are missing from this
 * extraction; code is left byte-identical. */
277 mpls_tunnel_mk_lb (mpls_tunnel_t *mt,
279 fib_forward_chain_type_t fct,
282 dpo_proto_t lb_proto;
285 * If the entry has path extensions then we construct a load-balance
286 * by stacking the extensions on the forwarding chains of the paths.
287 * Otherwise we use the load-balance of the path-list
289 mpls_tunnel_collect_forwarding_ctx_t ctx = {
296 * As an optimisation we allocate the vector of next-hops to be sized
297 * equal to the maximum number of paths we will need, which is also the
298 * most likely number we will need, since in most cases the paths are 'up'.
300 vec_validate(ctx.next_hops, fib_path_list_get_n_paths(mt->mt_path_list));
301 vec_reset_length(ctx.next_hops);
303 lb_proto = fib_forw_chain_type_to_dpo_proto(fct);
305 if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
307 fib_path_list_walk(mt->mt_path_list,
308 mpls_tunnel_collect_forwarding,
/* First use: create the DPO (replicate for mcast, load-balance otherwise). */
312 if (!dpo_id_is_valid(dpo_lb))
317 if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
322 replicate_create(0, lb_proto));
326 flow_hash_config_t fhc;
331 fhc = MPLS_FLOW_HASH_DEFAULT;
335 fhc = IP_FLOW_HASH_DEFAULT;
345 load_balance_create(0, lb_proto, fhc));
/* Then (re)populate it with the collected next-hops. */
349 if (mt->mt_flags & MPLS_TUNNEL_FLAG_MCAST)
354 replicate_multipath_update(dpo_lb, ctx.next_hops);
358 load_balance_multipath_update(dpo_lb,
360 LOAD_BALANCE_FLAG_NONE);
361 vec_free(ctx.next_hops);
368 * 'stack' (resolve the recursion for) the tunnel's midchain adjacency
/* Resolve midchain adjacency `ai` onto the tunnel's forwarding: build the
 * LB/replicate DPO (when the HW link is up) and stack the midchain on it,
 * otherwise unstack (drop). The tunnel is temporarily removed from the
 * path-list's child list to break a walk-update cycle. */
371 mpls_tunnel_stack (adj_index_t ai)
378 sw_if_index = adj->rewrite_header.sw_if_index;
380 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
/* NOTE(review): the next two conditions both test mt_path_list, which makes
 * the second look unreachable as shown; lines are missing between them
 * (embedded numbering jumps 382 -> 385), so the first is presumably just
 * `NULL == mt` upstream — confirm against the full source. */
382 if (NULL == mt || FIB_NODE_INDEX_INVALID == mt->mt_path_list)
385 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
387 adj_nbr_midchain_unstack(ai);
392 * while we're stacking the adj, remove the tunnel from the child list
393 * of the path list. this breaks a circular dependency of walk updates
394 * where the create of adjacencies in the children can lead to walks
395 * that get back here.
397 fib_path_list_lock(mt->mt_path_list);
399 fib_path_list_child_remove(mt->mt_path_list,
400 mt->mt_sibling_index);
403 * Construct the DPO (load-balance or replicate) that we can stack
404 * the tunnel's midchain on
406 if (vnet_hw_interface_get_flags(vnet_get_main(),
407 mt->mt_hw_if_index) &
408 VNET_HW_INTERFACE_FLAG_LINK_UP)
410 dpo_id_t dpo = DPO_INVALID;
412 mpls_tunnel_mk_lb(mt,
414 fib_forw_chain_type_from_link_type(
415 adj_get_link_type(ai)),
418 adj_nbr_midchain_stack(ai, &dpo);
/* Link down: leave the midchain unstacked so traffic drops. */
423 adj_nbr_midchain_unstack(ai);
/* Re-attach as a child of the path-list now that stacking is complete. */
426 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
427 FIB_NODE_TYPE_MPLS_TUNNEL,
428 mt - mpls_tunnel_pool);
430 fib_path_list_unlock(mt->mt_path_list);
434 * @brief Call back when restacking all adjacencies on a MPLS interface
/* adj_nbr_walk callback: restack each adjacency and keep walking. */
437 mpls_adj_walk_cb (adj_index_t ai,
440 mpls_tunnel_stack(ai);
442 return (ADJ_WALK_RC_CONTINUE);
/* Re-resolve all of a tunnel's forwarding after a path/link change:
 * L2 tunnels rebuild mt_l2_lb and stack it from the mpls-tunnel-tx node;
 * L3 tunnels walk every neighbour adjacency per IP protocol and restack. */
446 mpls_tunnel_restack (mpls_tunnel_t *mt)
448 fib_protocol_t proto;
451 * walk all the adjacencies on the MPLS interface and restack them
453 if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
456 * Stack a load-balance that drops, whilst we have no paths
458 dpo_id_t dpo = DPO_INVALID;
460 mpls_tunnel_mk_lb(mt,
462 FIB_FORW_CHAIN_TYPE_ETHERNET,
465 dpo_stack_from_node(mpls_tunnel_tx.index,
472 FOR_EACH_FIB_IP_PROTOCOL(proto)
474 adj_nbr_walk(mt->mt_sw_if_index,
/* Admin up/down handler: mirror the admin flag onto the HW link state, then
 * restack the tunnel so forwarding follows the new link state (link-up is a
 * precondition checked in mpls_tunnel_stack). Returns NULL (no error) — the
 * return line is missing from this extraction. */
482 static clib_error_t *
483 mpls_tunnel_admin_up_down (vnet_main_t * vnm,
487 vnet_hw_interface_t * hi;
490 hi = vnet_get_hw_interface (vnm, hw_if_index);
492 mt = mpls_tunnel_get_from_sw_if_index(hi->sw_if_index);
497 if (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP)
498 vnet_hw_interface_set_flags (vnm, hw_if_index,
499 VNET_HW_INTERFACE_FLAG_LINK_UP);
501 vnet_hw_interface_set_flags (vnm, hw_if_index, 0 /* down */);
503 mpls_tunnel_restack(mt);
509 * @brief Fixup the adj rewrite post encap. This is a no-op since the
510 * rewrite is a stack of labels.
/* Midchain fixup callback: writes nothing to the header; only clears the
 * "MPLS label already pushed" marker so the first label pushed inside the
 * tunnel gets a fresh TTL. */
513 mpls_tunnel_fixup (vlib_main_t *vm,
514 const ip_adjacency_t *adj,
519 * A no-op w.r.t. the header. but reset the 'have we pushed any
520 * MPLS labels onto the packet' flag. That way when we enter the
521 * tunnel we'll get a TTL set to 255
523 vnet_buffer(b0)->mpls.first = 0;
/* update_adjacency callback for the tunnel HW class: convert freshly created
 * adjacencies into midchains (nbr or mcast variants) with the zero-length
 * rewrite and the fixup above, then resolve them via mpls_tunnel_stack().
 * Cases that should never occur on a tunnel fall through to the ASSERT arm
 * (the ASSERT/break lines are missing from this extraction). */
527 mpls_tunnel_update_adj (vnet_main_t * vnm,
533 ASSERT(ADJ_INDEX_INVALID != ai);
537 switch (adj->lookup_next_index)
539 case IP_LOOKUP_NEXT_ARP:
540 case IP_LOOKUP_NEXT_GLEAN:
541 case IP_LOOKUP_NEXT_BCAST:
542 adj_nbr_midchain_update_rewrite(ai, mpls_tunnel_fixup,
545 mpls_tunnel_build_rewrite_i());
547 case IP_LOOKUP_NEXT_MCAST:
549 * Construct a partial rewrite from the known ethernet mcast dest MAC
550 * There's no MAC fixup, so the last 2 parameters are 0
552 adj_mcast_midchain_update_rewrite(ai, mpls_tunnel_fixup,
555 mpls_tunnel_build_rewrite_i(),
559 case IP_LOOKUP_NEXT_DROP:
560 case IP_LOOKUP_NEXT_PUNT:
561 case IP_LOOKUP_NEXT_LOCAL:
562 case IP_LOOKUP_NEXT_REWRITE:
563 case IP_LOOKUP_NEXT_MIDCHAIN:
564 case IP_LOOKUP_NEXT_MCAST_MIDCHAIN:
565 case IP_LOOKUP_NEXT_ICMP_ERROR:
566 case IP_LOOKUP_N_NEXT:
571 mpls_tunnel_stack(ai);
/* Interface-name formatter: "mpls-tunnel<dev_instance>". */
575 format_mpls_tunnel_name (u8 * s, va_list * args)
577 u32 dev_instance = va_arg (*args, u32);
578 return format (s, "mpls-tunnel%d", dev_instance);
/* One-line device dump used by "show hardware"; verbose flag is unused. */
582 format_mpls_tunnel_device (u8 * s, va_list * args)
584 u32 dev_instance = va_arg (*args, u32);
585 CLIB_UNUSED (int verbose) = va_arg (*args, int);
587 return (format (s, "MPLS-tunnel: id %d\n", dev_instance));
/* Device-class registration wiring the formatters and admin handler above. */
590 VNET_DEVICE_CLASS (mpls_tunnel_class) = {
591 .name = "MPLS tunnel device",
592 .format_device_name = format_mpls_tunnel_name,
593 .format_device = format_mpls_tunnel_device,
594 .format_tx_trace = format_mpls_tunnel_tx_trace,
595 .admin_up_down_function = mpls_tunnel_admin_up_down,
/* HW-interface class: point-to-point, with the adjacency/rewrite hooks. */
598 VNET_HW_INTERFACE_CLASS (mpls_tunnel_hw_interface_class) = {
599 .name = "MPLS-Tunnel",
600 .update_adjacency = mpls_tunnel_update_adj,
601 .build_rewrite = mpls_tunnel_build_rewrite,
602 .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
/* Look up a tunnel by pool index. No bounds check beyond the pool macro's
 * own assertion — callers pass indices obtained from this module. */
605 const mpls_tunnel_t *
606 mpls_tunnel_get (u32 mti)
608 return (pool_elt_at_index(mpls_tunnel_pool, mti));
612 * @brief Walk all the MPLS tunnels
/* Invokes `cb` for every allocated tunnel index; the callback-invocation
 * line is missing from this extraction. */
615 mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb,
620 pool_foreach_index (mti, mpls_tunnel_pool)
/* Tear down the tunnel owning sw_if_index: detach from its path-list,
 * release the L2 DPO, remove interface plumbing, return the pool element and
 * invalidate the DB slot. The not-found early return after the lookup is
 * missing from this extraction. */
627 vnet_mpls_tunnel_del (u32 sw_if_index)
631 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
636 if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
637 fib_path_list_child_remove(mt->mt_path_list,
638 mt->mt_sibling_index);
639 dpo_reset(&mt->mt_l2_lb);
641 vnet_reset_interface_l3_output_node (vlib_get_main (), mt->mt_sw_if_index);
642 vnet_delete_hw_interface (vnet_get_main(), mt->mt_hw_if_index);
644 pool_put(mpls_tunnel_pool, mt);
/* ~0 marks the DB slot free again (see mpls_tunnel_get_from_sw_if_index). */
645 mpls_tunnel_db[sw_if_index] = ~0;
/* Create a new tunnel with no paths yet: allocate and zero a pool element,
 * init its FIB node, register a HW interface of the classes above, pick the
 * output path (mpls-tunnel-tx for L2, the "tunnel-output" L3 output node
 * otherwise), set a default MTU, and record sw_if_index -> pool index in the
 * DB. Returns the new sw_if_index. (Parameter lines for is_multicast/tag and
 * some if/else framing are missing from this extraction.) */
649 vnet_mpls_tunnel_create (u8 l2_only,
653 vnet_hw_interface_t * hi;
658 vnm = vnet_get_main();
659 pool_get(mpls_tunnel_pool, mt);
660 clib_memset (mt, 0, sizeof (*mt));
661 mti = mt - mpls_tunnel_pool;
662 fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL);
663 mt->mt_path_list = FIB_NODE_INDEX_INVALID;
664 mt->mt_sibling_index = FIB_NODE_INDEX_INVALID;
667 mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST;
669 mt->mt_flags |= MPLS_TUNNEL_FLAG_L2;
671 memcpy(mt->mt_tag, tag, sizeof(mt->mt_tag));
673 mt->mt_tag[0] = '\0';
676 * Create a new tunnel HW interface
678 mt->mt_hw_if_index = vnet_register_interface(
680 mpls_tunnel_class.index,
682 mpls_tunnel_hw_interface_class.index,
684 hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);
686 if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
687 vnet_set_interface_output_node (vnm, mt->mt_hw_if_index,
688 mpls_tunnel_tx.index);
690 vnet_set_interface_l3_output_node (vnm->vlib_main, hi->sw_if_index,
691 (u8 *) "tunnel-output");
693 /* Standard default MPLS tunnel MTU. */
694 vnet_sw_interface_set_mtu (vnm, hi->sw_if_index, 9000);
697 * Add the new tunnel to the tunnel DB - key:SW if index
699 mt->mt_sw_if_index = hi->sw_if_index;
700 vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0);
701 mpls_tunnel_db[mt->mt_sw_if_index] = mti;
703 return (mt->mt_sw_if_index);
/* Add paths to an existing tunnel. First path: create a shared path-list and
 * register the tunnel as its child. Subsequent paths: copy-and-add into a new
 * path-list, migrate the child registration, and re-resolve the path
 * extensions (label stacks) against it. Finally restack forwarding. */
707 vnet_mpls_tunnel_path_add (u32 sw_if_index,
708 fib_route_path_t *rpaths)
710 fib_route_path_t *rpath;
714 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
719 mti = mt - mpls_tunnel_pool;
722 * construct a path-list from the path provided
724 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
726 mt->mt_path_list = fib_path_list_create(FIB_PATH_LIST_FLAG_SHARED, rpaths);
727 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
728 FIB_NODE_TYPE_MPLS_TUNNEL,
733 fib_node_index_t old_pl_index;
735 old_pl_index = mt->mt_path_list;
738 fib_path_list_copy_and_path_add(old_pl_index,
739 FIB_PATH_LIST_FLAG_SHARED,
742 fib_path_list_child_remove(old_pl_index,
743 mt->mt_sibling_index);
744 mt->mt_sibling_index = fib_path_list_child_add(mt->mt_path_list,
745 FIB_NODE_TYPE_MPLS_TUNNEL,
748 * re-resolve all the path-extensions with the new path-list
750 fib_path_ext_list_resolve(&mt->mt_path_exts, mt->mt_path_list);
/* Record a path extension (the out-label stack) for each new path. */
752 vec_foreach(rpath, rpaths)
754 fib_path_ext_list_insert(&mt->mt_path_exts,
759 mpls_tunnel_restack(mt);
/* Remove paths from a tunnel. Copy-and-remove into a new path-list; if that
 * leaves no path-list the old one is simply unlocked, otherwise the child
 * registration is migrated, the matching path extension removed, extensions
 * re-resolved and forwarding restacked. Returns the number of paths left
 * (the CLI uses 0 as "delete the tunnel"). */
763 vnet_mpls_tunnel_path_remove (u32 sw_if_index,
764 fib_route_path_t *rpaths)
769 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
774 mti = mt - mpls_tunnel_pool;
777 * construct a path-list from the path provided
779 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
781 /* can't remove a path if we have one */
786 fib_node_index_t old_pl_index;
788 old_pl_index = mt->mt_path_list;
/* Hold a lock on the old list so it survives until we are done with it. */
790 fib_path_list_lock(old_pl_index);
792 fib_path_list_copy_and_path_remove(old_pl_index,
793 FIB_PATH_LIST_FLAG_SHARED,
796 fib_path_list_child_remove(old_pl_index,
797 mt->mt_sibling_index);
799 if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
802 fib_path_list_unlock(old_pl_index);
807 mt->mt_sibling_index =
808 fib_path_list_child_add(mt->mt_path_list,
809 FIB_NODE_TYPE_MPLS_TUNNEL,
813 * find the matching path extension and remove it
815 fib_path_ext_list_remove(&mt->mt_path_exts,
820 * re-resolve all the path-extensions with the new path-list
822 fib_path_ext_list_resolve(&mt->mt_path_exts,
825 mpls_tunnel_restack(mt);
826 fib_path_list_unlock(old_pl_index);
829 return (fib_path_list_get_n_paths(mt->mt_path_list));
/* Map a sw_if_index to the tunnel's pool index; the not-found early return
 * between lookup and return is missing from this extraction. */
833 vnet_mpls_tunnel_get_index (u32 sw_if_index)
837 mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
842 return (mt - mpls_tunnel_pool);
/* CLI handler for "mpls tunnel": parses add/del, l2-only, multicast and one
 * or more "via" route-paths, then either deletes the tunnel (del with no
 * paths, or a path-remove that empties it), or creates the tunnel on first
 * add and appends the parsed paths. Requires at least one out-label on add.
 * Several control-flow lines (goto done targets, is_del branch framing) are
 * missing from this extraction; code left byte-identical. */
845 static clib_error_t *
846 vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm,
847 unformat_input_t * input,
848 vlib_cli_command_t * cmd)
850 unformat_input_t _line_input, * line_input = &_line_input;
851 vnet_main_t * vnm = vnet_get_main();
852 u8 is_del = 0, l2_only = 0, is_multicast =0;
853 fib_route_path_t rpath, *rpaths = NULL;
854 u32 sw_if_index = ~0, payload_proto;
855 clib_error_t *error = NULL;
857 clib_memset(&rpath, 0, sizeof(rpath));
858 payload_proto = DPO_PROTO_MPLS;
860 /* Get a line of input. */
861 if (! unformat_user (input, unformat_line_input, line_input))
864 while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
866 if (unformat (line_input, "del %U",
867 unformat_vnet_sw_interface, vnm,
870 else if (unformat (line_input, "add %U",
871 unformat_vnet_sw_interface, vnm,
874 else if (unformat (line_input, "add"))
876 else if (unformat (line_input, "l2-only"))
878 else if (unformat (line_input, "multicast"))
880 else if (unformat (line_input, "via %U",
881 unformat_fib_route_path,
882 &rpath, &payload_proto))
883 vec_add1(rpaths, rpath);
886 error = clib_error_return (0, "unknown input '%U'",
887 format_unformat_error, line_input);
/* del with no via-paths removes the tunnel outright ... */
896 vnet_mpls_tunnel_del(sw_if_index);
/* ... otherwise remove the paths, and delete when none remain. */
898 else if (!vnet_mpls_tunnel_path_remove(sw_if_index, rpaths))
900 vnet_mpls_tunnel_del(sw_if_index);
905 if (0 == vec_len(rpath.frp_label_stack))
907 error = clib_error_return (0, "No Output Labels '%U'",
908 format_unformat_error, line_input);
912 if (~0 == sw_if_index)
914 sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast, NULL);
916 vnet_mpls_tunnel_path_add(sw_if_index, rpaths);
921 unformat_free (line_input);
927 * This command creates a uni-directional MPLS tunnel
930 * @cliexstart{create mpls tunnel}
931 * create mpls tunnel via 10.0.0.1 GigEthernet0/8/0 out-label 33 out-label 34
/* CLI registration for the handler above. */
934 VLIB_CLI_COMMAND (create_mpls_tunnel_command, static) = {
935 .path = "mpls tunnel",
937 "mpls tunnel [multicast] [l2-only] via [next-hop-address] [next-hop-interface] [next-hop-table <value>] [weight <value>] [preference <value>] [udp-encap-id <value>] [ip4-lookup-in-table <value>] [ip6-lookup-in-table <value>] [mpls-lookup-in-table <value>] [resolve-via-host] [resolve-via-connected] [rx-ip4 <interface>] [out-labels <value value value>]",
938 .function = vnet_create_mpls_tunnel_command_fn,
/* Human-readable dump of one tunnel: indices, flag names, the path-list and
 * path extensions, plus the L2 forwarding DPO for L2 tunnels. Used by the
 * "show mpls tunnel" CLI below. */
942 format_mpls_tunnel (u8 * s, va_list * args)
944 mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *);
945 mpls_tunnel_attribute_t attr;
947 s = format(s, "mpls-tunnel%d: sw_if_index:%d hw_if_index:%d",
948 mt - mpls_tunnel_pool,
/* Print each set attribute flag by name (bit i <-> attribute_names[i]). */
951 if (MPLS_TUNNEL_FLAG_NONE != mt->mt_flags) {
952 s = format(s, " \n flags:");
953 FOR_EACH_MPLS_TUNNEL_ATTRIBUTE(attr) {
954 if ((1<<attr) & mt->mt_flags) {
955 s = format (s, "%s,", mpls_tunnel_attribute_names[attr]);
959 s = format(s, "\n via:\n");
960 s = fib_path_list_format(mt->mt_path_list, s);
961 s = format(s, "%U", format_fib_path_ext_list, &mt->mt_path_exts);
964 if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2)
966 s = format(s, " forwarding: %U\n",
967 format_fib_forw_chain_type,
968 FIB_FORW_CHAIN_TYPE_ETHERNET);
969 s = format(s, " %U\n", format_dpo_id, &mt->mt_l2_lb, 2);
/* CLI handler for "show mpls tunnel [index]": with no index, dump every
 * tunnel in the pool; with an index, validate it against the pool and dump
 * that one tunnel. */
975 static clib_error_t *
976 show_mpls_tunnel_command_fn (vlib_main_t * vm,
977 unformat_input_t * input,
978 vlib_cli_command_t * cmd)
983 if (pool_elts (mpls_tunnel_pool) == 0)
984 vlib_cli_output (vm, "No MPLS tunnels configured...");
986 while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
988 if (unformat (input, "%d", &mti))
/* No index supplied: dump them all. */
996 pool_foreach (mt, mpls_tunnel_pool)
998 vlib_cli_output (vm, "[@%d] %U",
999 mt - mpls_tunnel_pool,
1000 format_mpls_tunnel, mt);
1005 if (pool_is_free_index(mpls_tunnel_pool, mti))
1006 return clib_error_return (0, "Not a tunnel index %d", mti);
1008 mt = pool_elt_at_index(mpls_tunnel_pool, mti);
1010 vlib_cli_output (vm, "[@%d] %U",
1011 mt - mpls_tunnel_pool,
1012 format_mpls_tunnel, mt);
1019 * This command shows MPLS tunnels
1022 * @cliexstart{sh mpls tunnel 2}
1023 * [@2] mpls_tunnel2: sw_if_index:5 hw_if_index:5
1027 * index:26 locks:1 proto:ipv4 uPRF-list:26 len:1 itfs:[2, ]
1028 * index:26 pl-index:26 ipv4 weight=1 attached-nexthop: oper-flags:resolved,
1030 * [@0]: ipv4 via 10.0.0.2 loop0: IP4: de:ad:00:00:00:00 -> 00:00:11:aa:bb:cc
/* CLI registration for the show handler above. */
1033 VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = {
1034 .path = "show mpls tunnel",
1035 .function = show_mpls_tunnel_command_fn,
/* Recover the enclosing mpls_tunnel_t from its embedded fib_node_t member
 * (container-of via STRUCT_OFFSET_OF); asserts the node type first. */
1038 static mpls_tunnel_t *
1039 mpls_tunnel_from_fib_node (fib_node_t *node)
1041 ASSERT(FIB_NODE_TYPE_MPLS_TUNNEL == node->fn_type);
1042 return ((mpls_tunnel_t*) (((char*)node) -
1043 STRUCT_OFFSET_OF(mpls_tunnel_t, mt_node)));
1047 * Function definition to backwalk a FIB node
/* FIB back-walk: a change below the tunnel's path-list propagates up here,
 * so simply restack the tunnel's forwarding and continue the walk. */
1049 static fib_node_back_walk_rc_t
1050 mpls_tunnel_back_walk (fib_node_t *node,
1051 fib_node_back_walk_ctx_t *ctx)
1053 mpls_tunnel_restack(mpls_tunnel_from_fib_node(node));
1055 return (FIB_NODE_BACK_WALK_CONTINUE);
1059 * Function definition to get a FIB node from its index
/* fib_node_vft get: tunnel pool index -> embedded fib_node_t. */
1062 mpls_tunnel_fib_node_get (fib_node_index_t index)
1066 mt = pool_elt_at_index(mpls_tunnel_pool, index);
1068 return (&mt->mt_node);
1072 * Function definition to inform the FIB node that its last lock has gone.
/* Intentionally empty — see the comment below. */
1075 mpls_tunnel_last_lock_gone (fib_node_t *node)
1078 * The MPLS tunnel is a root of the graph. As such
1079 * it never has children and thus is never locked.
1085 * Virtual function table registered by MPLS tunnels
1086 * for participation in the FIB object graph.
1088 const static fib_node_vft_t mpls_vft = {
1089 .fnv_get = mpls_tunnel_fib_node_get,
1090 .fnv_last_lock = mpls_tunnel_last_lock_gone,
1091 .fnv_back_walk = mpls_tunnel_back_walk,
/* Init: register the tunnel's FIB node type so back-walks reach us. */
1094 static clib_error_t *
1095 mpls_tunnel_init (vlib_main_t *vm)
1097 fib_node_register_type(FIB_NODE_TYPE_MPLS_TUNNEL, &mpls_vft);
1101 VLIB_INIT_FUNCTION(mpls_tunnel_init);