X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fmpls%2Fmpls_tunnel.c;h=54458eacdf8483af7ae813f2b14353148fb38845;hb=a91cb4590;hp=d6e85e70b440b7cd6ec650120a4b33a0384ef329;hpb=8142499cd1cb3b8d0168d0e6cf5309c5b4813cc4;p=vpp.git diff --git a/src/vnet/mpls/mpls_tunnel.c b/src/vnet/mpls/mpls_tunnel.c index d6e85e70b44..54458eacdf8 100644 --- a/src/vnet/mpls/mpls_tunnel.c +++ b/src/vnet/mpls/mpls_tunnel.c @@ -16,7 +16,6 @@ */ #include -#include #include #include #include @@ -31,11 +30,6 @@ */ static mpls_tunnel_t *mpls_tunnel_pool; -/** - * @brief Pool of free tunnel SW indices - i.e. recycled indices - */ -static u32 * mpls_tunnel_free_hw_if_indices; - /** * @brief DB of SW index to tunnel index */ @@ -46,13 +40,151 @@ static u32 *mpls_tunnel_db; */ static const char *mpls_tunnel_attribute_names[] = MPLS_TUNNEL_ATTRIBUTES; +/** + * @brief Packet trace structure + */ +typedef struct mpls_tunnel_trace_t_ +{ + /** + * Tunnel-id / index in tunnel vector + */ + u32 tunnel_id; +} mpls_tunnel_trace_t; + +static u8 * +format_mpls_tunnel_tx_trace (u8 * s, + va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *); + + s = format (s, "MPLS: tunnel %d", t->tunnel_id); + return s; +} + +typedef enum +{ + MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN, + MPLS_TUNNEL_ENCAP_N_NEXT, +} mpls_tunnel_encap_next_t; + +/** + * @brief TX function. Only called L2. L3 traffic uses the adj-midchains + */ +VLIB_NODE_FN (mpls_tunnel_tx) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + u32 *from = vlib_frame_vector_args (frame); + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b; + u16 nexts[VLIB_FRAME_SIZE], *next; + u32 n_left; + + n_left = frame->n_vectors; + b = bufs; + next = nexts; + + vlib_get_buffers (vm, from, bufs, n_left); + + while (n_left > 2) + { + const mpls_tunnel_t *mt0, *mt1; + u32 sw_if_index0, sw_if_index1; + + sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX]; + sw_if_index1 = vnet_buffer(b[1])->sw_if_index[VLIB_TX]; + + mt0 = pool_elt_at_index(mpls_tunnel_pool, + mpls_tunnel_db[sw_if_index0]); + mt1 = pool_elt_at_index(mpls_tunnel_pool, + mpls_tunnel_db[sw_if_index1]); + + vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index; + vnet_buffer(b[1])->ip.adj_index[VLIB_TX] = mt1->mt_l2_lb.dpoi_index; + next[0] = mt0->mt_l2_lb.dpoi_next_node; + next[1] = mt1->mt_l2_lb.dpoi_next_node; + + /* since we are coming out of the L2 world, where the vlib_buffer + * union is used for other things, make sure it is clean for + * MPLS from now on. 
+ */ + vnet_buffer(b[0])->mpls.first = 0; + vnet_buffer(b[1])->mpls.first = 0; + + if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED)) + { + mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node, + b[0], sizeof (*tr)); + tr->tunnel_id = mpls_tunnel_db[sw_if_index0]; + } + if (PREDICT_FALSE(b[1]->flags & VLIB_BUFFER_IS_TRACED)) + { + mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node, + b[1], sizeof (*tr)); + tr->tunnel_id = mpls_tunnel_db[sw_if_index1]; + } + + b += 2; + n_left -= 2; + next += 2; + } + while (n_left) + { + const mpls_tunnel_t *mt0; + u32 sw_if_index0; + + sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX]; + mt0 = pool_elt_at_index(mpls_tunnel_pool, + mpls_tunnel_db[sw_if_index0]); + + vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index; + next[0] = mt0->mt_l2_lb.dpoi_next_node; + + /* since we are coming out of the L2 world, where the vlib_buffer + * union is used for other things, make sure it is clean for + * MPLS from now on. + */ + vnet_buffer(b[0])->mpls.first = 0; + + if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED)) + { + mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node, + b[0], sizeof (*tr)); + tr->tunnel_id = mpls_tunnel_db[sw_if_index0]; + } + + b += 1; + n_left -= 1; + next += 1; + } + + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); + + return frame->n_vectors; +} + +VLIB_REGISTER_NODE (mpls_tunnel_tx) = +{ + .name = "mpls-tunnel-tx", + .vector_size = sizeof (u32), + .format_trace = format_mpls_tunnel_tx_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = 0, + .n_next_nodes = 0, + /* MPLS_TUNNEL_ENCAP_N_NEXT, */ + /* .next_nodes = { */ + /* [MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN] = "mpls-load-balance", */ + /* }, */ +}; + /** * @brief Get a tunnel object from a SW interface index */ static mpls_tunnel_t* mpls_tunnel_get_from_sw_if_index (u32 sw_if_index) { - if ((vec_len(mpls_tunnel_db) < sw_if_index) || + if ((vec_len(mpls_tunnel_db) <= sw_if_index) || (~0 == mpls_tunnel_db[sw_if_index])) return (NULL); @@ -67,7 +199,7 @@ static u8* mpls_tunnel_build_rewrite_i (void) { /* - * passing the adj code a NULL rewirte means 'i don't have one cos + * passing the adj code a NULL rewrite means 'i don't have one cos * t'other end is unresolved'. That's not the case here. For the mpls * tunnel there are just no bytes of encap to apply in the adj. We'll impose * the label stack once we choose a path. So return a zero length rewrite. @@ -123,23 +255,20 @@ mpls_tunnel_collect_forwarding (fib_node_index_t pl_index, path_ext = fib_path_ext_list_find_by_path_index(&ctx->mt->mt_path_exts, path_index); - if (NULL != path_ext) - { - /* - * found a matching extension. stack it to obtain the forwarding - * info for this path. - */ - ctx->next_hops = fib_path_ext_stack(path_ext, - ctx->fct, - ctx->fct, - ctx->next_hops); - } - else - ASSERT(0); /* - * else - * There should be a path-extenios associated with each path + * we don't want IP TTL decrements for packets hitting the MPLS labels + * we stack on, since the IP TTL decrement is done by the adj + */ + path_ext->fpe_mpls_flags |= FIB_PATH_EXT_MPLS_FLAG_NO_IP_TTL_DECR; + + /* + * found a matching extension. stack it to obtain the forwarding + * info for this path. 
*/ + ctx->next_hops = fib_path_ext_stack(path_ext, + ctx->fct, + ctx->fct, + ctx->next_hops); return (FIB_PATH_LIST_WALK_CONTINUE); } @@ -171,11 +300,14 @@ mpls_tunnel_mk_lb (mpls_tunnel_t *mt, vec_validate(ctx.next_hops, fib_path_list_get_n_paths(mt->mt_path_list)); vec_reset_length(ctx.next_hops); - lb_proto = vnet_link_to_dpo_proto(linkt); + lb_proto = fib_forw_chain_type_to_dpo_proto(fct); - fib_path_list_walk(mt->mt_path_list, - mpls_tunnel_collect_forwarding, - &ctx); + if (FIB_NODE_INDEX_INVALID != mt->mt_path_list) + { + fib_path_list_walk(mt->mt_path_list, + mpls_tunnel_collect_forwarding, + &ctx); + } if (!dpo_id_is_valid(dpo_lb)) { @@ -247,9 +379,15 @@ mpls_tunnel_stack (adj_index_t ai) mt = mpls_tunnel_get_from_sw_if_index(sw_if_index); - if (NULL == mt) + if (NULL == mt || FIB_NODE_INDEX_INVALID == mt->mt_path_list) return; + if (FIB_NODE_INDEX_INVALID == mt->mt_path_list) + { + adj_nbr_midchain_unstack(ai); + return; + } + /* * while we're stacking the adj, remove the tunnel from the child list * of the path list. this breaks a circular dependency of walk updates @@ -273,7 +411,8 @@ mpls_tunnel_stack (adj_index_t ai) mpls_tunnel_mk_lb(mt, adj->ia_link, - FIB_FORW_CHAIN_TYPE_MPLS_EOS, + fib_forw_chain_type_from_link_type( + adj_get_link_type(ai)), &dpo); adj_nbr_midchain_stack(ai, &dpo); @@ -288,7 +427,7 @@ mpls_tunnel_stack (adj_index_t ai) FIB_NODE_TYPE_MPLS_TUNNEL, mt - mpls_tunnel_pool); - fib_path_list_lock(mt->mt_path_list); + fib_path_list_unlock(mt->mt_path_list); } /** @@ -311,12 +450,32 @@ mpls_tunnel_restack (mpls_tunnel_t *mt) /* * walk all the adjacencies on the MPLS interface and restack them */ - FOR_EACH_FIB_PROTOCOL(proto) + if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2) + { + /* + * Stack a load-balance that drops, whilst we have no paths + */ + dpo_id_t dpo = DPO_INVALID; + + mpls_tunnel_mk_lb(mt, + VNET_LINK_MPLS, + FIB_FORW_CHAIN_TYPE_ETHERNET, + &dpo); + + dpo_stack_from_node(mpls_tunnel_tx.index, + &mt->mt_l2_lb, + &dpo); + dpo_reset(&dpo); + } + else { - adj_nbr_walk(mt->mt_sw_if_index, - proto, - mpls_adj_walk_cb, - NULL); + FOR_EACH_FIB_IP_PROTOCOL(proto) + { + adj_nbr_walk(mt->mt_sw_if_index, + proto, + mpls_adj_walk_cb, + NULL); + } } } @@ -352,8 +511,9 @@ mpls_tunnel_admin_up_down (vnet_main_t * vnm, */ static void mpls_tunnel_fixup (vlib_main_t *vm, - ip_adjacency_t *adj, - vlib_buffer_t *b0) + const ip_adjacency_t *adj, + vlib_buffer_t *b0, + const void*data) { /* * A no-op w.r.t. the header. 
but reset the 'have we pushed any @@ -378,7 +538,9 @@ mpls_tunnel_update_adj (vnet_main_t * vnm, { case IP_LOOKUP_NEXT_ARP: case IP_LOOKUP_NEXT_GLEAN: + case IP_LOOKUP_NEXT_BCAST: adj_nbr_midchain_update_rewrite(ai, mpls_tunnel_fixup, + NULL, ADJ_FLAG_NONE, mpls_tunnel_build_rewrite_i()); break; @@ -388,6 +550,7 @@ mpls_tunnel_update_adj (vnet_main_t * vnm, * There's no MAC fixup, so the last 2 parameters are 0 */ adj_mcast_midchain_update_rewrite(ai, mpls_tunnel_fixup, + NULL, ADJ_FLAG_NONE, mpls_tunnel_build_rewrite_i(), 0, 0); @@ -424,108 +587,16 @@ format_mpls_tunnel_device (u8 * s, va_list * args) return (format (s, "MPLS-tunnel: id %d\n", dev_instance)); } -/** - * @brief Packet trace structure - */ -typedef struct mpls_tunnel_trace_t_ -{ - /** - * Tunnel-id / index in tunnel vector - */ - u32 tunnel_id; -} mpls_tunnel_trace_t; - -static u8 * -format_mpls_tunnel_tx_trace (u8 * s, - va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *); - - s = format (s, "MPLS: tunnel %d", t->tunnel_id); - return s; -} - -/** - * @brief TX function. Only called L2. L3 traffic uses the adj-midchains - */ -static uword -mpls_tunnel_tx (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) -{ - u32 next_index; - u32 * from, * to_next, n_left_from, n_left_to_next; - vnet_interface_output_runtime_t * rd = (void *) node->runtime_data; - const mpls_tunnel_t *mt; - - mt = pool_elt_at_index(mpls_tunnel_pool, rd->dev_instance); - - /* Vector of buffer / pkt indices we're supposed to process */ - from = vlib_frame_vector_args (frame); - - /* Number of buffers / pkts */ - n_left_from = frame->n_vectors; - - /* Speculatively send the first buffer to the last disposition we used */ - next_index = node->cached_next_index; - - while (n_left_from > 0) - { - /* set up to enqueue to our disposition with index = next_index */ - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - /* - * FIXME DUAL LOOP - */ - while (n_left_from > 0 && n_left_to_next > 0) - { - vlib_buffer_t * b0; - u32 bi0; - - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer(vm, bi0); - - vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_adj; - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - tr->tunnel_id = rd->dev_instance; - } - - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, mt->mt_l2_tx_arc); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - - return frame->n_vectors; -} - VNET_DEVICE_CLASS (mpls_tunnel_class) = { .name = "MPLS tunnel device", .format_device_name = format_mpls_tunnel_name, .format_device = format_mpls_tunnel_device, .format_tx_trace = format_mpls_tunnel_tx_trace, - .tx_function = mpls_tunnel_tx, .admin_up_down_function = mpls_tunnel_admin_up_down, }; VNET_HW_INTERFACE_CLASS (mpls_tunnel_hw_interface_class) = { .name = "MPLS-Tunnel", -// .format_header = format_mpls_eth_header_with_length, -// .unformat_header = unformat_mpls_eth_header, .update_adjacency = mpls_tunnel_update_adj, .build_rewrite = mpls_tunnel_build_rewrite, .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P, @@ -546,10 +617,10 @@ mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb, { u32 mti; - pool_foreach_index(mti, mpls_tunnel_pool, - ({ + 
pool_foreach_index (mti, mpls_tunnel_pool) + { cb(mti, ctx); - })); + } } void @@ -565,17 +636,18 @@ vnet_mpls_tunnel_del (u32 sw_if_index) if (FIB_NODE_INDEX_INVALID != mt->mt_path_list) fib_path_list_child_remove(mt->mt_path_list, mt->mt_sibling_index); - if (ADJ_INDEX_INVALID != mt->mt_l2_adj) - adj_unlock(mt->mt_l2_adj); + dpo_reset(&mt->mt_l2_lb); + + vnet_delete_hw_interface (vnet_get_main(), mt->mt_hw_if_index); - vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index); pool_put(mpls_tunnel_pool, mt); mpls_tunnel_db[sw_if_index] = ~0; } u32 vnet_mpls_tunnel_create (u8 l2_only, - u8 is_multicast) + u8 is_multicast, + u8 *tag) { vnet_hw_interface_t * hi; mpls_tunnel_t *mt; @@ -584,38 +656,38 @@ vnet_mpls_tunnel_create (u8 l2_only, vnm = vnet_get_main(); pool_get(mpls_tunnel_pool, mt); - memset (mt, 0, sizeof (*mt)); + clib_memset (mt, 0, sizeof (*mt)); mti = mt - mpls_tunnel_pool; fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL); - mt->mt_l2_adj = ADJ_INDEX_INVALID; mt->mt_path_list = FIB_NODE_INDEX_INVALID; mt->mt_sibling_index = FIB_NODE_INDEX_INVALID; if (is_multicast) mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST; + if (l2_only) + mt->mt_flags |= MPLS_TUNNEL_FLAG_L2; + if (tag) + memcpy(mt->mt_tag, tag, sizeof(mt->mt_tag)); + else + mt->mt_tag[0] = '\0'; /* - * Create a new, or re=use and old, tunnel HW interface + * Create a new tunnel HW interface */ - if (vec_len (mpls_tunnel_free_hw_if_indices) > 0) - { - mt->mt_hw_if_index = - mpls_tunnel_free_hw_if_indices[vec_len(mpls_tunnel_free_hw_if_indices)-1]; - _vec_len (mpls_tunnel_free_hw_if_indices) -= 1; - hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index); - hi->hw_instance = mti; - hi->dev_instance = mti; - } - else - { - mt->mt_hw_if_index = vnet_register_interface( - vnm, - mpls_tunnel_class.index, - mti, - mpls_tunnel_hw_interface_class.index, - mti); - hi = vnet_get_hw_interface(vnm, mt->mt_hw_if_index); - } + mt->mt_hw_if_index = vnet_register_interface( + vnm, + mpls_tunnel_class.index, + mti, + mpls_tunnel_hw_interface_class.index, + mti); + hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index); + + if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2) + vnet_set_interface_output_node (vnm, mt->mt_hw_if_index, + mpls_tunnel_tx.index); + + /* Standard default MPLS tunnel MTU. 
*/ + vnet_sw_interface_set_mtu (vnm, hi->sw_if_index, 9000); /* * Add the new tunnel to the tunnel DB - key:SW if index @@ -624,19 +696,6 @@ vnet_mpls_tunnel_create (u8 l2_only, vec_validate_init_empty(mpls_tunnel_db, mt->mt_sw_if_index, ~0); mpls_tunnel_db[mt->mt_sw_if_index] = mti; - if (l2_only) - { - mt->mt_l2_adj = - adj_nbr_add_or_lock(fib_path_list_get_proto(mt->mt_path_list), - VNET_LINK_ETHERNET, - &zero_addr, - mt->mt_sw_if_index); - - mt->mt_l2_tx_arc = vlib_node_add_named_next(vlib_get_main(), - hi->tx_node_index, - "adj-l2-midchain"); - } - return (mt->mt_sw_if_index); } @@ -644,6 +703,7 @@ void vnet_mpls_tunnel_path_add (u32 sw_if_index, fib_route_path_t *rpaths) { + fib_route_path_t *rpath; mpls_tunnel_t *mt; u32 mti; @@ -685,10 +745,13 @@ vnet_mpls_tunnel_path_add (u32 sw_if_index, */ fib_path_ext_list_resolve(&mt->mt_path_exts, mt->mt_path_list); } - fib_path_ext_list_insert(&mt->mt_path_exts, - mt->mt_path_list, - FIB_PATH_EXT_MPLS, - rpaths); + vec_foreach(rpath, rpaths) + { + fib_path_ext_list_insert(&mt->mt_path_exts, + mt->mt_path_list, + FIB_PATH_EXT_MPLS, + rpath); + } mpls_tunnel_restack(mt); } @@ -720,6 +783,7 @@ vnet_mpls_tunnel_path_remove (u32 sw_if_index, old_pl_index = mt->mt_path_list; + fib_path_list_lock(old_pl_index); mt->mt_path_list = fib_path_list_copy_and_path_remove(old_pl_index, FIB_PATH_LIST_FLAG_SHARED, @@ -731,6 +795,7 @@ vnet_mpls_tunnel_path_remove (u32 sw_if_index, if (FIB_NODE_INDEX_INVALID == mt->mt_path_list) { /* no paths left */ + fib_path_list_unlock(old_pl_index); return (0); } else @@ -754,11 +819,24 @@ vnet_mpls_tunnel_path_remove (u32 sw_if_index, mt->mt_path_list); mpls_tunnel_restack(mt); + fib_path_list_unlock(old_pl_index); } return (fib_path_list_get_n_paths(mt->mt_path_list)); } +int +vnet_mpls_tunnel_get_index (u32 sw_if_index) +{ + mpls_tunnel_t *mt; + + mt = mpls_tunnel_get_from_sw_if_index(sw_if_index); + + if (NULL == mt) + return (~0); + + return (mt - mpls_tunnel_pool); +} static clib_error_t * vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, @@ -769,11 +847,11 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, vnet_main_t * vnm = vnet_get_main(); u8 is_del = 0, l2_only = 0, is_multicast =0; fib_route_path_t rpath, *rpaths = NULL; - mpls_label_t out_label = MPLS_LABEL_INVALID; - u32 sw_if_index; + u32 sw_if_index = ~0, payload_proto; clib_error_t *error = NULL; - memset(&rpath, 0, sizeof(rpath)); + clib_memset(&rpath, 0, sizeof(rpath)); + payload_proto = DPO_PROTO_MPLS; /* Get a line of input. */ if (! 
unformat_user (input, unformat_line_input, line_input)) @@ -785,54 +863,20 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, unformat_vnet_sw_interface, vnm, &sw_if_index)) is_del = 1; + else if (unformat (line_input, "add %U", + unformat_vnet_sw_interface, vnm, + &sw_if_index)) + is_del = 0; else if (unformat (line_input, "add")) is_del = 0; - else if (unformat (line_input, "out-label %U", - unformat_mpls_unicast_label, &out_label)) - { - vec_add1(rpath.frp_label_stack, out_label); - } - else if (unformat (line_input, "via %U %U", - unformat_ip4_address, - &rpath.frp_addr.ip4, - unformat_vnet_sw_interface, vnm, - &rpath.frp_sw_if_index)) - { - rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP4; - } - - else if (unformat (line_input, "via %U %U", - unformat_ip6_address, - &rpath.frp_addr.ip6, - unformat_vnet_sw_interface, vnm, - &rpath.frp_sw_if_index)) - { - rpath.frp_weight = 1; - rpath.frp_proto = FIB_PROTOCOL_IP6; - } - else if (unformat (line_input, "via %U", - unformat_ip6_address, - &rpath.frp_addr.ip6)) - { - rpath.frp_fib_index = 0; - rpath.frp_weight = 1; - rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP6; - } - else if (unformat (line_input, "via %U", - unformat_ip4_address, - &rpath.frp_addr.ip4)) - { - rpath.frp_fib_index = 0; - rpath.frp_weight = 1; - rpath.frp_sw_if_index = ~0; - rpath.frp_proto = FIB_PROTOCOL_IP4; - } else if (unformat (line_input, "l2-only")) l2_only = 1; else if (unformat (line_input, "multicast")) is_multicast = 1; + else if (unformat (line_input, "via %U", + unformat_fib_route_path, + &rpath, &payload_proto)) + vec_add1(rpaths, rpath); else { error = clib_error_return (0, "unknown input '%U'", @@ -843,7 +887,14 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, if (is_del) { - vnet_mpls_tunnel_del(sw_if_index); + if (NULL == rpaths) + { + vnet_mpls_tunnel_del(sw_if_index); + } + else if (!vnet_mpls_tunnel_path_remove(sw_if_index, rpaths)) + { + vnet_mpls_tunnel_del(sw_if_index); + } } else { @@ -854,8 +905,10 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, goto done; } - vec_add1(rpaths, rpath); - sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast); + if (~0 == sw_if_index) + { + sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast, NULL); + } vnet_mpls_tunnel_path_add(sw_if_index, rpaths); } @@ -877,7 +930,7 @@ done: VLIB_CLI_COMMAND (create_mpls_tunnel_command, static) = { .path = "mpls tunnel", .short_help = - "mpls tunnel via [addr] [interface] [out-labels]", + "mpls tunnel [multicast] [l2-only] via [next-hop-address] [next-hop-interface] [next-hop-table ] [weight ] [preference ] [udp-encap-id ] [ip4-lookup-in-table ] [ip6-lookup-in-table ] [mpls-lookup-in-table ] [resolve-via-host] [resolve-via-connected] [rx-ip4 ] [out-labels ]", .function = vnet_create_mpls_tunnel_command_fn, }; @@ -887,7 +940,7 @@ format_mpls_tunnel (u8 * s, va_list * args) mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *); mpls_tunnel_attribute_t attr; - s = format(s, "mpls_tunnel%d: sw_if_index:%d hw_if_index:%d", + s = format(s, "mpls-tunnel%d: sw_if_index:%d hw_if_index:%d", mt - mpls_tunnel_pool, mt->mt_sw_if_index, mt->mt_hw_if_index); @@ -904,6 +957,14 @@ format_mpls_tunnel (u8 * s, va_list * args) s = format(s, "%U", format_fib_path_ext_list, &mt->mt_path_exts); s = format(s, "\n"); + if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2) + { + s = format(s, " forwarding: %U\n", + format_fib_forw_chain_type, + FIB_FORW_CHAIN_TYPE_ETHERNET); + s = format(s, " %U\n", format_dpo_id, &mt->mt_l2_lb, 2); + } + return (s); } @@ 
-928,17 +989,17 @@ show_mpls_tunnel_command_fn (vlib_main_t * vm, if (~0 == mti) { - pool_foreach (mt, mpls_tunnel_pool, - ({ + pool_foreach (mt, mpls_tunnel_pool) + { vlib_cli_output (vm, "[@%d] %U", mt - mpls_tunnel_pool, format_mpls_tunnel, mt); - })); + } } else { if (pool_is_free_index(mpls_tunnel_pool, mti)) - return clib_error_return (0, "Not atunnel index %d", mti); + return clib_error_return (0, "Not a tunnel index %d", mti); mt = pool_elt_at_index(mpls_tunnel_pool, mti); @@ -973,9 +1034,7 @@ VLIB_CLI_COMMAND (show_mpls_tunnel_command, static) = { static mpls_tunnel_t * mpls_tunnel_from_fib_node (fib_node_t *node) { -#if (CLIB_DEBUG > 0) ASSERT(FIB_NODE_TYPE_MPLS_TUNNEL == node->fn_type); -#endif return ((mpls_tunnel_t*) (((char*)node) - STRUCT_OFFSET_OF(mpls_tunnel_t, mt_node))); }
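
For reference, a short usage sketch of the CLI as reworked by this change, which now parses paths via unformat_fib_route_path. The interface names, next-hop addresses, and label values below are illustrative assumptions, not taken from this patch:

    mpls tunnel add via 10.0.0.2 GigabitEthernet0/8/0 out-labels 33
    mpls tunnel add l2-only via 10.0.0.3 GigabitEthernet0/9/0 out-labels 34 35
    mpls tunnel del mpls-tunnel0
    show mpls tunnel

With the l2-only flag, TX is handled by the new mpls-tunnel-tx node, which stacks on the per-tunnel mt_l2_lb load-balance instead of the single L2 adjacency used previously.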