X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fmpls%2Fmpls_tunnel.c;h=54458eacdf8483af7ae813f2b14353148fb38845;hb=418bf6aaef6048d15bc1f575a166e8f5e52696be;hp=a142edf3f0a160f5d34b7f65538507b4849155e5;hpb=d723161e038d00e59766aa67a6a0dcc350227e4b;p=vpp.git diff --git a/src/vnet/mpls/mpls_tunnel.c b/src/vnet/mpls/mpls_tunnel.c index a142edf3f0a..54458eacdf8 100644 --- a/src/vnet/mpls/mpls_tunnel.c +++ b/src/vnet/mpls/mpls_tunnel.c @@ -16,7 +16,6 @@ */ #include -#include #include #include #include @@ -31,11 +30,6 @@ */ static mpls_tunnel_t *mpls_tunnel_pool; -/** - * @brief Pool of free tunnel SW indices - i.e. recycled indices - */ -static u32 * mpls_tunnel_free_hw_if_indices; - /** * @brief DB of SW index to tunnel index */ @@ -46,13 +40,151 @@ static u32 *mpls_tunnel_db; */ static const char *mpls_tunnel_attribute_names[] = MPLS_TUNNEL_ATTRIBUTES; +/** + * @brief Packet trace structure + */ +typedef struct mpls_tunnel_trace_t_ +{ + /** + * Tunnel-id / index in tunnel vector + */ + u32 tunnel_id; +} mpls_tunnel_trace_t; + +static u8 * +format_mpls_tunnel_tx_trace (u8 * s, + va_list * args) +{ + CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); + CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); + mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *); + + s = format (s, "MPLS: tunnel %d", t->tunnel_id); + return s; +} + +typedef enum +{ + MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN, + MPLS_TUNNEL_ENCAP_N_NEXT, +} mpls_tunnel_encap_next_t; + +/** + * @brief TX function. Only called L2. L3 traffic uses the adj-midchains + */ +VLIB_NODE_FN (mpls_tunnel_tx) (vlib_main_t * vm, + vlib_node_runtime_t * node, + vlib_frame_t * frame) +{ + u32 *from = vlib_frame_vector_args (frame); + vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b; + u16 nexts[VLIB_FRAME_SIZE], *next; + u32 n_left; + + n_left = frame->n_vectors; + b = bufs; + next = nexts; + + vlib_get_buffers (vm, from, bufs, n_left); + + while (n_left > 2) + { + const mpls_tunnel_t *mt0, *mt1; + u32 sw_if_index0, sw_if_index1; + + sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX]; + sw_if_index1 = vnet_buffer(b[1])->sw_if_index[VLIB_TX]; + + mt0 = pool_elt_at_index(mpls_tunnel_pool, + mpls_tunnel_db[sw_if_index0]); + mt1 = pool_elt_at_index(mpls_tunnel_pool, + mpls_tunnel_db[sw_if_index1]); + + vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index; + vnet_buffer(b[1])->ip.adj_index[VLIB_TX] = mt1->mt_l2_lb.dpoi_index; + next[0] = mt0->mt_l2_lb.dpoi_next_node; + next[1] = mt1->mt_l2_lb.dpoi_next_node; + + /* since we are coming out of the L2 world, where the vlib_buffer + * union is used for other things, make sure it is clean for + * MPLS from now on. 
+ */ + vnet_buffer(b[0])->mpls.first = 0; + vnet_buffer(b[1])->mpls.first = 0; + + if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED)) + { + mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node, + b[0], sizeof (*tr)); + tr->tunnel_id = mpls_tunnel_db[sw_if_index0]; + } + if (PREDICT_FALSE(b[1]->flags & VLIB_BUFFER_IS_TRACED)) + { + mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node, + b[1], sizeof (*tr)); + tr->tunnel_id = mpls_tunnel_db[sw_if_index1]; + } + + b += 2; + n_left -= 2; + next += 2; + } + while (n_left) + { + const mpls_tunnel_t *mt0; + u32 sw_if_index0; + + sw_if_index0 = vnet_buffer(b[0])->sw_if_index[VLIB_TX]; + mt0 = pool_elt_at_index(mpls_tunnel_pool, + mpls_tunnel_db[sw_if_index0]); + + vnet_buffer(b[0])->ip.adj_index[VLIB_TX] = mt0->mt_l2_lb.dpoi_index; + next[0] = mt0->mt_l2_lb.dpoi_next_node; + + /* since we are coming out of the L2 world, where the vlib_buffer + * union is used for other things, make sure it is clean for + * MPLS from now on. + */ + vnet_buffer(b[0])->mpls.first = 0; + + if (PREDICT_FALSE(b[0]->flags & VLIB_BUFFER_IS_TRACED)) + { + mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node, + b[0], sizeof (*tr)); + tr->tunnel_id = mpls_tunnel_db[sw_if_index0]; + } + + b += 1; + n_left -= 1; + next += 1; + } + + vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors); + + return frame->n_vectors; +} + +VLIB_REGISTER_NODE (mpls_tunnel_tx) = +{ + .name = "mpls-tunnel-tx", + .vector_size = sizeof (u32), + .format_trace = format_mpls_tunnel_tx_trace, + .type = VLIB_NODE_TYPE_INTERNAL, + .n_errors = 0, + .n_next_nodes = 0, + /* MPLS_TUNNEL_ENCAP_N_NEXT, */ + /* .next_nodes = { */ + /* [MPLS_TUNNEL_ENCAP_NEXT_L2_MIDCHAIN] = "mpls-load-balance", */ + /* }, */ +}; + /** * @brief Get a tunnel object from a SW interface index */ static mpls_tunnel_t* mpls_tunnel_get_from_sw_if_index (u32 sw_if_index) { - if ((vec_len(mpls_tunnel_db) < sw_if_index) || + if ((vec_len(mpls_tunnel_db) <= sw_if_index) || (~0 == mpls_tunnel_db[sw_if_index])) return (NULL); @@ -67,7 +199,7 @@ static u8* mpls_tunnel_build_rewrite_i (void) { /* - * passing the adj code a NULL rewirte means 'i don't have one cos + * passing the adj code a NULL rewrite means 'i don't have one cos * t'other end is unresolved'. That's not the case here. For the mpls * tunnel there are just no bytes of encap to apply in the adj. We'll impose * the label stack once we choose a path. So return a zero length rewrite. 
@@ -170,9 +302,12 @@ mpls_tunnel_mk_lb (mpls_tunnel_t *mt, lb_proto = fib_forw_chain_type_to_dpo_proto(fct); - fib_path_list_walk(mt->mt_path_list, - mpls_tunnel_collect_forwarding, - &ctx); + if (FIB_NODE_INDEX_INVALID != mt->mt_path_list) + { + fib_path_list_walk(mt->mt_path_list, + mpls_tunnel_collect_forwarding, + &ctx); + } if (!dpo_id_is_valid(dpo_lb)) { @@ -244,8 +379,14 @@ mpls_tunnel_stack (adj_index_t ai) mt = mpls_tunnel_get_from_sw_if_index(sw_if_index); - if (NULL == mt) + if (NULL == mt || FIB_NODE_INDEX_INVALID == mt->mt_path_list) + return; + + if (FIB_NODE_INDEX_INVALID == mt->mt_path_list) + { + adj_nbr_midchain_unstack(ai); return; + } /* * while we're stacking the adj, remove the tunnel from the child list @@ -314,7 +455,6 @@ mpls_tunnel_restack (mpls_tunnel_t *mt) /* * Stack a load-balance that drops, whilst we have no paths */ - vnet_hw_interface_t * hi; dpo_id_t dpo = DPO_INVALID; mpls_tunnel_mk_lb(mt, @@ -322,15 +462,14 @@ mpls_tunnel_restack (mpls_tunnel_t *mt) FIB_FORW_CHAIN_TYPE_ETHERNET, &dpo); - hi = vnet_get_hw_interface(vnet_get_main(), mt->mt_hw_if_index); - dpo_stack_from_node(hi->tx_node_index, + dpo_stack_from_node(mpls_tunnel_tx.index, &mt->mt_l2_lb, &dpo); dpo_reset(&dpo); } else { - FOR_EACH_FIB_PROTOCOL(proto) + FOR_EACH_FIB_IP_PROTOCOL(proto) { adj_nbr_walk(mt->mt_sw_if_index, proto, @@ -372,7 +511,7 @@ mpls_tunnel_admin_up_down (vnet_main_t * vnm, */ static void mpls_tunnel_fixup (vlib_main_t *vm, - ip_adjacency_t *adj, + const ip_adjacency_t *adj, vlib_buffer_t *b0, const void*data) { @@ -399,6 +538,7 @@ mpls_tunnel_update_adj (vnet_main_t * vnm, { case IP_LOOKUP_NEXT_ARP: case IP_LOOKUP_NEXT_GLEAN: + case IP_LOOKUP_NEXT_BCAST: adj_nbr_midchain_update_rewrite(ai, mpls_tunnel_fixup, NULL, ADJ_FLAG_NONE, @@ -447,106 +587,11 @@ format_mpls_tunnel_device (u8 * s, va_list * args) return (format (s, "MPLS-tunnel: id %d\n", dev_instance)); } -/** - * @brief Packet trace structure - */ -typedef struct mpls_tunnel_trace_t_ -{ - /** - * Tunnel-id / index in tunnel vector - */ - u32 tunnel_id; -} mpls_tunnel_trace_t; - -static u8 * -format_mpls_tunnel_tx_trace (u8 * s, - va_list * args) -{ - CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *); - CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *); - mpls_tunnel_trace_t * t = va_arg (*args, mpls_tunnel_trace_t *); - - s = format (s, "MPLS: tunnel %d", t->tunnel_id); - return s; -} - -/** - * @brief TX function. Only called L2. 
L3 traffic uses the adj-midchains - */ -static uword -mpls_tunnel_tx (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) -{ - u32 next_index; - u32 * from, * to_next, n_left_from, n_left_to_next; - vnet_interface_output_runtime_t * rd = (void *) node->runtime_data; - const mpls_tunnel_t *mt; - - mt = pool_elt_at_index(mpls_tunnel_pool, rd->dev_instance); - - /* Vector of buffer / pkt indices we're supposed to process */ - from = vlib_frame_vector_args (frame); - - /* Number of buffers / pkts */ - n_left_from = frame->n_vectors; - - /* Speculatively send the first buffer to the last disposition we used */ - next_index = node->cached_next_index; - - while (n_left_from > 0) - { - /* set up to enqueue to our disposition with index = next_index */ - vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - - /* - * FIXME DUAL LOOP - */ - while (n_left_from > 0 && n_left_to_next > 0) - { - vlib_buffer_t * b0; - u32 bi0; - - bi0 = from[0]; - to_next[0] = bi0; - from += 1; - to_next += 1; - n_left_from -= 1; - n_left_to_next -= 1; - - b0 = vlib_get_buffer(vm, bi0); - - vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_lb.dpoi_index; - /* since we are coming out of the L2 world, where the vlib_buffer - * union is used for other things, make sure it is clean for - * MPLS from now on. - */ - vnet_buffer(b0)->mpls.first = 0; - - if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED)) - { - mpls_tunnel_trace_t *tr = vlib_add_trace (vm, node, - b0, sizeof (*tr)); - tr->tunnel_id = rd->dev_instance; - } - - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - bi0, mt->mt_l2_lb.dpoi_next_node); - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - } - - return frame->n_vectors; -} - VNET_DEVICE_CLASS (mpls_tunnel_class) = { .name = "MPLS tunnel device", .format_device_name = format_mpls_tunnel_name, .format_device = format_mpls_tunnel_device, .format_tx_trace = format_mpls_tunnel_tx_trace, - .tx_function = mpls_tunnel_tx, .admin_up_down_function = mpls_tunnel_admin_up_down, }; @@ -572,10 +617,10 @@ mpls_tunnel_walk (mpls_tunnel_walk_cb_t cb, { u32 mti; - pool_foreach_index(mti, mpls_tunnel_pool, - ({ + pool_foreach_index (mti, mpls_tunnel_pool) + { cb(mti, ctx); - })); + } } void @@ -593,14 +638,16 @@ vnet_mpls_tunnel_del (u32 sw_if_index) mt->mt_sibling_index); dpo_reset(&mt->mt_l2_lb); - vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index); + vnet_delete_hw_interface (vnet_get_main(), mt->mt_hw_if_index); + pool_put(mpls_tunnel_pool, mt); mpls_tunnel_db[sw_if_index] = ~0; } u32 vnet_mpls_tunnel_create (u8 l2_only, - u8 is_multicast) + u8 is_multicast, + u8 *tag) { vnet_hw_interface_t * hi; mpls_tunnel_t *mt; @@ -609,7 +656,7 @@ vnet_mpls_tunnel_create (u8 l2_only, vnm = vnet_get_main(); pool_get(mpls_tunnel_pool, mt); - memset (mt, 0, sizeof (*mt)); + clib_memset (mt, 0, sizeof (*mt)); mti = mt - mpls_tunnel_pool; fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL); mt->mt_path_list = FIB_NODE_INDEX_INVALID; @@ -619,29 +666,25 @@ vnet_mpls_tunnel_create (u8 l2_only, mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST; if (l2_only) mt->mt_flags |= MPLS_TUNNEL_FLAG_L2; + if (tag) + memcpy(mt->mt_tag, tag, sizeof(mt->mt_tag)); + else + mt->mt_tag[0] = '\0'; /* - * Create a new, or re=use and old, tunnel HW interface + * Create a new tunnel HW interface */ - if (vec_len (mpls_tunnel_free_hw_if_indices) > 0) - { - mt->mt_hw_if_index = - mpls_tunnel_free_hw_if_indices[vec_len(mpls_tunnel_free_hw_if_indices)-1]; - _vec_len 
(mpls_tunnel_free_hw_if_indices) -= 1; - hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index); - hi->hw_instance = mti; - hi->dev_instance = mti; - } - else - { - mt->mt_hw_if_index = vnet_register_interface( - vnm, - mpls_tunnel_class.index, - mti, - mpls_tunnel_hw_interface_class.index, - mti); - hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index); - } + mt->mt_hw_if_index = vnet_register_interface( + vnm, + mpls_tunnel_class.index, + mti, + mpls_tunnel_hw_interface_class.index, + mti); + hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index); + + if (mt->mt_flags & MPLS_TUNNEL_FLAG_L2) + vnet_set_interface_output_node (vnm, mt->mt_hw_if_index, + mpls_tunnel_tx.index); /* Standard default MPLS tunnel MTU. */ vnet_sw_interface_set_mtu (vnm, hi->sw_if_index, 9000); @@ -660,6 +703,7 @@ void vnet_mpls_tunnel_path_add (u32 sw_if_index, fib_route_path_t *rpaths) { + fib_route_path_t *rpath; mpls_tunnel_t *mt; u32 mti; @@ -701,10 +745,13 @@ vnet_mpls_tunnel_path_add (u32 sw_if_index, */ fib_path_ext_list_resolve(&mt->mt_path_exts, mt->mt_path_list); } - fib_path_ext_list_insert(&mt->mt_path_exts, - mt->mt_path_list, - FIB_PATH_EXT_MPLS, - rpaths); + vec_foreach(rpath, rpaths) + { + fib_path_ext_list_insert(&mt->mt_path_exts, + mt->mt_path_list, + FIB_PATH_EXT_MPLS, + rpath); + } mpls_tunnel_restack(mt); } @@ -736,6 +783,7 @@ vnet_mpls_tunnel_path_remove (u32 sw_if_index, old_pl_index = mt->mt_path_list; + fib_path_list_lock(old_pl_index); mt->mt_path_list = fib_path_list_copy_and_path_remove(old_pl_index, FIB_PATH_LIST_FLAG_SHARED, @@ -747,6 +795,7 @@ vnet_mpls_tunnel_path_remove (u32 sw_if_index, if (FIB_NODE_INDEX_INVALID == mt->mt_path_list) { /* no paths left */ + fib_path_list_unlock(old_pl_index); return (0); } else @@ -770,11 +819,24 @@ vnet_mpls_tunnel_path_remove (u32 sw_if_index, mt->mt_path_list); mpls_tunnel_restack(mt); + fib_path_list_unlock(old_pl_index); } return (fib_path_list_get_n_paths(mt->mt_path_list)); } +int +vnet_mpls_tunnel_get_index (u32 sw_if_index) +{ + mpls_tunnel_t *mt; + + mt = mpls_tunnel_get_from_sw_if_index(sw_if_index); + + if (NULL == mt) + return (~0); + + return (mt - mpls_tunnel_pool); +} static clib_error_t * vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, @@ -788,7 +850,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, u32 sw_if_index = ~0, payload_proto; clib_error_t *error = NULL; - memset(&rpath, 0, sizeof(rpath)); + clib_memset(&rpath, 0, sizeof(rpath)); payload_proto = DPO_PROTO_MPLS; /* Get a line of input. 
*/ @@ -825,7 +887,11 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, if (is_del) { - if (!vnet_mpls_tunnel_path_remove(sw_if_index, rpaths)) + if (NULL == rpaths) + { + vnet_mpls_tunnel_del(sw_if_index); + } + else if (!vnet_mpls_tunnel_path_remove(sw_if_index, rpaths)) { vnet_mpls_tunnel_del(sw_if_index); } @@ -841,7 +907,7 @@ vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm, if (~0 == sw_if_index) { - sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast); + sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast, NULL); } vnet_mpls_tunnel_path_add(sw_if_index, rpaths); } @@ -874,7 +940,7 @@ format_mpls_tunnel (u8 * s, va_list * args) mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *); mpls_tunnel_attribute_t attr; - s = format(s, "mpls_tunnel%d: sw_if_index:%d hw_if_index:%d", + s = format(s, "mpls-tunnel%d: sw_if_index:%d hw_if_index:%d", mt - mpls_tunnel_pool, mt->mt_sw_if_index, mt->mt_hw_if_index); @@ -923,17 +989,17 @@ show_mpls_tunnel_command_fn (vlib_main_t * vm, if (~0 == mti) { - pool_foreach (mt, mpls_tunnel_pool, - ({ + pool_foreach (mt, mpls_tunnel_pool) + { vlib_cli_output (vm, "[@%d] %U", mt - mpls_tunnel_pool, format_mpls_tunnel, mt); - })); + } } else { if (pool_is_free_index(mpls_tunnel_pool, mti)) - return clib_error_return (0, "Not atunnel index %d", mti); + return clib_error_return (0, "Not a tunnel index %d", mti); mt = pool_elt_at_index(mpls_tunnel_pool, mti);
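
For reference, the new mpls-tunnel-tx node added by this change uses VPP's bulk dispatch style: vlib_get_buffers() resolves the frame's buffer indices into buffer pointers up front, each buffer is assigned a next-node index, and vlib_buffer_enqueue_to_next() hands the whole frame back in a single call, replacing the vlib_get_next_frame()/vlib_validate_buffer_enqueue_x1() loop of the removed per-device tx_function. The skeleton below is a minimal sketch of that pattern, reduced to a single-buffer loop; the node name example-bulk-tx, its trace struct, and the lone "error-drop" next arc are illustrative assumptions and are not part of this change.

/* Minimal sketch of the vlib_get_buffers() / vlib_buffer_enqueue_to_next()
 * dispatch style adopted by mpls-tunnel-tx above.  Assumes it is compiled
 * inside a VPP source tree; the node name, trace struct and the single
 * "error-drop" next arc are illustrative only. */
#include <vlib/vlib.h>
#include <vnet/vnet.h>

typedef struct
{
  u32 sw_if_index;              /* interface the buffer was headed out of */
} example_bulk_tx_trace_t;

static u8 *
format_example_bulk_tx_trace (u8 * s, va_list * args)
{
  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
  example_bulk_tx_trace_t *t = va_arg (*args, example_bulk_tx_trace_t *);

  return format (s, "example-bulk-tx: sw_if_index %d", t->sw_if_index);
}

VLIB_NODE_FN (example_bulk_tx) (vlib_main_t * vm,
                                vlib_node_runtime_t * node,
                                vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
  u32 n_left = frame->n_vectors;

  /* resolve all buffer indices of the frame into pointers up front */
  vlib_get_buffers (vm, from, bufs, n_left);

  while (n_left > 0)
    {
      /* a real node classifies b[0] here (mpls-tunnel-tx keys on the TX
       * sw_if_index); this sketch sends everything to its only next arc */
      next[0] = 0;

      if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
        {
          example_bulk_tx_trace_t *tr =
            vlib_add_trace (vm, node, b[0], sizeof (*tr));
          tr->sw_if_index = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
        }

      b += 1;
      next += 1;
      n_left -= 1;
    }

  /* hand the whole frame, with its per-buffer next indices, back in one go */
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);

  return frame->n_vectors;
}

VLIB_REGISTER_NODE (example_bulk_tx) =
{
  .name = "example-bulk-tx",
  .vector_size = sizeof (u32),
  .format_trace = format_example_bulk_tx_trace,
  .type = VLIB_NODE_TYPE_INTERNAL,
  .n_next_nodes = 1,
  .next_nodes = {
    [0] = "error-drop",
  },
};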