*/
static mpls_tunnel_t *mpls_tunnel_pool;
-/**
- * @brief Pool of free tunnel SW indices - i.e. recycled indices
- */
-static u32 * mpls_tunnel_free_hw_if_indices;
-
/**
* @brief DB of SW index to tunnel index
*/
static mpls_tunnel_t*
mpls_tunnel_get_from_sw_if_index (u32 sw_if_index)
{
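+    /* indices beyond the DB, or entries of ~0, mean no tunnel on this interface */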
- if ((vec_len(mpls_tunnel_db) < sw_if_index) ||
+ if ((vec_len(mpls_tunnel_db) <= sw_if_index) ||
(~0 == mpls_tunnel_db[sw_if_index]))
return (NULL);
mpls_tunnel_build_rewrite_i (void)
{
/*
- * passing the adj code a NULL rewirte means 'i don't have one cos
+ * passing the adj code a NULL rewrite means 'i don't have one cos
* t'other end is unresolved'. That's not the case here. For the mpls
* tunnel there are just no bytes of encap to apply in the adj. We'll impose
* the label stack once we choose a path. So return a zero length rewrite.
path_ext = fib_path_ext_list_find_by_path_index(&ctx->mt->mt_path_exts,
path_index);
- if (NULL != path_ext)
- {
- /*
- * found a matching extension. stack it to obtain the forwarding
- * info for this path.
- */
- ctx->next_hops = fib_path_ext_stack(path_ext,
- ctx->fct,
- ctx->fct,
- ctx->next_hops);
- }
- else
- ASSERT(0);
/*
- * else
- * There should be a path-extenios associated with each path
+ * we don't want IP TTL decrements for packets hitting the MPLS labels
+ * we stack on, since the IP TTL decrement is done by the adj
+ */
+ path_ext->fpe_mpls_flags |= FIB_PATH_EXT_MPLS_FLAG_NO_IP_TTL_DECR;
+
+ /*
+ * found a matching extension. stack it to obtain the forwarding
+ * info for this path.
*/
+ ctx->next_hops = fib_path_ext_stack(path_ext,
+ ctx->fct,
+ ctx->fct,
+ ctx->next_hops);
return (FIB_PATH_LIST_WALK_CONTINUE);
}
lb_proto = fib_forw_chain_type_to_dpo_proto(fct);
- fib_path_list_walk(mt->mt_path_list,
- mpls_tunnel_collect_forwarding,
- &ctx);
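+    /* a tunnel with no paths configured yet has no path-list to walk */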
+ if (FIB_NODE_INDEX_INVALID != mt->mt_path_list)
+ {
+ fib_path_list_walk(mt->mt_path_list,
+ mpls_tunnel_collect_forwarding,
+ &ctx);
+ }
if (!dpo_id_is_valid(dpo_lb))
{
mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
    if (NULL == mt)
return;
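+    /* no paths left: unstack the adj so it resolves via the drop */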
+ if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
+ {
+ adj_nbr_midchain_unstack(ai);
+ return;
+ }
+
/*
* while we're stacking the adj, remove the tunnel from the child list
* of the path list. this breaks a circular dependency of walk updates
mpls_tunnel_mk_lb(mt,
adj->ia_link,
- (VNET_LINK_MPLS == adj_get_link_type(ai) ?
- FIB_FORW_CHAIN_TYPE_MPLS_NON_EOS:
- FIB_FORW_CHAIN_TYPE_MPLS_EOS),
+ fib_forw_chain_type_from_link_type(
+ adj_get_link_type(ai)),
&dpo);
adj_nbr_midchain_stack(ai, &dpo);
*/
static void
mpls_tunnel_fixup (vlib_main_t *vm,
- ip_adjacency_t *adj,
+ const ip_adjacency_t *adj,
vlib_buffer_t *b0,
                   const void *data)
{
{
case IP_LOOKUP_NEXT_ARP:
case IP_LOOKUP_NEXT_GLEAN:
+ case IP_LOOKUP_NEXT_BCAST:
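+        /* incomplete nbr, glean and bcast adjs become midchains carrying the tunnel's fixup */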
adj_nbr_midchain_update_rewrite(ai, mpls_tunnel_fixup,
NULL,
ADJ_FLAG_NONE,
b0 = vlib_get_buffer(vm, bi0);
vnet_buffer(b0)->ip.adj_index[VLIB_TX] = mt->mt_l2_lb.dpoi_index;
+ /* since we are coming out of the L2 world, where the vlib_buffer
+ * union is used for other things, make sure it is clean for
+ * MPLS from now on.
+ */
+ vnet_buffer(b0)->mpls.first = 0;
if (PREDICT_FALSE(b0->flags & VLIB_BUFFER_IS_TRACED))
{
mt->mt_sibling_index);
dpo_reset(&mt->mt_l2_lb);
- vec_add1 (mpls_tunnel_free_hw_if_indices, mt->mt_hw_if_index);
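+    /* hw interfaces are no longer recycled; hand this one back to vnet */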
+ vnet_delete_hw_interface (vnet_get_main(), mt->mt_hw_if_index);
+
pool_put(mpls_tunnel_pool, mt);
mpls_tunnel_db[sw_if_index] = ~0;
}
u32
vnet_mpls_tunnel_create (u8 l2_only,
- u8 is_multicast)
+ u8 is_multicast,
+ u8 *tag)
{
vnet_hw_interface_t * hi;
mpls_tunnel_t *mt;
vnm = vnet_get_main();
pool_get(mpls_tunnel_pool, mt);
- memset (mt, 0, sizeof (*mt));
+ clib_memset (mt, 0, sizeof (*mt));
mti = mt - mpls_tunnel_pool;
fib_node_init(&mt->mt_node, FIB_NODE_TYPE_MPLS_TUNNEL);
mt->mt_path_list = FIB_NODE_INDEX_INVALID;
mt->mt_flags |= MPLS_TUNNEL_FLAG_MCAST;
if (l2_only)
mt->mt_flags |= MPLS_TUNNEL_FLAG_L2;
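+    /* mt_tag is a fixed-size array (hence the sizeof); callers must supply at least that many bytes */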
+ if (tag)
+ memcpy(mt->mt_tag, tag, sizeof(mt->mt_tag));
+ else
+ mt->mt_tag[0] = '\0';
/*
- * Create a new, or re=use and old, tunnel HW interface
+ * Create a new tunnel HW interface
*/
- if (vec_len (mpls_tunnel_free_hw_if_indices) > 0)
- {
- mt->mt_hw_if_index =
- mpls_tunnel_free_hw_if_indices[vec_len(mpls_tunnel_free_hw_if_indices)-1];
- _vec_len (mpls_tunnel_free_hw_if_indices) -= 1;
- hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);
- hi->hw_instance = mti;
- hi->dev_instance = mti;
- }
- else
- {
- mt->mt_hw_if_index = vnet_register_interface(
- vnm,
- mpls_tunnel_class.index,
- mti,
- mpls_tunnel_hw_interface_class.index,
- mti);
- hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);
- }
+ mt->mt_hw_if_index = vnet_register_interface(
+ vnm,
+ mpls_tunnel_class.index,
+ mti,
+ mpls_tunnel_hw_interface_class.index,
+ mti);
+ hi = vnet_get_hw_interface (vnm, mt->mt_hw_if_index);
+
+    /* Default the tunnel to a jumbo-frame MTU of 9000. */
+ vnet_sw_interface_set_mtu (vnm, hi->sw_if_index, 9000);
/*
* Add the new tunnel to the tunnel DB - key:SW if index
vnet_mpls_tunnel_path_add (u32 sw_if_index,
fib_route_path_t *rpaths)
{
+ fib_route_path_t *rpath;
mpls_tunnel_t *mt;
u32 mti;
*/
fib_path_ext_list_resolve(&mt->mt_path_exts, mt->mt_path_list);
}
- fib_path_ext_list_insert(&mt->mt_path_exts,
- mt->mt_path_list,
- FIB_PATH_EXT_MPLS,
- rpaths);
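+    /* one extension per path; each carries the label stack to impose on that path */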
+ vec_foreach(rpath, rpaths)
+ {
+ fib_path_ext_list_insert(&mt->mt_path_exts,
+ mt->mt_path_list,
+ FIB_PATH_EXT_MPLS,
+ rpath);
+ }
mpls_tunnel_restack(mt);
}
old_pl_index = mt->mt_path_list;
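+    /* hold the old path-list so it is not freed before the copy and child-removal below */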
+ fib_path_list_lock(old_pl_index);
mt->mt_path_list =
fib_path_list_copy_and_path_remove(old_pl_index,
FIB_PATH_LIST_FLAG_SHARED,
if (FIB_NODE_INDEX_INVALID == mt->mt_path_list)
{
/* no paths left */
+ fib_path_list_unlock(old_pl_index);
return (0);
}
else
mt->mt_path_list);
mpls_tunnel_restack(mt);
+ fib_path_list_unlock(old_pl_index);
}
return (fib_path_list_get_n_paths(mt->mt_path_list));
}
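+/**
+ * @brief Return the tunnel's pool index given its SW interface index,
+ *        or ~0 if the interface is not an MPLS tunnel
+ */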
+int
+vnet_mpls_tunnel_get_index (u32 sw_if_index)
+{
+ mpls_tunnel_t *mt;
+
+ mt = mpls_tunnel_get_from_sw_if_index(sw_if_index);
+
+ if (NULL == mt)
+ return (~0);
+
+ return (mt - mpls_tunnel_pool);
+}
static clib_error_t *
vnet_create_mpls_tunnel_command_fn (vlib_main_t * vm,
u32 sw_if_index = ~0, payload_proto;
clib_error_t *error = NULL;
- memset(&rpath, 0, sizeof(rpath));
+ clib_memset(&rpath, 0, sizeof(rpath));
payload_proto = DPO_PROTO_MPLS;
/* Get a line of input. */
if (is_del)
{
- if (!vnet_mpls_tunnel_path_remove(sw_if_index, rpaths))
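+        /* with no paths specified, delete the whole tunnel; otherwise remove
+         * the path and delete the tunnel only when its last path is gone */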
+ if (NULL == rpaths)
+ {
+ vnet_mpls_tunnel_del(sw_if_index);
+ }
+ else if (!vnet_mpls_tunnel_path_remove(sw_if_index, rpaths))
{
vnet_mpls_tunnel_del(sw_if_index);
}
if (~0 == sw_if_index)
{
- sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast);
+ sw_if_index = vnet_mpls_tunnel_create(l2_only, is_multicast, NULL);
}
vnet_mpls_tunnel_path_add(sw_if_index, rpaths);
}
mpls_tunnel_t *mt = va_arg (*args, mpls_tunnel_t *);
mpls_tunnel_attribute_t attr;
- s = format(s, "mpls_tunnel%d: sw_if_index:%d hw_if_index:%d",
+ s = format(s, "mpls-tunnel%d: sw_if_index:%d hw_if_index:%d",
mt - mpls_tunnel_pool,
mt->mt_sw_if_index,
mt->mt_hw_if_index);
else
{
if (pool_is_free_index(mpls_tunnel_pool, mti))
- return clib_error_return (0, "Not atunnel index %d", mti);
+ return clib_error_return (0, "Not a tunnel index %d", mti);
mt = pool_elt_at_index(mpls_tunnel_pool, mti);