#include <vnet/fib/ip6_fib.h>
#include <vnet/ip/format.h>
#include <vnet/ipip/ipip.h>
-#include <vnet/nhrp/nhrp.h>
+#include <vnet/teib/teib.h>
#include <vnet/tunnel/tunnel_dp.h>
ipip_main_t ipip_main;
ip4_header_set_dscp (ip4, t->dscp);
if (t->flags & TUNNEL_ENCAP_DECAP_FLAG_ENCAP_SET_DF)
ip4_header_set_df (ip4);
- ip4->checksum = ip4_header_checksum (ip4);
switch (link_type)
{
case VNET_LINK_IP4:
ip4->protocol = IP_PROTOCOL_IP_IN_IP;
break;
+ case VNET_LINK_MPLS:
+ ip4->protocol = IP_PROTOCOL_MPLS_IN_IP;
+ break;
default:
break;
}
+ ip4->checksum = ip4_header_checksum (ip4);
break;
case IPIP_TRANSPORT_IP6:
case VNET_LINK_IP4:
ip6->protocol = IP_PROTOCOL_IP_IN_IP;
break;
+ case VNET_LINK_MPLS:
+ ip6->protocol = IP_PROTOCOL_MPLS_IN_IP;
+ break;
default:
break;
}
ip6->payload_length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b) -
sizeof (*ip6));
- tunnel_encap_fixup_4o6 (flags, ((ip4_header_t *) (ip6 + 1)), ip6);
+ tunnel_encap_fixup_4o6 (flags, b, ((ip4_header_t *) (ip6 + 1)), ip6);
}
static void
tunnel_encap_fixup_6o6 (flags, ip6 + 1, ip6);
}
+/* Adjacency fixup for an MPLS payload carried over an IPv6 transport:
+ * refresh the outer IPv6 payload length from the buffer chain and hand
+ * off to the tunnel encap fixup for the inner MPLS header. 'data' is
+ * the tunnel's encap/decap flags packed into a pointer. */
+static void
+ipipm6_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b,
+ const void *data)
+{
+ tunnel_encap_decap_flags_t flags;
+ ip6_header_t *ip6;
+
+ flags = pointer_to_uword (data);
+
+ /* Must set locally originated otherwise we're not allowed to
+ fragment the packet later and we'll get an unwanted hop-limit
+ decrement */
+ b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
+
+ ip6 = vlib_buffer_get_current (b);
+ ip6->payload_length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b) - sizeof (*ip6));
+ tunnel_encap_fixup_mplso6 (flags, b, (mpls_unicast_header_t *) (ip6 + 1),
+ ip6);
+}
+
+/* Adjacency fixup for an MPLS payload carried over an IPv4 transport:
+ * refresh the outer IPv4 total length, apply the tunnel encap fixup to
+ * the inner MPLS header, and recompute the v4 checksum last (it covers
+ * the length field just written).
+ * NOTE(review): unlike the mplso6 fixup this does not pass the buffer
+ * to tunnel_encap_fixup_mplso4 - confirm against its declaration. */
+static void
+ipipm4_fixup (vlib_main_t *vm, const ip_adjacency_t *adj, vlib_buffer_t *b,
+ const void *data)
+{
+ tunnel_encap_decap_flags_t flags;
+ ip4_header_t *ip4;
+
+ flags = pointer_to_uword (data);
+
+ /* Must set locally originated otherwise we'll do a TTL decrement
+ * during ip4-rewrite */
+ b->flags |= VNET_BUFFER_F_LOCALLY_ORIGINATED;
+
+ ip4 = vlib_buffer_get_current (b);
+ ip4->length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b) - sizeof (*ip4));
+ tunnel_encap_fixup_mplso4 (flags, (mpls_unicast_header_t *) (ip4 + 1), ip4);
+ ip4->checksum = ip4_header_checksum (ip4);
+}
+
static void
ipip_tunnel_stack (adj_index_t ai)
{
}
+/* Select the midchain fixup for this tunnel's transport and the
+ * adjacency's payload link-type. MPLS payloads are now handled over
+ * both v4 and v6 transports. For the IPv4-over-IPv4 case the new
+ * in/out 'aflags' parameter additionally tags the adjacency with
+ * ADJ_FLAG_MIDCHAIN_FIXUP_IP4O4_HDR so the generic midchain code can
+ * fix up the inner v4 header. */
static adj_midchain_fixup_t
-ipip_get_fixup (const ipip_tunnel_t * t, vnet_link_t lt)
+ipip_get_fixup (const ipip_tunnel_t * t, vnet_link_t lt, adj_flags_t * aflags)
{
if (t->transport == IPIP_TRANSPORT_IP6 && lt == VNET_LINK_IP6)
return (ipip66_fixup);
if (t->transport == IPIP_TRANSPORT_IP6 && lt == VNET_LINK_IP4)
return (ipip46_fixup);
+ if (t->transport == IPIP_TRANSPORT_IP6 && lt == VNET_LINK_MPLS)
+ return (ipipm6_fixup);
if (t->transport == IPIP_TRANSPORT_IP4 && lt == VNET_LINK_IP6)
return (ipip64_fixup);
+ if (t->transport == IPIP_TRANSPORT_IP4 && lt == VNET_LINK_MPLS)
+ return (ipipm4_fixup);
if (t->transport == IPIP_TRANSPORT_IP4 && lt == VNET_LINK_IP4)
- return (ipip44_fixup);
+ {
+ *aflags = *aflags | ADJ_FLAG_MIDCHAIN_FIXUP_IP4O4_HDR;
+ return (ipip44_fixup);
+ }
ASSERT (0);
return (ipip44_fixup);
void
ipip_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
{
+ adj_midchain_fixup_t fixup;
ipip_tunnel_t *t;
adj_flags_t af;
+ af = ADJ_FLAG_NONE;
t = ipip_tunnel_db_find_by_sw_if_index (sw_if_index);
if (!t)
return;
- af = ADJ_FLAG_MIDCHAIN_IP_STACK;
+ /*
+ * the user has not requested that the load-balancing be based on
+ * a flow hash of the inner packet. so use the stacking to choose
+ * a path.
+ */
+ if (!(t->flags & TUNNEL_ENCAP_DECAP_FLAG_ENCAP_INNER_HASH))
+ af |= ADJ_FLAG_MIDCHAIN_IP_STACK;
+
if (VNET_LINK_ETHERNET == adj_get_link_type (ai))
af |= ADJ_FLAG_MIDCHAIN_NO_COUNT;
+ fixup = ipip_get_fixup (t, adj_get_link_type (ai), &af);
adj_nbr_midchain_update_rewrite
- (ai, ipip_get_fixup (t, adj_get_link_type (ai)),
+ (ai, fixup,
uword_to_pointer (t->flags, void *), af,
ipip_build_rewrite (vnm, sw_if_index,
adj_get_link_type (ai), &t->tunnel_dst));
+/* Context threaded through the adjacency walks for multi-point
+ * (P2MP) tunnels. */
typedef struct mipip_walk_ctx_t_
{
const ipip_tunnel_t *t;
- const nhrp_entry_t *ne;
+ /* TEIB entry supplying the peer's next-hop for this walk */
+ const teib_entry_t *ne;
} mipip_walk_ctx_t;
+/* adj walk callback: a TEIB entry has supplied the peer's next-hop,
+ * so (re)write each adjacency's rewrite string and stack it on the
+ * next-hop's forwarding. The adj flags are derived per-adjacency via
+ * ipip_get_fixup plus the tunnel's inner-hash preference. */
static adj_walk_rc_t
mipip_mk_complete_walk (adj_index_t ai, void *data)
{
+ adj_midchain_fixup_t fixup;
mipip_walk_ctx_t *ctx = data;
+ adj_flags_t af;
+
+ af = ADJ_FLAG_NONE;
+ fixup = ipip_get_fixup (ctx->t, adj_get_link_type (ai), &af);
+
+ /*
+ * the user has not requested that the load-balancing be based on
+ * a flow hash of the inner packet. so use the stacking to choose
+ * a path.
+ */
+ if (!(ctx->t->flags & TUNNEL_ENCAP_DECAP_FLAG_ENCAP_INNER_HASH))
+ af |= ADJ_FLAG_MIDCHAIN_IP_STACK;
adj_nbr_midchain_update_rewrite
- (ai, ipip_get_fixup (ctx->t, adj_get_link_type (ai)),
+ (ai, fixup,
uword_to_pointer (ctx->t->flags, void *),
- ADJ_FLAG_MIDCHAIN_IP_STACK, ipip_build_rewrite (vnet_get_main (),
-  ctx->t->sw_if_index,
-  adj_get_link_type (ai),
-  &nhrp_entry_get_nh
-  (ctx->ne)->fp_addr));
+ af, ipip_build_rewrite (vnet_get_main (),
+ ctx->t->sw_if_index,
+ adj_get_link_type (ai),
+ &teib_entry_get_nh (ctx->ne)->fp_addr));
- nhrp_entry_adj_stack (ctx->ne, ai);
+ teib_entry_adj_stack (ctx->ne, ai);
return (ADJ_WALK_RC_CONTINUE);
}
+/* adj walk callback: the TEIB entry that resolved the peer is gone,
+ * so strip the rewrite and unstack, leaving the adjacency incomplete.
+ * NOTE(review): 'af' is populated by ipip_get_fixup but ADJ_FLAG_NONE
+ * is passed below - presumably intentional for a rewrite-less
+ * adjacency; confirm. */
static adj_walk_rc_t
mipip_mk_incomplete_walk (adj_index_t ai, void *data)
{
+ adj_midchain_fixup_t fixup;
ipip_tunnel_t *t = data;
+ adj_flags_t af;
- adj_nbr_midchain_update_rewrite
- (ai, ipip_get_fixup (t, adj_get_link_type (ai)),
- NULL, ADJ_FLAG_NONE, NULL);
+ af = ADJ_FLAG_NONE;
+ fixup = ipip_get_fixup (t, adj_get_link_type (ai), &af);
+
+ adj_nbr_midchain_update_rewrite (ai, fixup, NULL, ADJ_FLAG_NONE, NULL);
adj_midchain_delegate_unstack (ai);
mipip_update_adj (vnet_main_t * vnm, u32 sw_if_index, adj_index_t ai)
{
ipip_main_t *gm = &ipip_main;
+ adj_midchain_fixup_t fixup;
ip_adjacency_t *adj;
- nhrp_entry_t *ne;
+ teib_entry_t *ne;
ipip_tunnel_t *t;
+ adj_flags_t af;
u32 ti;
+ af = ADJ_FLAG_NONE;
adj = adj_get (ai);
ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
t = pool_elt_at_index (gm->tunnels, ti);
- ne = nhrp_entry_find (sw_if_index, &adj->sub_type.nbr.next_hop);
+ ne = teib_entry_find_46 (sw_if_index,
+ adj->ia_nh_proto, &adj->sub_type.nbr.next_hop);
if (NULL == ne)
{
- // no NHRP entry to provide the next-hop
+ // no TEIB entry to provide the next-hop
+ fixup = ipip_get_fixup (t, adj_get_link_type (ai), &af);
adj_nbr_midchain_update_rewrite
- (ai, ipip_get_fixup (t, adj_get_link_type (ai)),
- uword_to_pointer (t->flags, void *), ADJ_FLAG_NONE, NULL);
+ (ai, fixup, uword_to_pointer (t->flags, void *), ADJ_FLAG_NONE, NULL);
return;
}
}
+/* Build the tunnel-DB key for (tunnel, TEIB entry): the peer learned
+ * via TEIB is keyed as if it were a P2P tunnel so the data-plane
+ * lookup finds it. */
static void
-ipip_nhrp_mk_key (const ipip_tunnel_t * t,
- const nhrp_entry_t * ne, ipip_tunnel_key_t * key)
+ipip_teib_mk_key (const ipip_tunnel_t * t,
+ const teib_entry_t * ne, ipip_tunnel_key_t * key)
{
const fib_prefix_t *nh;
- nh = nhrp_entry_get_nh (ne);
+ nh = teib_entry_get_nh (ne);
/* construct the key using mode P2P so it can be found in the DP */
ipip_mk_key_i (t->transport, IPIP_MODE_P2P,
&t->tunnel_src, &nh->fp_addr,
- nhrp_entry_get_fib_index (ne), key);
+ teib_entry_get_fib_index (ne), key);
}
+/* TEIB callback: a next-hop entry was added for a P2MP tunnel. Add
+ * the corresponding P2P key to the tunnel DB, then walk the peer's
+ * adjacencies to complete their rewrites (mipip_mk_complete_walk).
+ * The peer is now an ip_address_t; the walk takes its 46-form. */
static void
-ipip_nhrp_entry_added (const nhrp_entry_t * ne)
+ipip_teib_entry_added (const teib_entry_t * ne)
{
ipip_main_t *gm = &ipip_main;
- const ip46_address_t *nh;
+ const ip_address_t *nh;
ipip_tunnel_key_t key;
ipip_tunnel_t *t;
u32 sw_if_index;
u32 t_idx;
- sw_if_index = nhrp_entry_get_sw_if_index (ne);
+ sw_if_index = teib_entry_get_sw_if_index (ne);
if (vec_len (gm->tunnel_index_by_sw_if_index) < sw_if_index)
return;
t = pool_elt_at_index (gm->tunnels, t_idx);
- ipip_nhrp_mk_key (t, ne, &key);
+ ipip_teib_mk_key (t, ne, &key);
ipip_tunnel_db_add (t, &key);
// update the rewrites for each of the adjacencies for this next-hop
.t = t,
.ne = ne
};
- nh = nhrp_entry_get_peer (ne);
- adj_nbr_walk_nh (nhrp_entry_get_sw_if_index (ne),
- (ip46_address_is_ip4 (nh) ?
+ nh = teib_entry_get_peer (ne);
+ adj_nbr_walk_nh (teib_entry_get_sw_if_index (ne),
+ (AF_IP4 == ip_addr_version (nh) ?
FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6), nh, mipip_mk_complete_walk, &ctx);
+ FIB_PROTOCOL_IP6),
+ &ip_addr_46 (nh), mipip_mk_complete_walk, &ctx);
}
+/* TEIB callback: a next-hop entry was removed. Drop the P2P key from
+ * the tunnel DB and walk the peer's adjacencies to make them
+ * incomplete again (mipip_mk_incomplete_walk). */
static void
-ipip_nhrp_entry_deleted (const nhrp_entry_t * ne)
+ipip_teib_entry_deleted (const teib_entry_t * ne)
{
ipip_main_t *gm = &ipip_main;
- const ip46_address_t *nh;
+ const ip_address_t *nh;
ipip_tunnel_key_t key;
ipip_tunnel_t *t;
u32 sw_if_index;
u32 t_idx;
- sw_if_index = nhrp_entry_get_sw_if_index (ne);
+ sw_if_index = teib_entry_get_sw_if_index (ne);
if (vec_len (gm->tunnel_index_by_sw_if_index) < sw_if_index)
return;
t = pool_elt_at_index (gm->tunnels, t_idx);
- ipip_nhrp_mk_key (t, ne, &key);
+ ipip_teib_mk_key (t, ne, &key);
ipip_tunnel_db_remove (t, &key);
- nh = nhrp_entry_get_peer (ne);
+ nh = teib_entry_get_peer (ne);
/* make all the adjacencies incomplete */
- adj_nbr_walk_nh (nhrp_entry_get_sw_if_index (ne),
- (ip46_address_is_ip4 (nh) ?
+ adj_nbr_walk_nh (teib_entry_get_sw_if_index (ne),
+ (AF_IP4 == ip_addr_version (nh) ?
FIB_PROTOCOL_IP4 :
- FIB_PROTOCOL_IP6), nh, mipip_mk_incomplete_walk, t);
+ FIB_PROTOCOL_IP6),
+ &ip_addr_46 (nh), mipip_mk_incomplete_walk, t);
}
+/* TEIB walk callback used at tunnel delete: remove each peer's P2P
+ * key from the tunnel DB. */
static walk_rc_t
-ipip_tunnel_delete_nhrp_walk (index_t nei, void *ctx)
+ipip_tunnel_delete_teib_walk (index_t nei, void *ctx)
{
ipip_tunnel_t *t = ctx;
ipip_tunnel_key_t key;
- ipip_nhrp_mk_key (t, nhrp_entry_get (nei), &key);
+ ipip_teib_mk_key (t, teib_entry_get (nei), &key);
ipip_tunnel_db_remove (t, &key);
return (WALK_CONTINUE);
}
+/* TEIB walk callback used at tunnel create: add each already-known
+ * peer's P2P key to the tunnel DB. */
static walk_rc_t
-ipip_tunnel_add_nhrp_walk (index_t nei, void *ctx)
+ipip_tunnel_add_teib_walk (index_t nei, void *ctx)
{
ipip_tunnel_t *t = ctx;
ipip_tunnel_key_t key;
- ipip_nhrp_mk_key (t, nhrp_entry_get (nei), &key);
+ ipip_teib_mk_key (t, teib_entry_get (nei), &key);
ipip_tunnel_db_add (t, &key);
return (WALK_CONTINUE);
{
ipip_main_t *gm = &ipip_main;
vnet_main_t *vnm = gm->vnet_main;
- ip4_main_t *im4 = &ip4_main;
- ip6_main_t *im6 = &ip6_main;
ipip_tunnel_t *t;
vnet_hw_interface_t *hi;
u32 hw_if_index, sw_if_index;
if (t->transport == IPIP_TRANSPORT_IP4)
{
- vec_validate (im4->fib_index_by_sw_if_index, sw_if_index);
hi->min_packet_bytes = 64 + sizeof (ip4_header_t);
}
else
{
- vec_validate (im6->fib_index_by_sw_if_index, sw_if_index);
hi->min_packet_bytes = 64 + sizeof (ip6_header_t);
}
ipip_tunnel_db_add (t, &key);
if (t->mode == IPIP_MODE_P2MP)
- nhrp_walk_itf (t->sw_if_index, ipip_tunnel_add_nhrp_walk, t);
+ teib_walk_itf (t->sw_if_index, ipip_tunnel_add_teib_walk, t);
if (sw_if_indexp)
*sw_if_indexp = sw_if_index;
if (t->transport == IPIP_TRANSPORT_IP6 && !gm->ip6_protocol_registered)
{
ip6_register_protocol (IP_PROTOCOL_IP_IN_IP, ipip6_input_node.index);
+ ip6_register_protocol (IP_PROTOCOL_MPLS_IN_IP, ipip6_input_node.index);
ip6_register_protocol (IP_PROTOCOL_IPV6, ipip6_input_node.index);
gm->ip6_protocol_registered = true;
}
else if (t->transport == IPIP_TRANSPORT_IP4 && !gm->ip4_protocol_registered)
{
ip4_register_protocol (IP_PROTOCOL_IP_IN_IP, ipip4_input_node.index);
+ ip4_register_protocol (IP_PROTOCOL_MPLS_IN_IP, ipip4_input_node.index);
ip4_register_protocol (IP_PROTOCOL_IPV6, ipip4_input_node.index);
gm->ip4_protocol_registered = true;
}
return VNET_API_ERROR_NO_SUCH_ENTRY;
if (t->mode == IPIP_MODE_P2MP)
- nhrp_walk_itf (t->sw_if_index, ipip_tunnel_delete_nhrp_walk, t);
+ teib_walk_itf (t->sw_if_index, ipip_tunnel_delete_teib_walk, t);
vnet_sw_interface_set_flags (vnm, sw_if_index, 0 /* down */ );
gm->tunnel_index_by_sw_if_index[sw_if_index] = ~0;
return 0;
}
+/* TEIB notification hooks: drive add/remove of P2P keys (and adj
+ * rewrites) for P2MP tunnels as peers come and go.
+ * NOTE(review): the members keep the legacy 'nv_' prefix - confirm
+ * they match teib_vft_t's field names. */
-const static nhrp_vft_t ipip_nhrp_vft = {
- .nv_added = ipip_nhrp_entry_added,
- .nv_deleted = ipip_nhrp_entry_deleted,
+const static teib_vft_t ipip_teib_vft = {
+ .nv_added = ipip_teib_entry_added,
+ .nv_deleted = ipip_teib_entry_deleted,
};
static clib_error_t *
gm->tunnel_by_key =
hash_create_mem (0, sizeof (ipip_tunnel_key_t), sizeof (uword));
- nhrp_register (&ipip_nhrp_vft);
+ teib_register (&ipip_teib_vft);
return 0;
}