* at the midchain node */
ip0->ip6.payload_length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
- sizeof (*ip0));
+ sizeof (ip0->ip6));
tunnel_encap_fixup_4o6 (flags, (ip4_header_t *) (ip0 + 1), &ip0->ip6);
}
* at the midchain node */
ip0->ip6.payload_length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
- sizeof (*ip0));
+ sizeof (ip0->ip6));
tunnel_encap_fixup_6o6 (flags, (ip6_header_t *) (ip0 + 1), &ip0->ip6);
}
grex6_fixup (vlib_main_t * vm,
const ip_adjacency_t * adj, vlib_buffer_t * b0, const void *data)
{
- ip6_header_t *ip0;
+ ip6_and_gre_header_t *ip0;
+ /* NOTE(review): ip0 is widened to the combined ip6+gre encap type;
+ * subtracting only sizeof (ip0->ip6) below keeps the GRE header bytes
+ * counted inside the ip6 payload length. */
ip0 = vlib_buffer_get_current (b0);
/* Fixup the payload length field in the GRE tunnel encap that was applied
* at the midchain node */
- ip0->payload_length =
+ ip0->ip6.payload_length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b0) -
- sizeof (*ip0));
+ sizeof (ip0->ip6));
}
/**
gre_build_rewrite (vnet_get_main (),
ctx->t->sw_if_index,
adj_get_link_type (ai),
- &nhrp_entry_get_nh (ctx->ne)->fp_addr));
+ &teib_entry_get_nh (ctx->ne)->fp_addr));
- nhrp_entry_adj_stack (ctx->ne, ai);
+ teib_entry_adj_stack (ctx->ne, ai);
return (ADJ_WALK_RC_CONTINUE);
}
{
gre_main_t *gm = &gre_main;
ip_adjacency_t *adj;
- nhrp_entry_t *ne;
+ teib_entry_t *ne;
gre_tunnel_t *t;
u32 ti;
ti = gm->tunnel_index_by_sw_if_index[sw_if_index];
t = pool_elt_at_index (gm->tunnels, ti);
- ne = nhrp_entry_find (sw_if_index, &adj->sub_type.nbr.next_hop);
+ ne = teib_entry_find_46 (sw_if_index,
+ adj->ia_nh_proto, &adj->sub_type.nbr.next_hop);
if (NULL == ne)
// no TEIB entry to provide the next-hop
* @brief TX function. Only called for L2 payload including TEB or ERSPAN.
* L3 traffic uses the adj-midchains.
*/
-VLIB_NODE_FN (gre_encap_node) (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * frame)
+static_always_inline u32
+gre_encap_inline (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame, gre_tunnel_type_t type)
{
gre_main_t *gm = &gre_main;
u32 *from, n_left_from;
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = adj_index[0];
vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = adj_index[1];
- if (PREDICT_FALSE (gt[0]->type == GRE_TUNNEL_TYPE_ERSPAN))
+ if (type == GRE_TUNNEL_TYPE_ERSPAN)
{
/* Encap GRE seq# and ERSPAN type II header */
erspan_t2_t *h0;
h0->t2_u64 = hdr;
h0->t2.cos_en_t_session |= clib_host_to_net_u16 (gt[0]->session_id);
}
- if (PREDICT_FALSE (gt[1]->type == GRE_TUNNEL_TYPE_ERSPAN))
+ if (type == GRE_TUNNEL_TYPE_ERSPAN)
{
/* Encap GRE seq# and ERSPAN type II header */
erspan_t2_t *h0;
vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = adj_index[0];
- if (PREDICT_FALSE (gt[0]->type == GRE_TUNNEL_TYPE_ERSPAN))
+ if (type == GRE_TUNNEL_TYPE_ERSPAN)
{
/* Encap GRE seq# and ERSPAN type II header */
erspan_t2_t *h0;
u32 seq_num;
u64 hdr;
+ ASSERT (gt[0]->type == GRE_TUNNEL_TYPE_ERSPAN);
vlib_buffer_advance (b[0], -sizeof (erspan_t2_t));
h0 = vlib_buffer_get_current (b[0]);
seq_num = clib_atomic_fetch_add (&gt[0]->gre_sn->seq_num, 1);
#undef gre_error
};
+/* TEB encap entry point: runs the shared encap path with the tunnel
+ * type fixed at compile time, so the ERSPAN header branch in
+ * gre_encap_inline is statically dead for this node. */
+VLIB_NODE_FN (gre_teb_encap_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gre_encap_inline (vm, node, frame, GRE_TUNNEL_TYPE_TEB));
+}
+
+/* ERSPAN encap entry point: shared path with the type fixed to ERSPAN,
+ * statically enabling the GRE seq# / ERSPAN type II header encap
+ * branches in gre_encap_inline. */
+VLIB_NODE_FN (gre_erspan_encap_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
+{
+ return (gre_encap_inline (vm, node, frame, GRE_TUNNEL_TYPE_ERSPAN));
+}
+
/* *INDENT-OFF* */
-VLIB_REGISTER_NODE (gre_encap_node) =
+/* The single gre-encap node is replaced by per-tunnel-type nodes;
+ * this registers the TEB variant. */
+VLIB_REGISTER_NODE (gre_teb_encap_node) =
+{
+ .name = "gre-teb-encap",
+ .vector_size = sizeof (u32),
+ .format_trace = format_gre_tx_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = GRE_N_ERROR,
+ .error_strings = gre_error_strings,
+ .n_next_nodes = GRE_ENCAP_N_NEXT,
+ .next_nodes = {
+ [GRE_ENCAP_NEXT_L2_MIDCHAIN] = "adj-l2-midchain",
+ },
+};
+VLIB_REGISTER_NODE (gre_erspan_encap_node) =
{
- .name = "gre-encap",
+ .name = "gre-erspan-encap",
.vector_size = sizeof (u32),
.format_trace = format_gre_tx_trace,
.type = VLIB_NODE_TYPE_INTERNAL,