#include <vnet/pg/pg.h>
#include <vnet/vxlan-gpe/vxlan_gpe.h>
-vlib_node_registration_t vxlan_gpe_input_node;
-
/* NOTE(review): this span is a unified-diff hunk ('-' = removed, '+' = added);
   large parts of the enclosing vxlan_gpe_input() body are elided between the
   visible fragments — do not read it as contiguous code. */
/**
* @brief Struct for VXLAN GPE decap packet tracing
*
vxlan4_gpe_tunnel_key_t last_key4;
vxlan6_gpe_tunnel_key_t last_key6;
u32 pkts_decapsulated = 0;
/* Read the thread index cached in vlib_main_t instead of calling
   vlib_get_thread_index () — same value, cheaper access. */
- u32 thread_index = vlib_get_thread_index ();
+ u32 thread_index = vm->thread_index;
u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
/* Poison the cached "last tunnel key" (all-ones) so the first packet always
   takes the full lookup path; memset is replaced by the clib_ variant. */
if (is_ip4)
- memset (&last_key4, 0xff, sizeof (last_key4));
+ clib_memset (&last_key4, 0xff, sizeof (last_key4));
else
- memset (&last_key6, 0xff, sizeof (last_key6));
+ clib_memset (&last_key6, 0xff, sizeof (last_key6));
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
/* Credit the decap counter to the address-family-specific node registration,
   since the single vxlan_gpe_input_node registration is being removed. */
- vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
+
+ vlib_node_increment_counter (vm,
+ is_ip4 ? vxlan4_gpe_input_node.index :
+ vxlan6_gpe_input_node.index,
VXLAN_GPE_ERROR_DECAPSULATED,
pkts_decapsulated);
+
/* Increment any remaining batch stats */
if (stats_n_packets)
{
* @return from_frame->n_vectors
*
*/
/* Diff hunk: the old static dispatch wrapper is replaced by a VLIB_NODE_FN
   definition, which generates per-CPU-march variants and binds the function
   to the node registration automatically (no .function field needed). */
-static uword
-vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (vxlan4_gpe_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
}
-
+#ifndef CLIB_MARCH_VARIANT
/* Compiled only into the default (non-march-variant) object file so this
   control-plane helper is not duplicated per CPU architecture variant. */
void
vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
{
/* NOTE(review): the body here is elided by the diff; 'hm' is declared in the
   hidden portion, and the DROP assignment below looks like it belongs to the
   matching *unregister* path — confirm against the full file. */
hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
return;
}
-
+#endif /* CLIB_MARCH_VARIANT */
/**
* @brief Graph processing dispatch function for IPv6 VXLAN GPE
* @return from_frame->n_vectors - uword
*
*/
/* Diff hunk: same VLIB_NODE_FN conversion as the IPv4 node above — the macro
   emits multi-arch variants and wires the function into the registration. */
-static uword
-vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+VLIB_NODE_FN (vxlan6_gpe_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
{
return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
}
/* The .function member is dropped: VLIB_NODE_FN() now binds the dispatch
   function, making the VLIB_NODE_FUNCTION_MULTIARCH macro below obsolete. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
- .function = vxlan4_gpe_input,
.name = "vxlan4-gpe-input",
/* Takes a vector of packets. */
.vector_size = sizeof (u32),
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input);
-
/* Same cleanup as the IPv4 registration: function binding now comes from
   VLIB_NODE_FN(), so .function and the MULTIARCH macro are removed. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
- .function = vxlan6_gpe_input,
.name = "vxlan6-gpe-input",
/* Takes a vector of packets. */
.vector_size = sizeof (u32),
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input);
/* Next-node indices used by the ip-vxlan-gpe-bypass nodes.
   (The diff also fixes the "ip_vxan" -> "ip_vxlan" typo in the typedef.) */
typedef enum
{
IP_VXLAN_BYPASS_NEXT_DROP,
IP_VXLAN_BYPASS_NEXT_VXLAN,
IP_VXLAN_BYPASS_N_NEXT,
-} ip_vxan_bypass_next_t;
+} ip_vxlan_bypass_next_t;
/* NOTE(review): heavily elided diff hunk of ip_vxlan_gpe_bypass_inline() —
   declarations of frame, node, ngm, bi0/bi1, ip40/ip41/ip60/ip61, next0/next1,
   flags0/flags1 and the dual/single loop scaffolding live in hidden portions.
   Summary of the visible changes:
   - cached "last VTEP" scalars (ip4/ip6 address) become vtep4_key_t /
     vtep6_key_t, which also carry the fib index;
   - per-packet vlib_get_buffer() calls are replaced by one up-front
     vlib_get_buffers() batch translation into bufs[]/b;
   - hand-rolled hash_get/hash_get_mem VTEP lookups are replaced by the
     vtep4_check()/vtep6_check() helpers (AVX-512 variant vtep4_check_vector()
     with a vtep4_cache_t when CLIB_HAVE_VEC512 is available);
   - vnet_feature_next() switches to the newer 2-argument API. */
always_inline uword
ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
vlib_node_runtime_t *error_node =
vlib_node_get_runtime (vm, ip4_input_node.index);
- ip4_address_t addr4; /* last IPv4 address matching a local VTEP address */
- ip6_address_t addr6; /* last IPv6 address matching a local VTEP address */
+ vtep4_key_t last_vtep4; /* last IPv4 address / fib index
+ matching a local VTEP address */
+ vtep6_key_t last_vtep6; /* last IPv6 address / fib index
matching a local VTEP address */
+ vtep6_key_t last_vtep6; /* last IPv6 address / fib index
matching a local VTEP address */
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+#ifdef CLIB_HAVE_VEC512
+ vtep4_cache_t vtep4_u512;
+ clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
+#endif
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
/* Translate all buffer indices to pointers once, up front. */
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+
if (node->flags & VLIB_NODE_FLAG_TRACE)
ip4_forward_next_trace (vm, node, frame, VLIB_TX);
/* Initialize the cached key to "no match" so the first packet does a
   full VTEP lookup. */
if (is_ip4)
- addr4.data_u32 = ~0;
+ vtep4_key_init (&last_vtep4);
else
- ip6_address_set_zero (&addr6);
+ vtep6_key_init (&last_vtep6);
while (n_left_from > 0)
{
/* Prefetch next iteration. */
{
- vlib_buffer_t *p2, *p3;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ vlib_prefetch_buffer_header (b[3], LOAD);
- p2 = vlib_get_buffer (vm, from[2]);
- p3 = vlib_get_buffer (vm, from[3]);
-
- vlib_prefetch_buffer_header (p2, LOAD);
- vlib_prefetch_buffer_header (p3, LOAD);
-
- CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
}
bi0 = to_next[0] = from[0];
to_next += 2;
n_left_to_next -= 2;
/* Buffers come from the pre-translated array instead of per-packet
   vlib_get_buffer() calls. */
- b0 = vlib_get_buffer (vm, bi0);
- b1 = vlib_get_buffer (vm, bi1);
+ b0 = b[0];
+ b1 = b[1];
+ b += 2;
if (is_ip4)
{
ip40 = vlib_buffer_get_current (b0);
}
/* Setup packet for next IP feature */
- vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
- b0);
- vnet_feature_next (vnet_buffer (b1)->sw_if_index[VLIB_RX], &next1,
- b1);
+ vnet_feature_next (&next0, b0);
+ vnet_feature_next (&next1, b1);
if (is_ip4)
{
/* Validate DIP against VTEPs */
if (is_ip4)
{
- if (addr4.as_u32 != ip40->dst_address.as_u32)
- {
- if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
- goto exit0; /* no local VTEP for VXLAN packet */
- addr4 = ip40->dst_address;
- }
+#ifdef CLIB_HAVE_VEC512
+ if (!vtep4_check_vector
+ (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+#else
+ if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
+#endif
+ goto exit0; /* no local VTEP for VXLAN packet */
}
else
{
- if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
- {
- if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
- goto exit0; /* no local VTEP for VXLAN packet */
- addr6 = ip60->dst_address;
- }
+ if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
+ goto exit0; /* no local VTEP for VXLAN packet */
}
flags0 = b0->flags;
/* Validate DIP against VTEPs */
if (is_ip4)
{
- if (addr4.as_u32 != ip41->dst_address.as_u32)
- {
- if (!hash_get (ngm->vtep4, ip41->dst_address.as_u32))
- goto exit1; /* no local VTEP for VXLAN packet */
- addr4 = ip41->dst_address;
- }
+#ifdef CLIB_HAVE_VEC512
+ if (!vtep4_check_vector
+ (&ngm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+#else
+ if (!vtep4_check (&ngm->vtep_table, b1, ip41, &last_vtep4))
+#endif
+ goto exit1; /* no local VTEP for VXLAN packet */
}
else
{
- if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
- {
- if (!hash_get_mem (ngm->vtep6, &ip61->dst_address))
- goto exit1; /* no local VTEP for VXLAN packet */
- addr6 = ip61->dst_address;
- }
+ if (!vtep6_check (&ngm->vtep_table, b1, ip61, &last_vtep6))
+ goto exit1; /* no local VTEP for VXLAN packet */
}
flags1 = b1->flags;
to_next += 1;
n_left_to_next -= 1;
/* Single-packet tail loop: same buffer-array walk as above. */
- b0 = vlib_get_buffer (vm, bi0);
+ b0 = b[0];
+ b++;
if (is_ip4)
ip40 = vlib_buffer_get_current (b0);
else
ip60 = vlib_buffer_get_current (b0);
/* Setup packet for next IP feature */
- vnet_feature_next (vnet_buffer (b0)->sw_if_index[VLIB_RX], &next0,
- b0);
+ vnet_feature_next (&next0, b0);
if (is_ip4)
proto0 = ip40->protocol;
goto exit; /* not VXLAN packet */
/* Validate DIP against VTEPs */
+
if (is_ip4)
{
- if (addr4.as_u32 != ip40->dst_address.as_u32)
- {
- if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
- goto exit; /* no local VTEP for VXLAN packet */
- addr4 = ip40->dst_address;
- }
+#ifdef CLIB_HAVE_VEC512
+ if (!vtep4_check_vector
+ (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+#else
+ if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
+#endif
+ goto exit; /* no local VTEP for VXLAN packet */
}
else
{
- if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
- {
- if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
- goto exit; /* no local VTEP for VXLAN packet */
- addr6 = ip60->dst_address;
- }
+ if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
+ goto exit; /* no local VTEP for VXLAN packet */
}
flags0 = b0->flags;
return frame->n_vectors;
}
/* Diff hunk: VLIB_NODE_FN replaces the static dispatch wrapper; multi-arch
   variants are generated and bound to the registration automatically. */
-static uword
-ip4_vxlan_gpe_bypass (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_vxlan_gpe_bypass_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
}
/* .function binding now comes from VLIB_NODE_FN(); the MULTIARCH macro is
   removed as obsolete. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
- .function = ip4_vxlan_gpe_bypass,
.name = "ip4-vxlan-gpe-bypass",
.vector_size = sizeof (u32),
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gpe_bypass_node, ip4_vxlan_gpe_bypass)
/* Guarded so the init function is emitted only once, in the default
   (non-march-variant) object file; the declaration is also reformatted to
   the project's return-type-on-its-own-line style. */
+#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
- clib_error_t *ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
+clib_error_t *
+ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
return 0;
}
VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
+#endif /* CLIB_MARCH_VARIANT */
/* Diff hunk: same VLIB_NODE_FN conversion as the IPv4 bypass node. */
-static uword
-ip6_vxlan_gpe_bypass (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip6_vxlan_gpe_bypass_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * frame)
{
return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
}
/* .function binding now comes from VLIB_NODE_FN(); the MULTIARCH macro is
   removed as obsolete. */
/* *INDENT-OFF* */
VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
- .function = ip6_vxlan_gpe_bypass,
.name = "ip6-vxlan-gpe-bypass",
.vector_size = sizeof (u32),
};
/* *INDENT-ON* */
-VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gpe_bypass_node, ip6_vxlan_gpe_bypass)
/* Guarded so the init function is emitted only once, in the default
   (non-march-variant) object file; declaration reformatted to match the
   project style. */
+#ifndef CLIB_MARCH_VARIANT
/* Dummy init function to get us linked in. */
- clib_error_t *ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
+clib_error_t *
+ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
{
return 0;
}
VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
+#endif /* CLIB_MARCH_VARIANT */
/*
* fd.io coding-style-patch-verification: ON