X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=src%2Fvnet%2Fvxlan-gpe%2Fdecap.c;h=aea793b82b646c7e25cb958beff9bdfccf7fae33;hb=b040f98a88954ccf2e63fd7b2676865a2dbe0078;hp=9c429294d671b5dac042e3fbf585f353d9de9df5;hpb=b7b929931a07fbb27b43d5cd105f366c3e29807e;p=vpp.git

diff --git a/src/vnet/vxlan-gpe/decap.c b/src/vnet/vxlan-gpe/decap.c
index 9c429294d67..aea793b82b6 100644
--- a/src/vnet/vxlan-gpe/decap.c
+++ b/src/vnet/vxlan-gpe/decap.c
@@ -21,11 +21,9 @@
  */
 #include
-#include
+#include
 #include
 
-vlib_node_registration_t vxlan_gpe_input_node;
-
 /**
  * @brief Struct for VXLAN GPE decap packet tracing
  *
@@ -647,9 +645,13 @@ vxlan_gpe_input (vlib_main_t * vm,
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);
     }
-  vlib_node_increment_counter (vm, vxlan_gpe_input_node.index,
+
+  vlib_node_increment_counter (vm,
+                               is_ip4 ? vxlan4_gpe_input_node.index :
+                               vxlan6_gpe_input_node.index,
                                VXLAN_GPE_ERROR_DECAPSULATED,
                                pkts_decapsulated);
+
   /* Increment any remaining batch stats */
   if (stats_n_packets)
     {
@@ -673,14 +675,14 @@
  * @return from_frame->n_vectors
  *
  */
-static uword
-vxlan4_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
-                  vlib_frame_t * from_frame)
+VLIB_NODE_FN (vxlan4_gpe_input_node) (vlib_main_t * vm,
+                                      vlib_node_runtime_t * node,
+                                      vlib_frame_t * from_frame)
 {
   return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 1);
 }
 
-
+#ifndef CLIB_MARCH_VARIANT
 void
 vxlan_gpe_register_decap_protocol (u8 protocol_id, uword next_node_index)
 {
@@ -696,7 +698,7 @@ vxlan_gpe_unregister_decap_protocol (u8 protocol_id, uword next_node_index)
   hm->decap_next_node_list[protocol_id] = VXLAN_GPE_INPUT_NEXT_DROP;
   return;
 }
-
+#endif /* CLIB_MARCH_VARIANT */
 
 /**
  * @brief Graph processing dispatch function for IPv6 VXLAN GPE
@@ -709,9 +711,9 @@
  * @return from_frame->n_vectors - uword
  *
  */
-static uword
-vxlan6_gpe_input (vlib_main_t * vm, vlib_node_runtime_t * node,
-                  vlib_frame_t * from_frame)
+VLIB_NODE_FN (vxlan6_gpe_input_node) (vlib_main_t * vm,
+                                      vlib_node_runtime_t * node,
+                                      vlib_frame_t * from_frame)
 {
   return vxlan_gpe_input (vm, node, from_frame, /* is_ip4 */ 0);
 }
@@ -728,7 +730,6 @@ static char *vxlan_gpe_error_strings[] = {
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
-  .function = vxlan4_gpe_input,
   .name = "vxlan4-gpe-input",
   /* Takes a vector of packets. */
   .vector_size = sizeof (u32),
@@ -749,11 +750,8 @@ VLIB_REGISTER_NODE (vxlan4_gpe_input_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gpe_input_node, vxlan4_gpe_input);
-
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
-  .function = vxlan6_gpe_input,
   .name = "vxlan6-gpe-input",
   /* Takes a vector of packets. */
   .vector_size = sizeof (u32),
@@ -774,13 +772,12 @@ VLIB_REGISTER_NODE (vxlan6_gpe_input_node) = {
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gpe_input_node, vxlan6_gpe_input);
 typedef enum
 {
   IP_VXLAN_BYPASS_NEXT_DROP,
   IP_VXLAN_BYPASS_NEXT_VXLAN,
   IP_VXLAN_BYPASS_N_NEXT,
-} ip_vxan_bypass_next_t;
+} ip_vxlan_bypass_next_t;
 
 always_inline uword
 ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
@@ -791,20 +788,29 @@ ip_vxlan_gpe_bypass_inline (vlib_main_t * vm,
   u32 *from, *to_next, n_left_from, n_left_to_next, next_index;
   vlib_node_runtime_t *error_node =
     vlib_node_get_runtime (vm, ip4_input_node.index);
-  ip4_address_t addr4;    /* last IPv4 address matching a local VTEP address */
-  ip6_address_t addr6;    /* last IPv6 address matching a local VTEP address */
+  vtep4_key_t last_vtep4; /* last IPv4 address / fib index
+                             matching a local VTEP address */
+  vtep6_key_t last_vtep6; /* last IPv6 address / fib index
+                             matching a local VTEP address */
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+#ifdef CLIB_HAVE_VEC512
+  vtep4_cache_t vtep4_u512;
+  clib_memset (&vtep4_u512, 0, sizeof (vtep4_u512));
+#endif
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
   next_index = node->cached_next_index;
 
+  vlib_get_buffers (vm, from, bufs, n_left_from);
+
   if (node->flags & VLIB_NODE_FLAG_TRACE)
     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
 
   if (is_ip4)
-    addr4.data_u32 = ~0;
+    vtep4_key_init (&last_vtep4);
   else
-    ip6_address_set_zero (&addr6);
+    vtep6_key_init (&last_vtep6);
 
   while (n_left_from > 0)
     {
@@ -824,16 +830,11 @@
 
           /* Prefetch next iteration. */
           {
-            vlib_buffer_t *p2, *p3;
+            vlib_prefetch_buffer_header (b[2], LOAD);
+            vlib_prefetch_buffer_header (b[3], LOAD);
 
-            p2 = vlib_get_buffer (vm, from[2]);
-            p3 = vlib_get_buffer (vm, from[3]);
-
-            vlib_prefetch_buffer_header (p2, LOAD);
-            vlib_prefetch_buffer_header (p3, LOAD);
-
-            CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
-            CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+            CLIB_PREFETCH (b[2]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+            CLIB_PREFETCH (b[3]->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
           }
 
           bi0 = to_next[0] = from[0];
@@ -843,8 +844,9 @@
           to_next += 2;
           n_left_to_next -= 2;
 
-          b0 = vlib_get_buffer (vm, bi0);
-          b1 = vlib_get_buffer (vm, bi1);
+          b0 = b[0];
+          b1 = b[1];
+          b += 2;
           if (is_ip4)
             {
               ip40 = vlib_buffer_get_current (b0);
@@ -886,21 +888,18 @@
           /* Validate DIP against VTEPs */
           if (is_ip4)
             {
-              if (addr4.as_u32 != ip40->dst_address.as_u32)
-                {
-                  if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
-                    goto exit0; /* no local VTEP for VXLAN packet */
-                  addr4 = ip40->dst_address;
-                }
+#ifdef CLIB_HAVE_VEC512
+              if (!vtep4_check_vector
+                  (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+#else
+              if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
+#endif
+                goto exit0;     /* no local VTEP for VXLAN packet */
             }
           else
             {
-              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
-                {
-                  if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
-                    goto exit0; /* no local VTEP for VXLAN packet */
-                  addr6 = ip60->dst_address;
-                }
+              if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
+                goto exit0;     /* no local VTEP for VXLAN packet */
             }
 
           flags0 = b0->flags;
@@ -972,21 +971,18 @@
           /* Validate DIP against VTEPs */
           if (is_ip4)
             {
-              if (addr4.as_u32 != ip41->dst_address.as_u32)
-                {
-                  if (!hash_get (ngm->vtep4, ip41->dst_address.as_u32))
-                    goto exit1; /* no local VTEP for VXLAN packet */
-                  addr4 = ip41->dst_address;
-                }
+#ifdef CLIB_HAVE_VEC512
+              if (!vtep4_check_vector
+                  (&ngm->vtep_table, b1, ip41, &last_vtep4, &vtep4_u512))
+#else
+              if (!vtep4_check (&ngm->vtep_table, b1, ip41, &last_vtep4))
+#endif
+                goto exit1;     /* no local VTEP for VXLAN packet */
             }
           else
             {
-              if (!ip6_address_is_equal (&addr6, &ip61->dst_address))
-                {
-                  if (!hash_get_mem (ngm->vtep6, &ip61->dst_address))
-                    goto exit1; /* no local VTEP for VXLAN packet */
-                  addr6 = ip61->dst_address;
-                }
+              if (!vtep6_check (&ngm->vtep_table, b1, ip61, &last_vtep6))
+                goto exit1;     /* no local VTEP for VXLAN packet */
             }
 
           flags1 = b1->flags;
@@ -1064,7 +1060,8 @@
           to_next += 1;
           n_left_to_next -= 1;
 
-          b0 = vlib_get_buffer (vm, bi0);
+          b0 = b[0];
+          b++;
           if (is_ip4)
             ip40 = vlib_buffer_get_current (b0);
           else
@@ -1090,23 +1087,21 @@
             goto exit;          /* not VXLAN packet */
 
           /* Validate DIP against VTEPs */
+
           if (is_ip4)
             {
-              if (addr4.as_u32 != ip40->dst_address.as_u32)
-                {
-                  if (!hash_get (ngm->vtep4, ip40->dst_address.as_u32))
-                    goto exit;  /* no local VTEP for VXLAN packet */
-                  addr4 = ip40->dst_address;
-                }
+#ifdef CLIB_HAVE_VEC512
+              if (!vtep4_check_vector
+                  (&ngm->vtep_table, b0, ip40, &last_vtep4, &vtep4_u512))
+#else
+              if (!vtep4_check (&ngm->vtep_table, b0, ip40, &last_vtep4))
+#endif
+                goto exit;      /* no local VTEP for VXLAN packet */
             }
           else
            {
-              if (!ip6_address_is_equal (&addr6, &ip60->dst_address))
-                {
-                  if (!hash_get_mem (ngm->vtep6, &ip60->dst_address))
-                    goto exit;  /* no local VTEP for VXLAN packet */
-                  addr6 = ip60->dst_address;
-                }
+              if (!vtep6_check (&ngm->vtep_table, b0, ip60, &last_vtep6))
+                goto exit;      /* no local VTEP for VXLAN packet */
             }
 
           flags0 = b0->flags;
@@ -1174,16 +1169,15 @@
   return frame->n_vectors;
 }
 
-static uword
-ip4_vxlan_gpe_bypass (vlib_main_t * vm,
-                      vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip4_vxlan_gpe_bypass_node) (vlib_main_t * vm,
+                                          vlib_node_runtime_t * node,
+                                          vlib_frame_t * frame)
 {
   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 1);
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip4_vxlan_gpe_bypass_node) = {
-  .function = ip4_vxlan_gpe_bypass,
   .name = "ip4-vxlan-gpe-bypass",
   .vector_size = sizeof (u32),
@@ -1198,25 +1192,26 @@
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (ip4_vxlan_gpe_bypass_node, ip4_vxlan_gpe_bypass)
+#ifndef CLIB_MARCH_VARIANT
 /* Dummy init function to get us linked in. */
-  clib_error_t *ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
+clib_error_t *
+ip4_vxlan_gpe_bypass_init (vlib_main_t * vm)
 {
   return 0;
 }
 
 VLIB_INIT_FUNCTION (ip4_vxlan_gpe_bypass_init);
+#endif /* CLIB_MARCH_VARIANT */
 
-static uword
-ip6_vxlan_gpe_bypass (vlib_main_t * vm,
-                      vlib_node_runtime_t * node, vlib_frame_t * frame)
+VLIB_NODE_FN (ip6_vxlan_gpe_bypass_node) (vlib_main_t * vm,
+                                          vlib_node_runtime_t * node,
+                                          vlib_frame_t * frame)
 {
   return ip_vxlan_gpe_bypass_inline (vm, node, frame, /* is_ip4 */ 0);
 }
 
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip6_vxlan_gpe_bypass_node) = {
-  .function = ip6_vxlan_gpe_bypass,
   .name = "ip6-vxlan-gpe-bypass",
   .vector_size = sizeof (u32),
@@ -1231,14 +1226,16 @@
 };
 /* *INDENT-ON* */
 
-VLIB_NODE_FUNCTION_MULTIARCH (ip6_vxlan_gpe_bypass_node, ip6_vxlan_gpe_bypass)
+#ifndef CLIB_MARCH_VARIANT
 /* Dummy init function to get us linked in. */
-  clib_error_t *ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
+clib_error_t *
+ip6_vxlan_gpe_bypass_init (vlib_main_t * vm)
 {
   return 0;
 }
 
 VLIB_INIT_FUNCTION (ip6_vxlan_gpe_bypass_init);
+#endif /* CLIB_MARCH_VARIANT */
 
 /*
  * fd.io coding-style-patch-verification: ON
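
Note on the change as a whole: the patch moves the vxlan-gpe decap and bypass nodes to
VPP's per-CPU-variant ("multiarch") dispatch. Each node function is wrapped in
VLIB_NODE_FN (), the explicit .function members and the VLIB_NODE_FUNCTION_MULTIARCH
registrations go away, and code that must be built exactly once (the decap-protocol
register/unregister helpers and the dummy init functions) is fenced with
#ifndef CLIB_MARCH_VARIANT. The decap counter is now attributed to the per-address-family
node (vxlan4/vxlan6) instead of a single shared registration. In the bypass path, the
cached ip4_address_t/ip6_address_t VTEP match is replaced by vtep4_check ()/vtep6_check ()
lookups against ngm->vtep_table (with a vtep4_check_vector () variant when CLIB_HAVE_VEC512
is available), and buffer pointers are fetched up front with vlib_get_buffers (). The
fragment below is only a rough, self-contained sketch of that node pattern, pieced together
from the hunks above; the "sample-input" node and every identifier not present in the diff
are made-up placeholders, not code from decap.c.

/* Hypothetical node following the pattern applied in this diff.  All names
 * other than the vlib macros themselves are placeholders. */
#include <vlib/vlib.h>

/* VLIB_NODE_FN () emits one function body per supported CPU variant and the
 * best one is selected at runtime; the registration below no longer needs a
 * .function member. */
VLIB_NODE_FN (sample_input_node) (vlib_main_t * vm,
                                  vlib_node_runtime_t * node,
                                  vlib_frame_t * frame)
{
  /* A real node would inspect and enqueue the buffers here; this sketch
   * simply reports the whole frame as processed. */
  return frame->n_vectors;
}

VLIB_REGISTER_NODE (sample_input_node) = {
  .name = "sample-input",
  .vector_size = sizeof (u32),
  .n_next_nodes = 0,
};

#ifndef CLIB_MARCH_VARIANT
/* Anything that must exist exactly once (init functions, helpers such as
 * vxlan_gpe_register_decap_protocol) stays outside the per-variant objects,
 * guarded like this. */
clib_error_t *
sample_input_init (vlib_main_t * vm)
{
  return 0;
}

VLIB_INIT_FUNCTION (sample_input_init);
#endif /* CLIB_MARCH_VARIANT */

With this pattern the per-variant compilation and the runtime selection of the best variant
are handled entirely by the vlib macros; nothing else in the file needs to know which
instruction set was chosen.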