vxlan-gbp: prefetch encap header memory area
diff --git a/src/vnet/vxlan-gbp/encap.c b/src/vnet/vxlan-gbp/encap.c
index f1b839c..9766d1c 100644
--- a/src/vnet/vxlan-gbp/encap.c
+++ b/src/vnet/vxlan-gbp/encap.c
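
The patch makes four related changes to the VXLAN-GBP encap node: it resolves all buffer indices up front with vlib_get_buffers() and walks a pointer cursor b[], it prefetches the cache line preceding b->data so the headroom that will receive the encap rewrite is already warm, it drops the clib_memcpy_fast() rewrite workaround in favour of vnet_rewrite_one_header()/vnet_rewrite_two_headers(), and it converts both nodes to the VLIB_NODE_FN multiarch form (the trace formatter guard and the gbp.src_epg -> gbp.sclass rename follow along). A minimal sketch of the prefetch idea, reusing the names from the patch itself:

    /* The encap headers are laid down in the headroom just below the
     * current data pointer and only then exposed with a negative
     * vlib_buffer_advance, so a prefetch that starts at b->data never
     * covers them.  Starting one cache line earlier does. */
    CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
                   2 * CLIB_CACHE_LINE_BYTES, LOAD);
    CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
                   2 * CLIB_CACHE_LINE_BYTES, LOAD);
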
@@ -53,6 +53,7 @@ typedef struct
   u8 flags;
 } vxlan_gbp_encap_trace_t;
 
+#ifndef CLIB_MARCH_VARIANT
 u8 *
 format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
 {
@@ -62,10 +63,12 @@ format_vxlan_gbp_encap_trace (u8 * s, va_list * args)
 
   s =
     format (s,
-           "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %d",
-           t->tunnel_index, t->vni, t->sclass, t->flags);
+           "VXLAN_GBP encap to vxlan_gbp_tunnel%d vni %d sclass %d flags %U",
+           t->tunnel_index, t->vni, t->sclass,
+           format_vxlan_gbp_header_gpflags, t->flags);
   return s;
 }
+#endif /* CLIB_MARCH_VARIANT */
 
 always_inline uword
 vxlan_gbp_encap_inline (vlib_main_t * vm,
@@ -84,9 +87,11 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
   u32 next0 = 0, next1 = 0;
   vxlan_gbp_tunnel_t *t0 = NULL, *t1 = NULL;
   index_t dpoi_idx0 = INDEX_INVALID, dpoi_idx1 = INDEX_INVALID;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
 
   from = vlib_frame_vector_args (from_frame);
   n_left_from = from_frame->n_vectors;
+  vlib_get_buffers (vm, from, bufs, n_left_from);
 
   next_index = node->cached_next_index;
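
With the bufs[]/b cursor introduced above, buffer-index-to-pointer translation happens once per frame instead of once per packet; the loops below then index b[0]/b[1] and step the cursor alongside the existing from/to_next bookkeeping. A condensed sketch of the pattern (the real loops also manage next_index and the enqueue helpers):

    vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
    vlib_get_buffers (vm, from, bufs, n_left_from);

    while (n_left_from >= 2)
      {
        /* work on b[0] and b[1] ... */
        b += 2;
        n_left_from -= 2;
      }
    while (n_left_from > 0)
      {
        /* work on b[0] ... */
        b += 1;
        n_left_from -= 1;
      }
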
 
@@ -95,7 +100,6 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
 
   u8 const underlay_hdr_len = is_ip4 ?
     sizeof (ip4_vxlan_gbp_header_t) : sizeof (ip6_vxlan_gbp_header_t);
-  u8 const rw_hdr_offset = sizeof t0->rewrite_data - underlay_hdr_len;
   u16 const l3_len = is_ip4 ? sizeof (ip4_header_t) : sizeof (ip6_header_t);
   u32 const csum_flags = is_ip4 ?
     VNET_BUFFER_F_OFFLOAD_IP_CKSUM | VNET_BUFFER_F_IS_IP4 |
@@ -119,8 +123,10 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
            vlib_prefetch_buffer_header (p2, LOAD);
            vlib_prefetch_buffer_header (p3, LOAD);
 
-           CLIB_PREFETCH (p2->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
-           CLIB_PREFETCH (p3->data, 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (b[2]->data - CLIB_CACHE_LINE_BYTES,
+                          2 * CLIB_CACHE_LINE_BYTES, LOAD);
+           CLIB_PREFETCH (b[3]->data - CLIB_CACHE_LINE_BYTES,
+                          2 * CLIB_CACHE_LINE_BYTES, LOAD);
          }
 
          u32 bi0 = to_next[0] = from[0];
@@ -130,15 +136,13 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
          n_left_to_next -= 2;
          n_left_from -= 2;
 
-         vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
-         vlib_buffer_t *b1 = vlib_get_buffer (vm, bi1);
-         u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
-         u32 flow_hash1 = vnet_l2_compute_flow_hash (b1);
+         u32 flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
+         u32 flow_hash1 = vnet_l2_compute_flow_hash (b[1]);
 
          /* Get next node index and adj index from tunnel next_dpo */
-         if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
+         if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
-             sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+             sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
@@ -148,9 +152,9 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
            }
 
          /* Get next node index and adj index from tunnel next_dpo */
-         if (sw_if_index1 != vnet_buffer (b1)->sw_if_index[VLIB_TX])
+         if (sw_if_index1 != vnet_buffer (b[1])->sw_if_index[VLIB_TX])
            {
-             if (sw_if_index0 == vnet_buffer (b1)->sw_if_index[VLIB_TX])
+             if (sw_if_index0 == vnet_buffer (b[1])->sw_if_index[VLIB_TX])
                {
                  sw_if_index1 = sw_if_index0;
                  t1 = t0;
@@ -159,7 +163,7 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
                }
              else
                {
-                 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_TX];
+                 sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_TX];
                  vnet_hw_interface_t *hi1 =
                    vnet_get_sup_hw_interface (vnm, sw_if_index1);
                  t1 = &vxm->tunnels[hi1->dev_instance];
@@ -169,32 +173,25 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
                }
            }
 
-         vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
-         vnet_buffer (b1)->ip.adj_index[VLIB_TX] = dpoi_idx1;
+         vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpoi_idx0;
+         vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpoi_idx1;
 
          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
          ASSERT (t1->rewrite_header.data_bytes == underlay_hdr_len);
+         vnet_rewrite_two_headers (*t0, *t1, vlib_buffer_get_current (b[0]),
+                                   vlib_buffer_get_current (b[1]),
+                                   underlay_hdr_len);
 
-         vlib_buffer_advance (b0, -underlay_hdr_len);
-         vlib_buffer_advance (b1, -underlay_hdr_len);
+         vlib_buffer_advance (b[0], -underlay_hdr_len);
+         vlib_buffer_advance (b[1], -underlay_hdr_len);
 
-         u32 len0 = vlib_buffer_length_in_chain (vm, b0);
-         u32 len1 = vlib_buffer_length_in_chain (vm, b1);
+         u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
+         u32 len1 = vlib_buffer_length_in_chain (vm, b[1]);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
          u16 payload_l1 = clib_host_to_net_u16 (len1 - l3_len);
 
-         void *underlay0 = vlib_buffer_get_current (b0);
-         void *underlay1 = vlib_buffer_get_current (b1);
-
-         /* vnet_rewrite_two_header writes only in (uword) 8 bytes chunks
-          * and discards the first 4 bytes of the (36 bytes ip4 underlay)  rewrite
-          * use memcpy as a workaround */
-         clib_memcpy_fast (underlay0,
-                           t0->rewrite_header.data + rw_hdr_offset,
-                           underlay_hdr_len);
-         clib_memcpy_fast (underlay1,
-                           t1->rewrite_header.data + rw_hdr_offset,
-                           underlay_hdr_len);
+         void *underlay0 = vlib_buffer_get_current (b[0]);
+         void *underlay1 = vlib_buffer_get_current (b[1]);
 
          ip4_header_t *ip4_0, *ip4_1;
          qos_bits_t ip4_0_tos = 0, ip4_1_tos = 0;
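
The clib_memcpy_fast() copies (and the rw_hdr_offset derived from sizeof t0->rewrite_data, removed earlier in the patch) are replaced by the stock vnet_rewrite_two_headers()/vnet_rewrite_one_header() helpers. Note the ordering: the helper runs on vlib_buffer_get_current() before the negative vlib_buffer_advance(), since the rewrite string is written into the bytes immediately preceding the destination pointer; only then is the buffer advanced so that underlay0/underlay1 point at the freshly written headers. Annotated sketch of the single-buffer case (same names as the patch, comments added here):

    ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
    /* fill the headroom just below the current data pointer */
    vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b[0]),
                             underlay_hdr_len);
    /* expose the new outer headers */
    vlib_buffer_advance (b[0], -underlay_hdr_len);
    void *underlay0 = vlib_buffer_get_current (b[0]);
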
@@ -213,14 +210,14 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
              ip4_0->length = clib_host_to_net_u16 (len0);
              ip4_1->length = clib_host_to_net_u16 (len1);
 
-             if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+             if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
-                 ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
+                 ip4_0_tos = vnet_buffer2 (b[0])->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
-             if (PREDICT_FALSE (b1->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+             if (PREDICT_FALSE (b[1]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
-                 ip4_1_tos = vnet_buffer2 (b1)->qos.bits;
+                 ip4_1_tos = vnet_buffer2 (b[1])->qos.bits;
                  ip4_1->tos = ip4_1_tos;
                }
 
@@ -257,21 +254,21 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
          udp1->src_port = flow_hash1;
 
          /* set source class and gpflags */
-         vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
-         vxlan_gbp1->gpflags = vnet_buffer2 (b1)->gbp.flags;
+         vxlan_gbp0->gpflags = vnet_buffer2 (b[0])->gbp.flags;
+         vxlan_gbp1->gpflags = vnet_buffer2 (b[1])->gbp.flags;
          vxlan_gbp0->sclass =
-           clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.src_epg);
+           clib_host_to_net_u16 (vnet_buffer2 (b[0])->gbp.sclass);
          vxlan_gbp1->sclass =
-           clib_host_to_net_u16 (vnet_buffer2 (b1)->gbp.src_epg);
+           clib_host_to_net_u16 (vnet_buffer2 (b[1])->gbp.sclass);
 
          if (csum_offload)
            {
-             b0->flags |= csum_flags;
-             vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
-             vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
-             b1->flags |= csum_flags;
-             vnet_buffer (b1)->l3_hdr_offset = l3_1 - b1->data;
-             vnet_buffer (b1)->l4_hdr_offset = (u8 *) udp1 - b1->data;
+             b[0]->flags |= csum_flags;
+             vnet_buffer (b[0])->l3_hdr_offset = l3_0 - b[0]->data;
+             vnet_buffer (b[0])->l4_hdr_offset = (u8 *) udp0 - b[0]->data;
+             b[1]->flags |= csum_flags;
+             vnet_buffer (b[1])->l3_hdr_offset = l3_1 - b[1]->data;
+             vnet_buffer (b[1])->l4_hdr_offset = (u8 *) udp1 - b[1]->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
@@ -301,12 +298,12 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
              int bogus = 0;
 
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
-               (vm, b0, ip6_0, &bogus);
+               (vm, b[0], ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
              udp1->checksum = ip6_tcp_udp_icmp_compute_checksum
-               (vm, b1, ip6_1, &bogus);
+               (vm, b[1], ip6_1, &bogus);
              ASSERT (bogus == 0);
              if (udp1->checksum == 0)
                udp1->checksum = 0xffff;
@@ -318,25 +315,26 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
                                           sw_if_index1, 1, len1);
          pkts_encapsulated += 2;
 
-         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+         if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
-               vlib_add_trace (vm, node, b0, sizeof (*tr));
+               vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
-             tr->sclass = vnet_buffer2 (b0)->gbp.src_epg;
-             tr->flags = vnet_buffer2 (b0)->gbp.flags;
+             tr->sclass = vnet_buffer2 (b[0])->gbp.sclass;
+             tr->flags = vnet_buffer2 (b[0])->gbp.flags;
            }
 
-         if (PREDICT_FALSE (b1->flags & VLIB_BUFFER_IS_TRACED))
+         if (PREDICT_FALSE (b[1]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
-               vlib_add_trace (vm, node, b1, sizeof (*tr));
+               vlib_add_trace (vm, node, b[1], sizeof (*tr));
              tr->tunnel_index = t1 - vxm->tunnels;
              tr->vni = t1->vni;
-             tr->sclass = vnet_buffer2 (b1)->gbp.src_epg;
-             tr->flags = vnet_buffer2 (b1)->gbp.flags;
+             tr->sclass = vnet_buffer2 (b[1])->gbp.sclass;
+             tr->flags = vnet_buffer2 (b[1])->gbp.flags;
            }
+         b += 2;
 
          vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
                                           to_next, n_left_to_next,
@@ -351,13 +349,12 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
          n_left_from -= 1;
          n_left_to_next -= 1;
 
-         vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
-         u32 flow_hash0 = vnet_l2_compute_flow_hash (b0);
+         u32 flow_hash0 = vnet_l2_compute_flow_hash (b[0]);
 
          /* Get next node index and adj index from tunnel next_dpo */
-         if (sw_if_index0 != vnet_buffer (b0)->sw_if_index[VLIB_TX])
+         if (sw_if_index0 != vnet_buffer (b[0])->sw_if_index[VLIB_TX])
            {
-             sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_TX];
+             sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_TX];
              vnet_hw_interface_t *hi0 =
                vnet_get_sup_hw_interface (vnm, sw_if_index0);
              t0 = &vxm->tunnels[hi0->dev_instance];
@@ -365,21 +362,16 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
              next0 = t0->next_dpo.dpoi_next_node;
              dpoi_idx0 = t0->next_dpo.dpoi_index;
            }
-         vnet_buffer (b0)->ip.adj_index[VLIB_TX] = dpoi_idx0;
+         vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpoi_idx0;
 
          ASSERT (t0->rewrite_header.data_bytes == underlay_hdr_len);
+         vnet_rewrite_one_header (*t0, vlib_buffer_get_current (b[0]),
+                                  underlay_hdr_len);
 
-         vlib_buffer_advance (b0, -underlay_hdr_len);
-         void *underlay0 = vlib_buffer_get_current (b0);
-
-         /* vnet_rewrite_one_header writes only in (uword) 8 bytes chunks
-          * and discards the first 4 bytes of the (36 bytes ip4 underlay)  rewrite
-          * use memcpy as a workaround */
-         clib_memcpy_fast (underlay0,
-                           t0->rewrite_header.data + rw_hdr_offset,
-                           underlay_hdr_len);
+         vlib_buffer_advance (b[0], -underlay_hdr_len);
+         void *underlay0 = vlib_buffer_get_current (b[0]);
 
-         u32 len0 = vlib_buffer_length_in_chain (vm, b0);
+         u32 len0 = vlib_buffer_length_in_chain (vm, b[0]);
          u16 payload_l0 = clib_host_to_net_u16 (len0 - l3_len);
 
          vxlan_gbp_header_t *vxlan_gbp0;
@@ -396,9 +388,9 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
              ip4_0 = &hdr->ip4;
              ip4_0->length = clib_host_to_net_u16 (len0);
 
-             if (PREDICT_FALSE (b0->flags & VNET_BUFFER_F_QOS_DATA_VALID))
+             if (PREDICT_FALSE (b[0]->flags & VNET_BUFFER_F_QOS_DATA_VALID))
                {
-                 ip4_0_tos = vnet_buffer2 (b0)->qos.bits;
+                 ip4_0_tos = vnet_buffer2 (b[0])->qos.bits;
                  ip4_0->tos = ip4_0_tos;
                }
 
@@ -424,15 +416,15 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
          udp0->src_port = flow_hash0;
 
          /* set source class and gpflags */
-         vxlan_gbp0->gpflags = vnet_buffer2 (b0)->gbp.flags;
+         vxlan_gbp0->gpflags = vnet_buffer2 (b[0])->gbp.flags;
          vxlan_gbp0->sclass =
-           clib_host_to_net_u16 (vnet_buffer2 (b0)->gbp.src_epg);
+           clib_host_to_net_u16 (vnet_buffer2 (b[0])->gbp.sclass);
 
          if (csum_offload)
            {
-             b0->flags |= csum_flags;
-             vnet_buffer (b0)->l3_hdr_offset = l3_0 - b0->data;
-             vnet_buffer (b0)->l4_hdr_offset = (u8 *) udp0 - b0->data;
+             b[0]->flags |= csum_flags;
+             vnet_buffer (b[0])->l3_hdr_offset = l3_0 - b[0]->data;
+             vnet_buffer (b[0])->l4_hdr_offset = (u8 *) udp0 - b[0]->data;
            }
          /* IPv4 UDP checksum only if checksum offload is used */
          else if (is_ip4)
@@ -453,7 +445,7 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
              int bogus = 0;
 
              udp0->checksum = ip6_tcp_udp_icmp_compute_checksum
-               (vm, b0, ip6_0, &bogus);
+               (vm, b[0], ip6_0, &bogus);
              ASSERT (bogus == 0);
              if (udp0->checksum == 0)
                udp0->checksum = 0xffff;
@@ -463,15 +455,17 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
                                           sw_if_index0, 1, len0);
          pkts_encapsulated++;
 
-         if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+         if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
              vxlan_gbp_encap_trace_t *tr =
-               vlib_add_trace (vm, node, b0, sizeof (*tr));
+               vlib_add_trace (vm, node, b[0], sizeof (*tr));
              tr->tunnel_index = t0 - vxm->tunnels;
              tr->vni = t0->vni;
-             tr->sclass = vnet_buffer2 (b0)->gbp.src_epg;
-             tr->flags = vnet_buffer2 (b0)->gbp.flags;
+             tr->sclass = vnet_buffer2 (b[0])->gbp.sclass;
+             tr->flags = vnet_buffer2 (b[0])->gbp.flags;
            }
+         b += 1;
+
          vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                           to_next, n_left_to_next,
                                           bi0, next0);
@@ -488,9 +482,9 @@ vxlan_gbp_encap_inline (vlib_main_t * vm,
   return from_frame->n_vectors;
 }
 
-static uword
-vxlan4_gbp_encap (vlib_main_t * vm,
-                 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+VLIB_NODE_FN (vxlan4_gbp_encap_node) (vlib_main_t * vm,
+                                     vlib_node_runtime_t * node,
+                                     vlib_frame_t * from_frame)
 {
   /* Disable chksum offload as setup overhead in tx node is not worthwhile
      for ip4 header checksum only, unless udp checksum is also required */
@@ -498,9 +492,9 @@ vxlan4_gbp_encap (vlib_main_t * vm,
                                 /* csum_offload */ 0);
 }
 
-static uword
-vxlan6_gbp_encap (vlib_main_t * vm,
-                 vlib_node_runtime_t * node, vlib_frame_t * from_frame)
+VLIB_NODE_FN (vxlan6_gbp_encap_node) (vlib_main_t * vm,
+                                     vlib_node_runtime_t * node,
+                                     vlib_frame_t * from_frame)
 {
   /* Enable checksum offload for ip6 as udp checksum is mandatory, */
   return vxlan_gbp_encap_inline (vm, node, from_frame, /* is_ip4 */ 0,
@@ -510,7 +504,6 @@ vxlan6_gbp_encap (vlib_main_t * vm,
 /* *INDENT-OFF* */
 VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
 {
-  .function = vxlan4_gbp_encap,
   .name = "vxlan4-gbp-encap",
   .vector_size = sizeof (u32),
   .format_trace = format_vxlan_gbp_encap_trace,
@@ -523,11 +516,8 @@ VLIB_REGISTER_NODE (vxlan4_gbp_encap_node) =
   },
 };
 
-VLIB_NODE_FUNCTION_MULTIARCH (vxlan4_gbp_encap_node, vxlan4_gbp_encap)
-
 VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
 {
-  .function = vxlan6_gbp_encap,
   .name = "vxlan6-gbp-encap",
   .vector_size = sizeof (u32),
   .format_trace = format_vxlan_gbp_encap_trace,
@@ -539,8 +529,6 @@ VLIB_REGISTER_NODE (vxlan6_gbp_encap_node) =
     [VXLAN_GBP_ENCAP_NEXT_DROP] = "error-drop",
   },
 };
-
-VLIB_NODE_FUNCTION_MULTIARCH (vxlan6_gbp_encap_node, vxlan6_gbp_encap)
 /* *INDENT-ON* */
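
The node functions move from static functions wired up via .function and VLIB_NODE_FUNCTION_MULTIARCH to VLIB_NODE_FN(), VPP's per-CPU-variant (multiarch) mechanism; helpers that must be emitted only once, such as format_vxlan_gbp_encap_trace, are guarded with #ifndef CLIB_MARCH_VARIANT. A generic sketch of that pattern, with a hypothetical node name (example_node) standing in for the real ones:

    #ifndef CLIB_MARCH_VARIANT
    /* compiled once, in the default variant only */
    u8 *
    format_example_trace (u8 *s, va_list *args)
    {
      return s;
    }
    #endif /* CLIB_MARCH_VARIANT */

    /* compiled once per CPU variant; the best variant is picked at runtime */
    VLIB_NODE_FN (example_node) (vlib_main_t *vm,
                                 vlib_node_runtime_t *node,
                                 vlib_frame_t *frame)
    {
      return frame->n_vectors;
    }

    VLIB_REGISTER_NODE (example_node) = {
      .name = "example",          /* no .function member any more */
      .vector_size = sizeof (u32),
    };
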
 
 /*