vnet: device flow offload infra
[vpp.git] / src / plugins / dpdk / device / node.c
index 9b59830..7ba4dad 100644
@@ -255,7 +255,7 @@ dpdk_mbufs_to_buffer_indices (vlib_main_t * vm, struct rte_mbuf **mb,
       bi[2] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[2]));
       bi[3] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[3]));
       bi[4] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[4]));
-      bi[5] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[4]));
+      bi[5] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[5]));
       bi[6] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[6]));
       bi[7] = vlib_get_buffer_index (vm, vlib_buffer_from_rte_mbuf (mb[7]));
 #endif
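
The conversion above is cheap because each vlib_buffer_t is assumed to sit directly behind its rte_mbuf inside the same mempool element, so vlib_buffer_from_rte_mbuf () amounts to plain pointer arithmetic. A minimal sketch of that assumed helper (name hypothetical, layout an assumption):

    /* sketch only - assumes the vlib_buffer_t metadata is stored
       immediately after the rte_mbuf header in the same element */
    static_always_inline vlib_buffer_t *
    buffer_from_mbuf_sketch (struct rte_mbuf * mb)
    {
      return (vlib_buffer_t *) (mb + 1);
    }
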
@@ -333,19 +333,19 @@ dpdk_process_rx_burst (vlib_main_t * vm, dpdk_per_thread_data_t * ptd,
       vnet_buffer (b[0])->l2_hdr_offset = off;
       b[0]->current_data = off;
 
-      off = mb[0]->data_off;
+      off = mb[1]->data_off;
       next[1] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
       off -= RTE_PKTMBUF_HEADROOM;
       vnet_buffer (b[1])->l2_hdr_offset = off;
       b[1]->current_data = off;
 
-      off = mb[0]->data_off;
+      off = mb[2]->data_off;
       next[2] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
       off -= RTE_PKTMBUF_HEADROOM;
       vnet_buffer (b[2])->l2_hdr_offset = off;
       b[2]->current_data = off;
 
-      off = mb[0]->data_off;
+      off = mb[3]->data_off;
       next[3] = off + STRUCT_OFFSET_OF (ethernet_header_t, type);
       off -= RTE_PKTMBUF_HEADROOM;
       vnet_buffer (b[3])->l2_hdr_offset = off;
@@ -474,6 +474,40 @@ dpdk_set_next_from_etype (vlib_main_t * vm, vlib_node_runtime_t * node,
     }
 }
 
+static_always_inline void
+dpdk_process_flow_offload (dpdk_device_t * xd, dpdk_per_thread_data_t * ptd,
+                          uword n_rx_packets)
+{
+  uword n;
+  dpdk_flow_lookup_entry_t *fle;
+  vlib_buffer_t *b0;
+
+  /* TODO prefetch and quad-loop */
+  for (n = 0; n < n_rx_packets; n++)
+    {
+      if ((ptd->flags[n] & (1 << DPDK_RX_F_FDIR)) == 0)
+       continue;
+
+      fle = vec_elt_at_index (xd->flow_lookup_entries,
+                             ptd->mbufs[n]->hash.fdir.hi);
+
+      if (fle->next_index != (u16) ~ 0)
+       ptd->next[n] = fle->next_index;
+
+      if (fle->flow_id != ~0)
+       {
+         b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
+         b0->flow_id = fle->flow_id;
+       }
+
+      if (fle->buffer_advance != ~0)
+       {
+         b0 = vlib_buffer_from_rte_mbuf (ptd->mbufs[n]);
+         vlib_buffer_advance (b0, fle->buffer_advance);
+       }
+    }
+}
+
 static_always_inline u32
 dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
                   vlib_node_runtime_t * node, u32 thread_index, u16 queue_id)
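
dpdk_flow_lookup_entry_t is defined elsewhere in this change; judging from how its fields are consumed in dpdk_process_flow_offload () above, it is presumably a small record along these lines (exact field widths are an assumption):

    /* assumed shape of the lookup entry, inferred from the reads above */
    typedef struct
    {
      u32 flow_id;		/* copied into b->flow_id when not ~0 */
      u16 next_index;		/* overrides ptd->next[n] when not (u16) ~0 */
      i16 buffer_advance;	/* passed to vlib_buffer_advance when not ~0 */
    } dpdk_flow_lookup_entry_t;
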
@@ -549,6 +583,12 @@ dpdk_device_input (vlib_main_t * vm, dpdk_main_t * dm, dpdk_device_t * xd,
   else
     dpdk_set_next_from_etype (vm, node, ptd, n_rx_packets);
 
+  /* flow offload - process if rx flow offload enabled and at least one packet
+     is marked */
+  if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) &&
+                    (or_flags & (1 << DPDK_RX_F_FDIR))))
+    dpdk_process_flow_offload (xd, ptd, n_rx_packets);
+
   /* is at least one packet marked as ip4 checksum bad? */
   if (PREDICT_FALSE (or_flags & (1 << DPDK_RX_F_CKSUM_BAD)))
     for (n = 0; n < n_rx_packets; n++)
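
The hash.fdir.hi lookup relies on the flow-add path programming the NIC with a MARK action whose id is the index of the matching flow_lookup_entries element; the hardware reports that id back per received packet, and dpdk_process_rx_burst presumably folds the corresponding mbuf flag into ptd->flags[n] as DPDK_RX_F_FDIR. A minimal sketch of that control-plane side using the standard rte_flow API (attr, pattern and error handling assumed, not shown; fle_index is hypothetical):

    /* sketch: attach a MARK action so packets of this flow carry the
       lookup-entry index in mb->hash.fdir.hi on receive */
    struct rte_flow_action_mark mark = { .id = fle_index };
    struct rte_flow_action actions[] = {
      { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &mark },
      { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    /* flow = rte_flow_create (port_id, &attr, pattern, actions, &error); */
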