devices: af-packet v3 support 36/35636/3
author     Mohsin Kazmi <sykazmi@cisco.com>
           Fri, 18 Mar 2022 16:58:31 +0000 (16:58 +0000)
committer  Damjan Marion <dmarion@me.com>
           Wed, 23 Mar 2022 18:47:15 +0000 (18:47 +0000)
Type: feature

CPU usage: roughly 20% lower than the TPACKET_V2 implementation.
Performance: roughly 20% higher than v2, sustaining a high vector rate.
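
For context, TPACKET_V3 moves ownership handoff from individual frames to whole
blocks: the kernel flips a block's block_status to TP_STATUS_USER, the block
descriptor reports how many packets it holds, and the frames inside the block
are walked via tp_next_offset. The sketch below is not part of this patch; the
function name, ring/block parameters and the process_packet() callback are
hypothetical, and memory barriers/error handling are omitted. It shows the
generic consumer loop that the reworked af_packet_device_input_fn in node.c
mirrors:

#include <stdint.h>
#include <linux/if_packet.h>

/* hypothetical per-packet consumer */
extern void process_packet (uint8_t *data, uint32_t len);

static void
drain_v3_rx_ring (uint8_t *ring_base, uint32_t block_sz, uint32_t block_nr,
                  uint32_t *next_block)
{
  uint32_t b = *next_block;
  struct tpacket_block_desc *bd =
    (struct tpacket_block_desc *) (ring_base + b * block_sz);

  /* the kernel hands over a whole block at a time */
  while (bd->hdr.bh1.block_status & TP_STATUS_USER)
    {
      uint32_t n = bd->hdr.bh1.num_pkts;
      struct tpacket3_hdr *tph =
        (struct tpacket3_hdr *) ((uint8_t *) bd +
                                 bd->hdr.bh1.offset_to_first_pkt);

      while (n--)
        {
          /* packet data starts tp_mac bytes past the frame header */
          process_packet ((uint8_t *) tph + tph->tp_mac, tph->tp_snaplen);
          tph = (struct tpacket3_hdr *) ((uint8_t *) tph + tph->tp_next_offset);
        }

      /* return the whole block to the kernel in one write */
      bd->hdr.bh1.block_status = TP_STATUS_KERNEL;
      b = (b + 1) % block_nr;
      bd = (struct tpacket_block_desc *) (ring_base + b * block_sz);
    }
  *next_block = b;
}

Because status is polled once per block rather than once per frame, the RX path
touches far less shared ring state, which is likely where most of the CPU
saving comes from.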

Change-Id: I24bc594200f42664b59d07b44d44578e61068bbc
Signed-off-by: Mohsin Kazmi <sykazmi@cisco.com>
src/vnet/devices/af_packet/af_packet.c
src/vnet/devices/af_packet/af_packet.h
src/vnet/devices/af_packet/device.c
src/vnet/devices/af_packet/node.c
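
The first hunk below reworks socket creation for TPACKET_V3. The essential
steps are shown here as a minimal standalone sketch, simplified from
create_packet_v3_sock(): error handling and the TX ring are omitted, and the
open_v3_sock() name is illustrative.

#include <stdint.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <arpa/inet.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>

static int
open_v3_sock (int ifindex, struct tpacket_req3 *rx, uint8_t **ring)
{
  int fd = socket (AF_PACKET, SOCK_RAW, htons (ETH_P_ALL));
  int ver = TPACKET_V3;
  struct sockaddr_ll sll = { 0 };

  /* select the v3 ring layout before requesting the ring */
  setsockopt (fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof (ver));

  /* v3-only fields of struct tpacket_req3; a retire timeout of 0 leaves
   * the block-retire interval to the kernel default */
  rx->tp_retire_blk_tov = 0;
  rx->tp_sizeof_priv = 0;
  rx->tp_feature_req_word = 0;
  setsockopt (fd, SOL_PACKET, PACKET_RX_RING, rx, sizeof (*rx));

  /* map the RX blocks contiguously; they are later indexed per block */
  *ring = mmap (NULL, (size_t) rx->tp_block_size * rx->tp_block_nr,
                PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, fd, 0);

  sll.sll_family = AF_PACKET;
  sll.sll_protocol = htons (ETH_P_ALL);
  sll.sll_ifindex = ifindex;
  bind (fd, (struct sockaddr *) &sll, sizeof (sll));
  return fd;
}

The patch also queries PACKET_HDRLEN via getsockopt() and stores it on the
interface, and it switches the default RX ring geometry from a single block of
1024 x 10 KB frames to 20 blocks of 256 x 2 KB frames.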

diff --git a/src/vnet/devices/af_packet/af_packet.c b/src/vnet/devices/af_packet/af_packet.c
index bc9caac..60eadf2 100644
--- a/src/vnet/devices/af_packet/af_packet.c
+++ b/src/vnet/devices/af_packet/af_packet.c
@@ -47,15 +47,13 @@ VNET_HW_INTERFACE_CLASS (af_packet_ip_device_hw_interface_class, static) = {
 #define AF_PACKET_DEFAULT_TX_FRAME_SIZE              (2048 * 5)
 #define AF_PACKET_TX_BLOCK_NR          1
 
-#define AF_PACKET_DEFAULT_RX_FRAMES_PER_BLOCK 1024
-#define AF_PACKET_DEFAULT_RX_FRAME_SIZE              (2048 * 5)
-#define AF_PACKET_RX_BLOCK_NR          1
+#define AF_PACKET_DEFAULT_RX_FRAMES_PER_BLOCK 256
+#define AF_PACKET_DEFAULT_RX_FRAME_SIZE              2048
+#define AF_PACKET_RX_BLOCK_NR                20
 
 /*defined in net/if.h but clashes with dpdk headers */
 unsigned int if_nametoindex (const char *ifname);
 
-typedef struct tpacket_req tpacket_req_t;
-
 static clib_error_t *
 af_packet_eth_set_max_frame_size (vnet_main_t *vnm, vnet_hw_interface_t *hi,
                                  u32 frame_size)
@@ -130,14 +128,17 @@ is_bridge (const u8 * host_if_name)
 }
 
 static int
-create_packet_v2_sock (int host_if_index, tpacket_req_t * rx_req,
-                      tpacket_req_t * tx_req, int *fd, u8 ** ring)
+create_packet_v3_sock (int host_if_index, tpacket_req3_t *rx_req,
+                      tpacket_req3_t *tx_req, int *fd, u8 **ring,
+                      u32 *hdrlen_ptr)
 {
   af_packet_main_t *apm = &af_packet_main;
-  int ret;
   struct sockaddr_ll sll;
-  int ver = TPACKET_V2;
-  socklen_t req_sz = sizeof (struct tpacket_req);
+  socklen_t req_sz = sizeof (tpacket_req3_t);
+  int ret;
+  int ver = TPACKET_V3;
+  u32 hdrlen = 0;
+  u32 len = sizeof (hdrlen);
   u32 ring_sz = rx_req->tp_block_size * rx_req->tp_block_nr +
     tx_req->tp_block_size * tx_req->tp_block_nr;
 
@@ -166,19 +167,33 @@ create_packet_v2_sock (int host_if_index, tpacket_req_t * rx_req,
 
   if (setsockopt (*fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof (ver)) < 0)
     {
-      vlib_log_debug (apm->log_class,
-                     "Failed to set rx packet interface version: %s (errno %d)",
-                     strerror (errno), errno);
+      vlib_log_debug (
+       apm->log_class,
+       "Failed to set rx packet interface version: %s (errno %d)",
+       strerror (errno), errno);
       ret = VNET_API_ERROR_SYSCALL_ERROR_1;
       goto error;
     }
 
+  if (getsockopt (*fd, SOL_PACKET, PACKET_HDRLEN, &hdrlen, &len) < 0)
+    {
+      vlib_log_debug (
+       apm->log_class,
+       "Failed to get packet hdr len option: %s (errno %d)",
+       strerror (errno), errno);
+      ret = VNET_API_ERROR_SYSCALL_ERROR_1;
+      goto error;
+    }
+  else
+    *hdrlen_ptr = hdrlen;
+
   int opt = 1;
   if (setsockopt (*fd, SOL_PACKET, PACKET_LOSS, &opt, sizeof (opt)) < 0)
     {
-      vlib_log_debug (apm->log_class,
-                     "Failed to set packet tx ring error handling option: %s (errno %d)",
-                     strerror (errno), errno);
+      vlib_log_debug (
+       apm->log_class,
+       "Failed to set packet tx ring error handling option: %s (errno %d)",
+       strerror (errno), errno);
       ret = VNET_API_ERROR_SYSCALL_ERROR_1;
       goto error;
     }
@@ -213,9 +228,8 @@ create_packet_v2_sock (int host_if_index, tpacket_req_t * rx_req,
       goto error;
     }
 
-  *ring =
-    mmap (NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, *fd,
-         0);
+  *ring = mmap (NULL, ring_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED,
+               *fd, 0);
   if (*ring == MAP_FAILED)
     {
       vlib_log_debug (apm->log_class, "mmap failure: %s (errno %d)",
@@ -240,8 +254,8 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
   af_packet_main_t *apm = &af_packet_main;
   vlib_main_t *vm = vlib_get_main ();
   int ret, fd = -1, fd2 = -1;
-  struct tpacket_req *rx_req = 0;
-  struct tpacket_req *tx_req = 0;
+  tpacket_req3_t *rx_req = 0;
+  tpacket_req3_t *tx_req = 0;
   struct ifreq ifr;
   u8 *ring = 0;
   af_packet_if_t *apif = 0;
@@ -255,6 +269,8 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
   int host_if_index = -1;
   u32 rx_frames_per_block, tx_frames_per_block;
   u32 rx_frame_size, tx_frame_size;
+  u32 hdrlen = 0;
+  u32 i = 0;
 
   p = mhash_get (&apm->if_index_by_host_if_name, arg->host_if_name);
   if (p)
@@ -282,12 +298,18 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
   rx_req->tp_frame_size = rx_frame_size;
   rx_req->tp_block_nr = AF_PACKET_RX_BLOCK_NR;
   rx_req->tp_frame_nr = AF_PACKET_RX_BLOCK_NR * rx_frames_per_block;
+  rx_req->tp_retire_blk_tov = 0;
+  rx_req->tp_feature_req_word = 0;
+  rx_req->tp_sizeof_priv = 0;
 
   vec_validate (tx_req, 0);
   tx_req->tp_block_size = tx_frame_size * tx_frames_per_block;
   tx_req->tp_frame_size = tx_frame_size;
   tx_req->tp_block_nr = AF_PACKET_TX_BLOCK_NR;
   tx_req->tp_frame_nr = AF_PACKET_TX_BLOCK_NR * tx_frames_per_block;
+  tx_req->tp_retire_blk_tov = 0;
+  tx_req->tp_sizeof_priv = 0;
+  tx_req->tp_feature_req_word = 0;
 
   /*
    * make sure host side of interface is 'UP' before binding AF_PACKET
@@ -343,7 +365,8 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
       fd2 = -1;
     }
 
-  ret = create_packet_v2_sock (host_if_index, rx_req, tx_req, &fd, &ring);
+  ret =
+    create_packet_v3_sock (host_if_index, rx_req, tx_req, &fd, &ring, &hdrlen);
 
   if (ret != 0)
     goto error;
@@ -359,15 +382,29 @@ af_packet_create_if (af_packet_create_if_arg_t *arg)
 
   apif->host_if_index = host_if_index;
   apif->fd = fd;
-  apif->rx_ring = ring;
-  apif->tx_ring = ring + rx_req->tp_block_size * rx_req->tp_block_nr;
+
+  vec_validate (apif->rx_ring, rx_req->tp_block_nr - 1);
+  vec_foreach_index (i, apif->rx_ring)
+    {
+      apif->rx_ring[i] = ring + i * rx_req->tp_block_size;
+    }
+
+  ring = ring + rx_req->tp_block_size * rx_req->tp_block_nr;
+
+  vec_validate (apif->tx_ring, tx_req->tp_block_nr - 1);
+  vec_foreach_index (i, apif->tx_ring)
+    {
+      apif->tx_ring[i] = ring + i * tx_req->tp_block_size;
+    }
+
   apif->rx_req = rx_req;
   apif->tx_req = tx_req;
   apif->host_if_name = host_if_name_dup;
   apif->per_interface_next_index = ~0;
   apif->next_tx_frame = 0;
-  apif->next_rx_frame = 0;
+  apif->next_rx_block = 0;
   apif->mode = arg->mode;
+  apif->hdrlen = hdrlen;
 
   ret = af_packet_read_mtu (apif);
   if (ret != 0)
diff --git a/src/vnet/devices/af_packet/af_packet.h b/src/vnet/devices/af_packet/af_packet.h
index d7d16b9..ed3f10b 100644
--- a/src/vnet/devices/af_packet/af_packet.h
+++ b/src/vnet/devices/af_packet/af_packet.h
  *------------------------------------------------------------------
  */
 
+#include <linux/if_packet.h>
+
 #include <vppinfra/lock.h>
 #include <vlib/log.h>
 
+typedef struct tpacket_block_desc block_desc_t;
+typedef struct tpacket_req3 tpacket_req3_t;
+typedef struct tpacket3_hdr tpacket3_hdr_t;
+
 typedef enum
 {
   AF_PACKET_IF_MODE_ETHERNET = 1,
@@ -39,15 +45,16 @@ typedef struct
   u8 *host_if_name;
   int host_if_index;
   int fd;
-  struct tpacket_req *rx_req;
-  struct tpacket_req *tx_req;
-  u8 *rx_ring;
-  u8 *tx_ring;
+  tpacket_req3_t *rx_req;
+  tpacket_req3_t *tx_req;
+  u8 **rx_ring;
+  u8 **tx_ring;
+  u32 hdrlen;
   u32 hw_if_index;
   u32 sw_if_index;
   u32 clib_file_index;
 
-  u32 next_rx_frame;
+  u32 next_rx_block;
   u32 next_tx_frame;
 
   u32 per_interface_next_index;
diff --git a/src/vnet/devices/af_packet/device.c b/src/vnet/devices/af_packet/device.c
index c8e59c3..23b1883 100644
--- a/src/vnet/devices/af_packet/device.c
+++ b/src/vnet/devices/af_packet/device.c
@@ -83,9 +83,9 @@ format_af_packet_device (u8 * s, va_list * args)
   u32 rx_frame_nr = apif->rx_req->tp_frame_nr;
   u32 rx_block_nr = apif->rx_req->tp_block_nr;
   int block = 0;
-  u8 *tx_block_start = apif->tx_ring + block * tx_block_sz;
+  u8 *tx_block_start = apif->tx_ring[block];
   u32 tx_frame = apif->next_tx_frame;
-  struct tpacket2_hdr *tph;
+  tpacket3_hdr_t *tph;
 
   s = format (s, "Linux PACKET socket interface\n");
   s = format (s, "%UTX block size:%d nr:%d  TX frame size:%d nr:%d\n",
@@ -100,7 +100,7 @@ format_af_packet_device (u8 * s, va_list * args)
   int n_send_req = 0, n_avail = 0, n_sending = 0, n_tot = 0, n_wrong = 0;
   do
     {
-      tph = (struct tpacket2_hdr *) (tx_block_start + tx_frame * tx_frame_sz);
+      tph = (struct tpacket3_hdr *) (tx_block_start + tx_frame * tx_frame_sz);
       tx_frame = (tx_frame + 1) % tx_frame_nr;
       if (tph->tp_status == 0)
        n_avail++;
@@ -140,13 +140,12 @@ VNET_DEVICE_CLASS_TX_FN (af_packet_device_class) (vlib_main_t * vm,
   af_packet_if_t *apif =
     pool_elt_at_index (apm->interfaces, rd->dev_instance);
   clib_spinlock_lock_if_init (&apif->lockp);
-  int block = 0;
-  u32 block_size = apif->tx_req->tp_block_size;
+  u32 block = 0;
   u32 frame_size = apif->tx_req->tp_frame_size;
   u32 frame_num = apif->tx_req->tp_frame_nr;
-  u8 *block_start = apif->tx_ring + block * block_size;
+  u8 *block_start = apif->tx_ring[block];
   u32 tx_frame = apif->next_tx_frame;
-  struct tpacket2_hdr *tph;
+  tpacket3_hdr_t *tph;
   u32 frame_not_ready = 0;
 
   while (n_left)
@@ -158,7 +157,7 @@ VNET_DEVICE_CLASS_TX_FN (af_packet_device_class) (vlib_main_t * vm,
       u32 bi = buffers[0];
       buffers++;
 
-      tph = (struct tpacket2_hdr *) (block_start + tx_frame * frame_size);
+      tph = (struct tpacket3_hdr *) (block_start + tx_frame * frame_size);
       if (PREDICT_FALSE (tph->tp_status &
                         (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING)))
        {
@@ -170,9 +169,9 @@ VNET_DEVICE_CLASS_TX_FN (af_packet_device_class) (vlib_main_t * vm,
        {
          b0 = vlib_get_buffer (vm, bi);
          len = b0->current_length;
-         clib_memcpy_fast ((u8 *) tph +
-                           TPACKET_ALIGN (sizeof (struct tpacket2_hdr)) +
-                           offset, vlib_buffer_get_current (b0), len);
+         clib_memcpy_fast (
+           (u8 *) tph + TPACKET_ALIGN (sizeof (struct tpacket3_hdr)) + offset,
+           vlib_buffer_get_current (b0), len);
          offset += len;
        }
       while ((bi =
diff --git a/src/vnet/devices/af_packet/node.c b/src/vnet/devices/af_packet/node.c
index efe7016..229e050 100644
--- a/src/vnet/devices/af_packet/node.c
+++ b/src/vnet/devices/af_packet/node.c
@@ -51,7 +51,10 @@ typedef struct
   u32 next_index;
   u32 hw_if_index;
   int block;
-  struct tpacket2_hdr tph;
+  u32 num_pkts;
+  void *block_start;
+  block_desc_t bd;
+  tpacket3_hdr_t tph;
 } af_packet_input_trace_t;
 
 static u8 *
@@ -65,26 +68,27 @@ format_af_packet_input_trace (u8 * s, va_list * args)
   s = format (s, "af_packet: hw_if_index %d next-index %d",
              t->hw_if_index, t->next_index);
 
+  s = format (s,
+             "\n%Ublock %u:\n%Uaddress %p version %u seq_num %lu"
+             " num_pkts %u",
+             format_white_space, indent + 2, t->block, format_white_space,
+             indent + 4, t->block_start, t->bd.version, t->bd.hdr.bh1.seq_num,
+             t->num_pkts);
   s =
     format (s,
-           "\n%Utpacket2_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
+           "\n%Utpacket3_hdr:\n%Ustatus 0x%x len %u snaplen %u mac %u net %u"
            "\n%Usec 0x%x nsec 0x%x vlan %U"
 #ifdef TP_STATUS_VLAN_TPID_VALID
            " vlan_tpid %u"
 #endif
            ,
-           format_white_space, indent + 2,
-           format_white_space, indent + 4,
-           t->tph.tp_status,
-           t->tph.tp_len,
-           t->tph.tp_snaplen,
-           t->tph.tp_mac,
-           t->tph.tp_net,
-           format_white_space, indent + 4,
-           t->tph.tp_sec,
-           t->tph.tp_nsec, format_ethernet_vlan_tci, t->tph.tp_vlan_tci
+           format_white_space, indent + 2, format_white_space, indent + 4,
+           t->tph.tp_status, t->tph.tp_len, t->tph.tp_snaplen, t->tph.tp_mac,
+           t->tph.tp_net, format_white_space, indent + 4, t->tph.tp_sec,
+           t->tph.tp_nsec, format_ethernet_vlan_tci, t->tph.hv1.tp_vlan_tci
 #ifdef TP_STATUS_VLAN_TPID_VALID
-           , t->tph.tp_vlan_tpid
+           ,
+           t->tph.hv1.tp_vlan_tpid
 #endif
     );
   return s;
@@ -193,23 +197,23 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                           vlib_frame_t * frame, af_packet_if_t * apif)
 {
   af_packet_main_t *apm = &af_packet_main;
-  struct tpacket2_hdr *tph;
+  tpacket3_hdr_t *tph;
   u32 next_index;
-  u32 block = 0;
-  u32 rx_frame;
   u32 n_free_bufs;
   u32 n_rx_packets = 0;
   u32 n_rx_bytes = 0;
   u32 *to_next = 0;
-  u32 block_size = apif->rx_req->tp_block_size;
-  u32 frame_size = apif->rx_req->tp_frame_size;
-  u32 frame_num = apif->rx_req->tp_frame_nr;
-  u8 *block_start = apif->rx_ring + block * block_size;
+  u32 block = apif->next_rx_block;
+  u32 block_nr = apif->rx_req->tp_block_nr;
+  u8 *block_start = 0;
   uword n_trace = vlib_get_trace_count (vm, node);
   u32 thread_index = vm->thread_index;
   u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
   u32 min_bufs = apif->rx_req->tp_frame_size / n_buffer_bytes;
   u32 eth_header_size = 0;
+  u32 num_pkts = 0;
+  u32 rx_frame_offset = 0;
+  block_desc_t *bd = 0;
   vlib_buffer_t bt;
 
   if (apif->mode == AF_PACKET_IF_MODE_IP)
@@ -227,170 +231,199 @@ af_packet_device_input_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
       vnet_feature_start_device_input_x1 (apif->sw_if_index, &next_index, &bt);
     }
 
-  n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
-  if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
+  while ((((block_desc_t *) (block_start = apif->rx_ring[block]))
+           ->hdr.bh1.block_status &
+         TP_STATUS_USER) != 0)
     {
-      vec_validate (apm->rx_buffers[thread_index],
-                   VLIB_FRAME_SIZE + n_free_bufs - 1);
-      n_free_bufs +=
-       vlib_buffer_alloc (vm, &apm->rx_buffers[thread_index][n_free_bufs],
-                          VLIB_FRAME_SIZE);
-      _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs;
-    }
+      u32 n_required = 0;
 
-  rx_frame = apif->next_rx_frame;
-  tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
-  while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs))
-    {
-      vlib_buffer_t *b0 = 0, *first_b0 = 0;
-      u32 next0 = next_index;
+      if (PREDICT_FALSE (num_pkts == 0))
+       {
+         bd = (block_desc_t *) block_start;
+         num_pkts = bd->hdr.bh1.num_pkts;
+         rx_frame_offset = sizeof (block_desc_t);
+       }
 
-      u32 n_left_to_next;
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-      while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs) &&
-            n_left_to_next)
+      n_required = clib_max (num_pkts, VLIB_FRAME_SIZE);
+      n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
+      if (PREDICT_FALSE (n_free_bufs < n_required))
        {
-         u32 data_len = tph->tp_snaplen;
-         u32 offset = 0;
-         u32 bi0 = 0, first_bi0 = 0, prev_bi0;
-         u8 l4_hdr_sz = 0;
+         vec_validate (apm->rx_buffers[thread_index],
+                       n_required + n_free_bufs - 1);
+         n_free_bufs += vlib_buffer_alloc (
+           vm, &apm->rx_buffers[thread_index][n_free_bufs], n_required);
+         _vec_len (apm->rx_buffers[thread_index]) = n_free_bufs;
+       }
 
-         while (data_len)
+      while (num_pkts && (n_free_bufs > min_bufs))
+       {
+         vlib_buffer_t *b0 = 0, *first_b0 = 0;
+         u32 next0 = next_index;
+         u32 n_left_to_next;
+         vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+         while (num_pkts && n_left_to_next && (n_free_bufs > min_bufs))
            {
-             /* grab free buffer */
-             u32 last_empty_buffer =
-               vec_len (apm->rx_buffers[thread_index]) - 1;
-             prev_bi0 = bi0;
-             bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
-             b0 = vlib_get_buffer (vm, bi0);
-             _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer;
-             n_free_bufs--;
-
-             /* copy data */
-             u32 bytes_to_copy =
-               data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
-             u32 vlan_len = 0;
-             u32 bytes_copied = 0;
-             b0->current_data = 0;
-             /* Kernel removes VLAN headers, so reconstruct VLAN */
-             if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
+             tph = (tpacket3_hdr_t *) (block_start + rx_frame_offset);
+
+             if (num_pkts > 1)
+               CLIB_PREFETCH (block_start + rx_frame_offset +
+                                tph->tp_next_offset,
+                              2 * CLIB_CACHE_LINE_BYTES, LOAD);
+             u32 data_len = tph->tp_snaplen;
+             u32 offset = 0;
+             u32 bi0 = 0, first_bi0 = 0, prev_bi0;
+             u8 l4_hdr_sz = 0;
+
+             while (data_len)
                {
-                 if (PREDICT_TRUE (offset == 0))
+                 /* grab free buffer */
+                 u32 last_empty_buffer =
+                   vec_len (apm->rx_buffers[thread_index]) - 1;
+                 prev_bi0 = bi0;
+                 bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
+                 b0 = vlib_get_buffer (vm, bi0);
+                 _vec_len (apm->rx_buffers[thread_index]) = last_empty_buffer;
+                 n_free_bufs--;
+
+                 /* copy data */
+                 u32 bytes_to_copy =
+                   data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+                 u32 vlan_len = 0;
+                 u32 bytes_copied = 0;
+                 b0->current_data = 0;
+                 /* Kernel removes VLAN headers, so reconstruct VLAN */
+                 if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
                    {
-                     clib_memcpy_fast (vlib_buffer_get_current (b0),
-                                       (u8 *) tph + tph->tp_mac,
-                                       sizeof (ethernet_header_t));
-                     ethernet_header_t *eth = vlib_buffer_get_current (b0);
-                     ethernet_vlan_header_t *vlan =
-                       (ethernet_vlan_header_t *) (eth + 1);
-                     vlan->priority_cfi_and_id =
-                       clib_host_to_net_u16 (tph->tp_vlan_tci);
-                     vlan->type = eth->type;
-                     eth->type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
-                     vlan_len = sizeof (ethernet_vlan_header_t);
-                     bytes_copied = sizeof (ethernet_header_t);
+                     if (PREDICT_TRUE (offset == 0))
+                       {
+                         clib_memcpy_fast (vlib_buffer_get_current (b0),
+                                           (u8 *) tph + tph->tp_mac,
+                                           sizeof (ethernet_header_t));
+                         ethernet_header_t *eth =
+                           vlib_buffer_get_current (b0);
+                         ethernet_vlan_header_t *vlan =
+                           (ethernet_vlan_header_t *) (eth + 1);
+                         vlan->priority_cfi_and_id =
+                           clib_host_to_net_u16 (tph->hv1.tp_vlan_tci);
+                         vlan->type = eth->type;
+                         eth->type =
+                           clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+                         vlan_len = sizeof (ethernet_vlan_header_t);
+                         bytes_copied = sizeof (ethernet_header_t);
+                       }
                    }
-               }
-             clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
-                               bytes_copied + vlan_len,
-                               (u8 *) tph + tph->tp_mac + offset +
-                               bytes_copied, (bytes_to_copy - bytes_copied));
+                 clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
+                                     bytes_copied + vlan_len,
+                                   (u8 *) tph + tph->tp_mac + offset +
+                                     bytes_copied,
+                                   (bytes_to_copy - bytes_copied));
+
+                 /* fill buffer header */
+                 b0->current_length = bytes_to_copy + vlan_len;
 
-             /* fill buffer header */
-             b0->current_length = bytes_to_copy + vlan_len;
+                 if (offset == 0)
+                   {
+                     b0->total_length_not_including_first_buffer = 0;
+                     b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+                     vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+                       apif->sw_if_index;
+                     vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
+                     first_bi0 = bi0;
+                     first_b0 = vlib_get_buffer (vm, first_bi0);
+                     if (tph->tp_status & TP_STATUS_CSUMNOTREADY)
+                       mark_tcp_udp_cksum_calc (first_b0, &l4_hdr_sz);
+                     /* This is a trade-off for GSO. As kernel isn't passing
+                      * us the GSO state or size, we guess it by comparing it
+                      * to the host MTU of the interface */
+                     if (tph->tp_snaplen > (apif->host_mtu + eth_header_size))
+                       fill_gso_buffer_flags (first_b0, apif->host_mtu,
+                                              l4_hdr_sz);
+                   }
+                 else
+                   buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
 
-             if (offset == 0)
+                 offset += bytes_to_copy;
+                 data_len -= bytes_to_copy;
+               }
+             n_rx_packets++;
+             n_rx_bytes += tph->tp_snaplen;
+             to_next[0] = first_bi0;
+             to_next += 1;
+             n_left_to_next--;
+
+             /* drop partial packets */
+             if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
                {
-                 b0->total_length_not_including_first_buffer = 0;
-                 b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
-                 vnet_buffer (b0)->sw_if_index[VLIB_RX] = apif->sw_if_index;
-                 vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-                 first_bi0 = bi0;
-                 first_b0 = vlib_get_buffer (vm, first_bi0);
-                 if (tph->tp_status & TP_STATUS_CSUMNOTREADY)
-                   mark_tcp_udp_cksum_calc (first_b0, &l4_hdr_sz);
-                 /* This is a trade-off for GSO. As kernel isn't passing
-                  * us the GSO state or size, we guess it by comparing it
-                  * to the host MTU of the interface */
-                 if (tph->tp_snaplen > (apif->host_mtu + eth_header_size))
-                   fill_gso_buffer_flags (first_b0, apif->host_mtu,
-                                          l4_hdr_sz);
+                 next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+                 first_b0->error =
+                   node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
                }
              else
-               buffer_add_to_chain (vm, bi0, first_bi0, prev_bi0);
-
-             offset += bytes_to_copy;
-             data_len -= bytes_to_copy;
-           }
-         n_rx_packets++;
-         n_rx_bytes += tph->tp_snaplen;
-         to_next[0] = first_bi0;
-         to_next += 1;
-         n_left_to_next--;
-
-         /* drop partial packets */
-         if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
-           {
-             next0 = VNET_DEVICE_INPUT_NEXT_DROP;
-             first_b0->error =
-               node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
-           }
-         else
-           {
-             if (PREDICT_FALSE (apif->mode == AF_PACKET_IF_MODE_IP))
                {
-                 switch (first_b0->data[0] & 0xf0)
+                 if (PREDICT_FALSE (apif->mode == AF_PACKET_IF_MODE_IP))
                    {
-                   case 0x40:
-                     next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
-                     break;
-                   case 0x60:
-                     next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
-                     break;
-                   default:
-                     next0 = VNET_DEVICE_INPUT_NEXT_DROP;
-                     break;
+                     switch (first_b0->data[0] & 0xf0)
+                       {
+                       case 0x40:
+                         next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
+                         break;
+                       case 0x60:
+                         next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
+                         break;
+                       default:
+                         next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+                         break;
+                       }
+                     if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
+                       next0 = apif->per_interface_next_index;
+                   }
+                 else
+                   {
+                     /* copy feature arc data from template */
+                     first_b0->current_config_index = bt.current_config_index;
+                     vnet_buffer (first_b0)->feature_arc_index =
+                       vnet_buffer (&bt)->feature_arc_index;
                    }
-                 if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
-                   next0 = apif->per_interface_next_index;
                }
-             else
+
+             /* trace */
+             if (PREDICT_FALSE (n_trace > 0 &&
+                                vlib_trace_buffer (vm, node, next0, first_b0,
+                                                   /* follow_chain */ 0)))
                {
-                 /* copy feature arc data from template */
-                 first_b0->current_config_index = bt.current_config_index;
-                 vnet_buffer (first_b0)->feature_arc_index =
-                   vnet_buffer (&bt)->feature_arc_index;
+                 af_packet_input_trace_t *tr;
+                 vlib_set_trace_count (vm, node, --n_trace);
+                 tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
+                 tr->next_index = next0;
+                 tr->hw_if_index = apif->hw_if_index;
+                 tr->block = block;
+                 tr->block_start = bd;
+                 tr->num_pkts = num_pkts;
+                 clib_memcpy_fast (&tr->bd, bd, sizeof (block_desc_t));
+                 clib_memcpy_fast (&tr->tph, tph, sizeof (tpacket3_hdr_t));
                }
-           }
 
-         /* trace */
-         if (PREDICT_FALSE
-             (n_trace > 0 && vlib_trace_buffer (vm, node, next0, first_b0,
-                                                /* follow_chain */ 0)))
-           {
-             af_packet_input_trace_t *tr;
-             vlib_set_trace_count (vm, node, --n_trace);
-             tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
-             tr->next_index = next0;
-             tr->hw_if_index = apif->hw_if_index;
-             clib_memcpy_fast (&tr->tph, tph, sizeof (struct tpacket2_hdr));
-           }
+             /* enque and take next packet */
+             vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+                                              n_left_to_next, first_bi0,
+                                              next0);
 
-         /* enque and take next packet */
-         vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
-                                          n_left_to_next, first_bi0, next0);
+             /* next packet */
+             num_pkts--;
+             rx_frame_offset += tph->tp_next_offset;
+           }
 
-         /* next packet */
-         tph->tp_status = TP_STATUS_KERNEL;
-         rx_frame = (rx_frame + 1) % frame_num;
-         tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
+         vlib_put_next_frame (vm, node, next_index, n_left_to_next);
        }
 
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      if (PREDICT_TRUE (num_pkts == 0))
+       {
+         bd->hdr.bh1.block_status = TP_STATUS_KERNEL;
+         block = (block + 1) % block_nr;
+       }
     }
 
-  apif->next_rx_frame = rx_frame;
-
+  apif->next_rx_block = block;
   vlib_increment_combined_counter
     (vnet_get_main ()->interface_main.combined_sw_if_counters
      + VNET_INTERFACE_COUNTER_RX,