c11 safe string handling support
[vpp.git] src/vnet/ip/ip4_forward.c
index 48442a0..192f301 100644
@@ -753,13 +753,13 @@ VNET_FEATURE_INIT (ip4_policer_classify, static) =
 {
   .arc_name = "ip4-unicast",
   .node_name = "ip4-policer-classify",
-  .runs_before = VNET_FEATURES ("ipsec-input-ip4"),
+  .runs_before = VNET_FEATURES ("ipsec4-input"),
 };
 
 VNET_FEATURE_INIT (ip4_ipsec, static) =
 {
   .arc_name = "ip4-unicast",
-  .node_name = "ipsec-input-ip4",
+  .node_name = "ipsec4-input",
   .runs_before = VNET_FEATURES ("vpath-input-ip4"),
 };
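For reference, a feature node orders itself on an arc by naming the nodes it must run before, so out-of-tree registrations need the new "ipsec4-input" spelling as well. A minimal sketch of such a registration; the node name "sample-feature-ip4" is hypothetical, only the arc and the renamed ipsec node are taken from this change:

/* Hypothetical registration on the ip4-unicast arc, ordered ahead of
 * the renamed ipsec4-input node. */
VNET_FEATURE_INIT (sample_feature_ip4, static) =
{
  .arc_name = "ip4-unicast",
  .node_name = "sample-feature-ip4",
  .runs_before = VNET_FEATURES ("ipsec4-input"),
};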
 
@@ -839,13 +839,13 @@ VNET_FEATURE_INIT (ip4_outacl, static) =
 {
   .arc_name = "ip4-output",
   .node_name = "ip4-outacl",
-  .runs_before = VNET_FEATURES ("ipsec-output-ip4"),
+  .runs_before = VNET_FEATURES ("ipsec4-output"),
 };
 
 VNET_FEATURE_INIT (ip4_ipsec_output, static) =
 {
   .arc_name = "ip4-output",
-  .node_name = "ipsec-output-ip4",
+  .node_name = "ipsec4-output",
   .runs_before = VNET_FEATURES ("interface-output"),
 };
 
@@ -943,11 +943,11 @@ ip4_lookup_init (vlib_main_t * vm)
   {
     ethernet_arp_header_t h;
 
-    memset (&h, 0, sizeof (h));
+    clib_memset (&h, 0, sizeof (h));
 
     /* Set target ethernet address to all zeros. */
-    memset (h.ip4_over_ethernet[1].ethernet, 0,
-           sizeof (h.ip4_over_ethernet[1].ethernet));
+    clib_memset (h.ip4_over_ethernet[1].ethernet, 0,
+                sizeof (h.ip4_over_ethernet[1].ethernet));
 
 #define _16(f,v) h.f = clib_host_to_net_u16 (v);
 #define _8(f,v) h.f = v;
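The memset to clib_memset conversions above are the part of this diff that matches the commit subject; the wrapper follows the C11 Annex K idea of refusing to write when the destination bound would be exceeded. A rough sketch of that pattern only, assuming nothing about VPP's actual definition (sketch_memset is illustrative, not clib_memset):

#include <stddef.h>
#include <string.h>

/* Sketch of a bounds-checked memset in the spirit of C11 memset_s();
 * an illustration of the pattern, not VPP's clib_memset. */
static inline void *
sketch_memset (void *s, size_t smax, int c, size_t n)
{
  if (s == NULL || n > smax)
    return NULL;		/* constraint violated: write nothing */
  return memset (s, c, n);
}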
@@ -1481,20 +1481,51 @@ ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip,
     }
 }
 
+enum ip_local_packet_type_e
+{
+  IP_LOCAL_PACKET_TYPE_L4,
+  IP_LOCAL_PACKET_TYPE_NAT,
+  IP_LOCAL_PACKET_TYPE_FRAG,
+};
+
+/**
+ * Determine packet type and next node.
+ *
+ * The expectation is that all packets that are not L4 will skip
+ * checksums and source checks.
+ */
+always_inline u8
+ip4_local_classify (vlib_buffer_t * b, ip4_header_t * ip, u16 * next)
+{
+  ip_lookup_main_t *lm = &ip4_main.lookup_main;
+
+  if (PREDICT_FALSE (ip4_is_fragment (ip)))
+    {
+      *next = IP_LOCAL_NEXT_REASSEMBLY;
+      return IP_LOCAL_PACKET_TYPE_FRAG;
+    }
+  if (PREDICT_FALSE (b->flags & VNET_BUFFER_F_IS_NATED))
+    {
+      *next = lm->local_next_by_ip_protocol[ip->protocol];
+      return IP_LOCAL_PACKET_TYPE_NAT;
+    }
+
+  *next = lm->local_next_by_ip_protocol[ip->protocol];
+  return IP_LOCAL_PACKET_TYPE_L4;
+}
+
 static inline uword
 ip4_local_inline (vlib_main_t * vm,
                  vlib_node_runtime_t * node,
                  vlib_frame_t * frame, int head_of_feature_arc)
 {
-  ip4_main_t *im = &ip4_main;
-  ip_lookup_main_t *lm = &im->lookup_main;
   u32 *from, n_left_from;
   vlib_node_runtime_t *error_node =
     vlib_node_get_runtime (vm, ip4_input_node.index);
   u16 nexts[VLIB_FRAME_SIZE], *next;
   vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
   ip4_header_t *ip[2];
-  u8 error[2];
+  u8 error[2], pt[2];
 
   ip4_local_last_check_t last_check = {
     .src = {.as_u32 = 0},
@@ -1514,7 +1545,7 @@ ip4_local_inline (vlib_main_t * vm,
 
   while (n_left_from >= 6)
     {
-      u32 is_nat, not_batch = 0;
+      u8 not_batch = 0;
 
       /* Prefetch next iteration. */
       {
@@ -1533,10 +1564,12 @@ ip4_local_inline (vlib_main_t * vm,
       vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data;
       vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data;
 
-      is_nat = b[0]->flags & VNET_BUFFER_F_IS_NATED;
-      not_batch |= is_nat ^ (b[1]->flags & VNET_BUFFER_F_IS_NATED);
+      pt[0] = ip4_local_classify (b[0], ip[0], &next[0]);
+      pt[1] = ip4_local_classify (b[1], ip[1], &next[1]);
 
-      if (head_of_feature_arc == 0 || (is_nat && not_batch == 0))
+      not_batch = pt[0] ^ pt[1];
+
+      if (head_of_feature_arc == 0 || (pt[0] && not_batch == 0))
        goto skip_checks;
 
       if (PREDICT_TRUE (not_batch == 0))
@@ -1546,12 +1579,12 @@ ip4_local_inline (vlib_main_t * vm,
        }
       else
        {
-         if (!(b[0]->flags & VNET_BUFFER_F_IS_NATED))
+         if (!pt[0])
            {
              ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]);
              ip4_local_check_src (b[0], ip[0], &last_check, &error[0]);
            }
-         if (!(b[1]->flags & VNET_BUFFER_F_IS_NATED))
+         if (!pt[1])
            {
              ip4_local_check_l4_csum (vm, b[1], ip[1], &error[1]);
              ip4_local_check_src (b[1], ip[1], &last_check, &error[1]);
@@ -1560,8 +1593,6 @@ ip4_local_inline (vlib_main_t * vm,
 
     skip_checks:
 
-      next[0] = lm->local_next_by_ip_protocol[ip[0]->protocol];
-      next[1] = lm->local_next_by_ip_protocol[ip[1]->protocol];
       ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0],
                                    head_of_feature_arc);
       ip4_local_set_next_and_error (error_node, b[1], &next[1], error[1],
@@ -1578,8 +1609,9 @@ ip4_local_inline (vlib_main_t * vm,
 
       ip[0] = vlib_buffer_get_current (b[0]);
       vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data;
+      pt[0] = ip4_local_classify (b[0], ip[0], &next[0]);
 
-      if (head_of_feature_arc == 0 || (b[0]->flags & VNET_BUFFER_F_IS_NATED))
+      if (head_of_feature_arc == 0 || pt[0])
        goto skip_check;
 
       ip4_local_check_l4_csum (vm, b[0], ip[0], &error[0]);
@@ -1587,7 +1619,6 @@ ip4_local_inline (vlib_main_t * vm,
 
     skip_check:
 
-      next[0] = lm->local_next_by_ip_protocol[ip[0]->protocol];
       ip4_local_set_next_and_error (error_node, b[0], &next[0], error[0],
                                    head_of_feature_arc);
 
@@ -1619,6 +1650,7 @@ VLIB_REGISTER_NODE (ip4_local_node) =
     [IP_LOCAL_NEXT_PUNT] = "ip4-punt",
     [IP_LOCAL_NEXT_UDP_LOOKUP] = "ip4-udp-lookup",
     [IP_LOCAL_NEXT_ICMP] = "ip4-icmp-input",
+    [IP_LOCAL_NEXT_REASSEMBLY] = "ip4-reassembly",
   },
 };
 /* *INDENT-ON* */
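The new "ip4-reassembly" entry pairs with ip4_local_classify() above: the classifier returns IP_LOCAL_NEXT_REASSEMBLY for fragments, and that index must name the reassembly node in this table. The enum below is only an illustration of how the indices and the registration line up; the real ip_local_next_t definition lives in a header, not in this file.

/* Illustrative only: next-node indices are positions in next_nodes[],
 * so IP_LOCAL_NEXT_REASSEMBLY selects "ip4-reassembly" above. */
typedef enum
{
  IP_LOCAL_NEXT_DROP,
  IP_LOCAL_NEXT_PUNT,
  IP_LOCAL_NEXT_UDP_LOOKUP,
  IP_LOCAL_NEXT_ICMP,
  IP_LOCAL_NEXT_REASSEMBLY,
  IP_LOCAL_N_NEXT,
} ip_local_next_sketch_t;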
@@ -1719,21 +1751,11 @@ ip4_arp_inline (vlib_main_t * vm,
   uword n_left_from, n_left_to_next_drop, next_index;
   u32 thread_index = vm->thread_index;
   u32 seed;
-  f64 time_now;
 
   if (node->flags & VLIB_NODE_FLAG_TRACE)
     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
 
-  time_now = vlib_time_now (vm);
-  if (time_now - im->arp_throttle_last_seed_change_time[thread_index] > 1e-3)
-    {
-      (void) random_u32 (&im->arp_throttle_seeds[thread_index]);
-      memset (im->arp_throttle_bitmaps[thread_index], 0,
-             ARP_THROTTLE_BITS / BITS (u8));
-
-      im->arp_throttle_last_seed_change_time[thread_index] = time_now;
-    }
-  seed = im->arp_throttle_seeds[thread_index];
+  seed = throttle_seed (&im->arp_throttle, thread_index, vlib_time_now (vm));
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;
@@ -1748,8 +1770,7 @@ ip4_arp_inline (vlib_main_t * vm,
 
       while (n_left_from > 0 && n_left_to_next_drop > 0)
        {
-         u32 pi0, adj_index0, r0, w0, sw_if_index0, drop0;
-         uword m0;
+         u32 pi0, adj_index0, r0, sw_if_index0, drop0;
          ip_adjacency_t *adj0;
          vlib_buffer_t *p0;
          ip4_header_t *ip0;
@@ -1778,14 +1799,7 @@ ip4_arp_inline (vlib_main_t * vm,
              r0 = adj0->sub_type.nbr.next_hop.ip4.data_u32;
            }
 
-         r0 ^= seed;
-         /* Select bit number */
-         r0 &= ARP_THROTTLE_BITS - 1;
-         w0 = r0 / BITS (uword);
-         m0 = (uword) 1 << (r0 % BITS (uword));
-
-         drop0 = (im->arp_throttle_bitmaps[thread_index][w0] & m0) != 0;
-         im->arp_throttle_bitmaps[thread_index][w0] |= m0;
+         drop0 = throttle_check (&im->arp_throttle, thread_index, r0, seed);
 
          from += 1;
          n_left_from -= 1;
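The per-thread seed and bitmap bookkeeping deleted above is now hidden behind throttle_seed()/throttle_check(). A sketch of what the check amounts to, reconstructed from the removed inline code; the type and function names here are illustrative, not the vppinfra throttle API:

/* Reconstruction of the removed inline logic for illustration; the real
 * throttle_t and throttle_check() live in vppinfra and differ in detail. */
typedef struct
{
  uword **bitmaps;	/* one ARP_THROTTLE_BITS-wide bitmap per thread */
  u32 *seeds;		/* one random seed per thread, re-rolled every 1ms */
} throttle_sketch_t;

static inline int
throttle_sketch_check (throttle_sketch_t * t, u32 thread_index, u32 r, u32 seed)
{
  uword w, m;
  int drop;

  r ^= seed;			/* randomize the bit a given flow hashes to */
  r &= ARP_THROTTLE_BITS - 1;	/* select a bit number */
  w = r / BITS (uword);		/* word index within the bitmap */
  m = (uword) 1 << (r % BITS (uword));

  drop = (t->bitmaps[thread_index][w] & m) != 0;	/* already seen? */
  t->bitmaps[thread_index][w] |= m;			/* mark as seen */
  return drop;
}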
@@ -2099,7 +2113,7 @@ typedef enum
 
 always_inline void
 ip4_mtu_check (vlib_buffer_t * b, u16 packet_len,
-              u16 adj_packet_bytes, bool df, u32 * next, u32 * error)
+              u16 adj_packet_bytes, bool df, u16 * next, u32 * error)
 {
   if (packet_len > adj_packet_bytes)
     {
@@ -2115,13 +2129,59 @@ ip4_mtu_check (vlib_buffer_t * b, u16 packet_len,
       else
        {
          /* IP fragmentation */
-         ip_frag_set_vnet_buffer (b, 0, adj_packet_bytes,
+         ip_frag_set_vnet_buffer (b, adj_packet_bytes,
                                   IP4_FRAG_NEXT_IP4_REWRITE, 0);
          *next = IP4_REWRITE_NEXT_FRAGMENT;
        }
     }
 }
 
+/* Decrement TTL & update checksum.
+   Works either endian, so no need for byte swap. */
+static_always_inline void
+ip4_ttl_and_checksum_check (vlib_buffer_t * b, ip4_header_t * ip, u16 * next,
+                           u32 * error)
+{
+  i32 ttl;
+  u32 checksum;
+  if (PREDICT_FALSE (b->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED))
+    {
+      b->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
+      return;
+    }
+
+  ttl = ip->ttl;
+
+  /* Input node should have rejected packets with ttl 0. */
+  ASSERT (ip->ttl > 0);
+
+  checksum = ip->checksum + clib_host_to_net_u16 (0x0100);
+  checksum += checksum >= 0xffff;
+
+  ip->checksum = checksum;
+  ttl -= 1;
+  ip->ttl = ttl;
+
+  /*
+   * If the ttl drops below 1 when forwarding, generate
+   * an ICMP response.
+   */
+  if (PREDICT_FALSE (ttl <= 0))
+    {
+      *error = IP4_ERROR_TIME_EXPIRED;
+      vnet_buffer (b)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+      icmp4_error_set_vnet_buffer (b, ICMP4_time_exceeded,
+                                  ICMP4_time_exceeded_ttl_exceeded_in_transit,
+                                  0);
+      *next = IP4_REWRITE_NEXT_ICMP_ERROR;
+    }
+
+  /* Verify checksum. */
+  ASSERT ((ip->checksum == ip4_header_checksum (ip)) ||
+         (b->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM));
+}
+
+
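ip4_ttl_and_checksum_check() relies on incremental checksum update: the TTL shares a 16-bit header word with the protocol field, so decrementing it lowers that word by 0x0100, and adding 0x0100 back into the stored checksum with an end-around carry (RFC 1141/1624 style) keeps the header checksum valid without recomputing it. A small standalone demonstration of the arithmetic over a byte array, in plain C and independent of the VPP helpers; the sample header bytes are arbitrary:

#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* One's-complement sum over big-endian 16-bit words, i.e. the IPv4 header
 * checksum, computed with the checksum field pre-zeroed in the input. */
static uint16_t
cksum16 (const uint8_t * h, size_t len)
{
  uint32_t sum = 0;
  for (size_t i = 0; i < len; i += 2)
    {
      sum += ((uint32_t) h[i] << 8) | h[i + 1];
      sum = (sum & 0xffff) + (sum >> 16);	/* fold the carry back in */
    }
  return (uint16_t) (~sum & 0xffff);
}

int
main (void)
{
  /* Arbitrary 20-byte IPv4 header; checksum bytes [10..11] start as zero. */
  uint8_t h[20] = { 0x45, 0x00, 0x00, 0x54, 0x12, 0x34, 0x40, 0x00,
		    0x40, 0x06, 0x00, 0x00, 0xc0, 0xa8, 0x01, 0x01,
		    0xc0, 0xa8, 0x01, 0x02 };
  uint16_t c = cksum16 (h, sizeof (h));
  h[10] = c >> 8;
  h[11] = c & 0xff;

  /* Decrement TTL (byte 8): the TTL/protocol word drops by 0x0100, so add
   * 0x0100 to the stored checksum and fold the end-around carry. */
  h[8] -= 1;
  uint32_t hc = (((uint32_t) h[10] << 8) | h[11]) + 0x0100;
  hc = (hc & 0xffff) + (hc >> 16);
  h[10] = (hc >> 8) & 0xff;
  h[11] = hc & 0xff;

  /* Recomputing from scratch gives the same checksum. */
  uint8_t h2[20];
  memcpy (h2, h, sizeof (h));
  h2[10] = h2[11] = 0;
  assert ((((uint32_t) h[10] << 8) | h[11]) == cksum16 (h2, sizeof (h2)));
  return 0;
}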
 always_inline uword
 ip4_rewrite_inline (vlib_main_t * vm,
                    vlib_node_runtime_t * node,
@@ -2130,406 +2190,275 @@ ip4_rewrite_inline (vlib_main_t * vm,
 {
   ip_lookup_main_t *lm = &ip4_main.lookup_main;
   u32 *from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, *to_next, next_index;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+  u16 nexts[VLIB_FRAME_SIZE], *next;
+  u32 n_left_from;
   vlib_node_runtime_t *error_node =
     vlib_node_get_runtime (vm, ip4_input_node.index);
 
   n_left_from = frame->n_vectors;
-  next_index = node->cached_next_index;
   u32 thread_index = vm->thread_index;
 
-  while (n_left_from > 0)
-    {
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
-      while (n_left_from >= 4 && n_left_to_next >= 2)
-       {
-         ip_adjacency_t *adj0, *adj1;
-         vlib_buffer_t *p0, *p1;
-         ip4_header_t *ip0, *ip1;
-         u32 pi0, rw_len0, next0, error0, checksum0, adj_index0;
-         u32 pi1, rw_len1, next1, error1, checksum1, adj_index1;
-         u32 tx_sw_if_index0, tx_sw_if_index1;
-
-         /* Prefetch next iteration. */
-         {
-           vlib_buffer_t *p2, *p3;
-
-           p2 = vlib_get_buffer (vm, from[2]);
-           p3 = vlib_get_buffer (vm, from[3]);
-
-           vlib_prefetch_buffer_header (p2, STORE);
-           vlib_prefetch_buffer_header (p3, STORE);
-
-           CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
-           CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
-         }
-
-         pi0 = to_next[0] = from[0];
-         pi1 = to_next[1] = from[1];
-
-         from += 2;
-         n_left_from -= 2;
-         to_next += 2;
-         n_left_to_next -= 2;
-
-         p0 = vlib_get_buffer (vm, pi0);
-         p1 = vlib_get_buffer (vm, pi1);
-
-         adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
-         adj_index1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
-
-         /*
-          * pre-fetch the per-adjacency counters
-          */
-         if (do_counters)
-           {
-             vlib_prefetch_combined_counter (&adjacency_counters,
-                                             thread_index, adj_index0);
-             vlib_prefetch_combined_counter (&adjacency_counters,
-                                             thread_index, adj_index1);
-           }
-
-         ip0 = vlib_buffer_get_current (p0);
-         ip1 = vlib_buffer_get_current (p1);
-
-         error0 = error1 = IP4_ERROR_NONE;
-         next0 = next1 = IP4_REWRITE_NEXT_DROP;
-
-         /* Decrement TTL & update checksum.
-            Works either endian, so no need for byte swap. */
-         if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED)))
-           {
-             i32 ttl0 = ip0->ttl;
-
-             /* Input node should have reject packets with ttl 0. */
-             ASSERT (ip0->ttl > 0);
-
-             checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
-             checksum0 += checksum0 >= 0xffff;
-
-             ip0->checksum = checksum0;
-             ttl0 -= 1;
-             ip0->ttl = ttl0;
-
-             /*
-              * If the ttl drops below 1 when forwarding, generate
-              * an ICMP response.
-              */
-             if (PREDICT_FALSE (ttl0 <= 0))
-               {
-                 error0 = IP4_ERROR_TIME_EXPIRED;
-                 vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-                 icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
-                                              ICMP4_time_exceeded_ttl_exceeded_in_transit,
-                                              0);
-                 next0 = IP4_REWRITE_NEXT_ICMP_ERROR;
-               }
+  vlib_get_buffers (vm, from, bufs, n_left_from);
+  clib_memset_u16 (nexts, IP4_REWRITE_NEXT_DROP, n_left_from);
 
-             /* Verify checksum. */
-             ASSERT ((ip0->checksum == ip4_header_checksum (ip0)) ||
-                     (p0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM));
-           }
-         else
-           {
-             p0->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
-           }
-         if (PREDICT_TRUE (!(p1->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED)))
-           {
-             i32 ttl1 = ip1->ttl;
+  if (n_left_from >= 6)
+    {
+      int i;
+      for (i = 0; i < 6; i++)
+       vlib_prefetch_buffer_header (bufs[i], LOAD);
+    }
 
-             /* Input node should have reject packets with ttl 0. */
-             ASSERT (ip1->ttl > 0);
+  next = nexts;
+  b = bufs;
+  while (n_left_from >= 8)
+    {
+      ip_adjacency_t *adj0, *adj1;
+      ip4_header_t *ip0, *ip1;
+      u32 rw_len0, error0, adj_index0;
+      u32 rw_len1, error1, adj_index1;
+      u32 tx_sw_if_index0, tx_sw_if_index1;
+      u8 *p;
 
-             checksum1 = ip1->checksum + clib_host_to_net_u16 (0x0100);
-             checksum1 += checksum1 >= 0xffff;
+      vlib_prefetch_buffer_header (b[6], LOAD);
+      vlib_prefetch_buffer_header (b[7], LOAD);
 
-             ip1->checksum = checksum1;
-             ttl1 -= 1;
-             ip1->ttl = ttl1;
+      adj_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
+      adj_index1 = vnet_buffer (b[1])->ip.adj_index[VLIB_TX];
 
-             /*
-              * If the ttl drops below 1 when forwarding, generate
-              * an ICMP response.
-              */
-             if (PREDICT_FALSE (ttl1 <= 0))
-               {
-                 error1 = IP4_ERROR_TIME_EXPIRED;
-                 vnet_buffer (p1)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-                 icmp4_error_set_vnet_buffer (p1, ICMP4_time_exceeded,
-                                              ICMP4_time_exceeded_ttl_exceeded_in_transit,
-                                              0);
-                 next1 = IP4_REWRITE_NEXT_ICMP_ERROR;
-               }
+      /*
+       * pre-fetch the per-adjacency counters
+       */
+      if (do_counters)
+       {
+         vlib_prefetch_combined_counter (&adjacency_counters,
+                                         thread_index, adj_index0);
+         vlib_prefetch_combined_counter (&adjacency_counters,
+                                         thread_index, adj_index1);
+       }
 
-             /* Verify checksum. */
-             ASSERT ((ip1->checksum == ip4_header_checksum (ip1)) ||
-                     (p1->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM));
-           }
-         else
-           {
-             p1->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
-           }
+      ip0 = vlib_buffer_get_current (b[0]);
+      ip1 = vlib_buffer_get_current (b[1]);
+
+      error0 = error1 = IP4_ERROR_NONE;
+
+      ip4_ttl_and_checksum_check (b[0], ip0, next + 0, &error0);
+      ip4_ttl_and_checksum_check (b[1], ip1, next + 1, &error1);
+
+      /* Rewrite packet header and update lengths. */
+      adj0 = adj_get (adj_index0);
+      adj1 = adj_get (adj_index1);
+
+      /* Worth pipelining. No guarantee that adj0,1 are hot... */
+      rw_len0 = adj0[0].rewrite_header.data_bytes;
+      rw_len1 = adj1[0].rewrite_header.data_bytes;
+      vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0;
+      vnet_buffer (b[1])->ip.save_rewrite_length = rw_len1;
+
+      p = vlib_buffer_get_current (b[2]);
+      CLIB_PREFETCH (p - CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES, STORE);
+      CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+
+      p = vlib_buffer_get_current (b[3]);
+      CLIB_PREFETCH (p - CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES, STORE);
+      CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+
+      /* Check MTU of outgoing interface. */
+      ip4_mtu_check (b[0], clib_net_to_host_u16 (ip0->length),
+                    adj0[0].rewrite_header.max_l3_packet_bytes,
+                    ip0->flags_and_fragment_offset &
+                    clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
+                    next + 0, &error0);
+      ip4_mtu_check (b[1], clib_net_to_host_u16 (ip1->length),
+                    adj1[0].rewrite_header.max_l3_packet_bytes,
+                    ip1->flags_and_fragment_offset &
+                    clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
+                    next + 1, &error1);
+
+      if (is_mcast)
+       {
+         error0 = ((adj0[0].rewrite_header.sw_if_index ==
+                    vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ?
+                   IP4_ERROR_SAME_INTERFACE : error0);
+         error1 = ((adj1[0].rewrite_header.sw_if_index ==
+                    vnet_buffer (b[1])->sw_if_index[VLIB_RX]) ?
+                   IP4_ERROR_SAME_INTERFACE : error1);
+       }
 
-         /* Rewrite packet header and updates lengths. */
-         adj0 = adj_get (adj_index0);
-         adj1 = adj_get (adj_index1);
-
-         /* Worth pipelining. No guarantee that adj0,1 are hot... */
-         rw_len0 = adj0[0].rewrite_header.data_bytes;
-         rw_len1 = adj1[0].rewrite_header.data_bytes;
-         vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
-         vnet_buffer (p1)->ip.save_rewrite_length = rw_len1;
-
-         /* Check MTU of outgoing interface. */
-         ip4_mtu_check (p0, clib_net_to_host_u16 (ip0->length),
-                        adj0[0].rewrite_header.max_l3_packet_bytes,
-                        ip0->flags_and_fragment_offset &
-                        clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
-                        &next0, &error0);
-         ip4_mtu_check (p1, clib_net_to_host_u16 (ip1->length),
-                        adj1[0].rewrite_header.max_l3_packet_bytes,
-                        ip1->flags_and_fragment_offset &
-                        clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
-                        &next1, &error1);
-
-         if (is_mcast)
-           {
-             error0 = ((adj0[0].rewrite_header.sw_if_index ==
-                        vnet_buffer (p0)->sw_if_index[VLIB_RX]) ?
-                       IP4_ERROR_SAME_INTERFACE : error0);
-             error1 = ((adj1[0].rewrite_header.sw_if_index ==
-                        vnet_buffer (p1)->sw_if_index[VLIB_RX]) ?
-                       IP4_ERROR_SAME_INTERFACE : error1);
-           }
+      b[0]->error = error_node->errors[error0];
+      b[1]->error = error_node->errors[error1];
+      /* Don't adjust the buffer for the TTL issue; the icmp-error node wants
+       * to see the IP header */
+      if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
+       {
+         u32 next_index = adj0[0].rewrite_header.next_index;
+         b[0]->current_data -= rw_len0;
+         b[0]->current_length += rw_len0;
+         tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+         vnet_buffer (b[0])->sw_if_index[VLIB_TX] = tx_sw_if_index0;
+
+         if (PREDICT_FALSE
+             (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+           vnet_feature_arc_start (lm->output_feature_arc_index,
+                                   tx_sw_if_index0, &next_index, b[0]);
+         next[0] = next_index;
+       }
+      if (PREDICT_TRUE (error1 == IP4_ERROR_NONE))
+       {
+         u32 next_index = adj1[0].rewrite_header.next_index;
+         b[1]->current_data -= rw_len1;
+         b[1]->current_length += rw_len1;
+
+         tx_sw_if_index1 = adj1[0].rewrite_header.sw_if_index;
+         vnet_buffer (b[1])->sw_if_index[VLIB_TX] = tx_sw_if_index1;
+
+         if (PREDICT_FALSE
+             (adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+           vnet_feature_arc_start (lm->output_feature_arc_index,
+                                   tx_sw_if_index1, &next_index, b[1]);
+         next[1] = next_index;
+       }
 
-         p0->error = error_node->errors[error0];
-         p1->error = error_node->errors[error1];
-         /* Don't adjust the buffer for ttl issue; icmp-error node wants
-          * to see the IP headerr */
-         if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
-           {
-             next0 = adj0[0].rewrite_header.next_index;
-             p0->current_data -= rw_len0;
-             p0->current_length += rw_len0;
-             tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
-             vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
-
-             if (PREDICT_FALSE
-                 (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
-               vnet_feature_arc_start (lm->output_feature_arc_index,
-                                       tx_sw_if_index0, &next0, p0);
-           }
-         if (PREDICT_TRUE (error1 == IP4_ERROR_NONE))
-           {
-             next1 = adj1[0].rewrite_header.next_index;
-             p1->current_data -= rw_len1;
-             p1->current_length += rw_len1;
+      /* Guess we are only writing a simple Ethernet header. */
+      vnet_rewrite_two_headers (adj0[0], adj1[0],
+                               ip0, ip1, sizeof (ethernet_header_t));
 
-             tx_sw_if_index1 = adj1[0].rewrite_header.sw_if_index;
-             vnet_buffer (p1)->sw_if_index[VLIB_TX] = tx_sw_if_index1;
+      /*
+       * Bump the per-adjacency counters
+       */
+      if (do_counters)
+       {
+         vlib_increment_combined_counter
+           (&adjacency_counters,
+            thread_index,
+            adj_index0, 1, vlib_buffer_length_in_chain (vm, b[0]) + rw_len0);
 
-             if (PREDICT_FALSE
-                 (adj1[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
-               vnet_feature_arc_start (lm->output_feature_arc_index,
-                                       tx_sw_if_index1, &next1, p1);
-           }
+         vlib_increment_combined_counter
+           (&adjacency_counters,
+            thread_index,
+            adj_index1, 1, vlib_buffer_length_in_chain (vm, b[1]) + rw_len1);
+       }
 
-         /* Guess we are only writing on simple Ethernet header. */
-         vnet_rewrite_two_headers (adj0[0], adj1[0],
-                                   ip0, ip1, sizeof (ethernet_header_t));
+      if (is_midchain)
+       {
+         adj0->sub_type.midchain.fixup_func
+           (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
+         adj1->sub_type.midchain.fixup_func
+           (vm, adj1, b[1], adj1->sub_type.midchain.fixup_data);
+       }
 
+      if (is_mcast)
+       {
          /*
-          * Bump the per-adjacency counters
+          * copy bytes from the IP address into the MAC rewrite
           */
-         if (do_counters)
-           {
-             vlib_increment_combined_counter
-               (&adjacency_counters,
-                thread_index,
-                adj_index0, 1,
-                vlib_buffer_length_in_chain (vm, p0) + rw_len0);
-
-             vlib_increment_combined_counter
-               (&adjacency_counters,
-                thread_index,
-                adj_index1, 1,
-                vlib_buffer_length_in_chain (vm, p1) + rw_len1);
-           }
-
-         if (is_midchain)
-           {
-             adj0->sub_type.midchain.fixup_func
-               (vm, adj0, p0, adj0->sub_type.midchain.fixup_data);
-             adj1->sub_type.midchain.fixup_func
-               (vm, adj1, p1, adj0->sub_type.midchain.fixup_data);
-           }
-         if (is_mcast)
-           {
-             /*
-              * copy bytes from the IP address into the MAC rewrite
-              */
-             vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
-                                         adj0->
-                                         rewrite_header.dst_mcast_offset,
-                                         &ip0->dst_address.as_u32,
-                                         (u8 *) ip0);
-             vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
-                                         adj0->
-                                         rewrite_header.dst_mcast_offset,
-                                         &ip1->dst_address.as_u32,
-                                         (u8 *) ip1);
-           }
-
-         vlib_validate_buffer_enqueue_x2 (vm, node, next_index,
-                                          to_next, n_left_to_next,
-                                          pi0, pi1, next0, next1);
+         vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
+                                     adj0->rewrite_header.dst_mcast_offset,
+                                     &ip0->dst_address.as_u32, (u8 *) ip0);
+         vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
+                                     adj0->rewrite_header.dst_mcast_offset,
+                                     &ip1->dst_address.as_u32, (u8 *) ip1);
        }
 
-      while (n_left_from > 0 && n_left_to_next > 0)
-       {
-         ip_adjacency_t *adj0;
-         vlib_buffer_t *p0;
-         ip4_header_t *ip0;
-         u32 pi0, rw_len0, adj_index0, next0, error0, checksum0;
-         u32 tx_sw_if_index0;
-
-         pi0 = to_next[0] = from[0];
-
-         p0 = vlib_get_buffer (vm, pi0);
-
-         adj_index0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
-
-         adj0 = adj_get (adj_index0);
-
-         ip0 = vlib_buffer_get_current (p0);
-
-         error0 = IP4_ERROR_NONE;
-         next0 = IP4_REWRITE_NEXT_DROP;        /* drop on error */
-
-         /* Decrement TTL & update checksum. */
-         if (PREDICT_TRUE (!(p0->flags & VNET_BUFFER_F_LOCALLY_ORIGINATED)))
-           {
-             i32 ttl0 = ip0->ttl;
-
-             checksum0 = ip0->checksum + clib_host_to_net_u16 (0x0100);
-
-             checksum0 += checksum0 >= 0xffff;
-
-             ip0->checksum = checksum0;
-
-             ASSERT (ip0->ttl > 0);
-
-             ttl0 -= 1;
+      next += 2;
+      b += 2;
+      n_left_from -= 2;
+    }
 
-             ip0->ttl = ttl0;
+  while (n_left_from > 0)
+    {
+      ip_adjacency_t *adj0;
+      ip4_header_t *ip0;
+      u32 rw_len0, adj_index0, error0;
+      u32 tx_sw_if_index0;
 
-             ASSERT ((ip0->checksum == ip4_header_checksum (ip0)) ||
-                     (p0->flags & VNET_BUFFER_F_OFFLOAD_IP_CKSUM));
+      adj_index0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
 
-             if (PREDICT_FALSE (ttl0 <= 0))
-               {
-                 /*
-                  * If the ttl drops below 1 when forwarding, generate
-                  * an ICMP response.
-                  */
-                 error0 = IP4_ERROR_TIME_EXPIRED;
-                 next0 = IP4_REWRITE_NEXT_ICMP_ERROR;
-                 vnet_buffer (p0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
-                 icmp4_error_set_vnet_buffer (p0, ICMP4_time_exceeded,
-                                              ICMP4_time_exceeded_ttl_exceeded_in_transit,
-                                              0);
-               }
-           }
-         else
-           {
-             p0->flags &= ~VNET_BUFFER_F_LOCALLY_ORIGINATED;
-           }
+      adj0 = adj_get (adj_index0);
 
-         if (do_counters)
-           vlib_prefetch_combined_counter (&adjacency_counters,
-                                           thread_index, adj_index0);
+      if (do_counters)
+       vlib_prefetch_combined_counter (&adjacency_counters,
+                                       thread_index, adj_index0);
 
-         /* Guess we are only writing on simple Ethernet header. */
-         vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
-         if (is_mcast)
-           {
-             /*
-              * copy bytes from the IP address into the MAC rewrite
-              */
-             vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
-                                         adj0->
-                                         rewrite_header.dst_mcast_offset,
-                                         &ip0->dst_address.as_u32,
-                                         (u8 *) ip0);
-           }
+      ip0 = vlib_buffer_get_current (b[0]);
 
-         /* Update packet buffer attributes/set output interface. */
-         rw_len0 = adj0[0].rewrite_header.data_bytes;
-         vnet_buffer (p0)->ip.save_rewrite_length = rw_len0;
+      error0 = IP4_ERROR_NONE;
 
-         if (do_counters)
-           vlib_increment_combined_counter
-             (&adjacency_counters,
-              thread_index, adj_index0, 1,
-              vlib_buffer_length_in_chain (vm, p0) + rw_len0);
+      ip4_ttl_and_checksum_check (b[0], ip0, next + 0, &error0);
 
-         /* Check MTU of outgoing interface. */
-         ip4_mtu_check (p0, clib_net_to_host_u16 (ip0->length),
-                        adj0[0].rewrite_header.max_l3_packet_bytes,
-                        ip0->flags_and_fragment_offset &
-                        clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
-                        &next0, &error0);
 
-         if (is_mcast)
-           {
-             error0 = ((adj0[0].rewrite_header.sw_if_index ==
-                        vnet_buffer (p0)->sw_if_index[VLIB_RX]) ?
-                       IP4_ERROR_SAME_INTERFACE : error0);
-           }
-         p0->error = error_node->errors[error0];
+      /* Update packet buffer attributes/set output interface. */
+      rw_len0 = adj0[0].rewrite_header.data_bytes;
+      vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0;
 
-         /* Don't adjust the buffer for ttl issue; icmp-error node wants
-          * to see the IP headerr */
-         if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
-           {
-             p0->current_data -= rw_len0;
-             p0->current_length += rw_len0;
-             tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+      /* Check MTU of outgoing interface. */
+      ip4_mtu_check (b[0], clib_net_to_host_u16 (ip0->length),
+                    adj0[0].rewrite_header.max_l3_packet_bytes,
+                    ip0->flags_and_fragment_offset &
+                    clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
+                    next + 0, &error0);
 
-             vnet_buffer (p0)->sw_if_index[VLIB_TX] = tx_sw_if_index0;
-             next0 = adj0[0].rewrite_header.next_index;
+      if (is_mcast)
+       {
+         error0 = ((adj0[0].rewrite_header.sw_if_index ==
+                    vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ?
+                   IP4_ERROR_SAME_INTERFACE : error0);
+       }
+      b[0]->error = error_node->errors[error0];
 
-             if (is_midchain)
-               {
-                 adj0->sub_type.midchain.fixup_func
-                   (vm, adj0, p0, adj0->sub_type.midchain.fixup_data);
-               }
+      /* Don't adjust the buffer for the TTL issue; the icmp-error node wants
+       * to see the IP header */
+      if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
+       {
+         u32 next_index = adj0[0].rewrite_header.next_index;
+         b[0]->current_data -= rw_len0;
+         b[0]->current_length += rw_len0;
+         tx_sw_if_index0 = adj0[0].rewrite_header.sw_if_index;
+         vnet_buffer (b[0])->sw_if_index[VLIB_TX] = tx_sw_if_index0;
+
+         if (PREDICT_FALSE
+             (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
+           vnet_feature_arc_start (lm->output_feature_arc_index,
+                                   tx_sw_if_index0, &next_index, b[0]);
+         next[0] = next_index;
+       }
 
-             if (PREDICT_FALSE
-                 (adj0[0].rewrite_header.flags & VNET_REWRITE_HAS_FEATURES))
-               vnet_feature_arc_start (lm->output_feature_arc_index,
-                                       tx_sw_if_index0, &next0, p0);
+      /* Guess we are only writing a simple Ethernet header. */
+      vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
 
-           }
+      if (do_counters)
+       vlib_increment_combined_counter
+         (&adjacency_counters,
+          thread_index, adj_index0, 1,
+          vlib_buffer_length_in_chain (vm, b[0]) + rw_len0);
 
-         from += 1;
-         n_left_from -= 1;
-         to_next += 1;
-         n_left_to_next -= 1;
+      if (is_midchain)
+       {
+         adj0->sub_type.midchain.fixup_func
+           (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
+       }
 
-         vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                          to_next, n_left_to_next,
-                                          pi0, next0);
+      if (is_mcast)
+       {
+         /*
+          * copy bytes from the IP address into the MAC rewrite
+          */
+         vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
+                                     adj0->rewrite_header.dst_mcast_offset,
+                                     &ip0->dst_address.as_u32, (u8 *) ip0);
        }
 
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+      next += 1;
+      b += 1;
+      n_left_from -= 1;
     }
 
+
   /* Need to do trace after rewrites to pick up new packet data. */
   if (node->flags & VLIB_NODE_FLAG_TRACE)
     ip4_forward_next_trace (vm, node, frame, VLIB_TX);
 
+  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
   return frame->n_vectors;
 }
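Overall, the rewrite loop moves from the classic vlib_get_next_frame()/vlib_validate_buffer_enqueue_x2() enqueue pattern to the bufs[]/nexts[] style that ends in a single vlib_buffer_enqueue_to_next() call. A skeleton of that dispatch shape with the per-packet work elided; process_one_packet() is a hypothetical placeholder, and the vlib declarations are assumed to come from the usual headers:

/* Skeleton of the nexts[] dispatch pattern used by ip4_rewrite_inline above;
 * process_one_packet() stands in for the per-packet work. */
static uword
sketch_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
		vlib_frame_t * frame)
{
  u32 *from = vlib_frame_vector_args (frame);
  u32 n_left_from = frame->n_vectors;
  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
  u16 nexts[VLIB_FRAME_SIZE], *next = nexts;

  /* Resolve all buffer indices to pointers up front. */
  vlib_get_buffers (vm, from, bufs, n_left_from);

  while (n_left_from > 0)
    {
      /* Pick the next node for this buffer (placeholder). */
      next[0] = process_one_packet (vm, b[0]);
      b += 1;
      next += 1;
      n_left_from -= 1;
    }

  /* Hand the whole frame off in one call rather than per-packet enqueues. */
  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
  return frame->n_vectors;
}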