Punt: specify packets by IP protocol Type
diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
index 4fdedc1..43213fe 100644
@@ -55,6 +55,7 @@
 #include <vnet/mfib/mfib_table.h>      /* for mFIB table and entry creation */
 
 #include <vnet/ip/ip4_forward.h>
+#include <vnet/interface_output.h>
 
 /** @brief IPv4 lookup node.
     @node ip4-lookup
@@ -115,185 +116,152 @@ VLIB_NODE_FN (ip4_load_balance_node) (vlib_main_t * vm,
                                      vlib_frame_t * frame)
 {
   vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters;
-  u32 n_left_from, n_left_to_next, *from, *to_next;
-  ip_lookup_next_t next;
+  u32 n_left, *from;
   u32 thread_index = vm->thread_index;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+  u16 nexts[VLIB_FRAME_SIZE], *next;
 
   from = vlib_frame_vector_args (frame);
-  n_left_from = frame->n_vectors;
-  next = node->cached_next_index;
+  n_left = frame->n_vectors;
+  next = nexts;
 
-  if (node->flags & VLIB_NODE_FLAG_TRACE)
-    ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+  vlib_get_buffers (vm, from, bufs, n_left);
 
-  while (n_left_from > 0)
+  while (n_left >= 4)
     {
-      vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
-
+      const load_balance_t *lb0, *lb1;
+      const ip4_header_t *ip0, *ip1;
+      u32 lbi0, hc0, lbi1, hc1;
+      const dpo_id_t *dpo0, *dpo1;
 
-      while (n_left_from >= 4 && n_left_to_next >= 2)
-       {
-         ip_lookup_next_t next0, next1;
-         const load_balance_t *lb0, *lb1;
-         vlib_buffer_t *p0, *p1;
-         u32 pi0, lbi0, hc0, pi1, lbi1, hc1;
-         const ip4_header_t *ip0, *ip1;
-         const dpo_id_t *dpo0, *dpo1;
-
-         /* Prefetch next iteration. */
-         {
-           vlib_buffer_t *p2, *p3;
-
-           p2 = vlib_get_buffer (vm, from[2]);
-           p3 = vlib_get_buffer (vm, from[3]);
-
-           vlib_prefetch_buffer_header (p2, STORE);
-           vlib_prefetch_buffer_header (p3, STORE);
-
-           CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
-           CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
-         }
-
-         pi0 = to_next[0] = from[0];
-         pi1 = to_next[1] = from[1];
-
-         from += 2;
-         n_left_from -= 2;
-         to_next += 2;
-         n_left_to_next -= 2;
+      /* Prefetch next iteration. */
+      {
+       vlib_prefetch_buffer_header (b[2], LOAD);
+       vlib_prefetch_buffer_header (b[3], LOAD);
 
-         p0 = vlib_get_buffer (vm, pi0);
-         p1 = vlib_get_buffer (vm, pi1);
+       CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), LOAD);
+       CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), LOAD);
+      }
 
-         ip0 = vlib_buffer_get_current (p0);
-         ip1 = vlib_buffer_get_current (p1);
-         lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
-         lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
+      ip0 = vlib_buffer_get_current (b[0]);
+      ip1 = vlib_buffer_get_current (b[1]);
+      lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
+      lbi1 = vnet_buffer (b[1])->ip.adj_index[VLIB_TX];
 
-         lb0 = load_balance_get (lbi0);
-         lb1 = load_balance_get (lbi1);
+      lb0 = load_balance_get (lbi0);
+      lb1 = load_balance_get (lbi1);
 
-         /*
-          * this node is for via FIBs we can re-use the hash value from the
-          * to node if present.
-          * We don't want to use the same hash value at each level in the recursion
-          * graph as that would lead to polarisation
-          */
-         hc0 = hc1 = 0;
+      /*
+       * this node is for via FIBs we can re-use the hash value from the
+       * to node if present.
+       * We don't want to use the same hash value at each level in the recursion
+       * graph as that would lead to polarisation
+       */
+      hc0 = hc1 = 0;
 
-         if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+       {
+         if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash))
            {
-             if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
-               {
-                 hc0 = vnet_buffer (p0)->ip.flow_hash =
-                   vnet_buffer (p0)->ip.flow_hash >> 1;
-               }
-             else
-               {
-                 hc0 = vnet_buffer (p0)->ip.flow_hash =
-                   ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
-               }
-             dpo0 = load_balance_get_fwd_bucket
-               (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+             hc0 = vnet_buffer (b[0])->ip.flow_hash =
+               vnet_buffer (b[0])->ip.flow_hash >> 1;
            }
          else
            {
-             dpo0 = load_balance_get_bucket_i (lb0, 0);
+             hc0 = vnet_buffer (b[0])->ip.flow_hash =
+               ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
            }
-         if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+         dpo0 = load_balance_get_fwd_bucket
+           (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+       }
+      else
+       {
+         dpo0 = load_balance_get_bucket_i (lb0, 0);
+       }
+      if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+       {
+         if (PREDICT_TRUE (vnet_buffer (b[1])->ip.flow_hash))
            {
-             if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash))
-               {
-                 hc1 = vnet_buffer (p1)->ip.flow_hash =
-                   vnet_buffer (p1)->ip.flow_hash >> 1;
-               }
-             else
-               {
-                 hc1 = vnet_buffer (p1)->ip.flow_hash =
-                   ip4_compute_flow_hash (ip1, lb1->lb_hash_config);
-               }
-             dpo1 = load_balance_get_fwd_bucket
-               (lb1, (hc1 & (lb1->lb_n_buckets_minus_1)));
+             hc1 = vnet_buffer (b[1])->ip.flow_hash =
+               vnet_buffer (b[1])->ip.flow_hash >> 1;
            }
          else
            {
-             dpo1 = load_balance_get_bucket_i (lb1, 0);
+             hc1 = vnet_buffer (b[1])->ip.flow_hash =
+               ip4_compute_flow_hash (ip1, lb1->lb_hash_config);
            }
+         dpo1 = load_balance_get_fwd_bucket
+           (lb1, (hc1 & (lb1->lb_n_buckets_minus_1)));
+       }
+      else
+       {
+         dpo1 = load_balance_get_bucket_i (lb1, 0);
+       }
 
-         next0 = dpo0->dpoi_next_node;
-         next1 = dpo1->dpoi_next_node;
-
-         vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
-         vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
-
-         vlib_increment_combined_counter
-           (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
-         vlib_increment_combined_counter
-           (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
+      next[0] = dpo0->dpoi_next_node;
+      next[1] = dpo1->dpoi_next_node;
 
-         vlib_validate_buffer_enqueue_x2 (vm, node, next,
-                                          to_next, n_left_to_next,
-                                          pi0, pi1, next0, next1);
-       }
+      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
 
-      while (n_left_from > 0 && n_left_to_next > 0)
-       {
-         ip_lookup_next_t next0;
-         const load_balance_t *lb0;
-         vlib_buffer_t *p0;
-         u32 pi0, lbi0, hc0;
-         const ip4_header_t *ip0;
-         const dpo_id_t *dpo0;
+      vlib_increment_combined_counter
+       (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0]));
+      vlib_increment_combined_counter
+       (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b[1]));
 
-         pi0 = from[0];
-         to_next[0] = pi0;
-         from += 1;
-         to_next += 1;
-         n_left_to_next -= 1;
-         n_left_from -= 1;
+      b += 2;
+      next += 2;
+      n_left -= 2;
+    }
 
-         p0 = vlib_get_buffer (vm, pi0);
+  while (n_left > 0)
+    {
+      const load_balance_t *lb0;
+      const ip4_header_t *ip0;
+      const dpo_id_t *dpo0;
+      u32 lbi0, hc0;
 
-         ip0 = vlib_buffer_get_current (p0);
-         lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+      ip0 = vlib_buffer_get_current (b[0]);
+      lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
 
-         lb0 = load_balance_get (lbi0);
+      lb0 = load_balance_get (lbi0);
 
-         hc0 = 0;
-         if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+      hc0 = 0;
+      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+       {
+         if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash))
            {
-             if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
-               {
-                 hc0 = vnet_buffer (p0)->ip.flow_hash =
-                   vnet_buffer (p0)->ip.flow_hash >> 1;
-               }
-             else
-               {
-                 hc0 = vnet_buffer (p0)->ip.flow_hash =
-                   ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
-               }
-             dpo0 = load_balance_get_fwd_bucket
-               (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+             hc0 = vnet_buffer (b[0])->ip.flow_hash =
+               vnet_buffer (b[0])->ip.flow_hash >> 1;
            }
          else
            {
-             dpo0 = load_balance_get_bucket_i (lb0, 0);
+             hc0 = vnet_buffer (b[0])->ip.flow_hash =
+               ip4_compute_flow_hash (ip0, lb0->lb_hash_config);
            }
+         dpo0 = load_balance_get_fwd_bucket
+           (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+       }
+      else
+       {
+         dpo0 = load_balance_get_bucket_i (lb0, 0);
+       }
 
-         next0 = dpo0->dpoi_next_node;
-         vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+      next[0] = dpo0->dpoi_next_node;
+      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
-         vlib_increment_combined_counter
-           (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+      vlib_increment_combined_counter
+       (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0]));
 
-         vlib_validate_buffer_enqueue_x1 (vm, node, next,
-                                          to_next, n_left_to_next,
-                                          pi0, next0);
-       }
-
-      vlib_put_next_frame (vm, node, next, n_left_to_next);
+      b += 1;
+      next += 1;
+      n_left -= 1;
     }
 
+  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+  if (node->flags & VLIB_NODE_FLAG_TRACE)
+    ip4_forward_next_trace (vm, node, frame, VLIB_TX);
+
   return frame->n_vectors;
 }
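
Illustrative aside, not part of this change: the hunk above moves ip4-load-balance from the older vlib_get_next_frame / vlib_validate_buffer_enqueue_x1 scheme to the buffer-pointer style used by newer VPP nodes, where buffer indices are resolved to pointers once, a 16-bit next index is recorded per packet, and the whole frame is handed to vlib_buffer_enqueue_to_next at the end. A minimal skeleton of that pattern (node name and next index are placeholders) might look like:

    /* assumes the usual #include <vlib/vlib.h> and a registered node */
    static uword
    example_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_frame_t * frame)
    {
      vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
      u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
      u32 *from = vlib_frame_vector_args (frame);
      u32 n_left = frame->n_vectors;

      /* translate buffer indices to buffer pointers once, up front */
      vlib_get_buffers (vm, from, bufs, n_left);

      while (n_left > 0)
        {
          /* per-packet work on b[0] goes here; record its next node */
          next[0] = 0;          /* placeholder next index */
          b += 1;
          next += 1;
          n_left -= 1;
        }

      /* one call replaces get_next_frame / validate_enqueue / put_next_frame */
      vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
      return frame->n_vectors;
    }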
 
@@ -547,6 +515,12 @@ ip4_sw_interface_enable_disable (u32 sw_if_index, u32 is_enable)
 
   vnet_feature_enable_disable ("ip4-multicast", "ip4-not-enabled",
                               sw_if_index, !is_enable, 0, 0);
+
+  {
+    ip4_enable_disable_interface_callback_t *cb;
+    vec_foreach (cb, im->enable_disable_interface_callbacks)
+      cb->function (im, cb->function_opaque, sw_if_index, is_enable);
+  }
 }
 
 static clib_error_t *
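
Illustrative aside, not part of this change: the callbacks invoked above are kept in a vector on ip4_main, and the call site shows the expected argument order (im, opaque, sw_if_index, is_enable); the exact callback prototype is inferred from that call site. A component that wants to be notified when IPv4 is enabled or disabled on an interface could register roughly like this (the callback body and registration hook are hypothetical):

    static void
    my_ip4_enable_disable_cb (ip4_main_t * im, uword opaque,
                              u32 sw_if_index, u32 is_enable)
    {
      /* react to IPv4 being switched on or off for sw_if_index */
    }

    static void
    my_register_ip4_callback (void)
    {
      ip4_main_t *im = &ip4_main;
      ip4_enable_disable_interface_callback_t cb = {
        .function = my_ip4_enable_disable_cb,
        .function_opaque = 0,
      };
      vec_add1 (im->enable_disable_interface_callbacks, cb);
    }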
@@ -711,6 +685,7 @@ VNET_FEATURE_ARC_INIT (ip4_unicast, static) =
 {
   .arc_name = "ip4-unicast",
   .start_nodes = VNET_FEATURES ("ip4-input", "ip4-input-no-checksum"),
+  .last_in_arc = "ip4-lookup",
   .arc_index_ptr = &ip4_main.lookup_main.ucast_feature_arc_index,
 };
 
@@ -753,13 +728,13 @@ VNET_FEATURE_INIT (ip4_policer_classify, static) =
 {
   .arc_name = "ip4-unicast",
   .node_name = "ip4-policer-classify",
-  .runs_before = VNET_FEATURES ("ipsec4-input"),
+  .runs_before = VNET_FEATURES ("ipsec4-input-feature"),
 };
 
 VNET_FEATURE_INIT (ip4_ipsec, static) =
 {
   .arc_name = "ip4-unicast",
-  .node_name = "ipsec4-input",
+  .node_name = "ipsec4-input-feature",
   .runs_before = VNET_FEATURES ("vpath-input-ip4"),
 };
 
@@ -796,6 +771,7 @@ VNET_FEATURE_ARC_INIT (ip4_multicast, static) =
 {
   .arc_name = "ip4-multicast",
   .start_nodes = VNET_FEATURES ("ip4-input", "ip4-input-no-checksum"),
+  .last_in_arc = "ip4-mfib-forward-lookup",
   .arc_index_ptr = &ip4_main.lookup_main.mcast_feature_arc_index,
 };
 
@@ -825,6 +801,7 @@ VNET_FEATURE_ARC_INIT (ip4_output, static) =
 {
   .arc_name = "ip4-output",
   .start_nodes = VNET_FEATURES ("ip4-rewrite", "ip4-midchain", "ip4-dvr-dpo"),
+  .last_in_arc = "interface-output",
   .arc_index_ptr = &ip4_main.lookup_main.output_feature_arc_index,
 };
 
@@ -839,13 +816,13 @@ VNET_FEATURE_INIT (ip4_outacl, static) =
 {
   .arc_name = "ip4-output",
   .node_name = "ip4-outacl",
-  .runs_before = VNET_FEATURES ("ipsec4-output"),
+  .runs_before = VNET_FEATURES ("ipsec4-output-feature"),
 };
 
 VNET_FEATURE_INIT (ip4_ipsec_output, static) =
 {
   .arc_name = "ip4-output",
-  .node_name = "ipsec4-output",
+  .node_name = "ipsec4-output-feature",
   .runs_before = VNET_FEATURES ("interface-output"),
 };
 
@@ -897,7 +874,9 @@ ip4_sw_interface_add_del (vnet_main_t * vnm, u32 sw_if_index, u32 is_add)
 VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ip4_sw_interface_add_del);
 
 /* Global IP4 main. */
+#ifndef CLIB_MARCH_VARIANT
 ip4_main_t ip4_main;
+#endif /* CLIB_MARCH_VARIANT */
 
 static clib_error_t *
 ip4_lookup_init (vlib_main_t * vm)
@@ -945,10 +924,6 @@ ip4_lookup_init (vlib_main_t * vm)
 
     clib_memset (&h, 0, sizeof (h));
 
-    /* Set target ethernet address to all zeros. */
-    clib_memset (h.ip4_over_ethernet[1].ethernet, 0,
-                sizeof (h.ip4_over_ethernet[1].ethernet));
-
 #define _16(f,v) h.f = clib_host_to_net_u16 (v);
 #define _8(f,v) h.f = v;
     _16 (l2_type, ETHERNET_ARP_HARDWARE_TYPE_ethernet);
@@ -1072,9 +1047,9 @@ ip4_forward_next_trace (vlib_main_t * vm,
            vec_elt (im->fib_index_by_sw_if_index,
                     vnet_buffer (b0)->sw_if_index[VLIB_RX]);
 
-         clib_memcpy (t0->packet_data,
-                      vlib_buffer_get_current (b0),
-                      sizeof (t0->packet_data));
+         clib_memcpy_fast (t0->packet_data,
+                           vlib_buffer_get_current (b0),
+                           sizeof (t0->packet_data));
        }
       if (b1->flags & VLIB_BUFFER_IS_TRACED)
        {
@@ -1086,8 +1061,8 @@ ip4_forward_next_trace (vlib_main_t * vm,
             (u32) ~ 0) ? vnet_buffer (b1)->sw_if_index[VLIB_TX] :
            vec_elt (im->fib_index_by_sw_if_index,
                     vnet_buffer (b1)->sw_if_index[VLIB_RX]);
-         clib_memcpy (t1->packet_data, vlib_buffer_get_current (b1),
-                      sizeof (t1->packet_data));
+         clib_memcpy_fast (t1->packet_data, vlib_buffer_get_current (b1),
+                           sizeof (t1->packet_data));
        }
       from += 2;
       n_left -= 2;
@@ -1113,8 +1088,8 @@ ip4_forward_next_trace (vlib_main_t * vm,
             (u32) ~ 0) ? vnet_buffer (b0)->sw_if_index[VLIB_TX] :
            vec_elt (im->fib_index_by_sw_if_index,
                     vnet_buffer (b0)->sw_if_index[VLIB_RX]);
-         clib_memcpy (t0->packet_data, vlib_buffer_get_current (b0),
-                      sizeof (t0->packet_data));
+         clib_memcpy_fast (t0->packet_data, vlib_buffer_get_current (b0),
+                           sizeof (t0->packet_data));
        }
       from += 1;
       n_left -= 1;
@@ -1172,7 +1147,7 @@ ip4_tcp_udp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
       ASSERT (p0->flags & VLIB_BUFFER_NEXT_PRESENT);
       p0 = vlib_get_buffer (vm, p0->next_buffer);
       data_this_buffer = vlib_buffer_get_current (p0);
-      n_this_buffer = p0->current_length;
+      n_this_buffer = clib_min (p0->current_length, n_bytes_left);
     }
 
   sum16 = ~ip_csum_fold (sum0);
@@ -1212,6 +1187,7 @@ VNET_FEATURE_ARC_INIT (ip4_local) =
 {
   .arc_name  = "ip4-local",
   .start_nodes = VNET_FEATURES ("ip4-local"),
+  .last_in_arc = "ip4-local-end-of-arc",
 };
 /* *INDENT-ON* */
 
@@ -1330,6 +1306,7 @@ typedef struct
   ip4_address_t src;
   u32 lbi;
   u8 error;
+  u8 first;
 } ip4_local_last_check_t;
 
 static inline void
@@ -1346,7 +1323,8 @@ ip4_local_check_src (vlib_buffer_t * b, ip4_header_t * ip0,
     vnet_buffer (b)->sw_if_index[VLIB_TX] != ~0 ?
     vnet_buffer (b)->sw_if_index[VLIB_TX] : vnet_buffer (b)->ip.fib_index;
 
-  if (PREDICT_FALSE (last_check->src.as_u32 != ip0->src_address.as_u32))
+  if (PREDICT_FALSE (last_check->first ||
+                    (last_check->src.as_u32 != ip0->src_address.as_u32)))
     {
       mtrie0 = &ip4_fib_get (vnet_buffer (b)->ip.fib_index)->mtrie;
       leaf0 = ip4_fib_mtrie_lookup_step_one (mtrie0, &ip0->src_address);
@@ -1388,6 +1366,7 @@ ip4_local_check_src (vlib_buffer_t * b, ip4_header_t * ip0,
       vnet_buffer (b)->ip.adj_index[VLIB_TX] = last_check->lbi;
       vnet_buffer (b)->ip.adj_index[VLIB_RX] = last_check->lbi;
       *error0 = last_check->error;
+      last_check->first = 0;
     }
 }
 
@@ -1399,9 +1378,10 @@ ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip,
   ip4_fib_mtrie_t *mtrie[2];
   const dpo_id_t *dpo[2];
   load_balance_t *lb[2];
-  u32 not_last_hit = 0;
+  u32 not_last_hit;
   u32 lbi[2];
 
+  not_last_hit = last_check->first;
   not_last_hit |= ip[0]->src_address.as_u32 ^ last_check->src.as_u32;
   not_last_hit |= ip[1]->src_address.as_u32 ^ last_check->src.as_u32;
 
@@ -1478,6 +1458,7 @@ ip4_local_check_src_x2 (vlib_buffer_t ** b, ip4_header_t ** ip,
 
       error[0] = last_check->error;
       error[1] = last_check->error;
+      last_check->first = 0;
     }
 }
 
@@ -1528,9 +1509,16 @@ ip4_local_inline (vlib_main_t * vm,
   u8 error[2], pt[2];
 
   ip4_local_last_check_t last_check = {
+    /*
+     * 0.0.0.0 can appear as the source address of an IP packet,
+     * as can any other address, hence the need to use the 'first'
+     * member to make sure the .lbi is initialised for the first
+     * packet.
+     */
     .src = {.as_u32 = 0},
     .lbi = ~0,
-    .error = IP4_ERROR_UNKNOWN_PROTOCOL
+    .error = IP4_ERROR_UNKNOWN_PROTOCOL,
+    .first = 1,
   };
 
   from = vlib_frame_vector_args (frame);
@@ -1691,6 +1679,16 @@ ip4_register_protocol (u32 protocol, u32 node_index)
   lm->local_next_by_ip_protocol[protocol] =
     vlib_node_add_next (vm, ip4_local_node.index, node_index);
 }
+
+void
+ip4_unregister_protocol (u32 protocol)
+{
+  ip4_main_t *im = &ip4_main;
+  ip_lookup_main_t *lm = &im->lookup_main;
+
+  ASSERT (protocol < ARRAY_LEN (lm->local_next_by_ip_protocol));
+  lm->local_next_by_ip_protocol[protocol] = IP_LOCAL_NEXT_PUNT;
+}
 #endif
 
 static clib_error_t *
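
Illustrative aside, not part of this change: ip4_unregister_protocol is the counterpart of ip4_register_protocol shown a few lines up. It points the ip4-local next index for that protocol back at IP_LOCAL_NEXT_PUNT, so locally addressed packets of that protocol are punted again. A feature that temporarily claims an IP protocol might use the pair as sketched below (the node and protocol number 253, an RFC 3692 experimental value, are illustrative only):

    /* hypothetical graph node that consumes IP protocol 253 */
    extern vlib_node_registration_t sample_proto_input_node;

    static void
    sample_proto_enable_disable (int enable)
    {
      if (enable)
        /* deliver ip4-local packets of protocol 253 to our node */
        ip4_register_protocol (253, sample_proto_input_node.index);
      else
        /* restore the default: such packets go to the punt node */
        ip4_unregister_protocol (253);
    }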
@@ -1709,8 +1707,8 @@ show_ip_local_command_fn (vlib_main_t * vm,
          u32 node_index = vlib_get_node (vm,
                                          ip4_local_node.index)->
            next_nodes[lm->local_next_by_ip_protocol[i]];
-         vlib_cli_output (vm, "%d: %U", i, format_vlib_node_name, vm,
-                          node_index);
+         vlib_cli_output (vm, "%U: %U", format_ip_protocol, i,
+                          format_vlib_node_name, vm, node_index);
        }
     }
   return 0;
@@ -1838,7 +1836,6 @@ ip4_arp_inline (vlib_main_t * vm,
            vlib_packet_template_get_packet (vm,
                                             &im->ip4_arp_request_packet_template,
                                             &bi0);
-
          /* Seems we're out of buffers */
          if (PREDICT_FALSE (!h0))
            {
@@ -1846,15 +1843,19 @@ ip4_arp_inline (vlib_main_t * vm,
              continue;
            }
 
+         b0 = vlib_get_buffer (vm, bi0);
+
+         /* copy the persistent fields from the original */
+         clib_memcpy_fast (b0->opaque2, p0->opaque2, sizeof (p0->opaque2));
+
          /* Add rewrite/encap string for ARP packet. */
          vnet_rewrite_one_header (adj0[0], h0, sizeof (ethernet_header_t));
 
          hw_if0 = vnet_get_sup_hw_interface (vnm, sw_if_index0);
 
          /* Src ethernet address in ARP header. */
-         clib_memcpy (h0->ip4_over_ethernet[0].ethernet,
-                      hw_if0->hw_address,
-                      sizeof (h0->ip4_over_ethernet[0].ethernet));
+         mac_address_from_bytes (&h0->ip4_over_ethernet[0].mac,
+                                 hw_if0->hw_address);
          if (is_glean)
            {
              /* The interface's source address is stashed in the Glean Adj */
@@ -1878,7 +1879,6 @@ ip4_arp_inline (vlib_main_t * vm,
          p0->error = node->errors[IP4_ARP_ERROR_REQUEST_SENT];
 
          vlib_buffer_copy_trace_flag (vm, p0, bi0);
-         b0 = vlib_get_buffer (vm, bi0);
          VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
          vnet_buffer (b0)->sw_if_index[VLIB_TX] = sw_if_index0;
 
@@ -2027,8 +2027,7 @@ ip4_probe_neighbor (vlib_main_t * vm, ip4_address_t * dst, u32 sw_if_index,
                                sw_if_index);
     }
 
-  clib_memcpy (h->ip4_over_ethernet[0].ethernet, hi->hw_address,
-              sizeof (h->ip4_over_ethernet[0].ethernet));
+  mac_address_from_bytes (&h->ip4_over_ethernet[0].mac, hi->hw_address);
 
   h->ip4_over_ethernet[0].ip4 = src[0];
   h->ip4_over_ethernet[1].ip4 = dst[0];
@@ -2171,10 +2170,11 @@ ip4_ttl_and_checksum_check (vlib_buffer_t * b, ip4_header_t * ip, u16 * next,
 
 
 always_inline uword
-ip4_rewrite_inline (vlib_main_t * vm,
-                   vlib_node_runtime_t * node,
-                   vlib_frame_t * frame,
-                   int do_counters, int is_midchain, int is_mcast)
+ip4_rewrite_inline_with_gso (vlib_main_t * vm,
+                            vlib_node_runtime_t * node,
+                            vlib_frame_t * frame,
+                            int do_counters, int is_midchain, int is_mcast,
+                            int do_gso)
 {
   ip_lookup_main_t *lm = &ip4_main.lookup_main;
   u32 *from = vlib_frame_vector_args (frame);
@@ -2193,7 +2193,7 @@ ip4_rewrite_inline (vlib_main_t * vm,
   if (n_left_from >= 6)
     {
       int i;
-      for (i = 0; i < 6; i++)
+      for (i = 2; i < 6; i++)
        vlib_prefetch_buffer_header (bufs[i], LOAD);
     }
 
@@ -2252,12 +2252,20 @@ ip4_rewrite_inline (vlib_main_t * vm,
       CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
 
       /* Check MTU of outgoing interface. */
-      ip4_mtu_check (b[0], clib_net_to_host_u16 (ip0->length),
+      u16 ip0_len = clib_net_to_host_u16 (ip0->length);
+      u16 ip1_len = clib_net_to_host_u16 (ip1->length);
+
+      if (do_gso && (b[0]->flags & VNET_BUFFER_F_GSO))
+       ip0_len = gso_mtu_sz (b[0]);
+      if (do_gso && (b[1]->flags & VNET_BUFFER_F_GSO))
+       ip1_len = gso_mtu_sz (b[1]);
+
+      ip4_mtu_check (b[0], ip0_len,
                     adj0[0].rewrite_header.max_l3_packet_bytes,
                     ip0->flags_and_fragment_offset &
                     clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
                     next + 0, &error0);
-      ip4_mtu_check (b[1], clib_net_to_host_u16 (ip1->length),
+      ip4_mtu_check (b[1], ip1_len,
                     adj1[0].rewrite_header.max_l3_packet_bytes,
                     ip1->flags_and_fragment_offset &
                     clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
@@ -2273,10 +2281,8 @@ ip4_rewrite_inline (vlib_main_t * vm,
                    IP4_ERROR_SAME_INTERFACE : error1);
        }
 
-      b[0]->error = error_node->errors[error0];
-      b[1]->error = error_node->errors[error1];
       /* Don't adjust the buffer for ttl issue; icmp-error node wants
-       * to see the IP headerr */
+       * to see the IP header */
       if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
        {
          u32 next_index = adj0[0].rewrite_header.next_index;
@@ -2291,6 +2297,10 @@ ip4_rewrite_inline (vlib_main_t * vm,
                                    tx_sw_if_index0, &next_index, b[0]);
          next[0] = next_index;
        }
+      else
+       {
+         b[0]->error = error_node->errors[error0];
+       }
       if (PREDICT_TRUE (error1 == IP4_ERROR_NONE))
        {
          u32 next_index = adj1[0].rewrite_header.next_index;
@@ -2306,7 +2316,15 @@ ip4_rewrite_inline (vlib_main_t * vm,
                                    tx_sw_if_index1, &next_index, b[1]);
          next[1] = next_index;
        }
-
+      else
+       {
+         b[1]->error = error_node->errors[error1];
+       }
+      if (is_midchain)
+       {
+         calc_checksums (vm, b[0]);
+         calc_checksums (vm, b[1]);
+       }
       /* Guess we are only writing on simple Ethernet header. */
       vnet_rewrite_two_headers (adj0[0], adj1[0],
                                ip0, ip1, sizeof (ethernet_header_t));
@@ -2329,10 +2347,12 @@ ip4_rewrite_inline (vlib_main_t * vm,
 
       if (is_midchain)
        {
-         adj0->sub_type.midchain.fixup_func
-           (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
-         adj1->sub_type.midchain.fixup_func
-           (vm, adj1, b[1], adj0->sub_type.midchain.fixup_data);
+         if (adj0->sub_type.midchain.fixup_func)
+           adj0->sub_type.midchain.fixup_func
+             (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
+         if (adj1->sub_type.midchain.fixup_func)
+           adj1->sub_type.midchain.fixup_func
+             (vm, adj1, b[1], adj1->sub_type.midchain.fixup_data);
        }
 
       if (is_mcast)
@@ -2344,7 +2364,7 @@ ip4_rewrite_inline (vlib_main_t * vm,
                                      adj0->rewrite_header.dst_mcast_offset,
                                      &ip0->dst_address.as_u32, (u8 *) ip0);
          vnet_ip_mcast_fixup_header (IP4_MCAST_ADDR_MASK,
-                                     adj0->rewrite_header.dst_mcast_offset,
+                                     adj1->rewrite_header.dst_mcast_offset,
                                      &ip1->dst_address.as_u32, (u8 *) ip1);
        }
 
@@ -2380,7 +2400,11 @@ ip4_rewrite_inline (vlib_main_t * vm,
       vnet_buffer (b[0])->ip.save_rewrite_length = rw_len0;
 
       /* Check MTU of outgoing interface. */
-      ip4_mtu_check (b[0], clib_net_to_host_u16 (ip0->length),
+      u16 ip0_len = clib_net_to_host_u16 (ip0->length);
+      if (do_gso && (b[0]->flags & VNET_BUFFER_F_GSO))
+       ip0_len = gso_mtu_sz (b[0]);
+
+      ip4_mtu_check (b[0], ip0_len,
                     adj0[0].rewrite_header.max_l3_packet_bytes,
                     ip0->flags_and_fragment_offset &
                     clib_host_to_net_u16 (IP4_HEADER_FLAG_DONT_FRAGMENT),
@@ -2392,10 +2416,9 @@ ip4_rewrite_inline (vlib_main_t * vm,
                     vnet_buffer (b[0])->sw_if_index[VLIB_RX]) ?
                    IP4_ERROR_SAME_INTERFACE : error0);
        }
-      b[0]->error = error_node->errors[error0];
 
       /* Don't adjust the buffer for ttl issue; icmp-error node wants
-       * to see the IP headerr */
+       * to see the IP header */
       if (PREDICT_TRUE (error0 == IP4_ERROR_NONE))
        {
          u32 next_index = adj0[0].rewrite_header.next_index;
@@ -2410,7 +2433,14 @@ ip4_rewrite_inline (vlib_main_t * vm,
                                    tx_sw_if_index0, &next_index, b[0]);
          next[0] = next_index;
        }
-
+      else
+       {
+         b[0]->error = error_node->errors[error0];
+       }
+      if (is_midchain)
+       {
+         calc_checksums (vm, b[0]);
+       }
       /* Guess we are only writing on simple Ethernet header. */
       vnet_rewrite_one_header (adj0[0], ip0, sizeof (ethernet_header_t));
 
@@ -2422,8 +2452,9 @@ ip4_rewrite_inline (vlib_main_t * vm,
 
       if (is_midchain)
        {
-         adj0->sub_type.midchain.fixup_func
-           (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
+         if (adj0->sub_type.midchain.fixup_func)
+           adj0->sub_type.midchain.fixup_func
+             (vm, adj0, b[0], adj0->sub_type.midchain.fixup_data);
        }
 
       if (is_mcast)
@@ -2450,6 +2481,23 @@ ip4_rewrite_inline (vlib_main_t * vm,
   return frame->n_vectors;
 }
 
+always_inline uword
+ip4_rewrite_inline (vlib_main_t * vm,
+                   vlib_node_runtime_t * node,
+                   vlib_frame_t * frame,
+                   int do_counters, int is_midchain, int is_mcast)
+{
+  vnet_main_t *vnm = vnet_get_main ();
+  if (PREDICT_FALSE (vnm->interface_main.gso_interface_count > 0))
+    return ip4_rewrite_inline_with_gso (vm, node, frame, do_counters,
+                                       is_midchain, is_mcast,
+                                       1 /* do_gso */ );
+  else
+    return ip4_rewrite_inline_with_gso (vm, node, frame, do_counters,
+                                       is_midchain, is_mcast,
+                                       0 /* no do_gso */ );
+}
+
 
 /** @brief IPv4 rewrite node.
     @node ip4-rewrite