ip: fix udp/tcp checksum corner cases
diff --git a/src/vnet/ip/ip6_forward.c b/src/vnet/ip/ip6_forward.c
index 6df3d4b..067db77 100644
--- a/src/vnet/ip/ip6_forward.c
+++ b/src/vnet/ip/ip6_forward.c
 #define OI_DECAP   0x80000000
 
 static void
-ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
-                         ip6_main_t * im, u32 fib_index,
-                         ip_interface_address_t * a)
+ip6_add_interface_prefix_routes (ip6_main_t * im,
+                                u32 sw_if_index,
+                                u32 fib_index,
+                                ip6_address_t * address, u32 address_length)
 {
   ip_lookup_main_t *lm = &im->lookup_main;
-  ip6_address_t *address = ip_interface_address_get_address (lm, a);
-  fib_prefix_t pfx = {
-    .fp_len = a->address_length,
-    .fp_proto = FIB_PROTOCOL_IP6,
-    .fp_addr.ip6 = *address,
+  ip_interface_prefix_t *if_prefix;
+
+  ip_interface_prefix_key_t key = {
+    .prefix = {
+      .fp_len = address_length,
+      .fp_proto = FIB_PROTOCOL_IP6,
+      .fp_addr.ip6 = {
+        .as_u64 = {
+          address->as_u64[0] & im->fib_masks[address_length].as_u64[0],
+          address->as_u64[1] & im->fib_masks[address_length].as_u64[1],
+        },
+      },
+    },
+    .sw_if_index = sw_if_index,
   };
 
-  if (a->address_length < 128)
+  /* If prefix already set on interface, just increment ref count & return */
+  if_prefix = ip_get_interface_prefix (lm, &key);
+  if (if_prefix)
+    {
+      if_prefix->ref_count += 1;
+      return;
+    }
+
+  /* New prefix - allocate a pool entry, initialize it, add to the hash */
+  pool_get (lm->if_prefix_pool, if_prefix);
+  if_prefix->ref_count = 1;
+  clib_memcpy (&if_prefix->key, &key, sizeof (key));
+  mhash_set (&lm->prefix_to_if_prefix_index, &key,
+            if_prefix - lm->if_prefix_pool, 0 /* old value */ );
+
+  /* length < 128 - add glean */
+  if (address_length < 128)
     {
-      fib_table_entry_update_one_path (fib_index,
-                                      &pfx,
+      /* set the glean route for the prefix */
+      fib_table_entry_update_one_path (fib_index, &key.prefix,
                                       FIB_SOURCE_INTERFACE,
                                       (FIB_ENTRY_FLAG_CONNECTED |
                                        FIB_ENTRY_FLAG_ATTACHED),
@@ -84,9 +115,27 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
                                       NULL, sw_if_index,
                                       /* invalid FIB index */
                                       ~0, 1,
-                                      /* no label stack */
+                                      /* no out-label stack */
                                       NULL, FIB_ROUTE_PATH_FLAG_NONE);
     }
+}
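
ip6_add_interface_prefix_routes() introduces per-{prefix, sw_if_index} reference
counting: the connected/glean route for the covering prefix is installed only when
the first address inside it is configured, and (in the delete path below) withdrawn
only when the last one goes away. A minimal standalone sketch of that pattern, using
a plain array and memcmp() in place of VPP's pool/mhash; all prefix_ref_* names and
types are hypothetical:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    /* Hypothetical stand-in for VPP's {fib_prefix_t, sw_if_index} key.
     * Field layout has no padding, so memcmp() comparison is safe. */
    typedef struct
    {
      uint64_t addr[2];          /* prefix address, already masked to prefix_len */
      uint32_t sw_if_index;
      uint32_t prefix_len;
    } prefix_key_t;

    typedef struct
    {
      prefix_key_t key;
      uint32_t ref_count;        /* number of interface addresses inside the prefix */
    } prefix_entry_t;

    #define MAX_PREFIXES 64
    static prefix_entry_t table[MAX_PREFIXES];

    static prefix_entry_t *
    prefix_lookup (const prefix_key_t * key)
    {
      for (int i = 0; i < MAX_PREFIXES; i++)
        if (table[i].ref_count && !memcmp (&table[i].key, key, sizeof (*key)))
          return &table[i];
      return NULL;
    }

    /* Called when an address is added: returns 1 when this is the first
     * address inside the prefix, i.e. when the glean/connected routes
     * should be installed. */
    static int
    prefix_ref_get (const prefix_key_t * key)
    {
      prefix_entry_t *e = prefix_lookup (key);
      if (e)
        {
          e->ref_count++;
          return 0;
        }
      for (int i = 0; i < MAX_PREFIXES; i++)
        if (table[i].ref_count == 0)
          {
            table[i].key = *key;
            table[i].ref_count = 1;
            return 1;
          }
      return 0;                  /* table full in this toy sketch */
    }

    /* Called when an address is deleted: returns 1 when the last reference
     * is gone and the covering prefix routes should be withdrawn. */
    static int
    prefix_ref_put (const prefix_key_t * key)
    {
      prefix_entry_t *e = prefix_lookup (key);
      if (!e)
        return 0;
      if (--e->ref_count > 0)
        return 0;
      memset (e, 0, sizeof (*e));
      return 1;
    }
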
+
+static void
+ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
+                         ip6_main_t * im, u32 fib_index,
+                         ip_interface_address_t * a)
+{
+  ip_lookup_main_t *lm = &im->lookup_main;
+  ip6_address_t *address = ip_interface_address_get_address (lm, a);
+  fib_prefix_t pfx = {
+    .fp_len = a->address_length,
+    .fp_proto = FIB_PROTOCOL_IP6,
+    .fp_addr.ip6 = *address,
+  };
+
+  /* set special routes for the prefix if needed */
+  ip6_add_interface_prefix_routes (im, sw_if_index, fib_index,
+                                  address, a->address_length);
 
   pfx.fp_len = 128;
   if (sw_if_index < vec_len (lm->classify_table_index_by_sw_if_index))
@@ -121,23 +170,73 @@ ip6_add_interface_routes (vnet_main_t * vnm, u32 sw_if_index,
 }
 
 static void
-ip6_del_interface_routes (ip6_main_t * im,
+ip6_del_interface_prefix_routes (ip6_main_t * im,
+                                u32 sw_if_index,
+                                u32 fib_index,
+                                ip6_address_t * address, u32 address_length)
+{
+  ip_lookup_main_t *lm = &im->lookup_main;
+  ip_interface_prefix_t *if_prefix;
+
+  ip_interface_prefix_key_t key = {
+    .prefix = {
+      .fp_len = address_length,
+      .fp_proto = FIB_PROTOCOL_IP6,
+      .fp_addr.ip6 = {
+        .as_u64 = {
+          address->as_u64[0] & im->fib_masks[address_length].as_u64[0],
+          address->as_u64[1] & im->fib_masks[address_length].as_u64[1],
+        },
+      },
+    },
+    .sw_if_index = sw_if_index,
+  };
+
+  if_prefix = ip_get_interface_prefix (lm, &key);
+  if (!if_prefix)
+    {
+      clib_warning ("Prefix not found while deleting %U/%d",
+                   format_ip6_address, address, address_length);
+      return;
+    }
+
+  /* If not deleting last intf addr in prefix, decrement ref count & return */
+  if_prefix->ref_count -= 1;
+  if (if_prefix->ref_count > 0)
+    return;
+
+  /* length <= 128 - delete glean route */
+  if (address_length <= 128)
+    {
+      /* remove glean route for prefix */
+      fib_table_entry_delete (fib_index, &key.prefix, FIB_SOURCE_INTERFACE);
+    }
+
+  mhash_unset (&lm->prefix_to_if_prefix_index, &key, 0 /* old_value */ );
+  pool_put (lm->if_prefix_pool, if_prefix);
+}
+
+static void
+ip6_del_interface_routes (u32 sw_if_index, ip6_main_t * im,
                          u32 fib_index,
                          ip6_address_t * address, u32 address_length)
 {
   fib_prefix_t pfx = {
-    .fp_len = address_length,
+    .fp_len = 128,
     .fp_proto = FIB_PROTOCOL_IP6,
     .fp_addr.ip6 = *address,
   };
 
-  if (pfx.fp_len < 128)
-    {
-      fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE);
-
-    }
+  /* delete special routes for the prefix if needed */
+  ip6_del_interface_prefix_routes (im, sw_if_index, fib_index,
+                                  address, address_length);
 
-  pfx.fp_len = 128;
   fib_table_entry_delete (fib_index, &pfx, FIB_SOURCE_INTERFACE);
 }
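
ip6_del_interface_prefix_routes() is the mirror image: look the key up, decrement,
and only tear the prefix state down when the count reaches zero, while
ip6_del_interface_routes() itself now always removes just the /128 receive route.
Continuing the standalone sketch above, a short main() showing two addresses inside
the same /64 keeping the prefix state alive until the second one is removed (the key
is assumed to be pre-masked, as the real code does with im->fib_masks):

    int
    main (void)
    {
      /* Two addresses on the same interface inside 2001:db8::/64;
       * the caller passes the already-masked prefix as the key. */
      prefix_key_t k = { .addr = { 0x20010db800000000ULL, 0 },
                         .sw_if_index = 1, .prefix_len = 64 };

      printf ("add 1st addr -> install prefix routes? %d\n", prefix_ref_get (&k)); /* 1 */
      printf ("add 2nd addr -> install prefix routes? %d\n", prefix_ref_get (&k)); /* 0 */
      printf ("del 1st addr -> remove prefix routes?  %d\n", prefix_ref_put (&k)); /* 0 */
      printf ("del 2nd addr -> remove prefix routes?  %d\n", prefix_ref_put (&k)); /* 1 */
      return 0;
    }
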
 
@@ -278,6 +377,13 @@ ip6_add_del_interface_address (vlib_main_t * vm,
                                                       address,
                                                       address_length))
                      {
+                      /* an intf may have >1 addr from the same prefix */
+                      if ((sw_if_index == sif->sw_if_index) &&
+                          (ia->address_length == address_length) &&
+                          !ip6_address_is_equal (x, address))
+                        continue;
+
+                      /* error if the length or intf was different */
                        vnm->api_errno = VNET_API_ERROR_DUPLICATE_IF_ADDRESS;
                        return
                          clib_error_create
@@ -311,7 +417,8 @@ ip6_add_del_interface_address (vlib_main_t * vm,
   ip6_sw_interface_enable_disable (sw_if_index, !is_del);
 
   if (is_del)
-    ip6_del_interface_routes (im, ip6_af.fib_index, address, address_length);
+    ip6_del_interface_routes (sw_if_index,
+                             im, ip6_af.fib_index, address, address_length);
   else
     ip6_add_interface_routes (vnm, sw_if_index,
                              im, ip6_af.fib_index,
@@ -361,7 +468,7 @@ ip6_sw_interface_admin_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
                                im, fib_index,
                                ia);
     else
-      ip6_del_interface_routes (im, fib_index,
+      ip6_del_interface_routes (sw_if_index, im, fib_index,
                                a, ia->address_length);
   }));
   /* *INDENT-ON* */
@@ -564,212 +671,171 @@ VLIB_NODE_FN (ip6_load_balance_node) (vlib_main_t * vm,
                                      vlib_frame_t * frame)
 {
   vlib_combined_counter_main_t *cm = &load_balance_main.lbm_via_counters;
-  u32 n_left_from, n_left_to_next, *from, *to_next;
-  ip_lookup_next_t next;
+  u32 n_left, *from;
   u32 thread_index = vm->thread_index;
   ip6_main_t *im = &ip6_main;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+  u16 nexts[VLIB_FRAME_SIZE], *next;
 
   from = vlib_frame_vector_args (frame);
-  n_left_from = frame->n_vectors;
-  next = node->cached_next_index;
+  n_left = frame->n_vectors;
+  next = nexts;
 
-  while (n_left_from > 0)
-    {
-      vlib_get_next_frame (vm, node, next, to_next, n_left_to_next);
+  vlib_get_buffers (vm, from, bufs, n_left);
 
+  while (n_left >= 4)
+    {
+      const load_balance_t *lb0, *lb1;
+      const ip6_header_t *ip0, *ip1;
+      u32 lbi0, hc0, lbi1, hc1;
+      const dpo_id_t *dpo0, *dpo1;
 
-      while (n_left_from >= 4 && n_left_to_next >= 2)
+      /* Prefetch next iteration. */
+      {
+       vlib_prefetch_buffer_header (b[2], STORE);
+       vlib_prefetch_buffer_header (b[3], STORE);
+
+       CLIB_PREFETCH (b[2]->data, sizeof (ip0[0]), STORE);
+       CLIB_PREFETCH (b[3]->data, sizeof (ip0[0]), STORE);
+      }
+
+      ip0 = vlib_buffer_get_current (b[0]);
+      ip1 = vlib_buffer_get_current (b[1]);
+      lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
+      lbi1 = vnet_buffer (b[1])->ip.adj_index[VLIB_TX];
+
+      lb0 = load_balance_get (lbi0);
+      lb1 = load_balance_get (lbi1);
+
+      /*
+       * This node is for via-FIBs, so we can re-use the flow hash computed
+       * by an earlier lookup if it is present.
+       * We don't want to use the same hash value at each level of the
+       * recursion graph, as that would lead to polarisation.
+       */
+      hc0 = hc1 = 0;
+
+      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
        {
-         ip_lookup_next_t next0, next1;
-         const load_balance_t *lb0, *lb1;
-         vlib_buffer_t *p0, *p1;
-         u32 pi0, lbi0, hc0, pi1, lbi1, hc1;
-         const ip6_header_t *ip0, *ip1;
-         const dpo_id_t *dpo0, *dpo1;
-
-         /* Prefetch next iteration. */
-         {
-           vlib_buffer_t *p2, *p3;
-
-           p2 = vlib_get_buffer (vm, from[2]);
-           p3 = vlib_get_buffer (vm, from[3]);
-
-           vlib_prefetch_buffer_header (p2, STORE);
-           vlib_prefetch_buffer_header (p3, STORE);
-
-           CLIB_PREFETCH (p2->data, sizeof (ip0[0]), STORE);
-           CLIB_PREFETCH (p3->data, sizeof (ip0[0]), STORE);
-         }
-
-         pi0 = to_next[0] = from[0];
-         pi1 = to_next[1] = from[1];
-
-         from += 2;
-         n_left_from -= 2;
-         to_next += 2;
-         n_left_to_next -= 2;
-
-         p0 = vlib_get_buffer (vm, pi0);
-         p1 = vlib_get_buffer (vm, pi1);
-
-         ip0 = vlib_buffer_get_current (p0);
-         ip1 = vlib_buffer_get_current (p1);
-         lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
-         lbi1 = vnet_buffer (p1)->ip.adj_index[VLIB_TX];
-
-         lb0 = load_balance_get (lbi0);
-         lb1 = load_balance_get (lbi1);
-
-         /*
-          * this node is for via FIBs we can re-use the hash value from the
-          * to node if present.
-          * We don't want to use the same hash value at each level in the recursion
-          * graph as that would lead to polarisation
-          */
-         hc0 = hc1 = 0;
-
-         if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+         if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash))
            {
-             if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
-               {
-                 hc0 = vnet_buffer (p0)->ip.flow_hash =
-                   vnet_buffer (p0)->ip.flow_hash >> 1;
-               }
-             else
-               {
-                 hc0 = vnet_buffer (p0)->ip.flow_hash =
-                   ip6_compute_flow_hash (ip0, lb0->lb_hash_config);
-               }
-             dpo0 =
-               load_balance_get_fwd_bucket (lb0,
-                                            (hc0 &
-                                             lb0->lb_n_buckets_minus_1));
+             hc0 = vnet_buffer (b[0])->ip.flow_hash =
+               vnet_buffer (b[0])->ip.flow_hash >> 1;
            }
          else
            {
-             dpo0 = load_balance_get_bucket_i (lb0, 0);
+             hc0 = vnet_buffer (b[0])->ip.flow_hash =
+               ip6_compute_flow_hash (ip0, lb0->lb_hash_config);
            }
-         if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+         dpo0 = load_balance_get_fwd_bucket
+           (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+       }
+      else
+       {
+         dpo0 = load_balance_get_bucket_i (lb0, 0);
+       }
+      if (PREDICT_FALSE (lb1->lb_n_buckets > 1))
+       {
+         if (PREDICT_TRUE (vnet_buffer (b[1])->ip.flow_hash))
            {
-             if (PREDICT_TRUE (vnet_buffer (p1)->ip.flow_hash))
-               {
-                 hc1 = vnet_buffer (p1)->ip.flow_hash =
-                   vnet_buffer (p1)->ip.flow_hash >> 1;
-               }
-             else
-               {
-                 hc1 = vnet_buffer (p1)->ip.flow_hash =
-                   ip6_compute_flow_hash (ip1, lb1->lb_hash_config);
-               }
-             dpo1 =
-               load_balance_get_fwd_bucket (lb1,
-                                            (hc1 &
-                                             lb1->lb_n_buckets_minus_1));
+             hc1 = vnet_buffer (b[1])->ip.flow_hash =
+               vnet_buffer (b[1])->ip.flow_hash >> 1;
            }
          else
            {
-             dpo1 = load_balance_get_bucket_i (lb1, 0);
+             hc1 = vnet_buffer (b[1])->ip.flow_hash =
+               ip6_compute_flow_hash (ip1, lb1->lb_hash_config);
            }
+         dpo1 = load_balance_get_fwd_bucket
+           (lb1, (hc1 & (lb1->lb_n_buckets_minus_1)));
+       }
+      else
+       {
+         dpo1 = load_balance_get_bucket_i (lb1, 0);
+       }
 
-         next0 = dpo0->dpoi_next_node;
-         next1 = dpo1->dpoi_next_node;
-
-         /* Only process the HBH Option Header if explicitly configured to do so */
-         if (PREDICT_FALSE
-             (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
-           {
-             next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
-               (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0;
-           }
-         /* Only process the HBH Option Header if explicitly configured to do so */
-         if (PREDICT_FALSE
-             (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
-           {
-             next1 = (dpo_is_adj (dpo1) && im->hbh_enabled) ?
-               (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next1;
-           }
-
-         vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
-         vnet_buffer (p1)->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
-
-         vlib_increment_combined_counter
-           (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
-         vlib_increment_combined_counter
-           (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, p1));
+      next[0] = dpo0->dpoi_next_node;
+      next[1] = dpo1->dpoi_next_node;
 
-         vlib_validate_buffer_enqueue_x2 (vm, node, next,
-                                          to_next, n_left_to_next,
-                                          pi0, pi1, next0, next1);
+      /* Only process the HBH Option Header if explicitly configured to do so */
+      if (PREDICT_FALSE (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+       {
+         next[0] = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
+           (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next[0];
        }
-
-      while (n_left_from > 0 && n_left_to_next > 0)
+      /* Only process the HBH Option Header if explicitly configured to do so */
+      if (PREDICT_FALSE (ip1->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
        {
-         ip_lookup_next_t next0;
-         const load_balance_t *lb0;
-         vlib_buffer_t *p0;
-         u32 pi0, lbi0, hc0;
-         const ip6_header_t *ip0;
-         const dpo_id_t *dpo0;
+         next[1] = (dpo_is_adj (dpo1) && im->hbh_enabled) ?
+           (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next[1];
+       }
 
-         pi0 = from[0];
-         to_next[0] = pi0;
-         from += 1;
-         to_next += 1;
-         n_left_to_next -= 1;
-         n_left_from -= 1;
+      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
+      vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = dpo1->dpoi_index;
 
-         p0 = vlib_get_buffer (vm, pi0);
+      vlib_increment_combined_counter
+       (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0]));
+      vlib_increment_combined_counter
+       (cm, thread_index, lbi1, 1, vlib_buffer_length_in_chain (vm, b[1]));
 
-         ip0 = vlib_buffer_get_current (p0);
-         lbi0 = vnet_buffer (p0)->ip.adj_index[VLIB_TX];
+      b += 2;
+      next += 2;
+      n_left -= 2;
+    }
+
+  while (n_left > 0)
+    {
+      const load_balance_t *lb0;
+      const ip6_header_t *ip0;
+      const dpo_id_t *dpo0;
+      u32 lbi0, hc0;
+
+      ip0 = vlib_buffer_get_current (b[0]);
+      lbi0 = vnet_buffer (b[0])->ip.adj_index[VLIB_TX];
 
-         lb0 = load_balance_get (lbi0);
+      lb0 = load_balance_get (lbi0);
 
-         hc0 = 0;
-         if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+      hc0 = 0;
+      if (PREDICT_FALSE (lb0->lb_n_buckets > 1))
+       {
+         if (PREDICT_TRUE (vnet_buffer (b[0])->ip.flow_hash))
            {
-             if (PREDICT_TRUE (vnet_buffer (p0)->ip.flow_hash))
-               {
-                 hc0 = vnet_buffer (p0)->ip.flow_hash =
-                   vnet_buffer (p0)->ip.flow_hash >> 1;
-               }
-             else
-               {
-                 hc0 = vnet_buffer (p0)->ip.flow_hash =
-                   ip6_compute_flow_hash (ip0, lb0->lb_hash_config);
-               }
-             dpo0 =
-               load_balance_get_fwd_bucket (lb0,
-                                            (hc0 &
-                                             lb0->lb_n_buckets_minus_1));
+             hc0 = vnet_buffer (b[0])->ip.flow_hash =
+               vnet_buffer (b[0])->ip.flow_hash >> 1;
            }
          else
            {
-             dpo0 = load_balance_get_bucket_i (lb0, 0);
-           }
-
-         next0 = dpo0->dpoi_next_node;
-         vnet_buffer (p0)->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
-
-         /* Only process the HBH Option Header if explicitly configured to do so */
-         if (PREDICT_FALSE
-             (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
-           {
-             next0 = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
-               (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next0;
+             hc0 = vnet_buffer (b[0])->ip.flow_hash =
+               ip6_compute_flow_hash (ip0, lb0->lb_hash_config);
            }
+         dpo0 = load_balance_get_fwd_bucket
+           (lb0, (hc0 & (lb0->lb_n_buckets_minus_1)));
+       }
+      else
+       {
+         dpo0 = load_balance_get_bucket_i (lb0, 0);
+       }
 
-         vlib_increment_combined_counter
-           (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, p0));
+      next[0] = dpo0->dpoi_next_node;
+      vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo0->dpoi_index;
 
-         vlib_validate_buffer_enqueue_x1 (vm, node, next,
-                                          to_next, n_left_to_next,
-                                          pi0, next0);
+      /* Only process the HBH Option Header if explicitly configured to do so */
+      if (PREDICT_FALSE (ip0->protocol == IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS))
+       {
+         next[0] = (dpo_is_adj (dpo0) && im->hbh_enabled) ?
+           (ip_lookup_next_t) IP6_LOOKUP_NEXT_HOP_BY_HOP : next[0];
        }
 
-      vlib_put_next_frame (vm, node, next, n_left_to_next);
+      vlib_increment_combined_counter
+       (cm, thread_index, lbi0, 1, vlib_buffer_length_in_chain (vm, b[0]));
+
+      b += 1;
+      next += 1;
+      n_left -= 1;
     }
 
+  vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+
   if (node->flags & VLIB_NODE_FLAG_TRACE)
     ip6_forward_next_trace (vm, node, frame, VLIB_TX);
 
@@ -951,7 +1017,8 @@ ip6_tcp_udp_icmp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
   u16 sum16, payload_length_host_byte_order;
   u32 i, n_this_buffer, n_bytes_left;
   u32 headers_size = sizeof (ip0[0]);
-  void *data_this_buffer;
+  u8 *data_this_buffer;
+  u8 length_odd;
 
   ASSERT (bogus_lengthp);
   *bogus_lengthp = 0;
@@ -959,7 +1026,7 @@ ip6_tcp_udp_icmp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
   /* Initialize checksum with ip header. */
   sum0 = ip0->payload_length + clib_host_to_net_u16 (ip0->protocol);
   payload_length_host_byte_order = clib_net_to_host_u16 (ip0->payload_length);
-  data_this_buffer = (void *) (ip0 + 1);
+  data_this_buffer = (u8 *) (ip0 + 1);
 
   for (i = 0; i < ARRAY_LEN (ip0->src_address.as_uword); i++)
     {
@@ -993,10 +1060,18 @@ ip6_tcp_udp_icmp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
     }
 
   n_bytes_left = n_this_buffer = payload_length_host_byte_order;
-  if (p0 && n_this_buffer + headers_size > p0->current_length)
-    n_this_buffer =
-      p0->current_length >
-      headers_size ? p0->current_length - headers_size : 0;
+
+  if (p0)
+    {
+      u32 n_ip_bytes_this_buffer =
+       p0->current_length - (((u8 *) ip0 - p0->data) - p0->current_data);
+      if (n_this_buffer + headers_size > n_ip_bytes_this_buffer)
+       {
+         n_this_buffer = p0->current_length > headers_size ?
+           n_ip_bytes_this_buffer - headers_size : 0;
+       }
+    }
+
   while (1)
     {
       sum0 = ip_incremental_checksum (sum0, data_this_buffer, n_this_buffer);
@@ -1004,14 +1079,27 @@ ip6_tcp_udp_icmp_compute_checksum (vlib_main_t * vm, vlib_buffer_t * p0,
       if (n_bytes_left == 0)
        break;
 
+      ASSERT (p0->flags & VLIB_BUFFER_NEXT_PRESENT);
       if (!(p0->flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          *bogus_lengthp = 1;
          return 0xfefe;
        }
+
+      length_odd = (n_this_buffer & 1);
+
       p0 = vlib_get_buffer (vm, p0->next_buffer);
       data_this_buffer = vlib_buffer_get_current (p0);
       n_this_buffer = clib_min (p0->current_length, n_bytes_left);
+
+      if (PREDICT_FALSE (length_odd))
+       {
+         /* Prepend a 0 or the resulting checksum will be incorrect. */
+         data_this_buffer--;
+         n_this_buffer++;
+         n_bytes_left++;
+         data_this_buffer[0] = 0;
+       }
     }
 
   sum16 = ~ip_csum_fold (sum0);
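
This loop is the corner case the commit title refers to. The Internet checksum is a
ones'-complement sum of 16-bit words, so when one buffer in a chain ends on an odd byte,
resuming ip_incremental_checksum() at the start of the next buffer puts every following
byte in the wrong lane. Writing a zero in the byte just before the next buffer's data
shifts that buffer back onto an even word boundary; the previous buffer's odd tail byte
was already summed as a zero-padded high byte, and because ones'-complement addition is
commutative the two halves add up to the correct 16-bit word. A self-contained
demonstration; ocsum() mimics the accumulate-and-fold step, not VPP's exact ip_csum_t
helpers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Ones'-complement sum of a byte string as big-endian 16-bit words,
     * starting from an existing partial sum (like ip_incremental_checksum). */
    static uint32_t
    ocsum (uint32_t sum, const uint8_t * data, int len)
    {
      int i;
      for (i = 0; i + 1 < len; i += 2)
        sum += (uint32_t) (data[i] << 8 | data[i + 1]);
      if (len & 1)
        sum += (uint32_t) (data[len - 1] << 8);        /* pad odd tail with 0 */
      while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);            /* fold carries */
      return sum;
    }

    int
    main (void)
    {
      /* 7-byte "payload" split across two buffers at an odd offset (3). */
      const uint8_t payload[7] = { 0x45, 0x00, 0x12, 0x34, 0xab, 0xcd, 0x01 };
      const uint8_t *buf_a = payload, *buf_b = payload + 3;

      uint32_t whole = ocsum (0, payload, 7);

      /* Naive resume: the second buffer starts in the wrong byte lane. */
      uint32_t naive = ocsum (ocsum (0, buf_a, 3), buf_b, 4);

      /* Fix from the patch: prepend a zero byte before the second buffer. */
      uint8_t fixed_b[5] = { 0 };
      memcpy (fixed_b + 1, buf_b, 4);
      uint32_t fixed = ocsum (ocsum (0, buf_a, 3), fixed_b, 5);

      printf ("whole=0x%04x naive=0x%04x fixed=0x%04x\n",
              (unsigned) whole, (unsigned) naive, (unsigned) fixed);
      /* whole == fixed; naive differs whenever the split falls on an odd byte. */
      return 0;
    }
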
@@ -1066,7 +1154,7 @@ ip6_urpf_loose_check (ip6_main_t * im, vlib_buffer_t * b, ip6_header_t * i)
     (vnet_buffer (b)->sw_if_index[VLIB_TX] == (u32) ~ 0) ?
     fib_index : vnet_buffer (b)->sw_if_index[VLIB_TX];
 
-  lbi = ip6_fib_table_fwding_lookup (im, fib_index, &i->src_address);
+  lbi = ip6_fib_table_fwding_lookup (fib_index, &i->src_address);
   lb0 = load_balance_get (lbi);
 
   return (fib_urpf_check_size (lb0->lb_urpf));
@@ -1474,6 +1562,16 @@ ip6_register_protocol (u32 protocol, u32 node_index)
     vlib_node_add_next (vm, ip6_local_node.index, node_index);
 }
 
+void
+ip6_unregister_protocol (u32 protocol)
+{
+  ip6_main_t *im = &ip6_main;
+  ip_lookup_main_t *lm = &im->lookup_main;
+
+  ASSERT (protocol < ARRAY_LEN (lm->local_next_by_ip_protocol));
+  lm->local_next_by_ip_protocol[protocol] = IP_LOCAL_NEXT_PUNT;
+}
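
ip6_unregister_protocol() undoes ip6_register_protocol() by pointing the protocol's
ip6-local next back at punt. A hedged sketch of how a feature might pair the two calls
in its enable/disable path; the protocol number, node name and include choice are
assumptions for illustration, only the two ip6_* calls come from this file:

    /* Sketch only - a plugin that claims an IP protocol while enabled
     * and restores punt behaviour on disable. */
    #include <vnet/vnet.h>
    #include <vnet/ip/ip6.h>

    #define MY_PROTO_IP_PROTO 253   /* RFC 3692 experimental protocol number */

    extern vlib_node_registration_t my_proto_input_node;   /* hypothetical node */

    static void
    my_proto_enable_disable (int enable)
    {
      if (enable)
        /* "my-proto-input" receives ip6-local packets with this protocol */
        ip6_register_protocol (MY_PROTO_IP_PROTO, my_proto_input_node.index);
      else
        /* fall back to punting this protocol */
        ip6_unregister_protocol (MY_PROTO_IP_PROTO);
    }
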
+
 clib_error_t *
 ip6_probe_neighbor (vlib_main_t * vm, ip6_address_t * dst, u32 sw_if_index,
                    u8 refresh)