ip: force full reassembly before virtual
src/vnet/ip/reass/ip4_full_reass.c
index 18ac4d1..7f0b8d9 100644
 #include <vppinfra/vec.h>
 #include <vnet/vnet.h>
 #include <vnet/ip/ip.h>
+#include <vnet/ip/ip.api_enum.h>
 #include <vppinfra/fifo.h>
 #include <vppinfra/bihash_16_8.h>
 #include <vnet/ip/reass/ip4_full_reass.h>
 #include <stddef.h>
 
 #define MSEC_PER_SEC 1000
-#define IP4_REASS_TIMEOUT_DEFAULT_MS 100
-#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 10000        // 10 seconds default
+#define IP4_REASS_TIMEOUT_DEFAULT_MS 200
+
+/* As there are only 1024 reass contexts per thread, either DDoS attacks or
+ * a fraction of real timeouts would consume these contexts quickly, running
+ * out of context space and making reassembly impossible */
+#define IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS 50 // 50 ms default
 #define IP4_REASS_MAX_REASSEMBLIES_DEFAULT 1024
-#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT 3
+#define IP4_REASS_MAX_REASSEMBLY_LENGTH_DEFAULT          3
 #define IP4_REASS_HT_LOAD_FACTOR (0.75)
 
 #define IP4_REASS_DEBUG_BUFFERS 0
@@ -68,21 +73,19 @@ typedef enum
 
 typedef struct
 {
-  union
+  struct
   {
-    struct
-    {
-      u32 xx_id;
-      ip4_address_t src;
-      ip4_address_t dst;
-      u16 frag_id;
-      u8 proto;
-      u8 unused;
-    };
-    u64 as_u64[2];
+    u16 frag_id;
+    u8 proto;
+    u8 unused;
+    u32 fib_index;
+    ip4_address_t src;
+    ip4_address_t dst;
   };
 } ip4_full_reass_key_t;
 
+STATIC_ASSERT_SIZEOF (ip4_full_reass_key_t, 16);
+
 typedef union
 {
   struct
@@ -155,6 +158,8 @@ typedef struct
   ip4_full_reass_t *pool;
   u32 reass_n;
   u32 id_counter;
+  // cursor for pacing timeout handling in the expire walk (main thread)
+  u32 last_id;
   clib_spinlock_t lock;
 } ip4_full_reass_per_thread_t;
 
@@ -177,14 +182,19 @@ typedef struct
   // convenience
   vlib_main_t *vlib_main;
 
-  // node index of ip4-drop node
-  u32 ip4_drop_idx;
   u32 ip4_full_reass_expire_node_idx;
 
   /** Worker handoff */
   u32 fq_index;
+  u32 fq_local_index;
   u32 fq_feature_index;
+  u32 fq_custom_index;
+
+  // reference count for enabling/disabling feature - per interface
+  u32 *feature_use_refcount_per_intf;
 
+  // whether local fragmented packets are reassembled or not
+  int is_local_reass_enabled;
 } ip4_full_reass_main_t;
 
 extern ip4_full_reass_main_t ip4_full_reass_main;
@@ -201,6 +211,13 @@ typedef enum
   IP4_FULL_REASS_N_NEXT,
 } ip4_full_reass_next_t;
 
+typedef enum
+{
+  NORMAL,
+  FEATURE,
+  CUSTOM
+} ip4_full_reass_node_type_t;
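+/* NORMAL = plain ip4-full-reassembly node, FEATURE = ip4-unicast feature
+ * node, CUSTOM = node that hands reassembled packets to a custom app
+ * registered via ip4_full_reass_custom_register_next_node */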
+
 typedef enum
 {
   RANGE_NEW,
@@ -209,6 +226,7 @@ typedef enum
   RANGE_OVERLAP,
   FINALIZE,
   HANDOFF,
+  PASSTHROUGH,
 } ip4_full_reass_trace_operation_e;
 
 typedef struct
@@ -233,10 +251,13 @@ typedef struct
   u32 fragment_first;
   u32 fragment_last;
   u32 total_data_len;
+  bool is_after_handoff;
+  ip4_header_t ip4_header;
 } ip4_full_reass_trace_t;
 
 extern vlib_node_registration_t ip4_full_reass_node;
 extern vlib_node_registration_t ip4_full_reass_node_feature;
+extern vlib_node_registration_t ip4_full_reass_node_custom;
 
 static void
 ip4_full_reass_trace_details (vlib_main_t * vm, u32 bi,
@@ -272,7 +293,16 @@ format_ip4_full_reass_trace (u8 * s, va_list * args)
   u32 indent = 0;
   if (~0 != t->reass_id)
     {
-      s = format (s, "reass id: %u, op id: %u, ", t->reass_id, t->op_id);
+      if (t->is_after_handoff)
+       {
+         s =
+           format (s, "%U\n", format_ip4_header, &t->ip4_header,
+                   sizeof (t->ip4_header));
+         indent = 2;
+       }
+      s =
+       format (s, "%Ureass id: %u, op id: %u, ", format_white_space, indent,
+               t->reass_id, t->op_id);
       indent = format_get_indent (s);
       s =
        format (s,
@@ -307,20 +337,40 @@ format_ip4_full_reass_trace (u8 * s, va_list * args)
        format (s, "handoff from thread #%u to thread #%u", t->thread_id,
                t->thread_id_to);
       break;
+    case PASSTHROUGH:
+      s = format (s, "passthrough - not a fragment");
+      break;
     }
   return s;
 }
 
 static void
 ip4_full_reass_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
-                         ip4_full_reass_main_t * rm,
                          ip4_full_reass_t * reass, u32 bi,
                          ip4_full_reass_trace_operation_e action,
                          u32 size_diff, u32 thread_id_to)
 {
   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
   vnet_buffer_opaque_t *vnb = vnet_buffer (b);
+  if (pool_is_free_index
+      (vm->trace_main.trace_buffer_pool, vlib_buffer_get_trace_index (b)))
+    {
+      // this buffer's trace is gone
+      b->flags &= ~VLIB_BUFFER_IS_TRACED;
+      return;
+    }
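+  /* a trace started on another thread means this buffer arrived via
+   * handoff; remember that and capture the ip4 header below so the
+   * combined trace stays readable */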
+  bool is_after_handoff = false;
+  if (vlib_buffer_get_trace_thread (b) != vm->thread_index)
+    {
+      is_after_handoff = true;
+    }
   ip4_full_reass_trace_t *t = vlib_add_trace (vm, node, b, sizeof (t[0]));
+  t->is_after_handoff = is_after_handoff;
+  if (t->is_after_handoff)
+    {
+      clib_memcpy (&t->ip4_header, vlib_buffer_get_current (b),
+                  clib_min (sizeof (t->ip4_header), b->current_length));
+    }
   if (reass)
     {
       t->reass_id = reass->id;
@@ -365,71 +415,120 @@ ip4_full_reass_free (ip4_full_reass_main_t * rm,
                     ip4_full_reass_per_thread_t * rt,
                     ip4_full_reass_t * reass)
 {
-  clib_bihash_kv_16_8_t kv;
-  kv.key[0] = reass->key.as_u64[0];
-  kv.key[1] = reass->key.as_u64[1];
+  clib_bihash_kv_16_8_t kv = {};
+  clib_memcpy_fast (&kv, &reass->key, sizeof (kv.key));
   clib_bihash_add_del_16_8 (&rm->hash, &kv, 0);
   return ip4_full_reass_free_ctx (rt, reass);
 }
 
+/* n_left_to_next and to_next are taken as input params because this function
+ * may be called from a graph node that keeps local copies of these
+ * variables; ignoring those and enqueueing the buffers with fresh local
+ * variables would cause either a buffer leak or corruption */
 always_inline void
-ip4_full_reass_drop_all (vlib_main_t * vm, vlib_node_runtime_t * node,
-                        ip4_full_reass_main_t * rm, ip4_full_reass_t * reass)
+ip4_full_reass_drop_all (vlib_main_t *vm, vlib_node_runtime_t *node,
+                        ip4_full_reass_t *reass)
 {
   u32 range_bi = reass->first_bi;
   vlib_buffer_t *range_b;
   vnet_buffer_opaque_t *range_vnb;
   u32 *to_free = NULL;
+
   while (~0 != range_bi)
     {
       range_b = vlib_get_buffer (vm, range_bi);
       range_vnb = vnet_buffer (range_b);
-      u32 bi = range_bi;
-      while (~0 != bi)
+
+      if (~0 != range_bi)
        {
-         vec_add1 (to_free, bi);
-         vlib_buffer_t *b = vlib_get_buffer (vm, bi);
-         if (b->flags & VLIB_BUFFER_NEXT_PRESENT)
-           {
-             bi = b->next_buffer;
-             b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
-           }
-         else
-           {
-             bi = ~0;
-           }
+         vec_add1 (to_free, range_bi);
        }
+
       range_bi = range_vnb->ip.reass.next_range_bi;
     }
+
   /* send to next_error_index */
-  if (~0 != reass->error_next_index)
+  if (~0 != reass->error_next_index &&
+      reass->error_next_index < node->n_next_nodes)
+    {
+      u32 n_free = vec_len (to_free);
+
+      /* record number of packets sent to custom app */
+      vlib_node_increment_counter (vm, node->node_index,
+                                  IP4_ERROR_REASS_TO_CUSTOM_APP, n_free);
+
+      if (node->flags & VLIB_NODE_FLAG_TRACE)
+       for (u32 i = 0; i < n_free; i++)
+         {
+           vlib_buffer_t *b = vlib_get_buffer (vm, to_free[i]);
+           if (PREDICT_FALSE (b->flags & VLIB_BUFFER_IS_TRACED))
+             ip4_full_reass_add_trace (vm, node, reass, to_free[i],
+                                       RANGE_DISCARD, 0, ~0);
+         }
+
+      vlib_buffer_enqueue_to_single_next (vm, node, to_free,
+                                         reass->error_next_index, n_free);
+    }
+  else
     {
-      u32 n_left_to_next, *to_next, next_index;
+      vlib_buffer_free (vm, to_free, vec_len (to_free));
+    }
+  vec_free (to_free);
+}
 
-      next_index = reass->error_next_index;
-      u32 bi = ~0;
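+/* called on IP4_REASS_RC_INTERNAL_ERROR before the reassembly is dropped:
+ * walk the range list, break any buffer chain that links straight into the
+ * next range, and if the current buffer (*bi0) is not already part of a
+ * range, prepend it to the list and clear *bi0 so it is neither enqueued
+ * nor leaked */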
+always_inline void
+sanitize_reass_buffers_add_missing (vlib_main_t *vm, ip4_full_reass_t *reass,
+                                   u32 *bi0)
+{
+  u32 range_bi = reass->first_bi;
+  vlib_buffer_t *range_b;
+  vnet_buffer_opaque_t *range_vnb;
 
-      while (vec_len (to_free) > 0)
+  while (~0 != range_bi)
+    {
+      range_b = vlib_get_buffer (vm, range_bi);
+      range_vnb = vnet_buffer (range_b);
+      u32 bi = range_bi;
+      if (~0 != bi)
        {
-         vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
-         while (vec_len (to_free) > 0 && n_left_to_next > 0)
+         if (bi == *bi0)
+           *bi0 = ~0;
+         if (range_b->flags & VLIB_BUFFER_NEXT_PRESENT)
            {
-             bi = vec_pop (to_free);
-
-             if (~0 != bi)
+             u32 _bi = bi;
+             vlib_buffer_t *_b = vlib_get_buffer (vm, _bi);
+             while (_b->flags & VLIB_BUFFER_NEXT_PRESENT)
                {
-                 to_next[0] = bi;
-                 to_next += 1;
-                 n_left_to_next -= 1;
+                 if (_b->next_buffer != range_vnb->ip.reass.next_range_bi)
+                   {
+                     _bi = _b->next_buffer;
+                     _b = vlib_get_buffer (vm, _bi);
+                   }
+                 else
+                   {
+                     _b->flags &= ~VLIB_BUFFER_NEXT_PRESENT;
+                     break;
+                   }
                }
            }
-         vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+         range_bi = range_vnb->ip.reass.next_range_bi;
        }
     }
-  else
+  if (*bi0 != ~0)
     {
-      vlib_buffer_free (vm, to_free, vec_len (to_free));
+      vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
+      vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
+      if (~0 != reass->first_bi)
+       {
+         fvnb->ip.reass.next_range_bi = reass->first_bi;
+         reass->first_bi = *bi0;
+       }
+      else
+       {
+         reass->first_bi = *bi0;
+         fvnb->ip.reass.next_range_bi = ~0;
+       }
+      *bi0 = ~0;
     }
 }
 
@@ -444,10 +543,10 @@ ip4_full_reass_init (ip4_full_reass_t * reass)
 }
 
 always_inline ip4_full_reass_t *
-ip4_full_reass_find_or_create (vlib_main_t * vm, vlib_node_runtime_t * node,
-                              ip4_full_reass_main_t * rm,
-                              ip4_full_reass_per_thread_t * rt,
-                              ip4_full_reass_kv_t * kv, u8 * do_handoff)
+ip4_full_reass_find_or_create (vlib_main_t *vm, vlib_node_runtime_t *node,
+                              ip4_full_reass_main_t *rm,
+                              ip4_full_reass_per_thread_t *rt,
+                              ip4_full_reass_kv_t *kv, u8 *do_handoff)
 {
   ip4_full_reass_t *reass;
   f64 now;
@@ -456,22 +555,23 @@ again:
 
   reass = NULL;
   now = vlib_time_now (vm);
-  if (!clib_bihash_search_16_8
-      (&rm->hash, (clib_bihash_kv_16_8_t *) kv, (clib_bihash_kv_16_8_t *) kv))
+  if (!clib_bihash_search_16_8 (&rm->hash, &kv->kv, &kv->kv))
     {
+      if (vm->thread_index != kv->v.memory_owner_thread_index)
+       {
+         *do_handoff = 1;
+         return NULL;
+       }
       reass =
        pool_elt_at_index (rm->per_thread_data
                           [kv->v.memory_owner_thread_index].pool,
                           kv->v.reass_index);
-      if (vm->thread_index != reass->memory_owner_thread_index)
-       {
-         *do_handoff = 1;
-         return reass;
-       }
 
       if (now > reass->last_heard + rm->timeout)
        {
-         ip4_full_reass_drop_all (vm, node, rm, reass);
+         vlib_node_increment_counter (vm, node->node_index,
+                                      IP4_ERROR_REASS_TIMEOUT, 1);
+         ip4_full_reass_drop_all (vm, node, reass);
          ip4_full_reass_free (rm, rt, reass);
          reass = NULL;
        }
@@ -499,14 +599,12 @@ again:
       ++rt->reass_n;
     }
 
-  reass->key.as_u64[0] = ((clib_bihash_kv_16_8_t *) kv)->key[0];
-  reass->key.as_u64[1] = ((clib_bihash_kv_16_8_t *) kv)->key[1];
+  clib_memcpy_fast (&reass->key, &kv->kv.key, sizeof (reass->key));
   kv->v.reass_index = (reass - rt->pool);
   kv->v.memory_owner_thread_index = vm->thread_index;
   reass->last_heard = now;
 
-  int rv =
-    clib_bihash_add_del_16_8 (&rm->hash, (clib_bihash_kv_16_8_t *) kv, 2);
+  int rv = clib_bihash_add_del_16_8 (&rm->hash, &kv->kv, 2);
   if (rv)
     {
       ip4_full_reass_free_ctx (rt, reass);
@@ -524,13 +622,12 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
                         ip4_full_reass_main_t * rm,
                         ip4_full_reass_per_thread_t * rt,
                         ip4_full_reass_t * reass, u32 * bi0,
-                        u32 * next0, u32 * error0, bool is_custom_app)
+                        u32 * next0, u32 * error0, bool is_custom)
 {
   vlib_buffer_t *first_b = vlib_get_buffer (vm, reass->first_bi);
   vlib_buffer_t *last_b = NULL;
   u32 sub_chain_bi = reass->first_bi;
   u32 total_length = 0;
-  u32 buf_cnt = 0;
   do
     {
       u32 tmp_bi = sub_chain_bi;
@@ -567,7 +664,6 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
        vlib_buffer_length_in_chain (vm, tmp) - trim_front - trim_end;
       while (1)
        {
-         ++buf_cnt;
          if (trim_front)
            {
              if (trim_front > tmp->current_length)
@@ -678,8 +774,8 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
   first_b->flags &= ~VLIB_BUFFER_EXT_HDR_VALID;
   if (PREDICT_FALSE (first_b->flags & VLIB_BUFFER_IS_TRACED))
     {
-      ip4_full_reass_add_trace (vm, node, rm, reass, reass->first_bi,
-                               FINALIZE, 0, ~0);
+      ip4_full_reass_add_trace (vm, node, reass, reass->first_bi, FINALIZE, 0,
+                               ~0);
 #if 0
       // following code does a hexdump of packet fragments to stdout ...
       do
@@ -708,7 +804,7 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
 #endif
     }
   *bi0 = reass->first_bi;
-  if (!is_custom_app)
+  if (!is_custom)
     {
       *next0 = IP4_FULL_REASS_NEXT_INPUT;
     }
@@ -717,6 +813,16 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
       *next0 = reass->next_index;
     }
   vnet_buffer (first_b)->ip.reass.estimated_mtu = reass->min_fragment_length;
+
+  /* Keep track of number of successfully reassembled packets and number of
+   * fragments reassembled */
+  vlib_node_increment_counter (vm, node->node_index, IP4_ERROR_REASS_SUCCESS,
+                              1);
+
+  vlib_node_increment_counter (vm, node->node_index,
+                              IP4_ERROR_REASS_FRAGMENTS_REASSEMBLED,
+                              reass->fragments_n);
+
   *error0 = IP4_ERROR_NONE;
   ip4_full_reass_free (rm, rt, reass);
   reass = NULL;
@@ -725,8 +831,6 @@ ip4_full_reass_finalize (vlib_main_t * vm, vlib_node_runtime_t * node,
 
 always_inline ip4_full_reass_rc_t
 ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
-                                     ip4_full_reass_main_t * rm,
-                                     ip4_full_reass_per_thread_t * rt,
                                      ip4_full_reass_t * reass,
                                      u32 prev_range_bi, u32 new_next_bi)
 {
@@ -760,7 +864,6 @@ ip4_full_reass_insert_range_in_chain (vlib_main_t * vm,
 always_inline ip4_full_reass_rc_t
 ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
                                        vlib_node_runtime_t * node,
-                                       ip4_full_reass_main_t * rm,
                                        ip4_full_reass_t * reass,
                                        u32 prev_range_bi, u32 discard_bi)
 {
@@ -792,8 +895,8 @@ ip4_full_reass_remove_range_from_chain (vlib_main_t * vm,
       u32 to_be_freed_bi = discard_bi;
       if (PREDICT_FALSE (discard_b->flags & VLIB_BUFFER_IS_TRACED))
        {
-         ip4_full_reass_add_trace (vm, node, rm, reass, discard_bi,
-                                   RANGE_DISCARD, 0, ~0);
+         ip4_full_reass_add_trace (vm, node, reass, discard_bi, RANGE_DISCARD,
+                                   0, ~0);
        }
       if (discard_b->flags & VLIB_BUFFER_NEXT_PRESENT)
        {
@@ -818,12 +921,11 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
                       ip4_full_reass_main_t * rm,
                       ip4_full_reass_per_thread_t * rt,
                       ip4_full_reass_t * reass, u32 * bi0, u32 * next0,
-                      u32 * error0, bool is_custom_app,
-                      u32 * handoff_thread_idx)
+                      u32 * error0, bool is_custom, u32 * handoff_thread_idx)
 {
   vlib_buffer_t *fb = vlib_get_buffer (vm, *bi0);
   vnet_buffer_opaque_t *fvnb = vnet_buffer (fb);
-  if (is_custom_app)
+  if (is_custom)
     {
       // store (error_)next_index before it's overwritten
       reass->next_index = fvnb->ip.reass.next_index;
@@ -852,16 +954,14 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
     {
       // starting a new reassembly
       rc =
-       ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
-                                             prev_range_bi, *bi0);
+       ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi, *bi0);
       if (IP4_REASS_RC_OK != rc)
        {
          return rc;
        }
       if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
-         ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
-                                   ~0);
+         ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
        }
       *bi0 = ~0;
       reass->min_fragment_length = clib_net_to_host_u16 (fip->length);
@@ -884,9 +984,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
              ~0 == candidate_range_bi)
            {
              // special case - this fragment falls beyond all known ranges
-             rc =
-               ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
-                                                     prev_range_bi, *bi0);
+             rc = ip4_full_reass_insert_range_in_chain (vm, reass,
+                                                        prev_range_bi, *bi0);
              if (IP4_REASS_RC_OK != rc)
                {
                  return rc;
@@ -899,9 +998,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
       if (fragment_last < candidate_vnb->ip.reass.range_first)
        {
          // this fragment ends before candidate range without any overlap
-         rc =
-           ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
-                                                 prev_range_bi, *bi0);
+         rc = ip4_full_reass_insert_range_in_chain (vm, reass, prev_range_bi,
+                                                    *bi0);
          if (IP4_REASS_RC_OK != rc)
            {
              return rc;
@@ -916,7 +1014,7 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
              // this fragment is a (sub)part of existing range, ignore it
              if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
                {
-                 ip4_full_reass_add_trace (vm, node, rm, reass, *bi0,
+                 ip4_full_reass_add_trace (vm, node, reass, *bi0,
                                            RANGE_OVERLAP, 0, ~0);
                }
              break;
@@ -936,14 +1034,12 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
                  reass->data_len -= overlap;
                  if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
                    {
-                     ip4_full_reass_add_trace (vm, node, rm, reass,
+                     ip4_full_reass_add_trace (vm, node, reass,
                                                candidate_range_bi,
                                                RANGE_SHRINK, 0, ~0);
                    }
-                 rc =
-                   ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
-                                                         prev_range_bi,
-                                                         *bi0);
+                 rc = ip4_full_reass_insert_range_in_chain (
+                   vm, reass, prev_range_bi, *bi0);
                  if (IP4_REASS_RC_OK != rc)
                    {
                      return rc;
@@ -972,11 +1068,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
                  else
                    {
                      // special case - last range discarded
-                     rc =
-                       ip4_full_reass_insert_range_in_chain (vm, rm, rt,
-                                                             reass,
-                                                             candidate_range_bi,
-                                                             *bi0);
+                     rc = ip4_full_reass_insert_range_in_chain (
+                       vm, reass, candidate_range_bi, *bi0);
                      if (IP4_REASS_RC_OK != rc)
                        {
                          return rc;
@@ -997,10 +1090,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
            {
              u32 next_range_bi = candidate_vnb->ip.reass.next_range_bi;
              // discard candidate range, probe next range
-             rc =
-               ip4_full_reass_remove_range_from_chain (vm, node, rm, reass,
-                                                       prev_range_bi,
-                                                       candidate_range_bi);
+             rc = ip4_full_reass_remove_range_from_chain (
+               vm, node, reass, prev_range_bi, candidate_range_bi);
              if (IP4_REASS_RC_OK != rc)
                {
                  return rc;
@@ -1013,10 +1104,8 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
              else
                {
                  // special case - last range discarded
-                 rc =
-                   ip4_full_reass_insert_range_in_chain (vm, rm, rt, reass,
-                                                         prev_range_bi,
-                                                         *bi0);
+                 rc = ip4_full_reass_insert_range_in_chain (
+                   vm, reass, prev_range_bi, *bi0);
                  if (IP4_REASS_RC_OK != rc)
                    {
                      return rc;
@@ -1032,19 +1121,19 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
     {
       if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
        {
-         ip4_full_reass_add_trace (vm, node, rm, reass, *bi0, RANGE_NEW, 0,
-                                   ~0);
+         ip4_full_reass_add_trace (vm, node, reass, *bi0, RANGE_NEW, 0, ~0);
        }
     }
   if (~0 != reass->last_packet_octet &&
       reass->data_len == reass->last_packet_octet + 1)
     {
       *handoff_thread_idx = reass->sendout_thread_index;
+      int handoff =
+       reass->memory_owner_thread_index != reass->sendout_thread_index;
       rc =
        ip4_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
-                                is_custom_app);
-      if (IP4_REASS_RC_OK == rc
-         && reass->memory_owner_thread_index != reass->sendout_thread_index)
+                                is_custom);
+      if (IP4_REASS_RC_OK == rc && handoff)
        {
          rc = IP4_REASS_RC_HANDOFF;
        }
@@ -1069,197 +1158,216 @@ ip4_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
 }
 
 always_inline uword
-ip4_full_reass_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
-                      vlib_frame_t * frame, bool is_feature,
-                      bool is_custom_app)
+ip4_full_reass_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+                      vlib_frame_t *frame, ip4_full_reass_node_type_t type,
+                      bool is_local)
 {
   u32 *from = vlib_frame_vector_args (frame);
-  u32 n_left_from, n_left_to_next, *to_next, next_index;
+  u32 n_left, n_next = 0, to_next[VLIB_FRAME_SIZE];
   ip4_full_reass_main_t *rm = &ip4_full_reass_main;
   ip4_full_reass_per_thread_t *rt = &rm->per_thread_data[vm->thread_index];
+  u16 nexts[VLIB_FRAME_SIZE];
+
   clib_spinlock_lock (&rt->lock);
 
-  n_left_from = frame->n_vectors;
-  next_index = node->cached_next_index;
-  while (n_left_from > 0)
+  n_left = frame->n_vectors;
+  while (n_left > 0)
     {
-      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
-
-      while (n_left_from > 0 && n_left_to_next > 0)
-       {
-         u32 bi0;
-         vlib_buffer_t *b0;
-         u32 next0;
-         u32 error0 = IP4_ERROR_NONE;
+      u32 bi0;
+      vlib_buffer_t *b0;
+      u32 next0;
+      u32 error0 = IP4_ERROR_NONE;
 
-         bi0 = from[0];
-         b0 = vlib_get_buffer (vm, bi0);
+      bi0 = from[0];
+      b0 = vlib_get_buffer (vm, bi0);
 
-         ip4_header_t *ip0 = vlib_buffer_get_current (b0);
-         if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
+      ip4_header_t *ip0 = vlib_buffer_get_current (b0);
+      if (!ip4_get_fragment_more (ip0) && !ip4_get_fragment_offset (ip0))
+       {
+         // this is a whole packet - no fragmentation
+         if (CUSTOM != type)
            {
-             // this is a whole packet - no fragmentation
-             if (!is_custom_app)
-               {
-                 next0 = IP4_FULL_REASS_NEXT_INPUT;
-               }
-             else
-               {
-                 next0 = vnet_buffer (b0)->ip.reass.next_index;
-               }
-             goto packet_enqueue;
+             next0 = IP4_FULL_REASS_NEXT_INPUT;
            }
-         const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
-         const u32 fragment_length =
-           clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
-         const u32 fragment_last = fragment_first + fragment_length - 1;
-         if (fragment_first > fragment_last || fragment_first + fragment_length > UINT16_MAX - 20 || (fragment_length < 8 && ip4_get_fragment_more (ip0)))     // 8 is minimum frag length per RFC 791
+         else
            {
-             next0 = IP4_FULL_REASS_NEXT_DROP;
-             error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
-             goto packet_enqueue;
+             next0 = vnet_buffer (b0)->ip.reass.next_index;
            }
-         ip4_full_reass_kv_t kv;
-         u8 do_handoff = 0;
-
-         kv.k.as_u64[0] =
-           (u64) vec_elt (ip4_main.fib_index_by_sw_if_index,
-                          vnet_buffer (b0)->sw_if_index[VLIB_RX]) |
-           (u64) ip0->src_address.as_u32 << 32;
-         kv.k.as_u64[1] =
-           (u64) ip0->dst_address.
-           as_u32 | (u64) ip0->fragment_id << 32 | (u64) ip0->protocol << 48;
-
-         ip4_full_reass_t *reass =
-           ip4_full_reass_find_or_create (vm, node, rm, rt, &kv,
-                                          &do_handoff);
-
-         if (reass)
+         ip4_full_reass_add_trace (vm, node, NULL, bi0, PASSTHROUGH, 0, ~0);
+         goto packet_enqueue;
+       }
+
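+      /* on the ip4-local path full reassembly can be switched off (see
+       * ip4_local_full_reass_enable_disable below); when disabled,
+       * fragments are simply dropped here */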
+      if (is_local && !rm->is_local_reass_enabled)
+       {
+         next0 = IP4_FULL_REASS_NEXT_DROP;
+         goto packet_enqueue;
+       }
+
+      const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
+      const u32 fragment_length =
+       clib_net_to_host_u16 (ip0->length) - ip4_header_bytes (ip0);
+      const u32 fragment_last = fragment_first + fragment_length - 1;
+
+      /* Keep track of received fragments */
+      vlib_node_increment_counter (vm, node->node_index,
+                                  IP4_ERROR_REASS_FRAGMENTS_RCVD, 1);
+
+      if (fragment_first > fragment_last ||
+         fragment_first + fragment_length > UINT16_MAX - 20 ||
+         (fragment_length < 8 && // 8 is minimum frag length per RFC 791
+          ip4_get_fragment_more (ip0)))
+       {
+         next0 = IP4_FULL_REASS_NEXT_DROP;
+         error0 = IP4_ERROR_REASS_MALFORMED_PACKET;
+         goto packet_enqueue;
+       }
+
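+      /* sw_if_index[VLIB_TX] doubles as a FIB index override; fall back to
+       * the RX interface's FIB when it is unset (~0) */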
+      u32 fib_index = (vnet_buffer (b0)->sw_if_index[VLIB_TX] == (u32) ~0) ?
+                             vec_elt (ip4_main.fib_index_by_sw_if_index,
+                                vnet_buffer (b0)->sw_if_index[VLIB_RX]) :
+                             vnet_buffer (b0)->sw_if_index[VLIB_TX];
+
+      ip4_full_reass_kv_t kv = { .k.fib_index = fib_index,
+                                .k.src.as_u32 = ip0->src_address.as_u32,
+                                .k.dst.as_u32 = ip0->dst_address.as_u32,
+                                .k.frag_id = ip0->fragment_id,
+                                .k.proto = ip0->protocol
+
+      };
+      u8 do_handoff = 0;
+
+      ip4_full_reass_t *reass =
+       ip4_full_reass_find_or_create (vm, node, rm, rt, &kv, &do_handoff);
+
+      if (reass)
+       {
+         const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
+         if (0 == fragment_first)
            {
-             const u32 fragment_first = ip4_get_fragment_offset_bytes (ip0);
-             if (0 == fragment_first)
-               {
-                 reass->sendout_thread_index = vm->thread_index;
-               }
+             reass->sendout_thread_index = vm->thread_index;
            }
+       }
 
-         if (PREDICT_FALSE (do_handoff))
+      if (PREDICT_FALSE (do_handoff))
+       {
+         next0 = IP4_FULL_REASS_NEXT_HANDOFF;
+         vnet_buffer (b0)->ip.reass.owner_thread_index =
+           kv.v.memory_owner_thread_index;
+       }
+      else if (reass)
+       {
+         u32 handoff_thread_idx;
+         u32 counter = ~0;
+         switch (ip4_full_reass_update (vm, node, rm, rt, reass, &bi0, &next0,
+                                        &error0, CUSTOM == type,
+                                        &handoff_thread_idx))
            {
+           case IP4_REASS_RC_OK:
+             /* nothing to do here */
+             break;
+           case IP4_REASS_RC_HANDOFF:
              next0 = IP4_FULL_REASS_NEXT_HANDOFF;
+             b0 = vlib_get_buffer (vm, bi0);
              vnet_buffer (b0)->ip.reass.owner_thread_index =
-               kv.v.memory_owner_thread_index;
-           }
-         else if (reass)
-           {
-             u32 handoff_thread_idx;
-             switch (ip4_full_reass_update
-                     (vm, node, rm, rt, reass, &bi0, &next0,
-                      &error0, is_custom_app, &handoff_thread_idx))
-               {
-               case IP4_REASS_RC_OK:
-                 /* nothing to do here */
-                 break;
-               case IP4_REASS_RC_HANDOFF:
-                 next0 = IP4_FULL_REASS_NEXT_HANDOFF;
-                 b0 = vlib_get_buffer (vm, bi0);
-                 vnet_buffer (b0)->ip.reass.owner_thread_index =
-                   handoff_thread_idx;
-                 break;
-               case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
-                 vlib_node_increment_counter (vm, node->node_index,
-                                              IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG,
-                                              1);
-                 ip4_full_reass_drop_all (vm, node, rm, reass);
-                 ip4_full_reass_free (rm, rt, reass);
-                 goto next_packet;
-                 break;
-               case IP4_REASS_RC_NO_BUF:
-                 vlib_node_increment_counter (vm, node->node_index,
-                                              IP4_ERROR_REASS_NO_BUF, 1);
-                 ip4_full_reass_drop_all (vm, node, rm, reass);
-                 ip4_full_reass_free (rm, rt, reass);
-                 goto next_packet;
-                 break;
-               case IP4_REASS_RC_INTERNAL_ERROR:
-                 /* drop everything and start with a clean slate */
-                 vlib_node_increment_counter (vm, node->node_index,
-                                              IP4_ERROR_REASS_INTERNAL_ERROR,
-                                              1);
-                 ip4_full_reass_drop_all (vm, node, rm, reass);
-                 ip4_full_reass_free (rm, rt, reass);
-                 goto next_packet;
-                 break;
-               }
+               handoff_thread_idx;
+             break;
+           case IP4_REASS_RC_TOO_MANY_FRAGMENTS:
+             counter = IP4_ERROR_REASS_FRAGMENT_CHAIN_TOO_LONG;
+             break;
+           case IP4_REASS_RC_NO_BUF:
+             counter = IP4_ERROR_REASS_NO_BUF;
+             break;
+           case IP4_REASS_RC_INTERNAL_ERROR:
+             counter = IP4_ERROR_REASS_INTERNAL_ERROR;
+	     /* Sanitization is needed in the internal error case only, as
+	      * the incoming packet is already dropped in the other cases;
+	      * adding bi0 back to the reassembly list also fixes the
+	      * leaking of buffers during internal errors.
+	      *
+	      * It also doesn't make sense to send these buffers to a custom
+	      * app, since these fragments hit internal errors */
+             sanitize_reass_buffers_add_missing (vm, reass, &bi0);
+             reass->error_next_index = ~0;
+             break;
            }
-         else
+
+         if (~0 != counter)
            {
-             next0 = IP4_FULL_REASS_NEXT_DROP;
-             error0 = IP4_ERROR_REASS_LIMIT_REACHED;
+             vlib_node_increment_counter (vm, node->node_index, counter, 1);
+             ip4_full_reass_drop_all (vm, node, reass);
+             ip4_full_reass_free (rm, rt, reass);
+             goto next_packet;
            }
+       }
+      else
+       {
+         next0 = IP4_FULL_REASS_NEXT_DROP;
+         error0 = IP4_ERROR_REASS_LIMIT_REACHED;
+       }
 
+    packet_enqueue:
 
-       packet_enqueue:
-         b0->error = node->errors[error0];
+      if (bi0 != ~0)
+       {
+         /* bi0 might have been updated by reass_finalize, reload */
+         b0 = vlib_get_buffer (vm, bi0);
+         if (IP4_ERROR_NONE != error0)
+           {
+             b0->error = node->errors[error0];
+           }
 
-         if (bi0 != ~0)
+         if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
            {
-             to_next[0] = bi0;
-             to_next += 1;
-             n_left_to_next -= 1;
-             if (next0 == IP4_FULL_REASS_NEXT_HANDOFF)
+             if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                {
-                 if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
-                   {
-                     ip4_full_reass_add_trace (vm, node, rm, NULL, bi0,
-                                               HANDOFF, 0,
-                                               vnet_buffer (b0)->ip.
-                                               reass.owner_thread_index);
-                   }
+                 ip4_full_reass_add_trace (
+                   vm, node, NULL, bi0, HANDOFF, 0,
+                   vnet_buffer (b0)->ip.reass.owner_thread_index);
                }
-             else if (is_feature && IP4_ERROR_NONE == error0)
-               {
-                 b0 = vlib_get_buffer (vm, bi0);
-                 vnet_feature_next (&next0, b0);
-               }
-             vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
-                                              to_next, n_left_to_next,
-                                              bi0, next0);
-             IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
+           }
+         else if (FEATURE == type && IP4_ERROR_NONE == error0)
+           {
+             vnet_feature_next (&next0, b0);
            }
 
-       next_packet:
-         from += 1;
-         n_left_from -= 1;
+	 /* Also increment the to-custom-app counter, as this fragment is
+	  * going to the application as well */
+         if (CUSTOM == type)
+           {
+             vlib_node_increment_counter (vm, node->node_index,
+                                          IP4_ERROR_REASS_TO_CUSTOM_APP, 1);
+           }
+
+         to_next[n_next] = bi0;
+         nexts[n_next] = next0;
+         n_next++;
+         IP4_REASS_DEBUG_BUFFER (bi0, enqueue_next);
        }
 
-      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    next_packet:
+      from += 1;
+      n_left -= 1;
     }
 
   clib_spinlock_unlock (&rt->lock);
+
+  vlib_buffer_enqueue_to_next (vm, node, to_next, nexts, n_next);
   return frame->n_vectors;
 }
 
-static char *ip4_full_reass_error_strings[] = {
-#define _(sym, string) string,
-  foreach_ip4_error
-#undef _
-};
-
 VLIB_NODE_FN (ip4_full_reass_node) (vlib_main_t * vm,
                                    vlib_node_runtime_t * node,
                                    vlib_frame_t * frame)
 {
-  return ip4_full_reass_inline (vm, node, frame, false /* is_feature */ ,
-                               false /* is_custom_app */ );
+  return ip4_full_reass_inline (vm, node, frame, NORMAL, false /* is_local */);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip4_full_reass_node) = {
     .name = "ip4-full-reassembly",
     .vector_size = sizeof (u32),
     .format_trace = format_ip4_full_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
-    .error_strings = ip4_full_reass_error_strings,
+    .n_errors = IP4_N_ERROR,
+    .error_counters = ip4_error_counters,
     .n_next_nodes = IP4_FULL_REASS_N_NEXT,
     .next_nodes =
         {
@@ -1269,23 +1377,43 @@ VLIB_REGISTER_NODE (ip4_full_reass_node) = {
 
         },
 };
-/* *INDENT-ON* */
+
+VLIB_NODE_FN (ip4_local_full_reass_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return ip4_full_reass_inline (vm, node, frame, NORMAL, true /* is_local */);
+}
+
+VLIB_REGISTER_NODE (ip4_local_full_reass_node) = {
+    .name = "ip4-local-full-reassembly",
+    .vector_size = sizeof (u32),
+    .format_trace = format_ip4_full_reass_trace,
+    .n_errors = IP4_N_ERROR,
+    .error_counters = ip4_error_counters,
+    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
+    .next_nodes =
+        {
+                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
+                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
+                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-local-full-reassembly-handoff",
+
+        },
+};
 
 VLIB_NODE_FN (ip4_full_reass_node_feature) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * frame)
 {
-  return ip4_full_reass_inline (vm, node, frame, true /* is_feature */ ,
-                               false /* is_custom_app */ );
+  return ip4_full_reass_inline (vm, node, frame, FEATURE,
+                               false /* is_local */);
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
     .name = "ip4-full-reassembly-feature",
     .vector_size = sizeof (u32),
     .format_trace = format_ip4_full_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
-    .error_strings = ip4_full_reass_error_strings,
+    .n_errors = IP4_N_ERROR,
+    .error_counters = ip4_error_counters,
     .n_next_nodes = IP4_FULL_REASS_N_NEXT,
     .next_nodes =
         {
@@ -1294,19 +1422,45 @@ VLIB_REGISTER_NODE (ip4_full_reass_node_feature) = {
                 [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-feature-hoff",
         },
 };
-/* *INDENT-ON* */
 
-/* *INDENT-OFF* */
 VNET_FEATURE_INIT (ip4_full_reass_feature, static) = {
-    .arc_name = "ip4-unicast",
-    .node_name = "ip4-full-reassembly-feature",
-    .runs_before = VNET_FEATURES ("ip4-lookup",
-                                  "ipsec4-input-feature"),
-    .runs_after = 0,
+  .arc_name = "ip4-unicast",
+  .node_name = "ip4-full-reassembly-feature",
+  .runs_before = VNET_FEATURES ("ip4-lookup", "ipsec4-input-feature",
+                               "ip4-sv-reassembly-feature"),
+  .runs_after = 0,
+};
+
+VLIB_NODE_FN (ip4_full_reass_node_custom) (vlib_main_t * vm,
+                                          vlib_node_runtime_t * node,
+                                          vlib_frame_t * frame)
+{
+  return ip4_full_reass_inline (vm, node, frame, CUSTOM, false /* is_local */);
+}
+
+VLIB_REGISTER_NODE (ip4_full_reass_node_custom) = {
+    .name = "ip4-full-reassembly-custom",
+    .vector_size = sizeof (u32),
+    .format_trace = format_ip4_full_reass_trace,
+    .n_errors = IP4_N_ERROR,
+    .error_counters = ip4_error_counters,
+    .n_next_nodes = IP4_FULL_REASS_N_NEXT,
+    .next_nodes =
+        {
+                [IP4_FULL_REASS_NEXT_INPUT] = "ip4-input",
+                [IP4_FULL_REASS_NEXT_DROP] = "ip4-drop",
+                [IP4_FULL_REASS_NEXT_HANDOFF] = "ip4-full-reass-custom-hoff",
+        },
 };
-/* *INDENT-ON* */
 
 #ifndef CLIB_MARCH_VARIANT
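+/* intended for custom apps: register the given node as a next node of the
+ * custom reassembly node and return its next index; the app stores that
+ * index in vnet_buffer (b)->ip.reass.next_index so finished reassemblies
+ * are handed to it (error buffers go to ip.reass.error_next_index) */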
+uword
+ip4_full_reass_custom_register_next_node (uword node_index)
+{
+  return vlib_node_add_next (vlib_get_main (),
+                            ip4_full_reass_node_custom.index, node_index);
+}
+
 always_inline u32
 ip4_full_reass_get_nbuckets ()
 {
@@ -1314,7 +1468,9 @@ ip4_full_reass_get_nbuckets ()
   u32 nbuckets;
   u8 i;
 
-  nbuckets = (u32) (rm->max_reass_n / IP4_REASS_HT_LOAD_FACTOR);
+  /* need more mem with more workers */
+  nbuckets = (u32) (rm->max_reass_n * (vlib_num_workers () + 1) /
+                   IP4_REASS_HT_LOAD_FACTOR);
 
   for (i = 0; i < 31; i++)
     if ((1 << i) >= nbuckets)
@@ -1337,7 +1493,7 @@ typedef struct
 } ip4_rehash_cb_ctx;
 
 #ifndef CLIB_MARCH_VARIANT
-static void
+static int
 ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
 {
   ip4_rehash_cb_ctx *ctx = _ctx;
@@ -1345,6 +1501,7 @@ ip4_rehash_cb (clib_bihash_kv_16_8_t * kv, void *_ctx)
     {
       ctx->failure = 1;
     }
+  return (BIHASH_WALK_CONTINUE);
 }
 
 static void
@@ -1439,13 +1596,16 @@ ip4_full_reass_init_function (vlib_main_t * vm)
   nbuckets = ip4_full_reass_get_nbuckets ();
   clib_bihash_init_16_8 (&rm->hash, "ip4-dr", nbuckets, nbuckets * 1024);
 
-  node = vlib_get_node_by_name (vm, (u8 *) "ip4-drop");
-  ASSERT (node);
-  rm->ip4_drop_idx = node->index;
-
   rm->fq_index = vlib_frame_queue_main_init (ip4_full_reass_node.index, 0);
+  rm->fq_local_index =
+    vlib_frame_queue_main_init (ip4_local_full_reass_node.index, 0);
   rm->fq_feature_index =
     vlib_frame_queue_main_init (ip4_full_reass_node_feature.index, 0);
+  rm->fq_custom_index =
+    vlib_frame_queue_main_init (ip4_full_reass_node_custom.index, 0);
+
+  rm->feature_use_refcount_per_intf = NULL;
+  rm->is_local_reass_enabled = 1;
 
   return error;
 }
@@ -1454,8 +1614,8 @@ VLIB_INIT_FUNCTION (ip4_full_reass_init_function);
 #endif /* CLIB_MARCH_VARIANT */
 
 static uword
-ip4_full_reass_walk_expired (vlib_main_t * vm,
-                            vlib_node_runtime_t * node, vlib_frame_t * f)
+ip4_full_reass_walk_expired (vlib_main_t *vm, vlib_node_runtime_t *node,
+                            CLIB_UNUSED (vlib_frame_t *f))
 {
   ip4_full_reass_main_t *rm = &ip4_full_reass_main;
   uword event_type, *event_data = 0;
@@ -1470,10 +1630,11 @@ ip4_full_reass_walk_expired (vlib_main_t * vm,
 
       switch (event_type)
        {
-       case ~0:                /* no events => timeout */
-         /* nothing to do here */
-         break;
+       case ~0:
+         /* no events => timeout */
+         /* fallthrough */
        case IP4_EVENT_CONFIG_CHANGED:
+         /* nothing to do here */
          break;
        default:
          clib_warning ("BUG: event type 0x%wx", event_type);
@@ -1487,6 +1648,7 @@ ip4_full_reass_walk_expired (vlib_main_t * vm,
       uword thread_index = 0;
       int index;
       const uword nthreads = vlib_num_workers () + 1;
+
       for (thread_index = 0; thread_index < nthreads; ++thread_index)
        {
          ip4_full_reass_per_thread_t *rt =
@@ -1494,24 +1656,46 @@ ip4_full_reass_walk_expired (vlib_main_t * vm,
          clib_spinlock_lock (&rt->lock);
 
          vec_reset_length (pool_indexes_to_free);
-          /* *INDENT-OFF* */
-          pool_foreach_index (index, rt->pool, ({
-                                reass = pool_elt_at_index (rt->pool, index);
-                                if (now > reass->last_heard + rm->timeout)
-                                  {
-                                    vec_add1 (pool_indexes_to_free, index);
-                                  }
-                              }));
-          /* *INDENT-ON* */
+
+	 /* Pace the number of timeouts handled per thread, to avoid barrier
+	  * sync issues in real-world scenarios */
+
+         u32 beg = rt->last_id;
+         /* to ensure we walk at least once per sec per context */
+         u32 end =
+           beg + (IP4_REASS_MAX_REASSEMBLIES_DEFAULT *
+                    IP4_REASS_EXPIRE_WALK_INTERVAL_DEFAULT_MS / MSEC_PER_SEC +
+                  1);
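+	 /* with the defaults (1024 contexts, 50 ms walk interval) this is
+	  * ~52 entries per walk, i.e. the whole pool roughly once per
+	  * second */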
+         if (end > vec_len (rt->pool))
+           {
+             end = vec_len (rt->pool);
+             rt->last_id = 0;
+           }
+         else
+           {
+             rt->last_id = end;
+           }
+
+         pool_foreach_stepping_index (index, beg, end, rt->pool)
+         {
+           reass = pool_elt_at_index (rt->pool, index);
+           if (now > reass->last_heard + rm->timeout)
+             {
+               vec_add1 (pool_indexes_to_free, index);
+             }
+         }
+
+         if (vec_len (pool_indexes_to_free))
+           vlib_node_increment_counter (vm, node->node_index,
+                                        IP4_ERROR_REASS_TIMEOUT,
+                                        vec_len (pool_indexes_to_free));
          int *i;
-          /* *INDENT-OFF* */
           vec_foreach (i, pool_indexes_to_free)
           {
             ip4_full_reass_t *reass = pool_elt_at_index (rt->pool, i[0]);
-            ip4_full_reass_drop_all (vm, node, rm, reass);
-            ip4_full_reass_free (rm, rt, reass);
-          }
-          /* *INDENT-ON* */
+           ip4_full_reass_drop_all (vm, node, reass);
+           ip4_full_reass_free (rm, rt, reass);
+         }
 
          clib_spinlock_unlock (&rt->lock);
        }
@@ -1519,33 +1703,29 @@ ip4_full_reass_walk_expired (vlib_main_t * vm,
       vec_free (pool_indexes_to_free);
       if (event_data)
        {
-         _vec_len (event_data) = 0;
+         vec_set_len (event_data, 0);
        }
     }
 
   return 0;
 }
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip4_full_reass_expire_node) = {
-    .function = ip4_full_reass_walk_expired,
-    .type = VLIB_NODE_TYPE_PROCESS,
-    .name = "ip4-full-reassembly-expire-walk",
-    .format_trace = format_ip4_full_reass_trace,
-    .n_errors = ARRAY_LEN (ip4_full_reass_error_strings),
-    .error_strings = ip4_full_reass_error_strings,
-
+  .function = ip4_full_reass_walk_expired,
+  .type = VLIB_NODE_TYPE_PROCESS,
+  .name = "ip4-full-reassembly-expire-walk",
+  .format_trace = format_ip4_full_reass_trace,
+  .n_errors = IP4_N_ERROR,
+  .error_counters = ip4_error_counters,
 };
-/* *INDENT-ON* */
 
 static u8 *
 format_ip4_full_reass_key (u8 * s, va_list * args)
 {
   ip4_full_reass_key_t *key = va_arg (*args, ip4_full_reass_key_t *);
   s =
-    format (s,
-           "xx_id: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
-           key->xx_id, format_ip4_address, &key->src, format_ip4_address,
+    format (s, "fib_index: %u, src: %U, dst: %U, frag_id: %u, proto: %u",
+           key->fib_index, format_ip4_address, &key->src, format_ip4_address,
            &key->dst, clib_net_to_host_u16 (key->frag_id), key->proto);
   return s;
 }
@@ -1614,31 +1794,37 @@ show_ip4_reass (vlib_main_t * vm,
       clib_spinlock_lock (&rt->lock);
       if (details)
        {
-          /* *INDENT-OFF* */
-          pool_foreach (reass, rt->pool, {
+          pool_foreach (reass, rt->pool) {
             vlib_cli_output (vm, "%U", format_ip4_reass, vm, reass);
-          });
-          /* *INDENT-ON* */
+          }
        }
       sum_reass_n += rt->reass_n;
       clib_spinlock_unlock (&rt->lock);
     }
   vlib_cli_output (vm, "---------------------");
-  vlib_cli_output (vm, "Current IP4 reassemblies count: %lu\n",
+  vlib_cli_output (vm, "Current full IP4 reassemblies count: %lu\n",
                   (long unsigned) sum_reass_n);
   vlib_cli_output (vm,
-                  "Maximum configured concurrent IP4 reassemblies per worker-thread: %lu\n",
+                  "Maximum configured concurrent full IP4 reassemblies per worker-thread: %lu\n",
                   (long unsigned) rm->max_reass_n);
+  vlib_cli_output (vm,
+                  "Maximum configured amount of fragments "
+                  "per full IP4 reassembly: %lu\n",
+                  (long unsigned) rm->max_reass_len);
+  vlib_cli_output (vm,
+                  "Maximum configured full IP4 reassembly timeout: %lums\n",
+                  (long unsigned) rm->timeout_ms);
+  vlib_cli_output (vm,
+                  "Maximum configured full IP4 reassembly expire walk interval: %lums\n",
+                  (long unsigned) rm->expire_walk_interval_ms);
   return 0;
 }
 
-/* *INDENT-OFF* */
 VLIB_CLI_COMMAND (show_ip4_full_reass_cmd, static) = {
     .path = "show ip4-full-reassembly",
     .short_help = "show ip4-full-reassembly [details]",
     .function = show_ip4_reass,
 };
-/* *INDENT-ON* */
 
 #ifndef CLIB_MARCH_VARIANT
 vnet_api_error_t
@@ -1690,9 +1876,10 @@ format_ip4_full_reass_handoff_trace (u8 * s, va_list * args)
 }
 
 always_inline uword
-ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
-                                   vlib_node_runtime_t * node,
-                                   vlib_frame_t * frame, bool is_feature)
+ip4_full_reass_handoff_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+                                   vlib_frame_t *frame,
+                                   ip4_full_reass_node_type_t type,
+                                   bool is_local)
 {
   ip4_full_reass_main_t *rm = &ip4_full_reass_main;
 
@@ -1708,7 +1895,27 @@ ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
   b = bufs;
   ti = thread_indices;
 
-  fq_index = (is_feature) ? rm->fq_feature_index : rm->fq_index;
+  switch (type)
+    {
+    case NORMAL:
+      if (is_local)
+       {
+         fq_index = rm->fq_local_index;
+       }
+      else
+       {
+         fq_index = rm->fq_index;
+       }
+      break;
+    case FEATURE:
+      fq_index = rm->fq_feature_index;
+      break;
+    case CUSTOM:
+      fq_index = rm->fq_custom_index;
+      break;
+    default:
+      clib_warning ("Unexpected `type' (%d)!", type);
+    }
 
   while (n_left_from > 0)
     {
@@ -1727,9 +1934,8 @@ ip4_full_reass_handoff_node_inline (vlib_main_t * vm,
       ti += 1;
       b += 1;
     }
-  n_enq =
-    vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices,
-                                  frame->n_vectors, 1);
+  n_enq = vlib_buffer_enqueue_to_thread (vm, node, fq_index, from,
+                                        thread_indices, frame->n_vectors, 1);
 
   if (n_enq < frame->n_vectors)
     vlib_node_increment_counter (vm, node->node_index,
@@ -1742,12 +1948,11 @@ VLIB_NODE_FN (ip4_full_reass_handoff_node) (vlib_main_t * vm,
                                            vlib_node_runtime_t * node,
                                            vlib_frame_t * frame)
 {
-  return ip4_full_reass_handoff_node_inline (vm, node, frame,
-                                            false /* is_feature */ );
+  return ip4_full_reass_handoff_node_inline (vm, node, frame, NORMAL,
+                                            false /* is_local */);
 }
 
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
   .name = "ip4-full-reassembly-handoff",
   .vector_size = sizeof (u32),
@@ -1761,22 +1966,37 @@ VLIB_REGISTER_NODE (ip4_full_reass_handoff_node) = {
     [0] = "error-drop",
   },
 };
-/* *INDENT-ON* */
 
+VLIB_NODE_FN (ip4_local_full_reass_handoff_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+  return ip4_full_reass_handoff_node_inline (vm, node, frame, NORMAL,
+                                            true /* is_local */);
+}
+
+VLIB_REGISTER_NODE (ip4_local_full_reass_handoff_node) = {
+  .name = "ip4-local-full-reassembly-handoff",
+  .vector_size = sizeof (u32),
+  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
+  .error_strings = ip4_full_reass_handoff_error_strings,
+  .format_trace = format_ip4_full_reass_handoff_trace,
+
+  .n_next_nodes = 1,
+
+  .next_nodes = {
+    [0] = "error-drop",
+  },
+};
 
-/* *INDENT-OFF* */
 VLIB_NODE_FN (ip4_full_reass_feature_handoff_node) (vlib_main_t * vm,
                                                    vlib_node_runtime_t *
                                                    node,
                                                    vlib_frame_t * frame)
 {
-  return ip4_full_reass_handoff_node_inline (vm, node, frame,
-                                            true /* is_feature */ );
+  return ip4_full_reass_handoff_node_inline (vm, node, frame, FEATURE,
+                                            false /* is_local */);
 }
-/* *INDENT-ON* */
-
 
-/* *INDENT-OFF* */
 VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
   .name = "ip4-full-reass-feature-hoff",
   .vector_size = sizeof (u32),
@@ -1790,7 +2010,78 @@ VLIB_REGISTER_NODE (ip4_full_reass_feature_handoff_node) = {
     [0] = "error-drop",
   },
 };
-/* *INDENT-ON* */
+
+VLIB_NODE_FN (ip4_full_reass_custom_handoff_node) (vlib_main_t * vm,
+                                                   vlib_node_runtime_t *
+                                                   node,
+                                                   vlib_frame_t * frame)
+{
+  return ip4_full_reass_handoff_node_inline (vm, node, frame, CUSTOM,
+                                            false /* is_local */);
+}
+
+VLIB_REGISTER_NODE (ip4_full_reass_custom_handoff_node) = {
+  .name = "ip4-full-reass-custom-hoff",
+  .vector_size = sizeof (u32),
+  .n_errors = ARRAY_LEN(ip4_full_reass_handoff_error_strings),
+  .error_strings = ip4_full_reass_handoff_error_strings,
+  .format_trace = format_ip4_full_reass_handoff_trace,
+
+  .n_next_nodes = 1,
+
+  .next_nodes = {
+    [0] = "error-drop",
+  },
+};
+
+#ifndef CLIB_MARCH_VARIANT
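+/* reference-counted per-interface enable/disable of
+ * "ip4-full-reassembly-feature" on the ip4-unicast arc; the vnet feature
+ * is only toggled on the 0->1 and 1->0 transitions */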
+int
+ip4_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
+{
+  ip4_full_reass_main_t *rm = &ip4_full_reass_main;
+  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index);
+  if (is_enable)
+    {
+      if (!rm->feature_use_refcount_per_intf[sw_if_index])
+       {
+         ++rm->feature_use_refcount_per_intf[sw_if_index];
+         return vnet_feature_enable_disable ("ip4-unicast",
+                                             "ip4-full-reassembly-feature",
+                                             sw_if_index, 1, 0, 0);
+       }
+      ++rm->feature_use_refcount_per_intf[sw_if_index];
+    }
+  else
+    {
+      --rm->feature_use_refcount_per_intf[sw_if_index];
+      if (!rm->feature_use_refcount_per_intf[sw_if_index])
+       return vnet_feature_enable_disable ("ip4-unicast",
+                                           "ip4-full-reassembly-feature",
+                                           sw_if_index, 0, 0, 0);
+    }
+  return -1;
+}
+
+void
+ip4_local_full_reass_enable_disable (int enable)
+{
+  if (enable)
+    {
+      ip4_full_reass_main.is_local_reass_enabled = 1;
+    }
+  else
+    {
+      ip4_full_reass_main.is_local_reass_enabled = 0;
+    }
+}
+
+int
+ip4_local_full_reass_enabled ()
+{
+  return ip4_full_reass_main.is_local_reass_enabled;
+}
+
+#endif
 
 /*
  * fd.io coding-style-patch-verification: ON