map: use ip6-full-reassembly instead of own code
[vpp.git] / src / vnet / ip / reass / ip6_full_reass.c
index ef10149..bba11e5 100644 (file)
@@ -164,6 +164,8 @@ typedef struct
   u32 fq_index;
   u32 fq_feature_index;
 
+  // reference count for enabling/disabling feature - per interface
+  u32 *feature_use_refcount_per_intf;
 } ip6_full_reass_main_t;
 
 extern ip6_full_reass_main_t ip6_full_reass_main;
@@ -885,13 +887,13 @@ ip6_full_reass_update (vlib_main_t * vm, vlib_node_runtime_t * node,
       else
        {
          // overlapping fragment - not allowed by RFC 8200
-         ip6_full_reass_drop_all (vm, node, rm, reass);
-         ip6_full_reass_free (rm, rt, reass);
          if (PREDICT_FALSE (fb->flags & VLIB_BUFFER_IS_TRACED))
            {
              ip6_full_reass_add_trace (vm, node, rm, reass, *bi0,
                                        RANGE_OVERLAP, ~0);
            }
+         ip6_full_reass_drop_all (vm, node, rm, reass);
+         ip6_full_reass_free (rm, rt, reass);
          *next0 = IP6_FULL_REASSEMBLY_NEXT_DROP;
          *error0 = IP6_ERROR_REASS_OVERLAPPING_FRAGMENT;
          return IP6_FULL_REASS_RC_OK;
@@ -911,11 +913,12 @@ check_if_done_maybe:
       reass->data_len == reass->last_packet_octet + 1)
     {
       *handoff_thread_idx = reass->sendout_thread_index;
+      int handoff =
+       reass->memory_owner_thread_index != reass->sendout_thread_index;
       ip6_full_reass_rc_t rc =
        ip6_full_reass_finalize (vm, node, rm, rt, reass, bi0, next0, error0,
                                 is_custom_app);
-      if (IP6_FULL_REASS_RC_OK == rc
-         && reass->memory_owner_thread_index != reass->sendout_thread_index)
+      if (IP6_FULL_REASS_RC_OK == rc && handoff)
        {
          return IP6_FULL_REASS_RC_HANDOFF;
        }
@@ -1101,12 +1104,8 @@ ip6_full_reassembly_inline (vlib_main_t * vm,
          if (PREDICT_FALSE (do_handoff))
            {
              next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
-             if (is_feature)
-               vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
-                 kv.v.memory_owner_thread_index;
-             else
-               vnet_buffer (b0)->ip.reass.owner_thread_index =
-                 kv.v.memory_owner_thread_index;
+             vnet_buffer (b0)->ip.reass.owner_thread_index =
+               kv.v.memory_owner_thread_index;
            }
          else if (reass)
            {
@@ -1121,12 +1120,8 @@ ip6_full_reassembly_inline (vlib_main_t * vm,
                case IP6_FULL_REASS_RC_HANDOFF:
                  next0 = IP6_FULL_REASSEMBLY_NEXT_HANDOFF;
                  b0 = vlib_get_buffer (vm, bi0);
-                 if (is_feature)
-                   vnet_buffer (b0)->ip.reass.owner_feature_thread_index =
-                     handoff_thread_idx;
-                 else
-                   vnet_buffer (b0)->ip.reass.owner_thread_index =
-                     handoff_thread_idx;
+                 vnet_buffer (b0)->ip.reass.owner_thread_index =
+                   handoff_thread_idx;
                  break;
                case IP6_FULL_REASS_RC_TOO_MANY_FRAGMENTS:
                  vlib_node_increment_counter (vm, node->node_index,
@@ -1179,16 +1174,10 @@ ip6_full_reassembly_inline (vlib_main_t * vm,
                {
                  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
                    {
-                     if (is_feature)
-                       ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
-                                                 HANDOFF,
-                                                 vnet_buffer (b0)->ip.
-                                                 reass.owner_feature_thread_index);
-                     else
-                       ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
-                                                 HANDOFF,
-                                                 vnet_buffer (b0)->ip.
-                                                 reass.owner_thread_index);
+                     ip6_full_reass_add_trace (vm, node, rm, NULL, bi0,
+                                               HANDOFF,
+                                               vnet_buffer (b0)->ip.
+                                               reass.owner_thread_index);
                    }
                }
              else if (is_feature && IP6_ERROR_NONE == error0)
@@ -1440,6 +1429,7 @@ ip6_full_reass_init_function (vlib_main_t * vm)
   rm->fq_feature_index =
     vlib_frame_queue_main_init (ip6_full_reass_node_feature.index, 0);
 
+  rm->feature_use_refcount_per_intf = NULL;
   return error;
 }
 
@@ -1730,10 +1720,7 @@ ip6_full_reassembly_handoff_inline (vlib_main_t * vm,
 
   while (n_left_from > 0)
     {
-      ti[0] =
-       (is_feature) ? vnet_buffer (b[0])->ip.
-       reass.owner_feature_thread_index : vnet_buffer (b[0])->ip.
-       reass.owner_thread_index;
+      ti[0] = vnet_buffer (b[0])->ip.reass.owner_thread_index;
 
       if (PREDICT_FALSE
          ((node->flags & VLIB_NODE_FLAG_TRACE)
@@ -1806,6 +1793,35 @@ VLIB_REGISTER_NODE (ip6_full_reassembly_feature_handoff_node) = {
 };
 /* *INDENT-ON* */
 
+#ifndef CLIB_MARCH_VARIANT
+int
+ip6_full_reass_enable_disable_with_refcnt (u32 sw_if_index, int is_enable)
+{
+  ip6_full_reass_main_t *rm = &ip6_full_reass_main;
+  vec_validate (rm->feature_use_refcount_per_intf, sw_if_index); /* grow refcount vector to cover this interface */
+  if (is_enable)
+    {
+      if (!rm->feature_use_refcount_per_intf[sw_if_index]) /* first user - actually enable the feature */
+       {
+         ++rm->feature_use_refcount_per_intf[sw_if_index];
+         return vnet_feature_enable_disable ("ip6-unicast",
+                                             "ip6-full-reassembly-feature",
+                                             sw_if_index, 1, 0, 0);
+       }
+      ++rm->feature_use_refcount_per_intf[sw_if_index]; /* already enabled - just bump the refcount */
+    }
+  else
+    {
+      --rm->feature_use_refcount_per_intf[sw_if_index];
+      if (!rm->feature_use_refcount_per_intf[sw_if_index]) /* last user gone - actually disable the feature */
+       return vnet_feature_enable_disable ("ip6-unicast",
+                                           "ip6-full-reassembly-feature",
+                                           sw_if_index, 0, 0, 0);
+    }
+  return -1; /* NOTE(review): -1 on refcount-only changes (no vnet call) - confirm callers treat this as success */
+}
+#endif
+
 /*
  * fd.io coding-style-patch-verification: ON
  *