misc: Remove the unused GBP fields from the buffer meta-data
[vpp.git] / src/vnet/ipsec/ipsec_spd_policy.c
index 8528c8e..85acf7a 100644
@@ -123,6 +123,10 @@ ipsec_policy_mk_type (bool is_outbound,
                   IPSEC_SPD_POLICY_IP4_INBOUND_BYPASS);
          return (0);
        case IPSEC_POLICY_ACTION_DISCARD:
+         *type = (is_ipv6 ?
+                  IPSEC_SPD_POLICY_IP6_INBOUND_DISCARD :
+                  IPSEC_SPD_POLICY_IP4_INBOUND_DISCARD);
+         return (0);
        case IPSEC_POLICY_ACTION_RESOLVE:
          break;
        }
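For context, here is a minimal standalone sketch (not VPP code) of the mapping this hunk completes: before the added lines, IPSEC_POLICY_ACTION_DISCARD fell through to the IPSEC_POLICY_ACTION_RESOLVE branch, so inbound discard policies never got an SPD bucket assigned and presumably hit the error path after the switch. The enum and function names below are simplified stand-ins, not the real VPP definitions.

/* Hypothetical, simplified model of the inbound action-to-bucket mapping.
 * Only the inbound, non-PROTECT cases handled by the switch above are shown. */
#include <stdbool.h>

typedef enum { ACTION_BYPASS, ACTION_DISCARD, ACTION_RESOLVE } demo_action_t;
typedef enum {
  BUCKET_IP4_INBOUND_BYPASS,
  BUCKET_IP6_INBOUND_BYPASS,
  BUCKET_IP4_INBOUND_DISCARD,
  BUCKET_IP6_INBOUND_DISCARD,
  BUCKET_INVALID
} demo_bucket_t;

static int
demo_mk_inbound_type (bool is_ipv6, demo_action_t action, demo_bucket_t *type)
{
  switch (action)
    {
    case ACTION_BYPASS:
      *type = is_ipv6 ? BUCKET_IP6_INBOUND_BYPASS : BUCKET_IP4_INBOUND_BYPASS;
      return 0;
    case ACTION_DISCARD:
      /* Before the fix above, this case fell through to ACTION_RESOLVE,
       * so discard policies could not be classified into a bucket. */
      *type = is_ipv6 ? BUCKET_IP6_INBOUND_DISCARD : BUCKET_IP4_INBOUND_DISCARD;
      return 0;
    case ACTION_RESOLVE:
      break;
    }
  *type = BUCKET_INVALID;
  return -1;
}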
@@ -142,17 +146,6 @@ ipsec_add_del_policy (vlib_main_t * vm,
   u32 spd_index;
   uword *p;
 
-  clib_warning ("policy-id %u priority %d type %U", policy->id,
-               policy->priority, format_ipsec_policy_type, policy->type);
-
-  if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
-    {
-      p = hash_get (im->sa_index_by_sa_id, policy->sa_id);
-      if (!p)
-       return VNET_API_ERROR_SYSCALL_ERROR_1;
-      policy->sa_index = p[0];
-    }
-
   p = hash_get (im->spd_index_by_spd_id, policy->id);
 
   if (!p)
@@ -163,10 +156,44 @@ ipsec_add_del_policy (vlib_main_t * vm,
   if (!spd)
     return VNET_API_ERROR_SYSCALL_ERROR_1;
 
+  if (im->flow_cache_flag && !policy->is_ipv6 &&
+      policy->type == IPSEC_SPD_POLICY_IP4_OUTBOUND)
+    {
+      /*
+       * Flow cache entry is valid only when epoch_count value in control
+       * plane and data plane match. Otherwise, flow cache entry is considered
+       * stale. To avoid the race condition of using old epoch_count value
+       * in data plane after the roll over of epoch_count in control plane,
+       * entire flow cache is reset.
+       */
+      if (im->epoch_count == 0xFFFFFFFF)
+       {
+         /* Reset all the entries in flow cache */
+         clib_memset_u8 (im->ipsec4_out_spd_hash_tbl, 0,
+                         im->ipsec4_out_spd_hash_num_buckets *
+                           (sizeof (*(im->ipsec4_out_spd_hash_tbl))));
+       }
+      /* Increment epoch counter by 1 */
+      clib_atomic_fetch_add_relax (&im->epoch_count, 1);
+      /* Reset spd flow cache counter since all old entries are stale */
+      clib_atomic_store_relax_n (&im->ipsec4_out_spd_flow_cache_entries, 0);
+    }
+
   if (is_add)
     {
       u32 policy_index;
 
+      if (policy->policy == IPSEC_POLICY_ACTION_PROTECT)
+       {
+         index_t sa_index = ipsec_sa_find_and_lock (policy->sa_id);
+
+         if (INDEX_INVALID == sa_index)
+           return VNET_API_ERROR_SYSCALL_ERROR_1;
+         policy->sa_index = sa_index;
+       }
+      else
+       policy->sa_index = INDEX_INVALID;
+
       pool_get (im->policies, vp);
       clib_memcpy (vp, policy, sizeof (*vp));
       policy_index = vp - im->policies;
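The comment in the hunk above describes an epoch-based invalidation scheme for the IPv4 outbound flow cache. The following self-contained sketch models that scheme with C11 atomics instead of VPP's clib_atomic_* wrappers; the table layout, the demo_* names, and the restart-at-1 wrap handling are illustrative assumptions, not the actual ipsec4_out_spd_hash_tbl layout.

/* Illustrative model of epoch-based flow-cache invalidation (not VPP code).
 * Each cached entry records the epoch it was written under; a lookup only
 * trusts an entry whose epoch matches the current global epoch.  Bumping
 * the epoch therefore invalidates every entry at once, and a full wipe is
 * only needed when the 32-bit counter is about to wrap, so stale entries
 * cannot alias a reused epoch value. */
#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define DEMO_BUCKETS 1024

typedef struct
{
  uint32_t key;     /* flow identifier (illustrative) */
  uint32_t result;  /* cached policy index (illustrative) */
  uint32_t epoch;   /* epoch at which this entry was written */
} demo_flow_entry_t;

static demo_flow_entry_t demo_cache[DEMO_BUCKETS];
static _Atomic uint32_t demo_epoch = 1;

/* Control plane: invalidate the whole cache after a policy change. */
static void
demo_cache_invalidate (void)
{
  uint32_t cur = atomic_load_explicit (&demo_epoch, memory_order_relaxed);
  if (cur == 0xFFFFFFFF)
    {
      /* Counter is about to wrap: wipe the table, then restart at 1
       * (epoch 0 never matches in this simplified model). */
      memset (demo_cache, 0, sizeof (demo_cache));
      atomic_store_explicit (&demo_epoch, 1, memory_order_relaxed);
    }
  else
    atomic_fetch_add_explicit (&demo_epoch, 1, memory_order_relaxed);
}

/* Data plane: record a classification result under the current epoch. */
static void
demo_cache_insert (uint32_t key, uint32_t result)
{
  demo_flow_entry_t *e = &demo_cache[key % DEMO_BUCKETS];
  e->key = key;
  e->result = result;
  e->epoch = atomic_load_explicit (&demo_epoch, memory_order_relaxed);
}

/* Data plane: an entry is usable only if its epoch is current. */
static int
demo_cache_lookup (uint32_t key, uint32_t *result)
{
  demo_flow_entry_t *e = &demo_cache[key % DEMO_BUCKETS];
  if (e->key == key &&
      e->epoch == atomic_load_explicit (&demo_epoch, memory_order_relaxed))
    {
      *result = e->result;
      return 0;
    }
  return -1; /* miss or stale: fall back to the full SPD walk */
}

The design point is that an ordinary policy change only costs one atomic increment, while the expensive memset happens once per 2^32 changes; the real change above additionally zeroes the flow-cache entry counter because every cached entry just became stale.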
@@ -191,6 +218,7 @@ ipsec_add_del_policy (vlib_main_t * vm,
        if (ipsec_policy_is_equal (vp, policy))
          {
            vec_del1 (spd->policies[policy->type], ii);
+           ipsec_sa_unlock (vp->sa_index);
            pool_put (im->policies, vp);
            break;
          }
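Taken together, the last three hunks replace the eager sa_index_by_sa_id hash lookup with ipsec_sa_find_and_lock() on the add path and pair it with ipsec_sa_unlock() on the delete path, so an SA referenced by a PROTECT policy holds a reference for the policy's lifetime; non-PROTECT policies carry INDEX_INVALID, which the unlock presumably treats as a no-op. Below is a hypothetical reference-counting sketch of that pattern, with made-up demo_* names rather than the VPP SA pool API.

/* Illustrative reference-counting pattern behind the find-and-lock /
 * unlock pairing above (not the VPP implementation).  A PROTECT policy
 * takes a reference on its SA when it is added and drops it when the
 * policy is deleted, so the SA stays valid for the policy's lifetime. */
#include <stdint.h>
#include <stddef.h>

#define DEMO_INDEX_INVALID ((uint32_t) ~0)

typedef struct
{
  uint32_t sa_id;
  uint32_t locks;  /* outstanding references from policies, tunnels, ... */
  int in_use;
} demo_sa_t;

static demo_sa_t demo_sa_pool[64];

/* Resolve an SA id to a pool index and take a reference, or fail. */
static uint32_t
demo_sa_find_and_lock (uint32_t sa_id)
{
  for (size_t i = 0; i < sizeof (demo_sa_pool) / sizeof (demo_sa_pool[0]); i++)
    if (demo_sa_pool[i].in_use && demo_sa_pool[i].sa_id == sa_id)
      {
        demo_sa_pool[i].locks++;
        return (uint32_t) i;
      }
  return DEMO_INDEX_INVALID;
}

/* Drop a reference; a no-op for DEMO_INDEX_INVALID, and the SA is only
 * reclaimed once nothing points at it. */
static void
demo_sa_unlock (uint32_t sa_index)
{
  if (sa_index == DEMO_INDEX_INVALID)
    return;
  demo_sa_t *sa = &demo_sa_pool[sa_index];
  if (--sa->locks == 0)
    sa->in_use = 0; /* last reference gone: safe to reclaim */
}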