flow: add IPSec ESP/AH flow
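
Add handling for the VNET_FLOW_TYPE_IP4_IPSEC_ESP and
VNET_FLOW_TYPE_IP4_IPSEC_AH flow types in the dpdk plugin: the IPv4
source/destination address and mask are translated into an
RTE_FLOW_ITEM_TYPE_IPV4 item and the SPI into an RTE_FLOW_ITEM_TYPE_ESP
or RTE_FLOW_ITEM_TYPE_AH item. The blind cast of f->rss_fun is replaced
by dpdk_flow_convert_rss_func(), which rejects RSS hash functions DPDK
cannot express, and the two new flow types are accepted by
dpdk_flow_ops_fn().

The sketch below is illustrative only and not part of the patch. It shows
how a client could request one of the new flow types through the generic
vnet flow API, assuming the vnet_flow_add()/vnet_flow_enable() entry
points and the ip4_ipsec_esp fields used in this patch; hw_if_index, the
mark id and the SPI value are placeholders.

    #include <vnet/vnet.h>
    #include <vnet/flow/flow.h>

    /* Illustrative sketch: ask the NIC to classify "IPv4 + ESP with this
     * SPI" and mark matching packets. Source/destination masks are left
     * at zero, which takes the wildcard (item->spec = NULL) path added
     * in dpdk_flow_add() below. */
    static int
    esp_flow_example (u32 hw_if_index, u32 spi)
    {
      vnet_main_t *vnm = vnet_get_main ();
      u32 flow_index = ~0;
      int rv;

      vnet_flow_t flow = {
        .type = VNET_FLOW_TYPE_IP4_IPSEC_ESP,
        .actions = VNET_FLOW_ACTION_MARK,
        .mark_flow_id = 1,                   /* placeholder mark id */
        .ip4_ipsec_esp = {
          .protocol = IP_PROTOCOL_IPSEC_ESP, /* selects the ESP item path */
          .spi = spi,
        },
      };

      if ((rv = vnet_flow_add (vnm, &flow, &flow_index)))
        return rv;
      return vnet_flow_enable (vnm, flow_index, hw_if_index);
    }
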
[vpp.git] src/plugins/dpdk/device/flow.c
index 9d0887c..674f2f5 100644
@@ -79,6 +79,33 @@ dpdk_flow_convert_rss_types (u64 type, u64 * dpdk_rss_type)
     return;
 }
 
+static inline enum rte_eth_hash_function
+dpdk_flow_convert_rss_func (vnet_rss_function_t func)
+{
+  enum rte_eth_hash_function rss_func;
+
+  switch (func)
+    {
+    case VNET_RSS_FUNC_DEFAULT:
+      rss_func = RTE_ETH_HASH_FUNCTION_DEFAULT;
+      break;
+    case VNET_RSS_FUNC_TOEPLITZ:
+      rss_func = RTE_ETH_HASH_FUNCTION_TOEPLITZ;
+      break;
+    case VNET_RSS_FUNC_SIMPLE_XOR:
+      rss_func = RTE_ETH_HASH_FUNCTION_SIMPLE_XOR;
+      break;
+    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
+      rss_func = RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
+      break;
+    default:
+      rss_func = RTE_ETH_HASH_FUNCTION_MAX;
+      break;
+    }
+
+  return rss_func;
+}
+
 static int
 dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
 {
@@ -91,6 +118,8 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
   struct rte_flow_item_tcp tcp[2] = { };
   struct rte_flow_item_gtp gtp[2] = { };
   struct rte_flow_item_l2tpv3oip l2tp[2] = { };
+  struct rte_flow_item_esp esp[2] = { };
+  struct rte_flow_item_ah ah[2] = { };
   struct rte_flow_action_mark mark = { 0 };
   struct rte_flow_action_queue queue = { 0 };
   struct rte_flow_action_rss rss = { 0 };
@@ -192,6 +221,48 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
        }
       protocol = l2tp->protocol;
     }
+  else if (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP)
+    {
+      vnet_flow_ip4_ipsec_esp_t *tesp = &f->ip4_ipsec_esp;
+      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+
+      if (!tesp->src_addr.mask.as_u32 && !tesp->dst_addr.mask.as_u32)
+       {
+         item->spec = NULL;
+         item->mask = NULL;
+       }
+      else
+       {
+         ip4[0].hdr.src_addr = tesp->src_addr.addr.as_u32;
+         ip4[1].hdr.src_addr = tesp->src_addr.mask.as_u32;
+         ip4[0].hdr.dst_addr = tesp->dst_addr.addr.as_u32;
+         ip4[1].hdr.dst_addr = tesp->dst_addr.mask.as_u32;
+         item->spec = ip4;
+         item->mask = ip4 + 1;
+       }
+      protocol = tesp->protocol;
+    }
+  else if (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH)
+    {
+      vnet_flow_ip4_ipsec_ah_t *tah = &f->ip4_ipsec_ah;
+      item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+
+      if (!tah->src_addr.mask.as_u32 && !tah->dst_addr.mask.as_u32)
+       {
+         item->spec = NULL;
+         item->mask = NULL;
+       }
+      else
+       {
+         ip4[0].hdr.src_addr = tah->src_addr.addr.as_u32;
+         ip4[1].hdr.src_addr = tah->src_addr.mask.as_u32;
+         ip4[0].hdr.dst_addr = tah->dst_addr.addr.as_u32;
+         ip4[1].hdr.dst_addr = tah->dst_addr.mask.as_u32;
+         item->spec = ip4;
+         item->mask = ip4 + 1;
+       }
+      protocol = tah->protocol;
+    }
   else if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
           (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
@@ -317,6 +388,30 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
          item->mask = tcp + 1;
        }
     }
+  else if (protocol == IP_PROTOCOL_IPSEC_ESP)
+    {
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_ESP;
+
+      vnet_flow_ip4_ipsec_esp_t *tesp = &f->ip4_ipsec_esp;
+      esp[0].hdr.spi = clib_host_to_net_u32 (tesp->spi);
+      esp[1].hdr.spi = ~0;
+
+      item->spec = esp;
+      item->mask = esp + 1;
+    }
+  else if (protocol == IP_PROTOCOL_IPSEC_AH)
+    {
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_AH;
+
+      vnet_flow_ip4_ipsec_ah_t *tah = &f->ip4_ipsec_ah;
+      ah[0].spi = clib_host_to_net_u32 (tah->spi);
+      ah[1].spi = ~0;
+
+      item->spec = ah;
+      item->mask = ah + 1;
+    }
   else if (protocol == IP_PROTOCOL_RESERVED)
     {
       rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
@@ -336,6 +431,7 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
       item->spec = l2tp;
       item->mask = l2tp + 1;
     }
+
   if (f->type == VNET_FLOW_TYPE_IP4_VXLAN)
     {
       u32 vni = f->ip4_vxlan.vni;
@@ -571,6 +667,7 @@ pattern_end:
   if (f->actions & VNET_FLOW_ACTION_RSS)
     {
       u64 rss_type = 0;
+
       vec_add2 (actions, action, 1);
       action->type = RTE_FLOW_ACTION_TYPE_RSS;
       action->conf = &rss;
@@ -579,7 +676,12 @@ pattern_end:
       dpdk_flow_convert_rss_types (f->rss_types, &rss_type);
 
       rss.types = rss_type;
-      rss.func = (enum rte_eth_hash_function) f->rss_fun;
+      if ((rss.func = dpdk_flow_convert_rss_func (f->rss_fun)) ==
+         RTE_ETH_HASH_FUNCTION_MAX)
+       {
+         rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+         goto done;
+       }
 
       if (fate == true)
        {
@@ -735,6 +837,8 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
     case VNET_FLOW_TYPE_IP6_GTPU_IP4:
     case VNET_FLOW_TYPE_IP6_GTPU_IP6:
     case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
+    case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
+    case VNET_FLOW_TYPE_IP4_IPSEC_AH:
       if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
       break;