flow: Add GTP support 11/22811/8
authorChenmin Sun <chenmin.sun@intel.com>
Thu, 17 Oct 2019 23:35:16 +0000 (07:35 +0800)
committerDamjan Marion <dmarion@me.com>
Wed, 20 Nov 2019 17:34:48 +0000 (17:34 +0000)
Type: feature

Adding:
VNET_FLOW_TYPE_IP4_GTPC
VNET_FLOW_TYPE_IP4_GTPU
VNET_FLOW_TYPE_IP4_GTPU_IP4
VNET_FLOW_TYPE_IP4_GTPU_IP6
VNET_FLOW_TYPE_IP6_GTPC
VNET_FLOW_TYPE_IP6_GTPU
VNET_FLOW_TYPE_IP6_GTPU_IP4
VNET_FLOW_TYPE_IP6_GTPU_IP6
in this patch

Signed-off-by: Chenmin Sun <chenmin.sun@intel.com>
Change-Id: I4ad53895b5ac0771432bb039b8c79e48e3c19f25

src/plugins/dpdk/device/flow.c
src/vnet/flow/flow.h
src/vnet/flow/flow_cli.c

index cea96fd..7938568 100644 (file)
 
 /* constant structs */
 static const struct rte_flow_attr ingress = {.ingress = 1 };
-static const struct rte_flow_item_eth any_eth[2] = { };
-static const struct rte_flow_item_vlan any_vlan[2] = { };
 
 static int
 dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
 {
   struct rte_flow_item_ipv4 ip4[2] = { };
+  struct rte_flow_item_ipv4 inner_ip4[2] = { };
   struct rte_flow_item_ipv6 ip6[2] = { };
+  struct rte_flow_item_ipv6 inner_ip6[2] = { };
   struct rte_flow_item_udp udp[2] = { };
   struct rte_flow_item_tcp tcp[2] = { };
+  struct rte_flow_item_gtp gtp[2] = { };
   struct rte_flow_action_mark mark = { 0 };
   struct rte_flow_action_queue queue = { 0 };
   struct rte_flow_item *item, *items = 0;
@@ -69,30 +70,45 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
   /* Ethernet */
   vec_add2 (items, item, 1);
   item->type = RTE_FLOW_ITEM_TYPE_ETH;
-  item->spec = any_eth;
-  item->mask = any_eth + 1;
+  item->spec = NULL;
+  item->mask = NULL;
 
   /* VLAN */
-  if (f->type != VNET_FLOW_TYPE_IP4_VXLAN)
+  if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
+      (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE))
     {
       vec_add2 (items, item, 1);
       item->type = RTE_FLOW_ITEM_TYPE_VLAN;
-      item->spec = any_vlan;
-      item->mask = any_vlan + 1;
+      item->spec = NULL;
+      item->mask = NULL;
     }
 
   /* IP */
   vec_add2 (items, item, 1);
-  if (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+  if ((f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||
+      (f->type == VNET_FLOW_TYPE_IP6_GTPC) ||
+      (f->type == VNET_FLOW_TYPE_IP6_GTPU) ||
+      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
+      (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
     {
       vnet_flow_ip6_n_tuple_t *t6 = &f->ip6_n_tuple;
-      clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
-      clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
-      clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
-      clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
       item->type = RTE_FLOW_ITEM_TYPE_IPV6;
-      item->spec = ip6;
-      item->mask = ip6 + 1;
+
+      if (!clib_memcmp (&t6->src_addr.mask, &zero_addr, 16) &&
+         !clib_memcmp (&t6->dst_addr.mask, &zero_addr, 16))
+       {
+         item->spec = NULL;
+         item->mask = NULL;
+       }
+      else
+       {
+         clib_memcpy_fast (ip6[0].hdr.src_addr, &t6->src_addr.addr, 16);
+         clib_memcpy_fast (ip6[1].hdr.src_addr, &t6->src_addr.mask, 16);
+         clib_memcpy_fast (ip6[0].hdr.dst_addr, &t6->dst_addr.addr, 16);
+         clib_memcpy_fast (ip6[1].hdr.dst_addr, &t6->dst_addr.mask, 16);
+         item->spec = ip6;
+         item->mask = ip6 + 1;
+       }
 
       src_port = t6->src_port.port;
       dst_port = t6->dst_port.port;
@@ -100,16 +116,29 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
       dst_port_mask = t6->dst_port.mask;
       protocol = t6->protocol;
     }
-  else if (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+  else if ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||
+          (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||
+          (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||
+          (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
+          (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
     {
       vnet_flow_ip4_n_tuple_t *t4 = &f->ip4_n_tuple;
-      ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
-      ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
-      ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
-      ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
       item->type = RTE_FLOW_ITEM_TYPE_IPV4;
-      item->spec = ip4;
-      item->mask = ip4 + 1;
+
+      if (!t4->src_addr.mask.as_u32 && !t4->dst_addr.mask.as_u32)
+       {
+         item->spec = NULL;
+         item->mask = NULL;
+       }
+      else
+       {
+         ip4[0].hdr.src_addr = t4->src_addr.addr.as_u32;
+         ip4[1].hdr.src_addr = t4->src_addr.mask.as_u32;
+         ip4[0].hdr.dst_addr = t4->dst_addr.addr.as_u32;
+         ip4[1].hdr.dst_addr = t4->dst_addr.mask.as_u32;
+         item->spec = ip4;
+         item->mask = ip4 + 1;
+       }
 
       src_port = t4->src_port.port;
       dst_port = t4->dst_port.port;
@@ -144,21 +173,37 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
   vec_add2 (items, item, 1);
   if (protocol == IP_PROTOCOL_UDP)
     {
-      udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
-      udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
-      udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
-      udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
       item->type = RTE_FLOW_ITEM_TYPE_UDP;
-      item->spec = udp;
-      item->mask = udp + 1;
+
+      if ((src_port_mask == 0) && (dst_port_mask == 0))
+       {
+         item->spec = NULL;
+         item->mask = NULL;
+       }
+      else
+       {
+         udp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
+         udp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
+         udp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
+         udp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
+         item->spec = udp;
+         item->mask = udp + 1;
+       }
     }
   else if (protocol == IP_PROTOCOL_TCP)
     {
-      tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
-      tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
-      tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
-      tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
-      item->type = RTE_FLOW_ITEM_TYPE_TCP;
-      item->spec = tcp;
-      item->mask = tcp + 1;
+      item->type = RTE_FLOW_ITEM_TYPE_TCP;
+
+      if ((src_port_mask == 0) && (dst_port_mask == 0))
+       {
+         item->spec = NULL;
+         item->mask = NULL;
+       }
+      else
+       {
+         tcp[0].hdr.src_port = clib_host_to_net_u16 (src_port);
+         tcp[1].hdr.src_port = clib_host_to_net_u16 (src_port_mask);
+         tcp[0].hdr.dst_port = clib_host_to_net_u16 (dst_port);
+         tcp[1].hdr.dst_port = clib_host_to_net_u16 (dst_port_mask);
+         item->spec = tcp;
+         item->mask = tcp + 1;
+       }
     }
@@ -195,6 +240,185 @@ dpdk_flow_add (dpdk_device_t * xd, vnet_flow_t * f, dpdk_flow_entry_t * fe)
       item->spec = raw;
       item->mask = raw + 1;
     }
+  else if (f->type == VNET_FLOW_TYPE_IP4_GTPC)
+    {
+      vnet_flow_ip4_gtpc_t *gc = &f->ip4_gtpc;
+      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
+      gtp[1].teid = ~0;
+
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
+      item->spec = gtp;
+      item->mask = gtp + 1;
+    }
+  else if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
+    {
+      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
+      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
+      gtp[1].teid = ~0;
+
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
+      item->spec = gtp;
+      item->mask = gtp + 1;
+    }
+  else if ((f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4) ||
+          (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6))
+    {
+      vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
+      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
+      gtp[1].teid = ~0;
+
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
+      item->spec = gtp;
+      item->mask = gtp + 1;
+
+      /* inner IP4/IP6 header */
+      if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
+       {
+         vec_add2 (items, item, 1);
+         item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+
+         vnet_flow_ip4_gtpu_ip4_t *gu4 = &f->ip4_gtpu_ip4;
+         if (!gu4->inner_src_addr.mask.as_u32 &&
+             !gu4->inner_dst_addr.mask.as_u32)
+           {
+             item->spec = NULL;
+             item->mask = NULL;
+           }
+         else
+           {
+             inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
+             inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
+             inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
+             inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
+             item->spec = inner_ip4;
+             item->mask = inner_ip4 + 1;
+           }
+       }
+      else if (f->type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
+       {
+         ip6_address_t zero_addr;
+         vnet_flow_ip4_gtpu_ip6_t *gu6 = &f->ip4_gtpu_ip6;
+
+         clib_memset (&zero_addr, 0, sizeof (ip6_address_t));
+
+         vec_add2 (items, item, 1);
+         item->type = RTE_FLOW_ITEM_TYPE_IPV6;
+
+         if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
+             !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
+           {
+             item->spec = NULL;
+             item->mask = NULL;
+           }
+         else
+           {
+             clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
+                               &gu6->inner_src_addr.addr, 16);
+             clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
+                               &gu6->inner_src_addr.mask, 16);
+             clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
+                               &gu6->inner_dst_addr.addr, 16);
+             clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
+                               &gu6->inner_dst_addr.mask, 16);
+             item->spec = inner_ip6;
+             item->mask = inner_ip6 + 1;
+           }
+       }
+    }
+  else if (f->type == VNET_FLOW_TYPE_IP6_GTPC)
+    {
+      vnet_flow_ip6_gtpc_t *gc = &f->ip6_gtpc;
+      gtp[0].teid = clib_host_to_net_u32 (gc->teid);
+      gtp[1].teid = ~0;
+
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_GTPC;
+      item->spec = gtp;
+      item->mask = gtp + 1;
+    }
+  else if (f->type == VNET_FLOW_TYPE_IP6_GTPU)
+    {
+      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
+      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
+      gtp[1].teid = ~0;
+
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
+      item->spec = gtp;
+      item->mask = gtp + 1;
+    }
+  else if ((f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4) ||
+          (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6))
+    {
+      vnet_flow_ip6_gtpu_t *gu = &f->ip6_gtpu;
+      gtp[0].teid = clib_host_to_net_u32 (gu->teid);
+      gtp[1].teid = ~0;
+
+      vec_add2 (items, item, 1);
+      item->type = RTE_FLOW_ITEM_TYPE_GTPU;
+      item->spec = gtp;
+      item->mask = gtp + 1;
+
+      /* inner IP4/IP6 header */
+      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
+       {
+         vec_add2 (items, item, 1);
+         item->type = RTE_FLOW_ITEM_TYPE_IPV4;
+
+         vnet_flow_ip6_gtpu_ip4_t *gu4 = &f->ip6_gtpu_ip4;
+
+         if (!gu4->inner_src_addr.mask.as_u32 &&
+             !gu4->inner_dst_addr.mask.as_u32)
+           {
+             item->spec = NULL;
+             item->mask = NULL;
+           }
+         else
+           {
+             inner_ip4[0].hdr.src_addr = gu4->inner_src_addr.addr.as_u32;
+             inner_ip4[1].hdr.src_addr = gu4->inner_src_addr.mask.as_u32;
+             inner_ip4[0].hdr.dst_addr = gu4->inner_dst_addr.addr.as_u32;
+             inner_ip4[1].hdr.dst_addr = gu4->inner_dst_addr.mask.as_u32;
+             item->spec = inner_ip4;
+             item->mask = inner_ip4 + 1;
+           }
+       }
+
+      if (f->type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
+       {
+         ip6_address_t zero_addr;
+         vnet_flow_ip6_gtpu_ip6_t *gu6 = &f->ip6_gtpu_ip6;
+
+         clib_memset (&zero_addr, 0, sizeof (ip6_address_t));
+
+         vec_add2 (items, item, 1);
+         item->type = RTE_FLOW_ITEM_TYPE_IPV6;
+
+         if (!clib_memcmp (&gu6->inner_src_addr.mask, &zero_addr, 16) &&
+             !clib_memcmp (&gu6->inner_dst_addr.mask, &zero_addr, 16))
+           {
+             item->spec = NULL;
+             item->mask = NULL;
+           }
+         else
+           {
+             clib_memcpy_fast (inner_ip6[0].hdr.src_addr,
+                               &gu6->inner_src_addr.addr, 16);
+             clib_memcpy_fast (inner_ip6[1].hdr.src_addr,
+                               &gu6->inner_src_addr.mask, 16);
+             clib_memcpy_fast (inner_ip6[0].hdr.dst_addr,
+                               &gu6->inner_dst_addr.addr, 16);
+             clib_memcpy_fast (inner_ip6[1].hdr.dst_addr,
+                               &gu6->inner_dst_addr.mask, 16);
+             item->spec = inner_ip6;
+             item->mask = inner_ip6 + 1;
+           }
+
+       }
+    }
 
   vec_add2 (items, item, 1);
   item->type = RTE_FLOW_ITEM_TYPE_END;
@@ -359,6 +583,14 @@ dpdk_flow_ops_fn (vnet_main_t * vnm, vnet_flow_dev_op_t op, u32 dev_instance,
     case VNET_FLOW_TYPE_IP4_N_TUPLE:
     case VNET_FLOW_TYPE_IP6_N_TUPLE:
     case VNET_FLOW_TYPE_IP4_VXLAN:
+    case VNET_FLOW_TYPE_IP4_GTPC:
+    case VNET_FLOW_TYPE_IP4_GTPU:
+    case VNET_FLOW_TYPE_IP4_GTPU_IP4:
+    case VNET_FLOW_TYPE_IP4_GTPU_IP6:
+    case VNET_FLOW_TYPE_IP6_GTPC:
+    case VNET_FLOW_TYPE_IP6_GTPU:
+    case VNET_FLOW_TYPE_IP6_GTPU_IP4:
+    case VNET_FLOW_TYPE_IP6_GTPU_IP6:
       if ((rv = dpdk_flow_add (xd, flow, fe)))
        goto done;
       break;
index de09d34..c0aa911 100644 (file)
   _(IP4_N_TUPLE, ip4_n_tuple, "ipv4-n-tuple") \
   _(IP6_N_TUPLE, ip6_n_tuple, "ipv6-n-tuple") \
   _(IP4_VXLAN, ip4_vxlan, "ipv4-vxlan") \
-  _(IP6_VXLAN, ip6_vxlan, "ipv6-vxlan")
+  _(IP6_VXLAN, ip6_vxlan, "ipv6-vxlan") \
+  _(IP4_GTPC, ip4_gtpc, "ipv4-gtpc") \
+  _(IP4_GTPU, ip4_gtpu, "ipv4-gtpu") \
+  _(IP4_GTPU_IP4, ip4_gtpu_ip4, "ipv4-gtpu-ipv4") \
+  _(IP4_GTPU_IP6, ip4_gtpu_ip6, "ipv4-gtpu-ipv6") \
+  _(IP6_GTPC, ip6_gtpc, "ipv6-gtpc") \
+  _(IP6_GTPU, ip6_gtpu, "ipv6-gtpu") \
+  _(IP6_GTPU_IP4, ip6_gtpu_ip4, "ipv6-gtpu-ipv4") \
+  _(IP6_GTPU_IP6, ip6_gtpu_ip6, "ipv6-gtpu-ipv6")
 
 #define foreach_flow_entry_ip4_n_tuple \
   _fe(ip4_address_and_mask_t, src_addr) \
   _fe(u16, dst_port) \
   _fe(u16, vni)
 
+#define foreach_flow_entry_ip4_gtpc \
+  foreach_flow_entry_ip4_n_tuple \
+  _fe(u32, teid)
+
+#define foreach_flow_entry_ip4_gtpu \
+  foreach_flow_entry_ip4_n_tuple \
+  _fe(u32, teid)
+
+#define foreach_flow_entry_ip4_gtpu_ip4 \
+  foreach_flow_entry_ip4_gtpu \
+  _fe(ip4_address_and_mask_t, inner_src_addr) \
+  _fe(ip4_address_and_mask_t, inner_dst_addr)
+
+#define foreach_flow_entry_ip4_gtpu_ip6 \
+  foreach_flow_entry_ip4_gtpu \
+  _fe(ip6_address_and_mask_t, inner_src_addr) \
+  _fe(ip6_address_and_mask_t, inner_dst_addr)
+
+#define foreach_flow_entry_ip6_gtpc \
+  foreach_flow_entry_ip6_n_tuple \
+  _fe(u32, teid)
+
+#define foreach_flow_entry_ip6_gtpu \
+  foreach_flow_entry_ip6_n_tuple \
+  _fe(u32, teid)
+
+#define foreach_flow_entry_ip6_gtpu_ip4 \
+  foreach_flow_entry_ip6_gtpu \
+  _fe(ip4_address_and_mask_t, inner_src_addr) \
+  _fe(ip4_address_and_mask_t, inner_dst_addr)
+
+#define foreach_flow_entry_ip6_gtpu_ip6 \
+  foreach_flow_entry_ip6_gtpu \
+  _fe(ip6_address_and_mask_t, inner_src_addr) \
+  _fe(ip6_address_and_mask_t, inner_dst_addr)
+
 #define foreach_flow_action \
   _(0, COUNT, "count") \
   _(1, MARK, "mark") \
index 5481aa3..1c09b2b 100644 (file)
@@ -274,9 +274,26 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
     FLOW_ENABLE,
     FLOW_DISABLE
   } action = FLOW_UNKNOWN_ACTION;
-  u32 hw_if_index = ~0, tmp, flow_index = ~0;
+  u32 hw_if_index = ~0, flow_index = ~0;
   int rv;
-  u8 prot;
+  u32 prot = 0, teid = 0;
+  vnet_flow_type_t type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+  bool is_gtpc_set = false;
+  bool is_gtpu_set = false;
+  vnet_flow_type_t outer_type = VNET_FLOW_TYPE_UNKNOWN;
+  vnet_flow_type_t inner_type = VNET_FLOW_TYPE_UNKNOWN;
+  bool outer_ip4_set = false, inner_ip4_set = false;
+  bool outer_ip6_set = false, inner_ip6_set = false;
+  ip4_address_and_mask_t ip4s = { };
+  ip4_address_and_mask_t ip4d = { };
+  ip4_address_and_mask_t inner_ip4s = { };
+  ip4_address_and_mask_t inner_ip4d = { };
+  ip6_address_and_mask_t ip6s = { };
+  ip6_address_and_mask_t ip6d = { };
+  ip6_address_and_mask_t inner_ip6s = { };
+  ip6_address_and_mask_t inner_ip6d = { };
+  ip_port_and_mask_t sport = { };
+  ip_port_and_mask_t dport = { };
 
   clib_memset (&flow, 0, sizeof (vnet_flow_t));
   flow.index = ~0;
@@ -296,23 +313,44 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
       else if (unformat (line_input, "disable"))
        action = FLOW_DISABLE;
       else if (unformat (line_input, "src-ip %U",
-                        unformat_ip4_address_and_mask,
-                        &flow.ip4_n_tuple.src_addr))
-       ;
+                        unformat_ip4_address_and_mask, &ip4s))
+       outer_ip4_set = true;
       else if (unformat (line_input, "dst-ip %U",
-                        unformat_ip4_address_and_mask,
-                        &flow.ip4_n_tuple.dst_addr))
-       ;
+                        unformat_ip4_address_and_mask, &ip4d))
+       outer_ip4_set = true;
+      else if (unformat (line_input, "ip6-src-ip %U",
+                        unformat_ip6_address_and_mask, &ip6s))
+       outer_ip6_set = true;
+      else if (unformat (line_input, "ip6-dst-ip %U",
+                        unformat_ip6_address_and_mask, &ip6d))
+       outer_ip6_set = true;
+      else if (unformat (line_input, "inner-src-ip %U",
+                        unformat_ip4_address_and_mask, &inner_ip4s))
+       inner_ip4_set = true;
+      else if (unformat (line_input, "inner-dst-ip %U",
+                        unformat_ip4_address_and_mask, &inner_ip4d))
+       inner_ip4_set = true;
+      else if (unformat (line_input, "inner-ip6-src-ip %U",
+                        unformat_ip6_address_and_mask, &inner_ip6s))
+       inner_ip6_set = true;
+      else if (unformat (line_input, "inner-ip6-dst-ip %U",
+                        unformat_ip6_address_and_mask, &inner_ip6d))
+       inner_ip6_set = true;
+
       else if (unformat (line_input, "src-port %U", unformat_ip_port_and_mask,
-                        &flow.ip4_n_tuple.src_port))
+                        &sport))
        ;
       else if (unformat (line_input, "dst-port %U", unformat_ip_port_and_mask,
-                        &flow.ip4_n_tuple.dst_port))
+                        &dport))
        ;
       else if (unformat (line_input, "proto %U", unformat_ip_protocol, &prot))
-       flow.ip4_n_tuple.protocol = prot;
-      else if (unformat (line_input, "proto %u", &tmp))
-       flow.ip4_n_tuple.protocol = tmp;
+       ;
+      else if (unformat (line_input, "proto %u", &prot))
+       ;
+      else if (unformat (line_input, "gtpc teid %u", &teid))
+       is_gtpc_set = true;
+      else if (unformat (line_input, "gtpu teid %u", &teid))
+       is_gtpu_set = true;
       else if (unformat (line_input, "index %u", &flow_index))
        ;
       else if (unformat (line_input, "next-node %U", unformat_vlib_node, vm,
@@ -348,13 +386,167 @@ test_flow (vlib_main_t * vm, unformat_input_t * input,
   switch (action)
     {
     case FLOW_ADD:
-      if (flow.ip4_n_tuple.protocol == (ip_protocol_t) ~ 0)
-       return clib_error_return (0, "Please specify ip protocol");
-
       if (flow.actions == 0)
        return clib_error_return (0, "Please specify at least one action");
-      flow.type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+
+      /* Adjust the flow type */
+      if (outer_ip4_set == true)
+       outer_type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+      else if (outer_ip6_set == true)
+       outer_type = VNET_FLOW_TYPE_IP6_N_TUPLE;
+      if (inner_ip4_set == true)
+       inner_type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+      else if (inner_ip6_set == true)
+       inner_type = VNET_FLOW_TYPE_IP6_N_TUPLE;
+
+      if (outer_type == VNET_FLOW_TYPE_UNKNOWN)
+       return clib_error_return (0, "Please specify a supported flow type");
+
+      if (outer_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+       {
+         type = VNET_FLOW_TYPE_IP4_N_TUPLE;
+
+         if (inner_type == VNET_FLOW_TYPE_UNKNOWN)
+           {
+             if (is_gtpc_set)
+               type = VNET_FLOW_TYPE_IP4_GTPC;
+             else if (is_gtpu_set)
+               type = VNET_FLOW_TYPE_IP4_GTPU;
+           }
+         else if (inner_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+           {
+             if (is_gtpu_set)
+               type = VNET_FLOW_TYPE_IP4_GTPU_IP4;
+           }
+         else if (inner_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+           {
+             if (is_gtpu_set)
+               type = VNET_FLOW_TYPE_IP4_GTPU_IP6;
+           }
+       }
+      else if (outer_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+       {
+         type = VNET_FLOW_TYPE_IP6_N_TUPLE;
+
+         if (inner_type == VNET_FLOW_TYPE_UNKNOWN)
+           {
+             if (is_gtpc_set)
+               type = VNET_FLOW_TYPE_IP6_GTPC;
+             else if (is_gtpu_set)
+               type = VNET_FLOW_TYPE_IP6_GTPU;
+           }
+         else if (inner_type == VNET_FLOW_TYPE_IP4_N_TUPLE)
+           {
+             if (is_gtpu_set)
+               type = VNET_FLOW_TYPE_IP6_GTPU_IP4;
+           }
+         else if (inner_type == VNET_FLOW_TYPE_IP6_N_TUPLE)
+           {
+             if (is_gtpu_set)
+               type = VNET_FLOW_TYPE_IP6_GTPU_IP6;
+           }
+       }
+
+      //assign specific field values per flow type
+      switch (type)
+       {
+       case VNET_FLOW_TYPE_IP4_N_TUPLE:
+       case VNET_FLOW_TYPE_IP4_GTPC:
+       case VNET_FLOW_TYPE_IP4_GTPU:
+       case VNET_FLOW_TYPE_IP4_GTPU_IP4:
+       case VNET_FLOW_TYPE_IP4_GTPU_IP6:
+         clib_memcpy (&flow.ip4_n_tuple.src_addr, &ip4s,
+                      sizeof (ip4_address_and_mask_t));
+         clib_memcpy (&flow.ip4_n_tuple.dst_addr, &ip4d,
+                      sizeof (ip4_address_and_mask_t));
+         clib_memcpy (&flow.ip4_n_tuple.src_port, &sport,
+                      sizeof (ip_port_and_mask_t));
+         clib_memcpy (&flow.ip4_n_tuple.dst_port, &dport,
+                      sizeof (ip_port_and_mask_t));
+         flow.ip4_n_tuple.protocol = prot;
+
+         if (type == VNET_FLOW_TYPE_IP4_GTPC)
+           flow.ip4_gtpc.teid = teid;
+         else if (type == VNET_FLOW_TYPE_IP4_GTPU)
+           flow.ip4_gtpu.teid = teid;
+         else if (type == VNET_FLOW_TYPE_IP4_GTPU_IP4)
+           {
+             flow.ip4_gtpu_ip4.teid = teid;
+             clib_memcpy (&flow.ip4_gtpu_ip4.inner_src_addr, &inner_ip4s,
+                          sizeof (ip4_address_and_mask_t));
+             clib_memcpy (&flow.ip4_gtpu_ip4.inner_dst_addr, &inner_ip4d,
+                          sizeof (ip4_address_and_mask_t));
+           }
+         else if (type == VNET_FLOW_TYPE_IP4_GTPU_IP6)
+           {
+             flow.ip4_gtpu_ip6.teid = teid;
+             clib_memcpy (&flow.ip4_gtpu_ip6.inner_src_addr, &inner_ip6s,
+                          sizeof (ip6_address_and_mask_t));
+             clib_memcpy (&flow.ip4_gtpu_ip6.inner_dst_addr, &inner_ip6d,
+                          sizeof (ip6_address_and_mask_t));
+           }
+
+         if (flow.ip4_n_tuple.protocol == (ip_protocol_t) ~ 0)
+           return clib_error_return (0, "Please specify ip protocol");
+         if ((type != VNET_FLOW_TYPE_IP4_N_TUPLE) &&
+             (flow.ip4_n_tuple.protocol != IP_PROTOCOL_UDP))
+           return clib_error_return (0,
+                                     "For GTP related flow, ip protocol must be UDP");
+         break;
+
+       case VNET_FLOW_TYPE_IP6_N_TUPLE:
+       case VNET_FLOW_TYPE_IP6_GTPC:
+       case VNET_FLOW_TYPE_IP6_GTPU:
+       case VNET_FLOW_TYPE_IP6_GTPU_IP4:
+       case VNET_FLOW_TYPE_IP6_GTPU_IP6:
+         clib_memcpy (&flow.ip6_n_tuple.src_addr, &ip6s,
+                      sizeof (ip6_address_and_mask_t));
+         clib_memcpy (&flow.ip6_n_tuple.dst_addr, &ip6d,
+                      sizeof (ip6_address_and_mask_t));
+         clib_memcpy (&flow.ip6_n_tuple.src_port, &sport,
+                      sizeof (ip_port_and_mask_t));
+         clib_memcpy (&flow.ip6_n_tuple.dst_port, &dport,
+                      sizeof (ip_port_and_mask_t));
+         flow.ip6_n_tuple.protocol = prot;
+
+         if (type == VNET_FLOW_TYPE_IP6_GTPC)
+           flow.ip6_gtpc.teid = teid;
+         else if (type == VNET_FLOW_TYPE_IP6_GTPU)
+           flow.ip6_gtpu.teid = teid;
+         else if (type == VNET_FLOW_TYPE_IP6_GTPU_IP4)
+           {
+             flow.ip6_gtpu_ip4.teid = teid;
+             clib_memcpy (&flow.ip6_gtpu_ip4.inner_src_addr, &inner_ip4s,
+                          sizeof (ip4_address_and_mask_t));
+             clib_memcpy (&flow.ip6_gtpu_ip4.inner_dst_addr, &inner_ip4d,
+                          sizeof (ip4_address_and_mask_t));
+           }
+         else if (type == VNET_FLOW_TYPE_IP6_GTPU_IP6)
+           {
+             flow.ip6_gtpu_ip6.teid = teid;
+             clib_memcpy (&flow.ip6_gtpu_ip6.inner_src_addr, &inner_ip6s,
+                          sizeof (ip6_address_and_mask_t));
+             clib_memcpy (&flow.ip6_gtpu_ip6.inner_dst_addr, &inner_ip6d,
+                          sizeof (ip6_address_and_mask_t));
+           }
+
+         if (flow.ip6_n_tuple.protocol == (ip_protocol_t) ~ 0)
+           return clib_error_return (0, "Please specify ip protocol");
+         if ((type != VNET_FLOW_TYPE_IP6_N_TUPLE) &&
+             (flow.ip6_n_tuple.protocol != IP_PROTOCOL_UDP))
+           return clib_error_return (0,
+                                     "For GTP related flow, ip protocol must be UDP");
+         break;
+
+       default:
+         break;
+       }
+
+      flow.type = type;
       rv = vnet_flow_add (vnm, &flow, &flow_index);
+      if (!rv)
+       vlib_cli_output (vm, "flow %u added", flow_index);
+
       break;
     case FLOW_DEL:
       rv = vnet_flow_del (vnm, flow_index);