vppinfra: Improve code portability
[vpp.git] / src / vnet / flow / flow_api.c
index b24c851..bfe97ec 100644 (file)
 #include <vnet/udp/udp_local.h>
 #include <vnet/tunnel/tunnel_types_api.h>
 #include <vnet/ip/ip_types_api.h>
-#include <vnet/vnet_msg_enum.h>
 
-#define vl_typedefs            /* define message structures */
-#include <vnet/vnet_all_api_h.h>
-#undef vl_typedefs
-
-#define vl_endianfun           /* define message structures */
-#include <vnet/vnet_all_api_h.h>
-#undef vl_endianfun
-
-/* instantiate all the print functions we know about */
-#define vl_print(handle, ...) vlib_cli_output (handle, __VA_ARGS__)
-#define vl_printfun
-#include <vnet/vnet_all_api_h.h>
-#undef vl_printfun
+#include <vnet/format_fns.h>
+#include <vnet/flow/flow.api_enum.h>
+#include <vnet/flow/flow.api_types.h>
 
+#define REPLY_MSG_ID_BASE flow_main.msg_id_base
 #include <vlibapi/api_helper_macros.h>
 
-#define foreach_vpe_api_msg         \
-_(FLOW_ADD, flow_add)               \
-_(FLOW_DEL, flow_del)               \
-_(FLOW_ENABLE, flow_enable)         \
-_(FLOW_DISABLE, flow_disable)
-
 static inline void
 ipv4_addr_and_mask_convert (vl_api_ip4_address_and_mask_t * vl_api_addr,
                            ip4_address_and_mask_t * vnet_addr)
@@ -82,6 +66,24 @@ port_and_mask_convert (vl_api_ip_port_and_mask_t * vl_api_port,
   vnet_port->mask = ntohs (vl_api_port->mask);
 }
 
+static inline void
+ipv4_flow_convert (vl_api_flow_ip4_t *vl_api_flow, vnet_flow_ip4_t *f)
+{
+  ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+  ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+
+  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+}
+
+static void
+ipv6_flow_convert (vl_api_flow_ip6_t *vl_api_flow, vnet_flow_ip6_t *f)
+{
+  ipv6_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+  ipv6_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+
+  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+}
+
 static inline void
 ipv4_n_tuple_flow_convert (vl_api_flow_ip4_n_tuple_t * vl_api_flow,
                           vnet_flow_ip4_n_tuple_t * f)
@@ -157,6 +159,34 @@ ipv4_ipsec_ah_flow_convert (vl_api_flow_ip4_ipsec_ah_t * vl_api_flow,
   f->spi = ntohl (vl_api_flow->spi);
 }
 
+static inline void
+ipv4_vxlan_flow_convert (vl_api_flow_ip4_vxlan_t *vl_api_flow,
+                        vnet_flow_ip4_vxlan_t *f)
+{
+  ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+  ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+
+  port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
+  port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
+
+  f->vni = ntohl (vl_api_flow->vni);
+}
+
+static inline void
+ipv6_vxlan_flow_convert (vl_api_flow_ip6_vxlan_t *vl_api_flow,
+                        vnet_flow_ip6_vxlan_t *f)
+{
+  ipv6_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+  ipv6_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+  protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+
+  port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
+  port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
+
+  f->vni = ntohl (vl_api_flow->vni);
+}
+
 static inline void
 ipv4_gtpu_flow_convert (vl_api_flow_ip4_gtpu_t * vl_api_flow,
                        vnet_flow_ip4_gtpu_t * f)
@@ -185,6 +215,16 @@ ipv4_gtpc_flow_convert (vl_api_flow_ip4_gtpc_t * vl_api_flow,
   f->teid = ntohl (vl_api_flow->teid);
 }
 
+static inline void
+generic_flow_convert (vl_api_flow_generic_t *vl_api_flow,
+                     vnet_flow_generic_t *f)
+{
+  clib_memcpy (f->pattern.spec, vl_api_flow->pattern.spec,
+              sizeof (vl_api_flow->pattern.spec));
+  clib_memcpy (f->pattern.mask, vl_api_flow->pattern.mask,
+              sizeof (vl_api_flow->pattern.mask));
+}
+
 static void
 vl_api_flow_add_t_handler (vl_api_flow_add_t * mp)
 {
@@ -207,6 +247,12 @@ vl_api_flow_add_t_handler (vl_api_flow_add_t * mp)
 
   switch (flow.type)
     {
+    case VNET_FLOW_TYPE_IP4:
+      ipv4_flow_convert (&f->flow.ip4, &flow.ip4);
+      break;
+    case VNET_FLOW_TYPE_IP6:
+      ipv6_flow_convert (&f->flow.ip6, &flow.ip6);
+      break;
     case VNET_FLOW_TYPE_IP4_N_TUPLE:
       ipv4_n_tuple_flow_convert (&f->flow.ip4_n_tuple, &flow.ip4_n_tuple);
       break;
@@ -232,6 +278,12 @@ vl_api_flow_add_t_handler (vl_api_flow_add_t * mp)
     case VNET_FLOW_TYPE_IP4_IPSEC_AH:
       ipv4_ipsec_ah_flow_convert (&f->flow.ip4_ipsec_ah, &flow.ip4_ipsec_ah);
       break;
+    case VNET_FLOW_TYPE_IP4_VXLAN:
+      ipv4_vxlan_flow_convert (&f->flow.ip4_vxlan, &flow.ip4_vxlan);
+      break;
+    case VNET_FLOW_TYPE_IP6_VXLAN:
+      ipv6_vxlan_flow_convert (&f->flow.ip6_vxlan, &flow.ip6_vxlan);
+      break;
     case VNET_FLOW_TYPE_IP4_GTPU:
       ipv4_gtpu_flow_convert (&f->flow.ip4_gtpu, &flow.ip4_gtpu);
       break;
@@ -247,12 +299,95 @@ vl_api_flow_add_t_handler (vl_api_flow_add_t * mp)
   rv = vnet_flow_add (vnm, &flow, &flow_index);
 
 out:
-  /* *INDENT-OFF* */
   REPLY_MACRO2(VL_API_FLOW_ADD_REPLY,
   ({
     rmp->flow_index = ntohl (flow_index);
   }));
-  /* *INDENT-ON* */
+}
+
+static void
+vl_api_flow_add_v2_t_handler (vl_api_flow_add_v2_t *mp)
+{
+  vl_api_flow_add_v2_reply_t *rmp;
+  int rv = 0;
+  vnet_flow_t flow;
+  u32 flow_index = ~0;
+  vl_api_flow_rule_v2_t *f = &mp->flow;
+
+  vnet_main_t *vnm = vnet_get_main ();
+
+  flow.type = ntohl (f->type);
+  flow.actions = ntohl (f->actions);
+  flow.mark_flow_id = ntohl (f->mark_flow_id);
+  flow.redirect_node_index = ntohl (f->redirect_node_index);
+  flow.redirect_device_input_next_index =
+    ntohl (f->redirect_device_input_next_index);
+  flow.redirect_queue = ntohl (f->redirect_queue);
+  flow.buffer_advance = ntohl (f->buffer_advance);
+  flow.queue_index = ntohl (f->queue_index);
+  flow.queue_num = ntohl (f->queue_num);
+  flow.rss_types = clib_net_to_host_u64 (f->rss_types);
+  flow.rss_fun = ntohl (f->rss_fun);
+
+  switch (flow.type)
+    {
+    case VNET_FLOW_TYPE_IP4:
+      ipv4_flow_convert (&f->flow.ip4, &flow.ip4);
+      break;
+    case VNET_FLOW_TYPE_IP6:
+      ipv6_flow_convert (&f->flow.ip6, &flow.ip6);
+      break;
+    case VNET_FLOW_TYPE_IP4_N_TUPLE:
+      ipv4_n_tuple_flow_convert (&f->flow.ip4_n_tuple, &flow.ip4_n_tuple);
+      break;
+    case VNET_FLOW_TYPE_IP6_N_TUPLE:
+      ipv6_n_tuple_flow_convert (&f->flow.ip6_n_tuple, &flow.ip6_n_tuple);
+      break;
+    case VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED:
+      ipv4_n_tuple_tagged_flow_convert (&f->flow.ip4_n_tuple_tagged,
+                                       &flow.ip4_n_tuple_tagged);
+      break;
+    case VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED:
+      ipv6_n_tuple_tagged_flow_convert (&f->flow.ip6_n_tuple_tagged,
+                                       &flow.ip6_n_tuple_tagged);
+      break;
+    case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
+      ipv4_l2tpv3oip_flow_convert (&f->flow.ip4_l2tpv3oip,
+                                  &flow.ip4_l2tpv3oip);
+      break;
+    case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
+      ipv4_ipsec_esp_flow_convert (&f->flow.ip4_ipsec_esp,
+                                  &flow.ip4_ipsec_esp);
+      break;
+    case VNET_FLOW_TYPE_IP4_IPSEC_AH:
+      ipv4_ipsec_ah_flow_convert (&f->flow.ip4_ipsec_ah, &flow.ip4_ipsec_ah);
+      break;
+    case VNET_FLOW_TYPE_IP4_VXLAN:
+      ipv4_vxlan_flow_convert (&f->flow.ip4_vxlan, &flow.ip4_vxlan);
+      break;
+    case VNET_FLOW_TYPE_IP6_VXLAN:
+      ipv6_vxlan_flow_convert (&f->flow.ip6_vxlan, &flow.ip6_vxlan);
+      break;
+    case VNET_FLOW_TYPE_IP4_GTPU:
+      ipv4_gtpu_flow_convert (&f->flow.ip4_gtpu, &flow.ip4_gtpu);
+      break;
+    case VNET_FLOW_TYPE_IP4_GTPC:
+      ipv4_gtpc_flow_convert (&f->flow.ip4_gtpc, &flow.ip4_gtpc);
+      break;
+    case VNET_FLOW_TYPE_GENERIC:
+      generic_flow_convert (&f->flow.generic, &flow.generic);
+      break;
+    default:
+      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
+      goto out;
+      break;
+    }
+
+  rv = vnet_flow_add (vnm, &flow, &flow_index);
+
+out:
+  REPLY_MACRO2 (VL_API_FLOW_ADD_V2_REPLY,
+               ({ rmp->flow_index = ntohl (flow_index); }));
 }
 
 static void
@@ -293,46 +428,11 @@ vl_api_flow_disable_t_handler (vl_api_flow_disable_t * mp)
   REPLY_MACRO (VL_API_FLOW_DISABLE_REPLY);
 }
 
-#define vl_msg_name_crc_list
-#include <vnet/flow/flow.api.h>
-#undef vl_msg_name_crc_list
-
-/*
- * flow_api_hookup
- * Add vpe's API message handlers to the table.
- * vlib has already mapped shared memory and
- * added the client registration handlers.
- * See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
- */
-
-
-static void
-setup_message_id_table (api_main_t * am)
-{
-#define _(id,n,crc) vl_msg_api_add_msg_name_crc (am, #n "_" #crc, id);
-  foreach_vl_msg_name_crc_flow;
-#undef _
-}
-
+#include <vnet/flow/flow.api.c>
/* Register the flow API message handlers (generated flow.api.c, included
 * just above) and record the allocated message-id base in flow_main.
 * Returns 0 (no error); @vm is unused but required by the init signature. */
static clib_error_t *
hw_flow_api_hookup (vlib_main_t * vm)
{
  flow_main.msg_id_base = setup_message_id_table ();
  return 0;
}