*------------------------------------------------------------------
* flow_api.c - flow api
*
- * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Copyright (c) 2020 Intel and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
*------------------------------------------------------------------
*/
+#include <stddef.h>
+
#include <vnet/vnet.h>
#include <vlibmemory/api.h>
-
#include <vnet/interface.h>
#include <vnet/api_errno.h>
-
+#include <vnet/flow/flow.h>
#include <vnet/fib/fib_table.h>
-#include <vnet/flow/flow_report.h>
-#include <vnet/flow/flow_report_classify.h>
-
+#include <vnet/tunnel/tunnel_types_api.h>
+#include <vnet/ip/ip_types_api.h>
#include <vnet/vnet_msg_enum.h>
#define vl_typedefs /* define message structures */
#include <vlibapi/api_helper_macros.h>
-#define foreach_vpe_api_msg \
-_(SET_IPFIX_EXPORTER, set_ipfix_exporter) \
-_(IPFIX_EXPORTER_DUMP, ipfix_exporter_dump) \
-_(SET_IPFIX_CLASSIFY_STREAM, set_ipfix_classify_stream) \
-_(IPFIX_CLASSIFY_STREAM_DUMP, ipfix_classify_stream_dump) \
-_(IPFIX_CLASSIFY_TABLE_ADD_DEL, ipfix_classify_table_add_del) \
-_(IPFIX_CLASSIFY_TABLE_DUMP, ipfix_classify_table_dump)
+/* Flow API messages and their handler suffixes; expanded in the
+ * hookup function below via vl_msg_api_set_handlers(). */
+#define foreach_vpe_api_msg \
+_(FLOW_ADD, flow_add) \
+_(FLOW_DEL, flow_del) \
+_(FLOW_ENABLE, flow_enable) \
+_(FLOW_DISABLE, flow_disable)
-static void
-vl_api_set_ipfix_exporter_t_handler (vl_api_set_ipfix_exporter_t * mp)
+/* Copy an API ip4 address+mask pair into its vnet counterpart.
+ * NOTE(review): assumes vl_api_ip4_address_and_mask_t and
+ * ip4_address_and_mask_t have identical size and layout -- confirm
+ * against flow.api / flow.h. Addresses are byte arrays, so no byte
+ * order conversion is needed. */
+static inline void
+ipv4_addr_and_mask_convert (vl_api_ip4_address_and_mask_t * vl_api_addr,
+ ip4_address_and_mask_t * vnet_addr)
{
- vlib_main_t *vm = vlib_get_main ();
- flow_report_main_t *frm = &flow_report_main;
- vl_api_set_ipfix_exporter_reply_t *rmp;
- ip4_address_t collector, src;
- u16 collector_port = UDP_DST_PORT_ipfix;
- u32 path_mtu;
- u32 template_interval;
- u8 udp_checksum;
- u32 fib_id;
- u32 fib_index = ~0;
- int rv = 0;
+ clib_memcpy (vnet_addr, vl_api_addr, sizeof (*vnet_addr));
+}
- memcpy (collector.data, mp->collector_address, sizeof (collector.data));
- collector_port = ntohs (mp->collector_port);
- if (collector_port == (u16) ~ 0)
- collector_port = UDP_DST_PORT_ipfix;
- memcpy (src.data, mp->src_address, sizeof (src.data));
- fib_id = ntohl (mp->vrf_id);
+/* Copy an API ip6 address+mask pair into its vnet counterpart.
+ * NOTE(review): assumes identical layout of the two struct types --
+ * confirm against flow.api / flow.h. */
+static inline void
+ipv6_addr_and_mask_convert (vl_api_ip6_address_and_mask_t * vl_api_addr,
+ ip6_address_and_mask_t * vnet_addr)
+{
+ clib_memcpy (vnet_addr, vl_api_addr, sizeof (*vnet_addr));
+}
- ip4_main_t *im = &ip4_main;
- if (fib_id == ~0)
- {
- fib_index = ~0;
- }
- else
- {
- uword *p = hash_get (im->fib_index_by_table_id, fib_id);
- if (!p)
- {
- rv = VNET_API_ERROR_NO_SUCH_FIB;
- goto out;
- }
- fib_index = p[0];
- }
+/* Convert an API protocol+mask into the vnet representation.
+ * Both fields are single bytes, so no byte order conversion applies. */
+static inline void
+protocol_and_mask_convert (vl_api_ip_prot_and_mask_t * vl_api_protocol,
+ ip_prot_and_mask_t * vnet_protocol)
+{
+ vnet_protocol->prot = (ip_protocol_t) vl_api_protocol->prot;
+ vnet_protocol->mask = vl_api_protocol->mask;
+}
- path_mtu = ntohl (mp->path_mtu);
- if (path_mtu == ~0)
- path_mtu = 512; // RFC 7011 section 10.3.3.
- template_interval = ntohl (mp->template_interval);
- if (template_interval == ~0)
- template_interval = 20;
- udp_checksum = mp->udp_checksum;
+/* Convert an API port+mask (network byte order on the wire) into the
+ * host byte order vnet representation. */
+static inline void
+port_and_mask_convert (vl_api_ip_port_and_mask_t * vl_api_port,
+ ip_port_and_mask_t * vnet_port)
+{
+ vnet_port->port = ntohs (vl_api_port->port);
+ vnet_port->mask = ntohs (vl_api_port->mask);
+}
- if (collector.as_u32 == 0)
- {
- rv = VNET_API_ERROR_INVALID_VALUE;
- goto out;
- }
+/* Convert an API ip4 5-tuple flow rule (addresses, protocol, ports)
+ * into the vnet representation. */
+static inline void
+ipv4_n_tuple_flow_convert (vl_api_flow_ip4_n_tuple_t * vl_api_flow,
+ vnet_flow_ip4_n_tuple_t * f)
+{
+ ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+ ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
- if (src.as_u32 == 0)
- {
- rv = VNET_API_ERROR_INVALID_VALUE;
- goto out;
- }
+ port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
+ port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
+}
- if (path_mtu > 1450 /* vpp does not support fragmentation */ )
- {
- rv = VNET_API_ERROR_INVALID_VALUE;
- goto out;
- }
+/* Convert an API ip6 5-tuple flow rule into the vnet representation.
+ * Declared static inline for consistency with the other converters. */
+static inline void
+ipv6_n_tuple_flow_convert (vl_api_flow_ip6_n_tuple_t * vl_api_flow,
+ vnet_flow_ip6_n_tuple_t * f)
+{
+ ipv6_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+ ipv6_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
- if (path_mtu < 68)
- {
- rv = VNET_API_ERROR_INVALID_VALUE;
- goto out;
- }
+ port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
+ port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
+}
- /* Reset report streams if we are reconfiguring IP addresses */
- if (frm->ipfix_collector.as_u32 != collector.as_u32 ||
- frm->src_address.as_u32 != src.as_u32 ||
- frm->collector_port != collector_port)
- vnet_flow_reports_reset (frm);
+/* Convert a tagged ip4 n-tuple by reusing the untagged converter.
+ * NOTE(review): the casts assume the tagged structs begin with the
+ * same members as their untagged counterparts -- confirm in flow.h.
+ * C forbids 'return expr;' in a void function (C11 6.8.6.4p1), so the
+ * helper is called as a plain statement. */
+static inline void
+ipv4_n_tuple_tagged_flow_convert (vl_api_flow_ip4_n_tuple_tagged_t *
+ vl_api_flow,
+ vnet_flow_ip4_n_tuple_tagged_t * f)
+{
+ ipv4_n_tuple_flow_convert ((vl_api_flow_ip4_n_tuple_t *) vl_api_flow,
+ (vnet_flow_ip4_n_tuple_t *) f);
+}
- frm->ipfix_collector.as_u32 = collector.as_u32;
- frm->collector_port = collector_port;
- frm->src_address.as_u32 = src.as_u32;
- frm->fib_index = fib_index;
- frm->path_mtu = path_mtu;
- frm->template_interval = template_interval;
- frm->udp_checksum = udp_checksum;
+/* Convert a tagged ip6 n-tuple by reusing the untagged converter.
+ * 'return expr;' is invalid in a void function (C11 6.8.6.4p1), so
+ * the helper is called as a plain statement. */
+static inline void
+ipv6_n_tuple_tagged_flow_convert (vl_api_flow_ip6_n_tuple_tagged_t *
+ vl_api_flow,
+ vnet_flow_ip6_n_tuple_tagged_t * f)
+{
+ ipv6_n_tuple_flow_convert ((vl_api_flow_ip6_n_tuple_t *) vl_api_flow,
+ (vnet_flow_ip6_n_tuple_t *) f);
+}
- /* Turn on the flow reporting process */
- vlib_process_signal_event (vm, flow_report_process_node.index, 1, 0);
+/* Convert an API ip4 L2TPv3-over-IP flow rule into the vnet
+ * representation; session_id arrives in network byte order. */
+static inline void
+ipv4_l2tpv3oip_flow_convert (vl_api_flow_ip4_l2tpv3oip_t * vl_api_flow,
+ vnet_flow_ip4_l2tpv3oip_t * f)
+{
+ ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+ ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
-out:
- REPLY_MACRO (VL_API_SET_IPFIX_EXPORTER_REPLY);
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+ f->session_id = ntohl (vl_api_flow->session_id);
}
-static void
-vl_api_ipfix_exporter_dump_t_handler (vl_api_ipfix_exporter_dump_t * mp)
+/* Convert an API ip4 IPsec ESP flow rule into the vnet
+ * representation; the SPI arrives in network byte order. */
+static inline void
+ipv4_ipsec_esp_flow_convert (vl_api_flow_ip4_ipsec_esp_t * vl_api_flow,
+ vnet_flow_ip4_ipsec_esp_t * f)
{
- flow_report_main_t *frm = &flow_report_main;
- unix_shared_memory_queue_t *q;
- vl_api_ipfix_exporter_details_t *rmp;
- ip4_main_t *im = &ip4_main;
- u32 vrf_id;
-
- q = vl_api_client_index_to_input_queue (mp->client_index);
- if (!q)
- return;
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- memset (rmp, 0, sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_IPFIX_EXPORTER_DETAILS);
- rmp->context = mp->context;
- memcpy (rmp->collector_address, frm->ipfix_collector.data,
- sizeof (frm->ipfix_collector.data));
- rmp->collector_port = htons (frm->collector_port);
- memcpy (rmp->src_address, frm->src_address.data,
- sizeof (frm->src_address.data));
- if (frm->fib_index == ~0)
- vrf_id = ~0;
- else
- vrf_id = im->fibs[frm->fib_index].ft_table_id;
- rmp->vrf_id = htonl (vrf_id);
- rmp->path_mtu = htonl (frm->path_mtu);
- rmp->template_interval = htonl (frm->template_interval);
- rmp->udp_checksum = (frm->udp_checksum != 0);
-
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+ ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+ f->spi = ntohl (vl_api_flow->spi);
}
-static void
- vl_api_set_ipfix_classify_stream_t_handler
- (vl_api_set_ipfix_classify_stream_t * mp)
+/* Convert an API ip4 IPsec AH flow rule into the vnet
+ * representation; the SPI arrives in network byte order. */
+static inline void
+ipv4_ipsec_ah_flow_convert (vl_api_flow_ip4_ipsec_ah_t * vl_api_flow,
+ vnet_flow_ip4_ipsec_ah_t * f)
{
- vl_api_set_ipfix_classify_stream_reply_t *rmp;
- flow_report_classify_main_t *fcm = &flow_report_classify_main;
- flow_report_main_t *frm = &flow_report_main;
- u32 domain_id = 0;
- u32 src_port = UDP_DST_PORT_ipfix;
- int rv = 0;
+ ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+ ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
- domain_id = ntohl (mp->domain_id);
- src_port = ntohs (mp->src_port);
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+ f->spi = ntohl (vl_api_flow->spi);
+}
- if (fcm->src_port != 0 &&
- (fcm->domain_id != domain_id || fcm->src_port != (u16) src_port))
- {
- int rv = vnet_stream_change (frm, fcm->domain_id, fcm->src_port,
- domain_id, (u16) src_port);
- ASSERT (rv == 0);
- }
+/* Convert an API ip4 GTP-U flow rule into the vnet representation;
+ * the tunnel endpoint id (teid) arrives in network byte order. */
+static inline void
+ipv4_gtpu_flow_convert (vl_api_flow_ip4_gtpu_t * vl_api_flow,
+ vnet_flow_ip4_gtpu_t * f)
+{
+ ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+ ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
- fcm->domain_id = domain_id;
- fcm->src_port = (u16) src_port;
+ port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
+ port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
- REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY);
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+ f->teid = ntohl (vl_api_flow->teid);
}
-static void
- vl_api_ipfix_classify_stream_dump_t_handler
- (vl_api_ipfix_classify_stream_dump_t * mp)
+/* Convert an API ip4 GTP-C flow rule into the vnet representation;
+ * the tunnel endpoint id (teid) arrives in network byte order. */
+static inline void
+ipv4_gtpc_flow_convert (vl_api_flow_ip4_gtpc_t * vl_api_flow,
+ vnet_flow_ip4_gtpc_t * f)
{
- flow_report_classify_main_t *fcm = &flow_report_classify_main;
- unix_shared_memory_queue_t *q;
- vl_api_ipfix_classify_stream_details_t *rmp;
-
- q = vl_api_client_index_to_input_queue (mp->client_index);
- if (!q)
- return;
-
- rmp = vl_msg_api_alloc (sizeof (*rmp));
- memset (rmp, 0, sizeof (*rmp));
- rmp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_STREAM_DETAILS);
- rmp->context = mp->context;
- rmp->domain_id = htonl (fcm->domain_id);
- rmp->src_port = htons (fcm->src_port);
-
- vl_msg_api_send_shmem (q, (u8 *) & rmp);
+ ipv4_addr_and_mask_convert (&vl_api_flow->src_addr, &f->src_addr);
+ ipv4_addr_and_mask_convert (&vl_api_flow->dst_addr, &f->dst_addr);
+
+ port_and_mask_convert (&vl_api_flow->src_port, &f->src_port);
+ port_and_mask_convert (&vl_api_flow->dst_port, &f->dst_port);
+
+ protocol_and_mask_convert (&vl_api_flow->protocol, &f->protocol);
+ f->teid = ntohl (vl_api_flow->teid);
}
+/* Handler for FLOW_ADD: convert the API flow rule into a vnet_flow_t
+ * and hand it to vnet_flow_add(); the reply carries the assigned
+ * flow index (or ~0 on failure) plus the return code. */
static void
- vl_api_ipfix_classify_table_add_del_t_handler
- (vl_api_ipfix_classify_table_add_del_t * mp)
+vl_api_flow_add_t_handler (vl_api_flow_add_t * mp)
{
- vl_api_ipfix_classify_table_add_del_reply_t *rmp;
- flow_report_classify_main_t *fcm = &flow_report_classify_main;
- flow_report_main_t *frm = &flow_report_main;
- vnet_flow_report_add_del_args_t args;
- ipfix_classify_table_t *table;
- int is_add;
- u32 classify_table_index;
- u8 ip_version;
- u8 transport_protocol;
+ vl_api_flow_add_reply_t *rmp;
 int rv = 0;
-
- classify_table_index = ntohl (mp->table_id);
- ip_version = mp->ip_version;
- transport_protocol = mp->transport_protocol;
- is_add = mp->is_add;
-
- if (fcm->src_port == 0)
+ vnet_flow_t flow;
+ u32 flow_index = ~0;
+ vl_api_flow_rule_t *f = &mp->flow;
+
+ vnet_main_t *vnm = vnet_get_main ();
+
+ /* Zero the whole flow first: fields not populated for the selected
+ * flow type would otherwise be read uninitialized downstream. */
+ clib_memset (&flow, 0, sizeof (flow));
+
+ flow.type = ntohl (f->type);
+ flow.actions = ntohl (f->actions);
+ flow.mark_flow_id = ntohl (f->mark_flow_id);
+ flow.redirect_node_index = ntohl (f->redirect_node_index);
+ flow.redirect_device_input_next_index =
+ ntohl (f->redirect_device_input_next_index);
+ flow.redirect_queue = ntohl (f->redirect_queue);
+ flow.buffer_advance = ntohl (f->buffer_advance);
+
+ switch (flow.type)
{
- /* call set_ipfix_classify_stream first */
- rv = VNET_API_ERROR_UNSPECIFIED;
+ case VNET_FLOW_TYPE_IP4_N_TUPLE:
+ ipv4_n_tuple_flow_convert (&f->flow.ip4_n_tuple, &flow.ip4_n_tuple);
+ break;
+ case VNET_FLOW_TYPE_IP6_N_TUPLE:
+ ipv6_n_tuple_flow_convert (&f->flow.ip6_n_tuple, &flow.ip6_n_tuple);
+ break;
+ case VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED:
+ ipv4_n_tuple_tagged_flow_convert (&f->flow.ip4_n_tuple_tagged,
+ &flow.ip4_n_tuple_tagged);
+ break;
+ case VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED:
+ ipv6_n_tuple_tagged_flow_convert (&f->flow.ip6_n_tuple_tagged,
+ &flow.ip6_n_tuple_tagged);
+ break;
+ case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
+ ipv4_l2tpv3oip_flow_convert (&f->flow.ip4_l2tpv3oip,
+ &flow.ip4_l2tpv3oip);
+ break;
+ case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
+ ipv4_ipsec_esp_flow_convert (&f->flow.ip4_ipsec_esp,
+ &flow.ip4_ipsec_esp);
+ break;
+ case VNET_FLOW_TYPE_IP4_IPSEC_AH:
+ ipv4_ipsec_ah_flow_convert (&f->flow.ip4_ipsec_ah, &flow.ip4_ipsec_ah);
+ break;
+ case VNET_FLOW_TYPE_IP4_GTPU:
+ ipv4_gtpu_flow_convert (&f->flow.ip4_gtpu, &flow.ip4_gtpu);
+ break;
+ case VNET_FLOW_TYPE_IP4_GTPC:
+ ipv4_gtpc_flow_convert (&f->flow.ip4_gtpc, &flow.ip4_gtpc);
+ break;
+ default:
+ rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
 goto out;
}
- memset (&args, 0, sizeof (args));
-
- table = 0;
- int i;
- for (i = 0; i < vec_len (fcm->tables); i++)
- if (ipfix_classify_table_index_valid (i))
- if (fcm->tables[i].classify_table_index == classify_table_index)
- {
- table = &fcm->tables[i];
- break;
- }
-
- if (is_add)
- {
- if (table)
- {
- rv = VNET_API_ERROR_VALUE_EXIST;
- goto out;
- }
- table = ipfix_classify_add_table ();
- table->classify_table_index = classify_table_index;
- }
- else
- {
- if (!table)
- {
- rv = VNET_API_ERROR_NO_SUCH_ENTRY;
- goto out;
- }
- }
-
- table->ip_version = ip_version;
- table->transport_protocol = transport_protocol;
+ rv = vnet_flow_add (vnm, &flow, &flow_index);
- args.opaque.as_uword = table - fcm->tables;
- args.rewrite_callback = ipfix_classify_template_rewrite;
- args.flow_data_callback = ipfix_classify_send_flows;
- args.is_add = is_add;
- args.domain_id = fcm->domain_id;
- args.src_port = fcm->src_port;
+out:
+ /* *INDENT-OFF* */
+ REPLY_MACRO2(VL_API_FLOW_ADD_REPLY,
+ ({
+ /* host -> network conversion on send */
+ rmp->flow_index = htonl (flow_index);
+ }));
+ /* *INDENT-ON* */
+}
- rv = vnet_flow_report_add_del (frm, &args, NULL);
+/* Handler for FLOW_DEL: remove a previously added flow by index. */
+static void
+vl_api_flow_del_t_handler (vl_api_flow_del_t * mp)
+{
+ /* reply type must match VL_API_FLOW_DEL_REPLY below */
+ vl_api_flow_del_reply_t *rmp;
+ int rv = 0;
- /* If deleting, or add failed */
- if (is_add == 0 || (rv && is_add))
- ipfix_classify_delete_table (table - fcm->tables);
+ vnet_main_t *vnm = vnet_get_main ();
+ rv = vnet_flow_del (vnm, ntohl (mp->flow_index));
-out:
- REPLY_MACRO (VL_API_SET_IPFIX_CLASSIFY_STREAM_REPLY);
+ REPLY_MACRO (VL_API_FLOW_DEL_REPLY);
}
+/* Handler for FLOW_ENABLE: attach an existing flow to a hw interface. */
static void
-send_ipfix_classify_table_details (u32 table_index,
- unix_shared_memory_queue_t * q,
- u32 context)
+vl_api_flow_enable_t_handler (vl_api_flow_enable_t * mp)
{
- flow_report_classify_main_t *fcm = &flow_report_classify_main;
- vl_api_ipfix_classify_table_details_t *mp;
-
- ipfix_classify_table_t *table = &fcm->tables[table_index];
+ /* reply type must match VL_API_FLOW_ENABLE_REPLY below */
+ vl_api_flow_enable_reply_t *rmp;
+ int rv = 0;
- mp = vl_msg_api_alloc (sizeof (*mp));
- memset (mp, 0, sizeof (*mp));
- mp->_vl_msg_id = ntohs (VL_API_IPFIX_CLASSIFY_TABLE_DETAILS);
- mp->context = context;
- mp->table_id = htonl (table->classify_table_index);
- mp->ip_version = table->ip_version;
- mp->transport_protocol = table->transport_protocol;
+ vnet_main_t *vnm = vnet_get_main ();
+ rv = vnet_flow_enable (vnm, ntohl (mp->flow_index), ntohl (mp->hw_if_index));
- vl_msg_api_send_shmem (q, (u8 *) & mp);
+ REPLY_MACRO (VL_API_FLOW_ENABLE_REPLY);
}
+/* Handler for FLOW_DISABLE: detach a flow from a hw interface. */
static void
- vl_api_ipfix_classify_table_dump_t_handler
- (vl_api_ipfix_classify_table_dump_t * mp)
+vl_api_flow_disable_t_handler (vl_api_flow_disable_t * mp)
{
- flow_report_classify_main_t *fcm = &flow_report_classify_main;
- unix_shared_memory_queue_t *q;
- u32 i;
+ /* reply type must match VL_API_FLOW_DISABLE_REPLY below */
+ vl_api_flow_disable_reply_t *rmp;
+ int rv = 0;
- q = vl_api_client_index_to_input_queue (mp->client_index);
- if (!q)
- return;
+ vnet_main_t *vnm = vnet_get_main ();
+ rv = vnet_flow_disable (vnm, ntohl (mp->flow_index), ntohl (mp->hw_if_index));
- for (i = 0; i < vec_len (fcm->tables); i++)
- if (ipfix_classify_table_index_valid (i))
- send_ipfix_classify_table_details (i, q, mp->context);
+ REPLY_MACRO (VL_API_FLOW_DISABLE_REPLY);
}
+#define vl_msg_name_crc_list
+#include <vnet/flow/flow.api.h>
+#undef vl_msg_name_crc_list
+
/*
* flow_api_hookup
* Add vpe's API message handlers to the table.
- * vlib has alread mapped shared memory and
+ * vlib has already mapped shared memory and
* added the client registration handlers.
* See .../vlib-api/vlibmemory/memclnt_vlib.c:memclnt_process()
*/
-#define vl_msg_name_crc_list
-#include <vnet/vnet_all_api_h.h>
-#undef vl_msg_name_crc_list
+
static void
setup_message_id_table (api_main_t * am)
}
static clib_error_t *
-flow_api_hookup (vlib_main_t * vm)
+hw_flow_api_hookup (vlib_main_t * vm)
{
- api_main_t *am = &api_main;
+ api_main_t *am = vlibapi_get_main ();
#define _(N,n) \
vl_msg_api_set_handlers(VL_API_##N, #n, \
return 0;
}
-VLIB_API_INIT_FUNCTION (flow_api_hookup);
+VLIB_API_INIT_FUNCTION (hw_flow_api_hookup);
/*
* fd.io coding-style-patch-verification: ON