2 *------------------------------------------------------------------
3 * Copyright (c) 2020 Intel and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
19 #include <vlib/vlib.h>
20 #include <vppinfra/ring.h>
21 #include <vlib/unix/unix.h>
22 #include <vlib/pci/pci.h>
23 #include <vnet/ethernet/ethernet.h>
26 #include <avf/avf_advanced_flow.h>
/* Flow-class predicates: collapse the many vnet_flow_t types into the
 * coarse protocol classes the AVF flow director logic below dispatches on. */
28 #define FLOW_IS_ETHERNET_CLASS(f) (f->type == VNET_FLOW_TYPE_ETHERNET)
/* any flow whose outer header is IPv4, including IPv4-based tunnels */
30 #define FLOW_IS_IPV4_CLASS(f) \
31 ((f->type == VNET_FLOW_TYPE_IP4) || \
32 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
33 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
34 (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
35 (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
36 (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
37 (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
38 (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
39 (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))
/* any flow whose outer header is IPv6, including IPv6-based tunnels */
41 #define FLOW_IS_IPV6_CLASS(f) \
42 ((f->type == VNET_FLOW_TYPE_IP6) || \
43 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
44 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
45 (f->type == VNET_FLOW_TYPE_IP6_VXLAN))
47 /* check if flow is L3 type (matches on IP header only, no L4 ports) */
48 #define FLOW_IS_L3_TYPE(f) \
49 ((f->type == VNET_FLOW_TYPE_IP4) || (f->type == VNET_FLOW_TYPE_IP6))
51 /* check if flow is L4 type (matches on src/dst port n-tuples) */
52 #define FLOW_IS_L4_TYPE(f) \
53 ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
54 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
55 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
56 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
58 /* check if flow is L4 tunnel type (UDP-encapsulated tunnels) */
59 #define FLOW_IS_L4_TUNNEL_TYPE(f) \
60 ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
61 (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
62 (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
63 (f->type == VNET_FLOW_TYPE_IP4_GTPU))
/* Virtual-channel callback used by the FDIR rule create/destroy helpers.
 *
 * @param vc_hdl   opaque handle; here a pointer to the u32 device instance
 * @param vc_op    requested virtchnl advanced op (add/del FDIR filter)
 * @param in       request buffer forwarded to the PF
 * @param in_len   length of @in
 * @param out      response buffer filled by the PF
 * @param out_len  length of @out
 *
 * Validates vc_op, maps add/del ops onto avf_program_flow(), logs and frees
 * any resulting clib error.  NOTE(review): surrounding control flow (braces,
 * switch statement, return paths) is not fully visible in this view. */
66 avf_fdir_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
67 u32 in_len, void *out, u32 out_len)
69 u32 dev_instance = *(u32 *) vc_hdl;
70 avf_device_t *ad = avf_get_device (dev_instance);
71 clib_error_t *err = 0;
/* reject opcodes outside the known virtchnl advanced-op range */
74 if (vc_op >= VIRTCHNL_ADV_OP_MAX)
/* add/del both funnel into avf_program_flow(); presumably an is_add flag
 * is set per-case in lines not visible here -- TODO confirm */
81 case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
84 case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
88 avf_log_err (ad, "unsupported avf virtual channel opcode %u\n",
93 err = avf_program_flow (dev_instance, is_add, in, in_len, out, out_len);
/* on failure: log the clib error, then release it so it does not leak */
96 avf_log_err (ad, "avf fdir program failed: %U", format_clib_error, err);
97 clib_error_free (err);
101 avf_log_debug (ad, "avf fdir program success");
/* Translate one vnet_flow_t into an AVF flow-director rule and program it.
 *
 * @param dev_instance  AVF device instance index
 * @param f             the vnet flow to install (match pattern + actions)
 * @param fe            per-device flow entry; fe->mark supplies the MARK id
 * @return 0 on success, VNET_FLOW_ERROR_* on failure
 *
 * Builds a VIRTCHNL_PROTO_HDR_* pattern list (eth -> ip4/ip6 -> l4/tunnel),
 * then an action list (queue/drop/passthru + optional mark), parses both
 * into an avf_fdir_conf and creates the rule over the virtual channel.
 * NOTE(review): several interior lines (braces, switch statements, gotos)
 * are not visible in this view. */
106 avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
108 avf_device_t *ad = avf_get_device (dev_instance);
/* L4 port match values/masks, filled only for n-tuple / tunnel flows */
111 u16 src_port = 0, dst_port = 0;
112 u16 src_port_mask = 0, dst_port_mask = 0;
/* IP_PROTOCOL_RESERVED acts as the "no L4 protocol selected" sentinel */
113 u8 protocol = IP_PROTOCOL_RESERVED;
115 struct avf_flow_error error;
118 int action_count = 0;
120 struct avf_fdir_vc_ctx vc_ctx;
121 struct avf_fdir_conf *filter;
/* pattern / action scratch arrays handed to the fdir parse helpers */
122 struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
123 struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];
/* spec = values to match, mask = which bits of spec are significant */
125 struct avf_ipv4_hdr ip4_spec = {}, ip4_mask = {};
126 struct avf_ipv6_hdr ip6_spec = {}, ip6_mask = {};
127 struct avf_tcp_hdr tcp_spec = {}, tcp_mask = {};
128 struct avf_udp_hdr udp_spec = {}, udp_mask = {};
129 struct avf_gtp_hdr gtp_spec = {}, gtp_mask = {};
130 struct avf_l2tpv3oip_hdr l2tpv3_spec = {}, l2tpv3_mask = {};
131 struct avf_esp_hdr esp_spec = {}, esp_mask = {};
132 struct avf_ah_hdr ah_spec = {}, ah_mask = {};
134 struct avf_flow_action_queue act_q = {};
135 struct avf_flow_action_mark act_msk = {};
143 } flow_class = FLOW_UNKNOWN_CLASS;
/* classify the flow once; everything below branches on flow_class */
145 if (FLOW_IS_ETHERNET_CLASS (f))
146 flow_class = FLOW_ETHERNET_CLASS;
147 else if (FLOW_IS_IPV4_CLASS (f))
148 flow_class = FLOW_IPV4_CLASS;
149 else if (FLOW_IS_IPV6_CLASS (f))
150 flow_class = FLOW_IPV6_CLASS;
152 return VNET_FLOW_ERROR_NOT_SUPPORTED;
/* allocate the rule config sized for this VSI and its rx queue count */
154 ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
157 rv = VNET_FLOW_ERROR_INTERNAL;
161 /* init a virtual channel context */
162 vc_ctx.vc_hdl = &dev_instance;
163 vc_ctx.vc_op = avf_fdir_vc_op_callback;
/* NOTE(review): BUG -- avf_items is zeroed with sizeof (avf_actions);
 * should be sizeof (avf_items).  If VIRTCHNL_MAX_NUM_PROTO_HDRS and
 * VIRTCHNL_MAX_NUM_ACTIONS differ, part of avf_items stays
 * uninitialized (or the memset overruns). */
165 clib_memset (avf_items, 0, sizeof (avf_actions));
166 clib_memset (avf_actions, 0, sizeof (avf_actions));
/* layer 0: outer ethernet header, matched with no constraints */
169 avf_items[layer].type = VIRTCHNL_PROTO_HDR_ETH;
170 avf_items[layer].spec = NULL;
171 avf_items[layer].mask = NULL;
174 if (flow_class == FLOW_IPV4_CLASS)
176 vnet_flow_ip4_t *ip4_ptr = &f->ip4;
179 avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV4;
180 avf_items[layer].spec = &ip4_spec;
181 avf_items[layer].mask = &ip4_mask;
/* all-zero masks mean "match any IPv4"; spec/mask fill is skipped */
184 if ((!ip4_ptr->src_addr.mask.as_u32) &&
185 (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
191 ip4_spec.src_addr = ip4_ptr->src_addr.addr.as_u32;
192 ip4_mask.src_addr = ip4_ptr->src_addr.mask.as_u32;
194 ip4_spec.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
195 ip4_mask.dst_addr = ip4_ptr->dst_addr.mask.as_u32;
197 ip4_spec.next_proto_id = ip4_ptr->protocol.prot;
198 ip4_mask.next_proto_id = ip4_ptr->protocol.mask;
/* n-tuple and tunnel flows also carry L4 ports; capture them for the
 * TCP/UDP layer built further down */
201 if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
203 vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;
205 src_port = ip4_n_ptr->src_port.port;
206 dst_port = ip4_n_ptr->dst_port.port;
207 src_port_mask = ip4_n_ptr->src_port.mask;
208 dst_port_mask = ip4_n_ptr->dst_port.mask;
211 protocol = ip4_ptr->protocol.prot;
213 else if (flow_class == FLOW_IPV6_CLASS)
215 vnet_flow_ip6_t *ip6_ptr = &f->ip6;
218 avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV6;
219 avf_items[layer].spec = &ip6_spec;
220 avf_items[layer].mask = &ip6_mask;
/* same any-match shortcut as the IPv4 path, using 128-bit zero checks */
223 if ((ip6_address_is_zero (&ip6_ptr->src_addr.mask)) &&
224 (ip6_address_is_zero (&ip6_ptr->dst_addr.mask)) &&
225 (!ip6_ptr->protocol.mask))
231 clib_memcpy (ip6_spec.src_addr, &ip6_ptr->src_addr.addr,
232 ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
233 clib_memcpy (ip6_mask.src_addr, &ip6_ptr->src_addr.mask,
234 ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
235 clib_memcpy (ip6_spec.dst_addr, &ip6_ptr->dst_addr.addr,
236 ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
237 clib_memcpy (ip6_mask.dst_addr, &ip6_ptr->dst_addr.mask,
238 ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
239 ip6_spec.proto = ip6_ptr->protocol.prot;
240 ip6_mask.proto = ip6_ptr->protocol.mask;
243 if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
245 vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;
247 src_port = ip6_n_ptr->src_port.port;
248 dst_port = ip6_n_ptr->dst_port.port;
249 src_port_mask = ip6_n_ptr->src_port.mask;
250 dst_port_mask = ip6_n_ptr->dst_port.mask;
253 protocol = ip6_ptr->protocol.prot;
/* pure L3 flow: no further protocol layer to add */
256 if (FLOW_IS_L3_TYPE (f))
/* dispatch on the IP protocol to build the next pattern layer;
 * enclosing switch statement is not visible in this view */
262 case IP_PROTOCOL_L2TP:
263 avf_items[layer].type = VIRTCHNL_PROTO_HDR_L2TPV3;
264 avf_items[layer].spec = &l2tpv3_spec;
265 avf_items[layer].mask = &l2tpv3_mask;
268 vnet_flow_ip4_l2tpv3oip_t *l2tph = &f->ip4_l2tpv3oip;
/* session id matched exactly (mask = all ones), in network order */
269 l2tpv3_spec.session_id = clib_host_to_net_u32 (l2tph->session_id);
270 l2tpv3_mask.session_id = ~0;
273 case IP_PROTOCOL_IPSEC_ESP:
274 avf_items[layer].type = VIRTCHNL_PROTO_HDR_ESP;
275 avf_items[layer].spec = &esp_spec;
276 avf_items[layer].mask = &esp_mask;
279 vnet_flow_ip4_ipsec_esp_t *esph = &f->ip4_ipsec_esp;
280 esp_spec.spi = clib_host_to_net_u32 (esph->spi);
284 case IP_PROTOCOL_IPSEC_AH:
285 avf_items[layer].type = VIRTCHNL_PROTO_HDR_AH;
286 avf_items[layer].spec = &ah_spec;
287 avf_items[layer].mask = &ah_mask;
290 vnet_flow_ip4_ipsec_ah_t *ah = &f->ip4_ipsec_ah;
291 ah_spec.spi = clib_host_to_net_u32 (ah->spi);
295 case IP_PROTOCOL_TCP:
296 avf_items[layer].type = VIRTCHNL_PROTO_HDR_TCP;
297 avf_items[layer].spec = &tcp_spec;
298 avf_items[layer].mask = &tcp_mask;
/* ports were collected host-order above; hardware wants network order */
303 tcp_spec.src_port = clib_host_to_net_u16 (src_port);
304 tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
308 tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
309 tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
313 case IP_PROTOCOL_UDP:
314 avf_items[layer].type = VIRTCHNL_PROTO_HDR_UDP;
315 avf_items[layer].spec = &udp_spec;
316 avf_items[layer].mask = &udp_mask;
321 udp_spec.src_port = clib_host_to_net_u16 (src_port);
322 udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
326 udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
327 udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
330 /* handle the UDP tunnels */
331 if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
333 avf_items[layer].type = VIRTCHNL_PROTO_HDR_GTPU_IP;
/* NOTE(review): ">p_spec" / ">p_mask" are not valid C -- this looks
 * like HTML-entity mangling of "&gtp_spec" / "&gtp_mask"; restore the
 * address-of expressions */
334 avf_items[layer].spec = >p_spec;
335 avf_items[layer].mask = >p_mask;
338 vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
339 gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
/* any other IP protocol is not programmable as an FDIR rule */
345 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
350 /* pattern end flag */
351 avf_items[layer].type = VIRTCHNL_PROTO_HDR_NONE;
352 ret = avf_fdir_parse_pattern (filter, avf_items, &error);
355 avf_log_err (ad, "avf fdir parse pattern failed: %s", error.message);
356 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
361 /* Only one 'fate' can be assigned */
362 if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
364 avf_actions[action_count].type = VIRTCHNL_ACTION_QUEUE;
365 avf_actions[action_count].conf = &act_q;
367 act_q.index = f->redirect_queue;
372 if (f->actions & VNET_FLOW_ACTION_DROP)
374 avf_actions[action_count].type = VIRTCHNL_ACTION_DROP;
375 avf_actions[action_count].conf = NULL;
/* presumably reached when two fate actions were requested -- TODO
 * confirm against the elided conditional */
379 rv = VNET_FLOW_ERROR_INTERNAL;
/* no fate action requested: fall back to passthru */
390 avf_actions[action_count].type = VIRTCHNL_ACTION_PASSTHRU;
391 avf_actions[action_count].conf = NULL;
/* MARK can be combined with any fate action above */
397 if (f->actions & VNET_FLOW_ACTION_MARK)
399 avf_actions[action_count].type = VIRTCHNL_ACTION_MARK;
400 avf_actions[action_count].conf = &act_msk;
403 act_msk.id = fe->mark;
406 /* action end flag */
407 avf_actions[action_count].type = VIRTCHNL_ACTION_NONE;
410 ret = avf_fdir_parse_action (avf_actions, filter, &error);
413 avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
414 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
418 /* create flow rule, save rule */
419 ret = avf_fdir_rule_create (&vc_ctx, filter);
423 avf_log_err (ad, "avf fdir rule create failed: %s",
424 avf_fdir_prgm_error_decode (ret));
425 rv = VNET_FLOW_ERROR_INTERNAL;
/* vnet flow-device op handler for AVF: add or delete a hardware flow rule.
 *
 * @param vm            vnet main (unused directly here)
 * @param op            VNET_FLOW_DEV_OP_ADD_FLOW or _DEL_FLOW
 * @param dev_instance  AVF device instance index
 * @param flow_index    index of the vnet flow being operated on
 * @param private_data  in/out: per-device flow_entries pool index
 * @return 0 on success, VNET_FLOW_ERROR_* on failure
 *
 * NOTE(review): several interior lines (braces, goto labels, switch header)
 * are not visible in this view. */
438 avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
439 u32 flow_index, uword *private_data)
441 vnet_flow_t *flow = vnet_get_flow (flow_index);
442 avf_device_t *ad = avf_get_device (dev_instance);
443 avf_flow_entry_t *fe = NULL;
444 avf_flow_lookup_entry_t *fle = NULL;
/* flow director requires the FDIR_PF capability negotiated with the PF */
447 if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
449 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
453 if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
455 pool_get (ad->flow_entries, fe);
456 fe->flow_index = flow->index;
458 /* if we need to mark packets, assign one mark */
460 (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
461 VNET_FLOW_ACTION_BUFFER_ADVANCE))
/* first allocation reserves pool slot 0 -- presumably because mark 0
 * means "no mark" on the rx path, so real entries start at index 1;
 * the immediately following pool_get_aligned is the actual entry.
 * TODO confirm against the rx node's mark handling */
464 if (ad->flow_lookup_entries == 0)
465 pool_get_aligned (ad->flow_lookup_entries, fle,
466 CLIB_CACHE_LINE_BYTES);
467 pool_get_aligned (ad->flow_lookup_entries, fle,
468 CLIB_CACHE_LINE_BYTES);
/* mark value is the entry's pool index */
469 fe->mark = fle - ad->flow_lookup_entries;
471 /* install entry in the lookup table */
472 clib_memset (fle, -1, sizeof (*fle));
473 if (flow->actions & VNET_FLOW_ACTION_MARK)
474 fle->flow_id = flow->mark_flow_id;
475 if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
476 fle->next_index = flow->redirect_device_input_next_index;
477 if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
478 fle->buffer_advance = flow->buffer_advance;
/* first installed flow turns on rx flow offload for the device */
480 if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
482 ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
/* only these flow types are programmable via avf_flow_add() */
490 case VNET_FLOW_TYPE_IP4:
491 case VNET_FLOW_TYPE_IP6:
492 case VNET_FLOW_TYPE_IP4_N_TUPLE:
493 case VNET_FLOW_TYPE_IP6_N_TUPLE:
494 case VNET_FLOW_TYPE_IP4_VXLAN:
495 case VNET_FLOW_TYPE_IP4_GTPU:
496 case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
497 case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
498 case VNET_FLOW_TYPE_IP4_IPSEC_AH:
499 if ((rv = avf_flow_add (dev_instance, flow, fe)))
503 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
/* hand the pool index back to vnet so DEL can find this entry */
507 *private_data = fe - ad->flow_entries;
509 else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
511 fe = vec_elt_at_index (ad->flow_entries, *private_data);
/* fresh vc context; the stored rcfg identifies the hardware rule */
513 struct avf_fdir_vc_ctx ctx;
514 ctx.vc_hdl = &dev_instance;
515 ctx.vc_op = avf_fdir_vc_op_callback;
517 rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
519 return VNET_FLOW_ERROR_INTERNAL;
/* release the lookup entry tied to this flow's mark, if any */
523 fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
524 clib_memset (fle, -1, sizeof (*fle));
525 pool_put_index (ad->flow_lookup_entries, fe->mark);
/* result intentionally ignored: best-effort teardown of the rule cfg */
528 (void) avf_fdir_rcfg_destroy (fe->rcfg);
529 clib_memset (fe, 0, sizeof (*fe));
530 pool_put (ad->flow_entries, fe);
531 goto disable_rx_offload;
534 return VNET_FLOW_ERROR_NOT_SUPPORTED;
/* error-path cleanup: undo the ADD-path allocations above */
541 clib_memset (fe, 0, sizeof (*fe));
542 pool_put (ad->flow_entries, fe);
547 clib_memset (fle, -1, sizeof (*fle));
548 pool_put (ad->flow_lookup_entries, fle);
/* last flow removed: switch rx flow offload back off */
552 if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
553 pool_elts (ad->flow_entries) == 0)
555 ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;
562 * fd.io coding-style-patch-verification: ON
565 * eval: (c-set-style "gnu")