/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */

#include <vlib/vlib.h>
#include <vppinfra/ring.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <avf/avf.h>
#include <avf/avf_advanced_flow.h>

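/* Flow classification helpers: group the vnet flow types this driver
 * understands by their outer L3 header. */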
#define FLOW_IS_ETHERNET_CLASS(f) (f->type == VNET_FLOW_TYPE_ETHERNET)

#define FLOW_IS_IPV4_CLASS(f)                                                 \
  ((f->type == VNET_FLOW_TYPE_IP4) ||                                         \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP4_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) ||                               \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) ||                               \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))

#define FLOW_IS_IPV6_CLASS(f)                                                 \
  ((f->type == VNET_FLOW_TYPE_IP6) ||                                         \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN))

/* check if flow is L3 type */
#define FLOW_IS_L3_TYPE(f)                                                    \
  ((f->type == VNET_FLOW_TYPE_IP4) || (f->type == VNET_FLOW_TYPE_IP6))

/* check if flow is L4 type */
#define FLOW_IS_L4_TYPE(f)                                                    \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_TYPE(f)                                             \
  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU))

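/*
 * Virtual channel op callback handed to the FDIR rule helpers: translates
 * an add/del FDIR filter request into a call to avf_program_flow(), which
 * carries the message to the PF over the virtual channel.
 * Returns 0 on success, -1 on any failure.
 */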
int
avf_fdir_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
                         u32 in_len, void *out, u32 out_len)
{
  u32 dev_instance = *(u32 *) vc_hdl;
  avf_device_t *ad = avf_get_device (dev_instance);
  clib_error_t *err = 0;
  int is_add;

  if (vc_op >= VIRTCHNL_ADV_OP_MAX)
    return -1;

  switch (vc_op)
    {
    case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
      is_add = 1;
      break;
    case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
      is_add = 0;
      break;
    default:
      avf_log_err (ad, "unsupported avf virtual channel opcode %u\n",
                   (u32) vc_op);
      return -1;
    }

  err = avf_program_flow (dev_instance, is_add, in, in_len, out, out_len);
  if (err != 0)
    {
      avf_log_err (ad, "avf fdir program failed: %U", format_clib_error, err);
      clib_error_free (err);
      return -1;
    }

  avf_log_debug (ad, "avf fdir program success");
  return 0;
}

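/*
 * Translate a generic vnet_flow_t into an AVF FDIR rule: build the
 * protocol-header pattern layer by layer (eth -> ip4/ip6 -> l4/tunnel),
 * translate the vnet flow actions, then create the rule over the virtual
 * channel. On success the rule config is saved in fe->rcfg for later
 * deletion.
 */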
static int
avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
{
  avf_device_t *ad = avf_get_device (dev_instance);
  int rv = 0;
  int ret = 0;
  u16 src_port = 0, dst_port = 0;
  u16 src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  bool fate = false;
  struct avf_flow_error error;

  int layer = 0;
  int action_count = 0;

  struct avf_fdir_vc_ctx vc_ctx;
  struct avf_fdir_conf *filter;
  struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
  struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];

  struct avf_ipv4_hdr ip4_spec = {}, ip4_mask = {};
  struct avf_ipv6_hdr ip6_spec = {}, ip6_mask = {};
  struct avf_tcp_hdr tcp_spec = {}, tcp_mask = {};
  struct avf_udp_hdr udp_spec = {}, udp_mask = {};
  struct avf_gtp_hdr gtp_spec = {}, gtp_mask = {};
  struct avf_l2tpv3oip_hdr l2tpv3_spec = {}, l2tpv3_mask = {};
  struct avf_esp_hdr esp_spec = {}, esp_mask = {};
  struct avf_esp_hdr ah_spec = {}, ah_mask = {};

  struct avf_flow_action_queue act_q = {};
  struct avf_flow_action_mark act_msk = {};

  enum
  {
    FLOW_UNKNOWN_CLASS,
    FLOW_ETHERNET_CLASS,
    FLOW_IPV4_CLASS,
    FLOW_IPV6_CLASS,
  } flow_class = FLOW_UNKNOWN_CLASS;

  if (FLOW_IS_ETHERNET_CLASS (f))
    flow_class = FLOW_ETHERNET_CLASS;
  else if (FLOW_IS_IPV4_CLASS (f))
    flow_class = FLOW_IPV4_CLASS;
  else if (FLOW_IS_IPV6_CLASS (f))
    flow_class = FLOW_IPV6_CLASS;
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

  ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
  if (ret)
    {
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  /* init a virtual channel context */
  vc_ctx.vc_hdl = &dev_instance;
  vc_ctx.vc_op = avf_fdir_vc_op_callback;

  clib_memset (avf_items, 0, sizeof (avf_items));
  clib_memset (avf_actions, 0, sizeof (avf_actions));

  /* outermost layer: ethernet header, matched by type only */
  avf_items[layer].type = VIRTCHNL_PROTO_HDR_ETH;
  avf_items[layer].spec = NULL;
  avf_items[layer].mask = NULL;
  layer++;

  if (flow_class == FLOW_IPV4_CLASS)
    {
      vnet_flow_ip4_t *ip4_ptr = &f->ip4;

      avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV4;
      avf_items[layer].spec = &ip4_spec;
      avf_items[layer].mask = &ip4_mask;
      layer++;

      /* all-zero masks match any ip4 packet; leave spec/mask zeroed */
      if (ip4_ptr->src_addr.mask.as_u32 || ip4_ptr->dst_addr.mask.as_u32 ||
          ip4_ptr->protocol.mask)
        {
          ip4_spec.src_addr = ip4_ptr->src_addr.addr.as_u32;
          ip4_mask.src_addr = ip4_ptr->src_addr.mask.as_u32;

          ip4_spec.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
          ip4_mask.dst_addr = ip4_ptr->dst_addr.mask.as_u32;

          ip4_spec.next_proto_id = ip4_ptr->protocol.prot;
          ip4_mask.next_proto_id = ip4_ptr->protocol.mask;
        }

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
        {
          vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;

          src_port = ip4_n_ptr->src_port.port;
          dst_port = ip4_n_ptr->dst_port.port;
          src_port_mask = ip4_n_ptr->src_port.mask;
          dst_port_mask = ip4_n_ptr->dst_port.mask;
        }

      protocol = ip4_ptr->protocol.prot;
    }
  else if (flow_class == FLOW_IPV6_CLASS)
    {
      vnet_flow_ip6_t *ip6_ptr = &f->ip6;

      avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV6;
      avf_items[layer].spec = &ip6_spec;
      avf_items[layer].mask = &ip6_mask;
      layer++;

      /* all-zero masks match any ip6 packet; leave spec/mask zeroed */
      if (!ip6_address_is_zero (&ip6_ptr->src_addr.mask) ||
          !ip6_address_is_zero (&ip6_ptr->dst_addr.mask) ||
          ip6_ptr->protocol.mask)
        {
          clib_memcpy (ip6_spec.src_addr, &ip6_ptr->src_addr.addr,
                       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
          clib_memcpy (ip6_mask.src_addr, &ip6_ptr->src_addr.mask,
                       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
          clib_memcpy (ip6_spec.dst_addr, &ip6_ptr->dst_addr.addr,
                       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
          clib_memcpy (ip6_mask.dst_addr, &ip6_ptr->dst_addr.mask,
                       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
          ip6_spec.proto = ip6_ptr->protocol.prot;
          ip6_mask.proto = ip6_ptr->protocol.mask;
        }

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
        {
          vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;

          src_port = ip6_n_ptr->src_port.port;
          dst_port = ip6_n_ptr->dst_port.port;
          src_port_mask = ip6_n_ptr->src_port.mask;
          dst_port_mask = ip6_n_ptr->dst_port.mask;
        }

      protocol = ip6_ptr->protocol.prot;
    }

  /* L3 flows carry no L4 header, the pattern is complete */
  if (FLOW_IS_L3_TYPE (f))
    goto pattern_end;

  /* add the L4 (or tunnel) header matching the ip protocol */
  switch (protocol)
    {
    case IP_PROTOCOL_L2TP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_L2TPV3;
      avf_items[layer].spec = &l2tpv3_spec;
      avf_items[layer].mask = &l2tpv3_mask;
      layer++;

      vnet_flow_ip4_l2tpv3oip_t *l2tph = &f->ip4_l2tpv3oip;
      l2tpv3_spec.session_id = clib_host_to_net_u32 (l2tph->session_id);
      l2tpv3_mask.session_id = ~0;
      break;

    case IP_PROTOCOL_IPSEC_ESP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_ESP;
      avf_items[layer].spec = &esp_spec;
      avf_items[layer].mask = &esp_mask;
      layer++;

      vnet_flow_ip4_ipsec_esp_t *esph = &f->ip4_ipsec_esp;
      esp_spec.spi = clib_host_to_net_u32 (esph->spi);
      esp_mask.spi = ~0;
      break;

    case IP_PROTOCOL_IPSEC_AH:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_AH;
      avf_items[layer].spec = &ah_spec;
      avf_items[layer].mask = &ah_mask;
      layer++;

      vnet_flow_ip4_ipsec_ah_t *ah = &f->ip4_ipsec_ah;
      ah_spec.spi = clib_host_to_net_u32 (ah->spi);
      ah_mask.spi = ~0;
      break;

    case IP_PROTOCOL_TCP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_TCP;
      avf_items[layer].spec = &tcp_spec;
      avf_items[layer].mask = &tcp_mask;
      layer++;

      if (src_port_mask)
        {
          tcp_spec.src_port = clib_host_to_net_u16 (src_port);
          tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
        }
      if (dst_port_mask)
        {
          tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
          tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
        }
      break;

    case IP_PROTOCOL_UDP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_UDP;
      avf_items[layer].spec = &udp_spec;
      avf_items[layer].mask = &udp_mask;
      layer++;

      if (src_port_mask)
        {
          udp_spec.src_port = clib_host_to_net_u16 (src_port);
          udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
        }
      if (dst_port_mask)
        {
          udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
          udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
        }

      /* handle the UDP tunnels */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
        {
          avf_items[layer].type = VIRTCHNL_PROTO_HDR_GTPU_IP;
          avf_items[layer].spec = &gtp_spec;
          avf_items[layer].mask = &gtp_mask;
          layer++;

          vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
          gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
          gtp_mask.teid = ~0;
        }
      break;

    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

pattern_end:
  /* pattern end flag */
  avf_items[layer].type = VIRTCHNL_PROTO_HDR_NONE;

  ret = avf_fdir_parse_pattern (filter, avf_items, &error);
  if (ret)
    {
      avf_log_err (ad, "avf fdir parse pattern failed: %s", error.message);
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_QUEUE;
      avf_actions[action_count].conf = &act_q;

      act_q.index = f->redirect_queue;
      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_DROP;
      avf_actions[action_count].conf = NULL;

      if (fate == true)
        {
          rv = VNET_FLOW_ERROR_INTERNAL;
          goto done;
        }
      else
        fate = true;

      action_count++;
    }

  /* no fate assigned yet: default to passthru */
  if (fate == false)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_PASSTHRU;
      avf_actions[action_count].conf = NULL;

      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_MARK;
      avf_actions[action_count].conf = &act_msk;

      act_msk.id = fe->mark;
      action_count++;
    }

  /* action end flag */
  avf_actions[action_count].type = VIRTCHNL_ACTION_NONE;

  ret = avf_fdir_parse_action (avf_actions, filter, &error);
  if (ret)
    {
      avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }

  /* create flow rule, save rule */
  ret = avf_fdir_rule_create (&vc_ctx, filter);

  if (ret)
    {
      avf_log_err (ad, "avf fdir rule create failed: %s",
                   avf_fdir_prgm_error_decode (ret));
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }
  else
    {
      fe->rcfg = filter;
    }

done:
  return rv;
}

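/*
 * vnet flow dev-ops entry point for AVF interfaces: dispatches add/del
 * flow requests and keeps the per-device flow-entry and mark-lookup
 * pools in sync with the rules programmed on the device.
 */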
int
avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
                 u32 flow_index, uword *private_data)
{
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  avf_device_t *ad = avf_get_device (dev_instance);
  avf_flow_entry_t *fe = NULL;
  avf_flow_lookup_entry_t *fle = NULL;
  int rv = 0;

  if ((ad->feature_bitmap & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      return rv;
    }

  if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
    {
      pool_get (ad->flow_entries, fe);
      fe->flow_index = flow->index;

      /* if we need to mark packets, assign one mark */
      if (flow->actions &
          (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
           VNET_FLOW_ACTION_BUFFER_ADVANCE))
        {
          /* mark 0 means "no mark", so reserve slot 0 on first use */
          if (ad->flow_lookup_entries == 0)
            pool_get_aligned (ad->flow_lookup_entries, fle,
                              CLIB_CACHE_LINE_BYTES);
          pool_get_aligned (ad->flow_lookup_entries, fle,
                            CLIB_CACHE_LINE_BYTES);
          fe->mark = fle - ad->flow_lookup_entries;

          /* install entry in the lookup table */
          clib_memset (fle, -1, sizeof (*fle));
          if (flow->actions & VNET_FLOW_ACTION_MARK)
            fle->flow_id = flow->mark_flow_id;
          if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
            fle->next_index = flow->redirect_device_input_next_index;
          if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
            fle->buffer_advance = flow->buffer_advance;

          if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
            {
              ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
            }
        }
      else
        fe->mark = 0;

      switch (flow->type)
        {
        case VNET_FLOW_TYPE_IP4:
        case VNET_FLOW_TYPE_IP6:
        case VNET_FLOW_TYPE_IP4_N_TUPLE:
        case VNET_FLOW_TYPE_IP6_N_TUPLE:
        case VNET_FLOW_TYPE_IP4_VXLAN:
        case VNET_FLOW_TYPE_IP4_GTPU:
        case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
        case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
        case VNET_FLOW_TYPE_IP4_IPSEC_AH:
          if ((rv = avf_flow_add (dev_instance, flow, fe)))
            goto done;
          break;
        default:
          rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
          goto done;
        }

      *private_data = fe - ad->flow_entries;
    }
  else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (ad->flow_entries, *private_data);

      struct avf_fdir_vc_ctx ctx;
      ctx.vc_hdl = &dev_instance;
      ctx.vc_op = avf_fdir_vc_op_callback;

      rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
      if (rv)
        return VNET_FLOW_ERROR_INTERNAL;

      if (fe->mark)
        {
          fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
          clib_memset (fle, -1, sizeof (*fle));
          pool_put_index (ad->flow_lookup_entries, fe->mark);
        }

      (void) avf_fdir_rcfg_destroy (fe->rcfg);
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (ad->flow_entries, fe);
      goto disable_rx_offload;
    }
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;

done:
  if (rv)
    {
      if (fe)
        {
          clib_memset (fe, 0, sizeof (*fe));
          pool_put (ad->flow_entries, fe);
        }

      if (fle)
        {
          clib_memset (fle, -1, sizeof (*fle));
          pool_put (ad->flow_lookup_entries, fle);
        }
    }

disable_rx_offload:
  if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
      pool_elts (ad->flow_entries) == 0)
    {
      ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;
    }

  return rv;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */