2 *------------------------------------------------------------------
3 * Copyright (c) 2020 Intel and/or its affiliates.
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at:
8 * http://www.apache.org/licenses/LICENSE-2.0
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 *------------------------------------------------------------------
19 #include <vlib/vlib.h>
20 #include <vppinfra/ring.h>
21 #include <vlib/unix/unix.h>
22 #include <vlib/pci/pci.h>
23 #include <vnet/ethernet/ethernet.h>
26 #include <avf/avf_advanced_flow.h>
/* Flow-class predicates: bucket a vnet_flow_t by its outermost protocol
 * so avf_flow_add can choose the right AVF pattern layers. */
28 #define FLOW_IS_ETHERNET_CLASS(f) (f->type == VNET_FLOW_TYPE_ETHERNET)
/* Any flow carried over IPv4, including n-tuple, tunneled (VXLAN/GTPC/GTPU),
 * L2TPv3-over-IP and IPsec (ESP/AH) variants. */
30 #define FLOW_IS_IPV4_CLASS(f) \
31 ((f->type == VNET_FLOW_TYPE_IP4) || \
32 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
33 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
34 (f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
35 (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
36 (f->type == VNET_FLOW_TYPE_IP4_GTPU) || \
37 (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) || \
38 (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) || \
39 (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))
/* Any flow carried over IPv6 (plain, n-tuple or VXLAN-tunneled). */
41 #define FLOW_IS_IPV6_CLASS(f) \
42 ((f->type == VNET_FLOW_TYPE_IP6) || \
43 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
44 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) || \
45 (f->type == VNET_FLOW_TYPE_IP6_VXLAN))
/* Generic flows carry a raw byte pattern/mask instead of typed headers. */
47 #define FLOW_IS_GENERIC_CLASS(f) (f->type == VNET_FLOW_TYPE_GENERIC)
49 /* check if flow is L3 type */
50 #define FLOW_IS_L3_TYPE(f) \
51 ((f->type == VNET_FLOW_TYPE_IP4) || (f->type == VNET_FLOW_TYPE_IP6))
53 /* check if flow is L4 type */
54 #define FLOW_IS_L4_TYPE(f) \
55 ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) || \
56 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) || \
57 (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) || \
58 (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))
60 /* check if flow is L4 tunnel type */
61 #define FLOW_IS_L4_TUNNEL_TYPE(f) \
62 ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) || \
63 (f->type == VNET_FLOW_TYPE_IP6_VXLAN) || \
64 (f->type == VNET_FLOW_TYPE_IP4_GTPC) || \
65 (f->type == VNET_FLOW_TYPE_IP4_GTPU))
/* Translate VNET RSS type bits ('type', from f->rss_types) into the AVF
 * advanced-flow RSS type bitmask written to *avf_rss_type.
 * NOTE(review): the per-bit mapping table is elided from this view;
 * only the bit-test scaffolding is visible. */
68 avf_flow_convert_rss_types (u64 type, u64 *avf_rss_type)
/* Test bit b of v.  '<<' binds tighter than '&', so this parses as
 * (v) & ((u64) 1 << (b)), which is the intended test. */
70 #define BIT_IS_SET(v, b) ((v) & (u64) 1 << (b))
76   if (n != -1 && BIT_IS_SET (type, n)) \
/* Virtual-channel callback handed to the shared AVF flow library:
 * forwards an advanced-flow opcode (add/del FDIR filter or RSS config)
 * to the PF through avf_program_flow.  vc_hdl is a pointer to the
 * device instance id (see vc_ctx setup in avf_flow_add). */
85 avf_flow_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
86 u32 in_len, void *out, u32 out_len)
88 u32 dev_instance = *(u32 *) vc_hdl;
89 avf_device_t *ad = avf_get_device (dev_instance);
90 clib_error_t *err = 0;
/* Reject opcodes outside the advertised advanced-op range. */
93 if (vc_op >= VIRTCHNL_ADV_OP_MAX)
/* ADD_* opcodes program a rule, DEL_* opcodes remove one; the is_add
 * flag set here (assignments elided in this view) selects the direction. */
100 case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
101 case VIRTCHNL_ADV_OP_ADD_RSS_CFG:
104 case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
105 case VIRTCHNL_ADV_OP_DEL_RSS_CFG:
109 avf_log_err (ad, "unsupported avf virtual channel opcode %u\n",
/* Ship the request to the device over the virtchnl mailbox. */
115 avf_program_flow (dev_instance, is_add, vc_op, in, in_len, out, out_len);
/* On failure log and free the clib error; the numeric status (elided)
 * is propagated to the caller. */
118 avf_log_err (ad, "avf flow program failed: %U", format_clib_error, err);
119 clib_error_free (err);
123 avf_log_debug (ad, "avf flow program success");
/* Map a VNET RSS hash-function enum to its AVF equivalent.
 * Returns AVF_ETH_HASH_FUNCTION_MAX as an "unsupported" sentinel, which
 * avf_flow_add checks and turns into VNET_FLOW_ERROR_NOT_SUPPORTED. */
127 static inline enum avf_eth_hash_function
128 avf_flow_convert_rss_func (vnet_rss_function_t func)
130 enum avf_eth_hash_function rss_func;
134 case VNET_RSS_FUNC_DEFAULT:
135 rss_func = AVF_ETH_HASH_FUNCTION_DEFAULT;
137 case VNET_RSS_FUNC_TOEPLITZ:
138 rss_func = AVF_ETH_HASH_FUNCTION_TOEPLITZ;
140 case VNET_RSS_FUNC_SIMPLE_XOR:
141 rss_func = AVF_ETH_HASH_FUNCTION_SIMPLE_XOR;
143 case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
144 rss_func = AVF_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
/* default: unknown function -> sentinel. */
147 rss_func = AVF_ETH_HASH_FUNCTION_MAX;
154 /** Maximum number of queue indices in struct avf_flow_action_rss. */
155 #define ACTION_RSS_QUEUE_NUM 128
/* Expand (queue_index, queue_num) into an explicit array of consecutive
 * queue ids on act_rss.  The array is heap-allocated with clib_mem_alloc;
 * ownership passes to act_rss (freed when the RSS action/config is torn
 * down -- TODO confirm, the free site is not visible in this view).
 * NOTE(review): queue_num is not validated against ACTION_RSS_QUEUE_NUM,
 * so queue_num > 128 would write past the allocation -- verify callers
 * bound it before this is reached. */
158 avf_flow_convert_rss_queues (u32 queue_index, u32 queue_num,
159 struct avf_flow_action_rss *act_rss)
161 u16 *queues = clib_mem_alloc (sizeof (*queues) * ACTION_RSS_QUEUE_NUM);
164 for (i = 0; i < queue_num; i++)
165 queues[i] = queue_index++;
167 act_rss->queue_num = queue_num;
168 act_rss->queue = queues;
/* Convert a generic flow item's hex-string spec/mask into binary buffers.
 * Every two input characters become one output byte: pkt_buf receives the
 * decoded spec, msk_buf the decoded mask.  spec_len is the length of the
 * hex string (so spec_len/2 bytes are produced).  Non-hex characters are
 * silently left as whatever tmp_val last held -- assumes input is already
 * validated hex (TODO confirm at the caller). */
174 avf_parse_generic_pattern (struct avf_flow_item *item, u8 *pkt_buf,
175 u8 *msk_buf, u16 spec_len)
177 u8 *raw_spec, *raw_mask;
182 raw_spec = (u8 *) item->spec;
183 raw_mask = (u8 *) item->mask;
185 /* convert string to int array */
186 for (i = 0, j = 0; i < spec_len; i += 2, j++)
/* High nibble of the spec byte. */
189 if (tmp_c >= 'a' && tmp_c <= 'f')
190 tmp_val = tmp_c - 'a' + 10;
191 if (tmp_c >= 'A' && tmp_c <= 'F')
192 tmp_val = tmp_c - 'A' + 10;
193 if (tmp_c >= '0' && tmp_c <= '9')
194 tmp_val = tmp_c - '0';
/* Low nibble of the spec byte: combine with the high nibble. */
196 tmp_c = raw_spec[i + 1];
197 if (tmp_c >= 'a' && tmp_c <= 'f')
198 pkt_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
199 if (tmp_c >= 'A' && tmp_c <= 'F')
200 pkt_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
201 if (tmp_c >= '0' && tmp_c <= '9')
202 pkt_buf[j] = tmp_val * 16 + tmp_c - '0';
/* Same decode for the mask byte.  0x57 == 'a' - 10 and 0x37 == 'A' - 10,
 * so these branches are equivalent to the spec decode above; the mixed
 * constant styles are cosmetic only. */
205 if (tmp_c >= 'a' && tmp_c <= 'f')
206 tmp_val = tmp_c - 0x57;
207 if (tmp_c >= 'A' && tmp_c <= 'F')
208 tmp_val = tmp_c - 0x37;
209 if (tmp_c >= '0' && tmp_c <= '9')
210 tmp_val = tmp_c - '0';
212 tmp_c = raw_mask[i + 1];
213 if (tmp_c >= 'a' && tmp_c <= 'f')
214 msk_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
215 if (tmp_c >= 'A' && tmp_c <= 'F')
216 msk_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
217 if (tmp_c >= '0' && tmp_c <= '9')
218 msk_buf[j] = tmp_val * 16 + tmp_c - '0';
/* Translate a VPP vnet_flow_t into AVF advanced-flow (virtchnl) form and
 * program it on the device: builds a pattern-item array plus an action
 * array, parses them into either an FDIR filter or an RSS config, then
 * creates the rule over the virtual channel.  On success the created
 * config is recorded in @fe (flow_type_flag selects FDIR vs RSS);
 * otherwise a VNET_FLOW_ERROR_* code is returned. */
223 avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
225 avf_device_t *ad = avf_get_device (dev_instance);
228 u16 src_port = 0, dst_port = 0;
229 u16 src_port_mask = 0, dst_port_mask = 0;
230 u8 protocol = IP_PROTOCOL_RESERVED;
233 struct avf_flow_error error;
236 int action_count = 0;
238 struct avf_flow_vc_ctx vc_ctx;
239 struct avf_fdir_conf *filter;
240 struct virtchnl_rss_cfg *rss_cfg;
241 struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
242 struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];
244 struct avf_ipv4_hdr ip4_spec = {}, ip4_mask = {};
245 struct avf_ipv6_hdr ip6_spec = {}, ip6_mask = {};
246 struct avf_tcp_hdr tcp_spec = {}, tcp_mask = {};
247 struct avf_udp_hdr udp_spec = {}, udp_mask = {};
248 struct avf_gtp_hdr gtp_spec = {}, gtp_mask = {};
249 struct avf_l2tpv3oip_hdr l2tpv3_spec = {}, l2tpv3_mask = {};
250 struct avf_esp_hdr esp_spec = {}, esp_mask = {};
251 struct avf_ah_hdr ah_spec = {}, ah_mask = {};
253 struct avf_flow_action_queue act_q = {};
254 struct avf_flow_action_mark act_msk = {};
255 struct avf_flow_action_rss act_rss = {};
264 } flow_class = FLOW_UNKNOWN_CLASS;
/* Classify the flow once; unknown classes are rejected up front. */
266 if (FLOW_IS_ETHERNET_CLASS (f))
267 flow_class = FLOW_ETHERNET_CLASS;
268 else if (FLOW_IS_IPV4_CLASS (f))
269 flow_class = FLOW_IPV4_CLASS;
270 else if (FLOW_IS_IPV6_CLASS (f))
271 flow_class = FLOW_IPV6_CLASS;
272 else if (FLOW_IS_GENERIC_CLASS (f))
273 flow_class = FLOW_GENERIC_CLASS;
275 return VNET_FLOW_ERROR_NOT_SUPPORTED;
277 ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
280 rv = VNET_FLOW_ERROR_INTERNAL;
284 ret = avf_rss_cfg_create (&rss_cfg, 0);
287 rv = VNET_FLOW_ERROR_INTERNAL;
291 /* init a virtual channel context */
292 vc_ctx.vc_hdl = &dev_instance;
293 vc_ctx.vc_op = avf_flow_vc_op_callback;
/* BUGFIX: was sizeof (avf_actions) -- the two arrays are sized by
 * different constants (VIRTCHNL_MAX_NUM_PROTO_HDRS vs
 * VIRTCHNL_MAX_NUM_ACTIONS), so avf_items was only partially zeroed
 * and later reads hit uninitialized memory. */
295 clib_memset (avf_items, 0, sizeof (avf_items));
296 clib_memset (avf_actions, 0, sizeof (avf_actions));
298 /* Handle generic flow first */
299 if (flow_class == FLOW_GENERIC_CLASS)
301 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_RAW;
302 avf_items[layer].is_generic = true;
303 avf_items[layer].spec = f->generic.pattern.spec;
304 avf_items[layer].mask = f->generic.pattern.mask;
/* Non-generic flows always start with an (unmatched) Ethernet layer. */
312 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_ETH;
313 avf_items[layer].spec = NULL;
314 avf_items[layer].mask = NULL;
317 if (flow_class == FLOW_IPV4_CLASS)
319 vnet_flow_ip4_t *ip4_ptr = &f->ip4;
322 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_IPV4;
323 avf_items[layer].spec = &ip4_spec;
324 avf_items[layer].mask = &ip4_mask;
/* An all-zero mask means "match any IPv4"; skip spec/mask in that case. */
327 if ((!ip4_ptr->src_addr.mask.as_u32) &&
328 (!ip4_ptr->dst_addr.mask.as_u32) && (!ip4_ptr->protocol.mask))
334 ip4_spec.src_addr = ip4_ptr->src_addr.addr.as_u32;
335 ip4_mask.src_addr = ip4_ptr->src_addr.mask.as_u32;
337 ip4_spec.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
338 ip4_mask.dst_addr = ip4_ptr->dst_addr.mask.as_u32;
340 ip4_spec.next_proto_id = ip4_ptr->protocol.prot;
341 ip4_mask.next_proto_id = ip4_ptr->protocol.mask;
/* L4 / tunnel flows also carry ports; stash them for the L4 layer below. */
344 if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
346 vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;
348 src_port = ip4_n_ptr->src_port.port;
349 dst_port = ip4_n_ptr->dst_port.port;
350 src_port_mask = ip4_n_ptr->src_port.mask;
351 dst_port_mask = ip4_n_ptr->dst_port.mask;
354 protocol = ip4_ptr->protocol.prot;
356 else if (flow_class == FLOW_IPV6_CLASS)
358 vnet_flow_ip6_t *ip6_ptr = &f->ip6;
361 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_IPV6;
362 avf_items[layer].spec = &ip6_spec;
363 avf_items[layer].mask = &ip6_mask;
/* Same "match any" shortcut for IPv6. */
366 if ((ip6_address_is_zero (&ip6_ptr->src_addr.mask)) &&
367 (ip6_address_is_zero (&ip6_ptr->dst_addr.mask)) &&
368 (!ip6_ptr->protocol.mask))
374 clib_memcpy (ip6_spec.src_addr, &ip6_ptr->src_addr.addr,
375 ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
376 clib_memcpy (ip6_mask.src_addr, &ip6_ptr->src_addr.mask,
377 ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
378 clib_memcpy (ip6_spec.dst_addr, &ip6_ptr->dst_addr.addr,
379 ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
380 clib_memcpy (ip6_mask.dst_addr, &ip6_ptr->dst_addr.mask,
381 ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
382 ip6_spec.proto = ip6_ptr->protocol.prot;
383 ip6_mask.proto = ip6_ptr->protocol.mask;
386 if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
388 vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;
390 src_port = ip6_n_ptr->src_port.port;
391 dst_port = ip6_n_ptr->dst_port.port;
392 src_port_mask = ip6_n_ptr->src_port.mask;
393 dst_port_mask = ip6_n_ptr->dst_port.mask;
396 protocol = ip6_ptr->protocol.prot;
/* L3-only flows have no next-header layer; dispatch on protocol below. */
399 if (FLOW_IS_L3_TYPE (f))
405 case IP_PROTOCOL_L2TP:
406 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_L2TPV3OIP;
407 avf_items[layer].spec = &l2tpv3_spec;
408 avf_items[layer].mask = &l2tpv3_mask;
411 vnet_flow_ip4_l2tpv3oip_t *l2tph = &f->ip4_l2tpv3oip;
412 l2tpv3_spec.session_id = clib_host_to_net_u32 (l2tph->session_id);
413 l2tpv3_mask.session_id = ~0;
416 case IP_PROTOCOL_IPSEC_ESP:
417 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_ESP;
418 avf_items[layer].spec = &esp_spec;
419 avf_items[layer].mask = &esp_mask;
422 vnet_flow_ip4_ipsec_esp_t *esph = &f->ip4_ipsec_esp;
423 esp_spec.spi = clib_host_to_net_u32 (esph->spi);
427 case IP_PROTOCOL_IPSEC_AH:
428 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_AH;
429 avf_items[layer].spec = &ah_spec;
430 avf_items[layer].mask = &ah_mask;
433 vnet_flow_ip4_ipsec_ah_t *ah = &f->ip4_ipsec_ah;
434 ah_spec.spi = clib_host_to_net_u32 (ah->spi);
438 case IP_PROTOCOL_TCP:
439 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_TCP;
440 avf_items[layer].spec = &tcp_spec;
441 avf_items[layer].mask = &tcp_mask;
/* Ports were captured from the n-tuple spec above (host order). */
446 tcp_spec.src_port = clib_host_to_net_u16 (src_port);
447 tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
451 tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
452 tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
456 case IP_PROTOCOL_UDP:
457 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_UDP;
458 avf_items[layer].spec = &udp_spec;
459 avf_items[layer].mask = &udp_mask;
464 udp_spec.src_port = clib_host_to_net_u16 (src_port);
465 udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
469 udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
470 udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
473 /* handle the UDP tunnels */
474 if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
476 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_GTPU;
477 avf_items[layer].spec = &gtp_spec;
478 avf_items[layer].mask = &gtp_mask;
481 vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
482 gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
/* default: protocol not programmable via FDIR. */
488 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
493 /* pattern end flag */
494 avf_items[layer].type = AVF_FLOW_ITEM_TYPE_END;
497 /* Only one 'fate' can be assigned */
498 if (f->actions & VNET_FLOW_ACTION_RSS)
501 avf_actions[action_count].conf = &act_rss;
502 avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_RSS;
504 avf_flow_convert_rss_types (f->rss_types, &act_rss.types);
/* AVF_ETH_HASH_FUNCTION_MAX is the converter's "unsupported" sentinel. */
506 if ((act_rss.func = avf_flow_convert_rss_func (f->rss_fun)) ==
507 AVF_ETH_HASH_FUNCTION_MAX)
509 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
515 /* convert rss queues to array */
516 avf_flow_convert_rss_queues (f->queue_index, f->queue_num, &act_rss);
524 if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
526 avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_QUEUE;
527 avf_actions[action_count].conf = &act_q;
529 act_q.index = f->redirect_queue;
/* A second fate on the same flow is an internal error. */
532 rv = VNET_FLOW_ERROR_INTERNAL;
541 if (f->actions & VNET_FLOW_ACTION_DROP)
543 avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_DROP;
544 avf_actions[action_count].conf = NULL;
548 rv = VNET_FLOW_ERROR_INTERNAL;
/* No fate requested: fall back to pass-through. */
558 avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_PASSTHRU;
559 avf_actions[action_count].conf = NULL;
/* MARK may be combined with any fate; fe->mark was assigned by caller. */
565 if (f->actions & VNET_FLOW_ACTION_MARK)
567 avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_MARK;
568 avf_actions[action_count].conf = &act_msk;
571 act_msk.id = fe->mark;
574 /* action end flag */
575 avf_actions[action_count].type = AVF_FLOW_ACTION_TYPE_END;
577 /* parse pattern and actions */
580 if (flow_class == FLOW_GENERIC_CLASS)
582 ret = avf_fdir_parse_generic_pattern (filter, avf_items, &error);
585 avf_log_err (ad, "avf fdir parse generic pattern failed: %s",
587 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
593 ret = avf_fdir_parse_pattern (filter, avf_items, &error);
596 avf_log_err (ad, "avf fdir parse pattern failed: %s",
598 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
603 ret = avf_fdir_parse_action (avf_actions, filter, &error);
606 avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
607 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
611 /* create flow rule, save rule */
612 ret = avf_fdir_rule_create (&vc_ctx, filter);
616 avf_log_err (ad, "avf fdir rule create failed: %s",
617 avf_fdir_prgm_error_decode (ret));
618 rv = VNET_FLOW_ERROR_INTERNAL;
/* FDIR path succeeded. */
624 fe->flow_type_flag = 1;
/* RSS path: parse then create the RSS config rule. */
630 avf_rss_parse_pattern_action (avf_items, avf_actions, rss_cfg, &error);
633 avf_log_err (ad, "avf rss parse pattern action failed: %s",
635 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
638 /* create flow rule, save rule */
639 ret = avf_rss_rule_create (&vc_ctx, rss_cfg);
643 avf_log_err (ad, "avf rss rule create failed");
644 rv = VNET_FLOW_ERROR_INTERNAL;
649 fe->rss_cfg = rss_cfg;
650 fe->flow_type_flag = 0;
/* VNET flow device-op entry point for the AVF interface: dispatches
 * ADD_FLOW / DEL_FLOW requests, manages the per-device flow-entry and
 * mark-lookup pools, and toggles the RX flow-offload device flag. */
660 avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
661 u32 flow_index, uword *private_data)
663 vnet_flow_t *flow = vnet_get_flow (flow_index);
664 avf_device_t *ad = avf_get_device (dev_instance);
665 avf_flow_entry_t *fe = NULL;
666 avf_flow_lookup_entry_t *fle = NULL;
/* Device must advertise PF-side flow director support. */
669 if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
671 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
675 if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
677 pool_get (ad->flow_entries, fe);
678 fe->flow_index = flow->index;
680 /* if we need to mark packets, assign one mark */
682 (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
683 VNET_FLOW_ACTION_BUFFER_ADVANCE))
/* First allocation burns one extra pool slot -- presumably to reserve
 * index 0 so that mark 0 can mean "no mark"; confirm against the RX
 * node's mark handling. */
686 if (ad->flow_lookup_entries == 0)
687 pool_get_aligned (ad->flow_lookup_entries, fle,
688 CLIB_CACHE_LINE_BYTES);
689 pool_get_aligned (ad->flow_lookup_entries, fle,
690 CLIB_CACHE_LINE_BYTES);
691 fe->mark = fle - ad->flow_lookup_entries;
693 /* install entry in the lookup table */
694 clib_memset (fle, -1, sizeof (*fle));
695 if (flow->actions & VNET_FLOW_ACTION_MARK)
696 fle->flow_id = flow->mark_flow_id;
697 if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
698 fle->next_index = flow->redirect_device_input_next_index;
699 if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
700 fle->buffer_advance = flow->buffer_advance;
/* First flow on this device: enable RX flow offload. */
702 if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
704 ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
/* Only these flow types are programmable; everything else is rejected. */
712 case VNET_FLOW_TYPE_IP4:
713 case VNET_FLOW_TYPE_IP6:
714 case VNET_FLOW_TYPE_IP4_N_TUPLE:
715 case VNET_FLOW_TYPE_IP6_N_TUPLE:
716 case VNET_FLOW_TYPE_IP4_GTPU:
717 case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
718 case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
719 case VNET_FLOW_TYPE_IP4_IPSEC_AH:
720 case VNET_FLOW_TYPE_GENERIC:
721 if ((rv = avf_flow_add (dev_instance, flow, fe)))
725 rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
/* Hand the pool index back to vnet as the private handle. */
729 *private_data = fe - ad->flow_entries;
731 else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
733 fe = vec_elt_at_index (ad->flow_entries, *private_data);
735 struct avf_flow_vc_ctx ctx;
736 ctx.vc_hdl = &dev_instance;
737 ctx.vc_op = avf_flow_vc_op_callback;
/* flow_type_flag: 1 = FDIR rule, 0 = RSS config (set by avf_flow_add). */
739 if (fe->flow_type_flag)
741 rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
743 return VNET_FLOW_ERROR_INTERNAL;
747 rv = avf_rss_rule_destroy (&ctx, fe->rss_cfg);
749 return VNET_FLOW_ERROR_INTERNAL;
/* Release the mark lookup slot, then the configs and the entry itself. */
754 fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
755 clib_memset (fle, -1, sizeof (*fle));
756 pool_put_index (ad->flow_lookup_entries, fe->mark);
759 (void) avf_fdir_rcfg_destroy (fe->rcfg);
760 (void) avf_rss_rcfg_destroy (fe->rss_cfg);
761 clib_memset (fe, 0, sizeof (*fe));
762 pool_put (ad->flow_entries, fe);
763 goto disable_rx_offload;
766 return VNET_FLOW_ERROR_NOT_SUPPORTED;
/* Error-path cleanup: undo the entry / lookup-slot allocations above. */
773 clib_memset (fe, 0, sizeof (*fe));
774 pool_put (ad->flow_entries, fe);
779 clib_memset (fle, -1, sizeof (*fle));
780 pool_put (ad->flow_lookup_entries, fle);
/* Drop the offload flag once the last flow is gone. */
784 if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
785 pool_elts (ad->flow_entries) == 0)
787 ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;
794 * fd.io coding-style-patch-verification: ON
797 * eval: (c-set-style "gnu")