/*
 *------------------------------------------------------------------
 * Copyright (c) 2020 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *------------------------------------------------------------------
 */
#include <vlib/vlib.h>
#include <vppinfra/ring.h>
#include <vlib/unix/unix.h>
#include <vlib/pci/pci.h>
#include <vnet/ethernet/ethernet.h>

#include <avf/avf.h>
#include <avf/avf_advanced_flow.h>
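
/* AVF flow offload: translates vnet flow rules into virtchnl FDIR and
 * RSS requests and programs them on the VF over the advanced-flow
 * virtual channel. */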
#define FLOW_IS_ETHERNET_CLASS(f) (f->type == VNET_FLOW_TYPE_ETHERNET)

#define FLOW_IS_IPV4_CLASS(f)                                                 \
  ((f->type == VNET_FLOW_TYPE_IP4) ||                                         \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP4_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_L2TPV3OIP) ||                               \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_ESP) ||                               \
   (f->type == VNET_FLOW_TYPE_IP4_IPSEC_AH))

#define FLOW_IS_IPV6_CLASS(f)                                                 \
  ((f->type == VNET_FLOW_TYPE_IP6) ||                                         \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN))

#define FLOW_IS_GENERIC_CLASS(f) (f->type == VNET_FLOW_TYPE_GENERIC)

/* check if flow is L3 type */
#define FLOW_IS_L3_TYPE(f)                                                    \
  ((f->type == VNET_FLOW_TYPE_IP4) || (f->type == VNET_FLOW_TYPE_IP6))

/* check if flow is L4 type */
#define FLOW_IS_L4_TYPE(f)                                                    \
  ((f->type == VNET_FLOW_TYPE_IP4_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE) ||                                 \
   (f->type == VNET_FLOW_TYPE_IP4_N_TUPLE_TAGGED) ||                          \
   (f->type == VNET_FLOW_TYPE_IP6_N_TUPLE_TAGGED))

/* check if flow is L4 tunnel type */
#define FLOW_IS_L4_TUNNEL_TYPE(f)                                             \
  ((f->type == VNET_FLOW_TYPE_IP4_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP6_VXLAN) ||                                   \
   (f->type == VNET_FLOW_TYPE_IP4_GTPC) ||                                    \
   (f->type == VNET_FLOW_TYPE_IP4_GTPU))
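
/* Virtual channel callback shared by the FDIR and RSS helpers below:
 * vc_hdl carries a pointer to the device instance, vc_op selects
 * add/delete, and the request is relayed to the PF via
 * avf_program_flow (). */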
static inline void
avf_flow_vc_op_callback (void *vc_hdl, enum virthnl_adv_ops vc_op, void *in,
			 u32 in_len, void *out, u32 out_len)
{
  u32 dev_instance = *(u32 *) vc_hdl;
  avf_device_t *ad = avf_get_device (dev_instance);
  clib_error_t *err = 0;
  int is_add;

  if (vc_op >= VIRTCHNL_ADV_OP_MAX)
    return;

  switch (vc_op)
    {
    case VIRTCHNL_ADV_OP_ADD_FDIR_FILTER:
    case VIRTCHNL_ADV_OP_ADD_RSS_CFG:
      is_add = 1;
      break;
    case VIRTCHNL_ADV_OP_DEL_FDIR_FILTER:
    case VIRTCHNL_ADV_OP_DEL_RSS_CFG:
      is_add = 0;
      break;
    default:
      avf_log_err (ad, "unsupported avf virtual channel opcode %u\n", vc_op);
      return;
    }

  err =
    avf_program_flow (dev_instance, is_add, vc_op, in, in_len, out, out_len);
  if (err != 0)
    {
      avf_log_err (ad, "avf flow program failed: %U", format_clib_error, err);
      clib_error_free (err);
    }
  else
    avf_log_debug (ad, "avf flow program success");
}
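
/* Map a vnet RSS hash function onto the AVF hash function enum;
 * AVF_ETH_HASH_FUNCTION_MAX signals an unsupported function. */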
static inline enum avf_eth_hash_function
avf_flow_convert_rss_func (vnet_rss_function_t func)
{
  enum avf_eth_hash_function rss_func;

  switch (func)
    {
    case VNET_RSS_FUNC_DEFAULT:
      rss_func = AVF_ETH_HASH_FUNCTION_DEFAULT;
      break;
    case VNET_RSS_FUNC_TOEPLITZ:
      rss_func = AVF_ETH_HASH_FUNCTION_TOEPLITZ;
      break;
    case VNET_RSS_FUNC_SIMPLE_XOR:
      rss_func = AVF_ETH_HASH_FUNCTION_SIMPLE_XOR;
      break;
    case VNET_RSS_FUNC_SYMMETRIC_TOEPLITZ:
      rss_func = AVF_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ;
      break;
    default:
      rss_func = AVF_ETH_HASH_FUNCTION_MAX;
      break;
    }

  return rss_func;
}
/** Maximum number of queue indices in struct avf_flow_action_rss. */
#define ACTION_RSS_QUEUE_NUM 128
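
/* Expand a contiguous (queue_index, queue_num) range into the explicit
 * queue array expected by struct avf_flow_action_rss. The array is
 * allocated at full ACTION_RSS_QUEUE_NUM size and is not freed by this
 * function. */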
static inline void
avf_flow_convert_rss_queues (u32 queue_index, u32 queue_num,
			     struct avf_flow_action_rss *act_rss)
{
  u16 *queues = clib_mem_alloc (sizeof (*queues) * ACTION_RSS_QUEUE_NUM);
  int i;

  for (i = 0; i < queue_num; i++)
    queues[i] = queue_index++;

  act_rss->queue_num = queue_num;
  act_rss->queue = queues;
}
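
/* Convert the ASCII-hex spec/mask strings of a generic flow item into
 * byte arrays, two hex digits per output byte; e.g. a spec of "0806"
 * becomes pkt_buf = { 0x08, 0x06 }. spec_len is the hex-string length,
 * so pkt_buf/msk_buf must each hold spec_len / 2 bytes. Characters
 * outside [0-9a-fA-F] are not rejected here; callers are expected to
 * pass validated input. */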
void
avf_parse_generic_pattern (struct avf_flow_item *item, u8 *pkt_buf,
			   u8 *msk_buf, u16 spec_len)
{
  u8 *raw_spec, *raw_mask;
  u8 tmp_val = 0;
  u8 tmp_c = 0;
  int i, j;

  raw_spec = (u8 *) item->spec;
  raw_mask = (u8 *) item->mask;

  /* convert string to int array */
  for (i = 0, j = 0; i < spec_len; i += 2, j++)
    {
      /* high nibble of the spec byte */
      tmp_c = raw_spec[i];
      if (tmp_c >= 'a' && tmp_c <= 'f')
	tmp_val = tmp_c - 'a' + 10;
      if (tmp_c >= 'A' && tmp_c <= 'F')
	tmp_val = tmp_c - 'A' + 10;
      if (tmp_c >= '0' && tmp_c <= '9')
	tmp_val = tmp_c - '0';

      /* low nibble of the spec byte */
      tmp_c = raw_spec[i + 1];
      if (tmp_c >= 'a' && tmp_c <= 'f')
	pkt_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
      if (tmp_c >= 'A' && tmp_c <= 'F')
	pkt_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
      if (tmp_c >= '0' && tmp_c <= '9')
	pkt_buf[j] = tmp_val * 16 + tmp_c - '0';

      /* high nibble of the mask byte (0x57 == 'a' - 10, 0x37 == 'A' - 10) */
      tmp_c = raw_mask[i];
      if (tmp_c >= 'a' && tmp_c <= 'f')
	tmp_val = tmp_c - 0x57;
      if (tmp_c >= 'A' && tmp_c <= 'F')
	tmp_val = tmp_c - 0x37;
      if (tmp_c >= '0' && tmp_c <= '9')
	tmp_val = tmp_c - '0';

      /* low nibble of the mask byte */
      tmp_c = raw_mask[i + 1];
      if (tmp_c >= 'a' && tmp_c <= 'f')
	msk_buf[j] = tmp_val * 16 + tmp_c - 'a' + 10;
      if (tmp_c >= 'A' && tmp_c <= 'F')
	msk_buf[j] = tmp_val * 16 + tmp_c - 'A' + 10;
      if (tmp_c >= '0' && tmp_c <= '9')
	msk_buf[j] = tmp_val * 16 + tmp_c - '0';
    }
}
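
/* Translate one vnet_flow_t into a virtchnl protocol-header chain plus
 * action list and program the VF: flows with an explicit fate (queue,
 * drop, queue region) become FDIR rules, a plain RSS action becomes an
 * RSS config. On success the created config is saved in the flow entry
 * so the rule can be destroyed later. */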
static int
avf_flow_add (u32 dev_instance, vnet_flow_t *f, avf_flow_entry_t *fe)
{
  avf_device_t *ad = avf_get_device (dev_instance);
  int rv = 0;
  int ret = 0;
  u16 src_port = 0, dst_port = 0;
  u16 src_port_mask = 0, dst_port_mask = 0;
  u8 protocol = IP_PROTOCOL_RESERVED;
  bool fate = false;
  bool is_fdir = true;
  struct avf_flow_error error;

  int layer = 0;
  int action_count = 0;

  struct avf_flow_vc_ctx vc_ctx;
  struct avf_fdir_conf *filter;
  struct virtchnl_rss_cfg *rss_cfg;
  struct avf_flow_item avf_items[VIRTCHNL_MAX_NUM_PROTO_HDRS];
  struct avf_flow_action avf_actions[VIRTCHNL_MAX_NUM_ACTIONS];

  struct avf_ipv4_hdr ip4_spec = {}, ip4_mask = {};
  struct avf_ipv6_hdr ip6_spec = {}, ip6_mask = {};
  struct avf_tcp_hdr tcp_spec = {}, tcp_mask = {};
  struct avf_udp_hdr udp_spec = {}, udp_mask = {};
  struct avf_gtp_hdr gtp_spec = {}, gtp_mask = {};
  struct avf_l2tpv3oip_hdr l2tpv3_spec = {}, l2tpv3_mask = {};
  struct avf_esp_hdr esp_spec = {}, esp_mask = {};
  struct avf_ah_hdr ah_spec = {}, ah_mask = {};

  struct avf_flow_action_queue act_q = {};
  struct avf_flow_action_mark act_msk = {};
  struct avf_flow_action_rss act_rss = {};

  enum
  {
    FLOW_UNKNOWN_CLASS,
    FLOW_ETHERNET_CLASS,
    FLOW_IPV4_CLASS,
    FLOW_IPV6_CLASS,
    FLOW_GENERIC_CLASS,
  } flow_class = FLOW_UNKNOWN_CLASS;
  if (FLOW_IS_ETHERNET_CLASS (f))
    flow_class = FLOW_ETHERNET_CLASS;
  else if (FLOW_IS_IPV4_CLASS (f))
    flow_class = FLOW_IPV4_CLASS;
  else if (FLOW_IS_IPV6_CLASS (f))
    flow_class = FLOW_IPV6_CLASS;
  else if (FLOW_IS_GENERIC_CLASS (f))
    flow_class = FLOW_GENERIC_CLASS;
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
  ret = avf_fdir_rcfg_create (&filter, 0, ad->vsi_id, ad->n_rx_queues);
  if (ret)
    {
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  ret = avf_rss_cfg_create (&rss_cfg, 0);
  if (ret)
    {
      rv = VNET_FLOW_ERROR_INTERNAL;
      goto done;
    }

  /* init a virtual channel context */
  vc_ctx.vc_hdl = &dev_instance;
  vc_ctx.vc_op = avf_flow_vc_op_callback;

  clib_memset (avf_items, 0, sizeof (avf_items));
  clib_memset (avf_actions, 0, sizeof (avf_actions));
  /* handle generic flow first */
  if (flow_class == FLOW_GENERIC_CLASS)
    {
      avf_items[layer].is_generic = true;
      avf_items[layer].spec = f->generic.pattern.spec;
      avf_items[layer].mask = f->generic.pattern.mask;
      layer++;

      goto pattern_end;
    }

  /* ethernet layer */
  avf_items[layer].type = VIRTCHNL_PROTO_HDR_ETH;
  avf_items[layer].spec = NULL;
  avf_items[layer].mask = NULL;
  layer++;
  if (flow_class == FLOW_IPV4_CLASS)
    {
      vnet_flow_ip4_t *ip4_ptr = &f->ip4;

      /* IPv4 layer */
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV4;
      avf_items[layer].spec = &ip4_spec;
      avf_items[layer].mask = &ip4_mask;
      layer++;

      /* an all-zero 5-tuple leaves the spec/mask empty (match any) */
      if (ip4_ptr->src_addr.mask.as_u32 || ip4_ptr->dst_addr.mask.as_u32 ||
	  ip4_ptr->protocol.mask)
	{
	  ip4_spec.src_addr = ip4_ptr->src_addr.addr.as_u32;
	  ip4_mask.src_addr = ip4_ptr->src_addr.mask.as_u32;

	  ip4_spec.dst_addr = ip4_ptr->dst_addr.addr.as_u32;
	  ip4_mask.dst_addr = ip4_ptr->dst_addr.mask.as_u32;

	  ip4_spec.next_proto_id = ip4_ptr->protocol.prot;
	  ip4_mask.next_proto_id = ip4_ptr->protocol.mask;
	}

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
	{
	  vnet_flow_ip4_n_tuple_t *ip4_n_ptr = &f->ip4_n_tuple;

	  src_port = ip4_n_ptr->src_port.port;
	  dst_port = ip4_n_ptr->dst_port.port;
	  src_port_mask = ip4_n_ptr->src_port.mask;
	  dst_port_mask = ip4_n_ptr->dst_port.mask;
	}

      protocol = ip4_ptr->protocol.prot;
    }
  else if (flow_class == FLOW_IPV6_CLASS)
    {
      vnet_flow_ip6_t *ip6_ptr = &f->ip6;

      /* IPv6 layer */
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_IPV6;
      avf_items[layer].spec = &ip6_spec;
      avf_items[layer].mask = &ip6_mask;
      layer++;

      /* an all-zero 5-tuple leaves the spec/mask empty (match any) */
      if (!ip6_address_is_zero (&ip6_ptr->src_addr.mask) ||
	  !ip6_address_is_zero (&ip6_ptr->dst_addr.mask) ||
	  ip6_ptr->protocol.mask)
	{
	  clib_memcpy (ip6_spec.src_addr, &ip6_ptr->src_addr.addr,
		       ARRAY_LEN (ip6_ptr->src_addr.addr.as_u8));
	  clib_memcpy (ip6_mask.src_addr, &ip6_ptr->src_addr.mask,
		       ARRAY_LEN (ip6_ptr->src_addr.mask.as_u8));
	  clib_memcpy (ip6_spec.dst_addr, &ip6_ptr->dst_addr.addr,
		       ARRAY_LEN (ip6_ptr->dst_addr.addr.as_u8));
	  clib_memcpy (ip6_mask.dst_addr, &ip6_ptr->dst_addr.mask,
		       ARRAY_LEN (ip6_ptr->dst_addr.mask.as_u8));
	  ip6_spec.proto = ip6_ptr->protocol.prot;
	  ip6_mask.proto = ip6_ptr->protocol.mask;
	}

      if (FLOW_IS_L4_TYPE (f) || FLOW_IS_L4_TUNNEL_TYPE (f))
	{
	  vnet_flow_ip6_n_tuple_t *ip6_n_ptr = &f->ip6_n_tuple;

	  src_port = ip6_n_ptr->src_port.port;
	  dst_port = ip6_n_ptr->dst_port.port;
	  src_port_mask = ip6_n_ptr->src_port.mask;
	  dst_port_mask = ip6_n_ptr->dst_port.mask;
	}

      protocol = ip6_ptr->protocol.prot;
    }
  if (FLOW_IS_L3_TYPE (f))
    goto pattern_end;

  /* layer 4 */
  switch (protocol)
    {
    case IP_PROTOCOL_L2TP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_L2TPV3;
      avf_items[layer].spec = &l2tpv3_spec;
      avf_items[layer].mask = &l2tpv3_mask;
      layer++;

      vnet_flow_ip4_l2tpv3oip_t *l2tph = &f->ip4_l2tpv3oip;
      l2tpv3_spec.session_id = clib_host_to_net_u32 (l2tph->session_id);
      l2tpv3_mask.session_id = ~0;
      break;

    case IP_PROTOCOL_IPSEC_ESP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_ESP;
      avf_items[layer].spec = &esp_spec;
      avf_items[layer].mask = &esp_mask;
      layer++;

      vnet_flow_ip4_ipsec_esp_t *esph = &f->ip4_ipsec_esp;
      esp_spec.spi = clib_host_to_net_u32 (esph->spi);
      esp_mask.spi = ~0;
      break;

    case IP_PROTOCOL_IPSEC_AH:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_AH;
      avf_items[layer].spec = &ah_spec;
      avf_items[layer].mask = &ah_mask;
      layer++;

      vnet_flow_ip4_ipsec_ah_t *ah = &f->ip4_ipsec_ah;
      ah_spec.spi = clib_host_to_net_u32 (ah->spi);
      ah_mask.spi = ~0;
      break;

    case IP_PROTOCOL_TCP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_TCP;
      avf_items[layer].spec = &tcp_spec;
      avf_items[layer].mask = &tcp_mask;
      layer++;

      if (src_port_mask)
	{
	  tcp_spec.src_port = clib_host_to_net_u16 (src_port);
	  tcp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
	}
      if (dst_port_mask)
	{
	  tcp_spec.dst_port = clib_host_to_net_u16 (dst_port);
	  tcp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
	}
      break;

    case IP_PROTOCOL_UDP:
      avf_items[layer].type = VIRTCHNL_PROTO_HDR_UDP;
      avf_items[layer].spec = &udp_spec;
      avf_items[layer].mask = &udp_mask;
      layer++;

      if (src_port_mask)
	{
	  udp_spec.src_port = clib_host_to_net_u16 (src_port);
	  udp_mask.src_port = clib_host_to_net_u16 (src_port_mask);
	}
      if (dst_port_mask)
	{
	  udp_spec.dst_port = clib_host_to_net_u16 (dst_port);
	  udp_mask.dst_port = clib_host_to_net_u16 (dst_port_mask);
	}

      /* handle the UDP tunnels */
      if (f->type == VNET_FLOW_TYPE_IP4_GTPU)
	{
	  avf_items[layer].type = VIRTCHNL_PROTO_HDR_GTPU_IP;
	  avf_items[layer].spec = &gtp_spec;
	  avf_items[layer].mask = &gtp_mask;
	  layer++;

	  vnet_flow_ip4_gtpu_t *gu = &f->ip4_gtpu;
	  gtp_spec.teid = clib_host_to_net_u32 (gu->teid);
	  gtp_mask.teid = ~0;
	}
      break;

    default:
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
pattern_end:
  /* pattern end flag */
  avf_items[layer].type = VIRTCHNL_PROTO_HDR_NONE;
  /* Only one 'fate' can be assigned */
  if (f->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_QUEUE;
      avf_actions[action_count].conf = &act_q;

      act_q.index = f->redirect_queue;
      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_DROP)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_DROP;
      avf_actions[action_count].conf = NULL;

      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_RSS)
    {
      avf_actions[action_count].conf = &act_rss;
      avf_actions[action_count].type = VIRTCHNL_ACTION_RSS;
      is_fdir = false;

      if ((act_rss.func = avf_flow_convert_rss_func (f->rss_fun)) ==
	  AVF_ETH_HASH_FUNCTION_MAX)
	{
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      if (f->queue_num)
	{
	  /* convert rss queues to array */
	  avf_flow_convert_rss_queues (f->queue_index, f->queue_num,
				       &act_rss);
	  avf_actions[action_count].type = VIRTCHNL_ACTION_Q_REGION;
	  is_fdir = true;
	}

      if (fate == true)
	{
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}
      else
	fate = true;
      action_count++;
    }

  if (fate == false)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_PASSTHRU;
      avf_actions[action_count].conf = NULL;

      fate = true;
      action_count++;
    }

  if (f->actions & VNET_FLOW_ACTION_MARK)
    {
      avf_actions[action_count].type = VIRTCHNL_ACTION_MARK;
      avf_actions[action_count].conf = &act_msk;
      action_count++;

      act_msk.id = fe->mark;
    }

  /* action end flag */
  avf_actions[action_count].type = VIRTCHNL_ACTION_NONE;
  /* parse pattern and actions */
  if (is_fdir)
    {
      if (flow_class == FLOW_GENERIC_CLASS)
	{
	  ret = avf_fdir_parse_generic_pattern (filter, avf_items, &error);
	  if (ret)
	    {
	      avf_log_err (ad, "avf fdir parse generic pattern failed: %s",
			   error.message);
	      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	      goto done;
	    }
	}
      else
	{
	  ret = avf_fdir_parse_pattern (filter, avf_items, &error);
	  if (ret)
	    {
	      avf_log_err (ad, "avf fdir parse pattern failed: %s",
			   error.message);
	      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	      goto done;
	    }
	}

      ret = avf_fdir_parse_action (avf_actions, filter, &error);
      if (ret)
	{
	  avf_log_err (ad, "avf fdir parse action failed: %s", error.message);
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      /* create flow rule, save rule */
      ret = avf_fdir_rule_create (&vc_ctx, filter);

      if (ret)
	{
	  avf_log_err (ad, "avf fdir rule create failed: %s",
		       avf_fdir_prgm_error_decode (ret));
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}

      fe->rcfg = filter;
      fe->flow_type_flag = 1;
    }
  else
    {
      if (flow_class == FLOW_GENERIC_CLASS)
	{
	  ret = avf_rss_parse_generic_pattern (rss_cfg, avf_items, &error);
	  if (ret)
	    {
	      avf_log_err (ad, "avf rss parse generic pattern failed: %s",
			   error.message);
	      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	      goto done;
	    }
	}
      else
	{
	  ret = avf_rss_parse_pattern (rss_cfg, avf_items, &error);
	  if (ret)
	    {
	      avf_log_err (ad,
			   "avf rss is not supported except generic flow");
	      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	      goto done;
	    }
	}

      ret = avf_rss_parse_action (avf_actions, rss_cfg, &error);
      if (ret)
	{
	  avf_log_err (ad, "avf rss parse action failed: %s", error.message);
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      /* create flow rule, save rule */
      ret = avf_rss_rule_create (&vc_ctx, rss_cfg);

      if (ret)
	{
	  avf_log_err (ad, "avf rss rule create failed");
	  rv = VNET_FLOW_ERROR_INTERNAL;
	  goto done;
	}

      fe->rss_cfg = rss_cfg;
      fe->flow_type_flag = 0;
    }

done:
  return rv;
}
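
/* vnet flow add/del entry point, registered as the flow_ops_function of
 * the AVF device class (see device.c). Adding a flow allocates a flow
 * entry and, when the flow must attach metadata to packets (mark,
 * redirect-to-node, buffer advance), a lookup entry whose pool index
 * becomes the rule's mark id used on the rx path. */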
int
avf_flow_ops_fn (vnet_main_t *vm, vnet_flow_dev_op_t op, u32 dev_instance,
		 u32 flow_index, uword *private_data)
{
  vnet_flow_t *flow = vnet_get_flow (flow_index);
  avf_device_t *ad = avf_get_device (dev_instance);
  avf_flow_entry_t *fe = NULL;
  avf_flow_lookup_entry_t *fle = NULL;
  int rv = 0;

  if ((ad->cap_flags & VIRTCHNL_VF_OFFLOAD_FDIR_PF) == 0)
    {
      rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
      goto done;
    }
  if (op == VNET_FLOW_DEV_OP_ADD_FLOW)
    {
      pool_get (ad->flow_entries, fe);
      fe->flow_index = flow->index;

      /* if we need to mark packets, assign one mark */
      if (flow->actions &
	  (VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
	   VNET_FLOW_ACTION_BUFFER_ADVANCE))
	{
	  /* mark 0 means "no mark", so burn the first pool slot to keep
	   * real marks non-zero */
	  if (ad->flow_lookup_entries == 0)
	    pool_get_aligned (ad->flow_lookup_entries, fle,
			      CLIB_CACHE_LINE_BYTES);
	  pool_get_aligned (ad->flow_lookup_entries, fle,
			    CLIB_CACHE_LINE_BYTES);
	  fe->mark = fle - ad->flow_lookup_entries;

	  /* install entry in the lookup table */
	  clib_memset (fle, -1, sizeof (*fle));
	  if (flow->actions & VNET_FLOW_ACTION_MARK)
	    fle->flow_id = flow->mark_flow_id;
	  if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_NODE)
	    fle->next_index = flow->redirect_device_input_next_index;
	  if (flow->actions & VNET_FLOW_ACTION_BUFFER_ADVANCE)
	    fle->buffer_advance = flow->buffer_advance;

	  if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) == 0)
	    ad->flags |= AVF_DEVICE_F_RX_FLOW_OFFLOAD;
	}
      else
	fe->mark = 0;

      switch (flow->type)
	{
	case VNET_FLOW_TYPE_IP4:
	case VNET_FLOW_TYPE_IP6:
	case VNET_FLOW_TYPE_IP4_N_TUPLE:
	case VNET_FLOW_TYPE_IP6_N_TUPLE:
	case VNET_FLOW_TYPE_IP4_GTPU:
	case VNET_FLOW_TYPE_IP4_L2TPV3OIP:
	case VNET_FLOW_TYPE_IP4_IPSEC_ESP:
	case VNET_FLOW_TYPE_IP4_IPSEC_AH:
	case VNET_FLOW_TYPE_GENERIC:
	  if ((rv = avf_flow_add (dev_instance, flow, fe)))
	    goto done;
	  break;
	default:
	  rv = VNET_FLOW_ERROR_NOT_SUPPORTED;
	  goto done;
	}

      *private_data = fe - ad->flow_entries;
    }
  else if (op == VNET_FLOW_DEV_OP_DEL_FLOW)
    {
      fe = vec_elt_at_index (ad->flow_entries, *private_data);

      struct avf_flow_vc_ctx ctx;
      ctx.vc_hdl = &dev_instance;
      ctx.vc_op = avf_flow_vc_op_callback;

      if (fe->flow_type_flag)
	{
	  rv = avf_fdir_rule_destroy (&ctx, fe->rcfg);
	  if (rv)
	    return VNET_FLOW_ERROR_INTERNAL;
	}
      else
	{
	  rv = avf_rss_rule_destroy (&ctx, fe->rss_cfg);
	  if (rv)
	    return VNET_FLOW_ERROR_INTERNAL;
	}

      if (fe->mark)
	{
	  fle = pool_elt_at_index (ad->flow_lookup_entries, fe->mark);
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put_index (ad->flow_lookup_entries, fe->mark);
	}

      (void) avf_fdir_rcfg_destroy (fe->rcfg);
      (void) avf_rss_rcfg_destroy (fe->rss_cfg);
      clib_memset (fe, 0, sizeof (*fe));
      pool_put (ad->flow_entries, fe);
      goto disable_rx_offload;
    }
  else
    return VNET_FLOW_ERROR_NOT_SUPPORTED;
done:
  if (rv)
    {
      if (fe)
	{
	  clib_memset (fe, 0, sizeof (*fe));
	  pool_put (ad->flow_entries, fe);
	}

      if (fle)
	{
	  clib_memset (fle, -1, sizeof (*fle));
	  pool_put (ad->flow_lookup_entries, fle);
	}
    }

disable_rx_offload:
  if ((ad->flags & AVF_DEVICE_F_RX_FLOW_OFFLOAD) != 0 &&
      pool_elts (ad->flow_entries) == 0)
    ad->flags &= ~AVF_DEVICE_F_RX_FLOW_OFFLOAD;

  return rv;
}
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */