New upstream version 18.02
drivers/net/bnxt/bnxt_filter.c (deb_dpdk.git)
/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

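/*
 * Take a filter from the free pool and initialize it as an L2 filter
 * for the port's primary MAC address. Returns NULL when the pool is
 * exhausted.
 */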
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        /* Default to an L2 MAC address filter */
        filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
        memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
               ETHER_ADDR_LEN);
        memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        return filter;
}

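/*
 * Allocate a filter for the given VF and add it to that VF's filter
 * list. Unlike the per-port pool, VF filters are allocated on demand.
 */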
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
        struct bnxt_filter_info *filter;

        filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
        if (!filter) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
                        vf);
                return NULL;
        }

        filter->fw_l2_filter_id = UINT64_MAX;
        STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
        return filter;
}

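/*
 * Mark every entry of the pre-allocated filter array as unused and
 * place it on the free list.
 */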
void bnxt_init_filters(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        int i, max_filters;

        max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                filter->fw_l2_filter_id = -1;
                filter->fw_em_filter_id = -1;
                filter->fw_ntuple_filter_id = -1;
                STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
        }
}

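/*
 * Move every per-VNIC filter back to the free pool and clear the HWRM
 * L2 filters that were programmed on behalf of VFs.
 */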
void bnxt_free_all_filters(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                temp_filter = STAILQ_NEXT(filter, next);
                                STAILQ_REMOVE(&vnic->filter, filter,
                                              bnxt_filter_info, next);
                                STAILQ_INSERT_TAIL(&bp->free_filter_list,
                                                   filter, next);
                                filter = temp_filter;
                        }
                        STAILQ_INIT(&vnic->filter);
                }
        }

        for (i = 0; i < bp->pf.max_vfs; i++) {
                STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
                        bnxt_hwrm_clear_l2_filter(bp, filter);
                }
        }
}

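/*
 * Release the filter array. Any filter that the firmware still knows
 * about is cleared first.
 */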
void bnxt_free_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        uint16_t max_filters, i;
        int rc = 0;

        if (bp->filter_info == NULL)
                return;

        /* Ensure that all filters are freed */
        max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != UINT64_MAX) {
                        PMD_DRV_LOG(ERR, "HWRM filter was not freed\n");
                        /* Call HWRM to try to free the filter again */
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                        if (rc)
                                PMD_DRV_LOG(ERR,
                                       "HWRM filter cannot be freed rc = %d\n",
                                        rc);
                }
                filter->fw_l2_filter_id = UINT64_MAX;
        }
        STAILQ_INIT(&bp->free_filter_list);

        rte_free(bp->filter_info);
        bp->filter_info = NULL;
}

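/*
 * Allocate the array backing the filter pool, sized by the number of
 * L2 contexts reported by the firmware.
 */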
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;

        max_filters = bp->max_l2_ctx;
        /* Allocate memory for VNIC pool and filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
                                 0);
        if (filter_mem == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters\n",
                        max_filters);
                return -ENOMEM;
        }
        bp->filter_info = filter_mem;
        return 0;
}

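/*
 * Pop a filter from the free pool. Unlike bnxt_alloc_filter(), the
 * returned filter is not initialized for L2 matching.
 */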
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        return filter;
}

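/* Return a filter to the free pool. */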
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}

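/* Reject NULL attribute, pattern, or action arrays up front. */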
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

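/* Skip leading VOID items/actions; the pattern and action arrays are
 * END-terminated, so these loops always terminate.
 */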
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

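/* Return 1 if the first len bytes of the buffer are all zero, else 0. */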
int check_zero_bytes(const uint8_t *bytes, int len)
{
        int i;

        for (i = 0; i < len; i++)
                if (bytes[i] != 0x00)
                        return 0;
        return 1;
}

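/*
 * Decide which HW filter type the pattern needs: returns 1 when an
 * n-tuple filter can be used, 0 when an exact-match (EM) filter is
 * required (VLAN matching), or -rte_errno on an invalid combination.
 */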
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* These need an ntuple match; a VLAN item seen
                         * earlier forces exact match, which is incompatible.
                         */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type\n");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}

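/*
 * Walk the pattern items, validate their masks, and translate them
 * into the match fields and "enables" bitmap of the filter. Ranges
 * (item->last) are not supported.
 */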
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;

                        /* Source and destination MAC address masks must
                         * not be partially set: each must be either all
                         * 0's or all 1's.
                         */
                        if ((!is_zero_ether_addr(&eth_mask->src) &&
                             !is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* EtherType masks are not allowed; only exact
                         * matches are.
                         */
                        if (eth_mask->type != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, 6);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }
                        if (is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, 6);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }
                        if (eth_spec->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                                        EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec =
                                (const struct rte_flow_item_vlan *)item->spec;
                        vlan_mask =
                                (const struct rte_flow_item_vlan *)item->mask;
                        if (vlan_mask->tci && !vlan_mask->tpid) {
                                /* Only the VLAN ID can be matched. TCI is
                                 * big-endian; convert it before masking out
                                 * the 12-bit VID.
                                 */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci) &
                                        0xFFF;
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return -rte_errno;
                        }
                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);
                        if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                return -rte_errno;
                        }
                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec =
                                (const struct rte_flow_item_vxlan *)item->spec;
                        vxlan_mask =
                                (const struct rte_flow_item_vxlan *)item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec =
                                (const struct rte_flow_item_nvgre *)item->spec;
                        nvgre_mask =
                                (const struct rte_flow_item_nvgre *)item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        /* Both header fields are big-endian on the wire. */
                        if (nvgre_spec->c_k_s_rsvd0_ver !=
                                rte_cpu_to_be_16(0x2000) ||
                            nvgre_spec->protocol !=
                                rte_cpu_to_be_16(0x6558)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                       item,
                                                       "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Incorrect VF id!");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* A negative value means no driver is loaded
                                 * on the VF, so there is no default VNIC to
                                 * mirror to.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }
                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only ingress is supported.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "No support for group.");
                return -rte_errno;
        }

        return 0;
}

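/*
 * Return the port's primary L2 filter when the flow's destination MAC
 * matches it; otherwise allocate and program a new L2 filter for the
 * flow's destination MAC on the given VNIC.
 */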
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs a DST MAC that differs from the port/L2 filter. */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;
        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

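/*
 * Parse the pattern, attributes, and the single supported action
 * (queue, drop, count, or VF redirect) into the filter, and bind the
 * flow to an L2 filter.
 */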
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act = nxt_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1 = NULL;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;
        /* Only the ingress attribute is supported at the moment. */
        filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
                if (vnic == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;
                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* A negative value means no driver is loaded on the
                         * VF, so there is no default VNIC to redirect to.
                         */
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        if (filter1) {
                bnxt_free_filter(bp, filter1);
                filter1->fw_l2_filter_id = -1;
        }

        act = nxt_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

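/*
 * rte_flow validate callback: parse the flow into a scratch filter,
 * then release the filter again.
 */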
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = -1;
        bnxt_free_filter(bp, filter);

        return ret;
}

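/*
 * Look for an existing flow with the same match criteria as nf.
 * Returns -EEXIST for an exact duplicate, or -EXDEV when only the
 * destination differs; in the latter case the old HW filter is
 * cleared and the flow is rebound to nf.
 */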
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /* Same flow, different queue:
                                 * clear the old ntuple filter
                                 */
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter and update the flow
                                 * with the new filter
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}

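/*
 * rte_flow create callback: parse the flow, program the EM or n-tuple
 * filter in hardware, and link the flow to its destination VNIC.
 */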
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                ret = -ENOMEM;
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}

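/*
 * rte_flow destroy callback: clear the HW filters backing the flow
 * and unlink it from its VNIC.
 */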
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

        bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

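/* rte_flow flush callback: destroy every flow on every VNIC. */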
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                /* Take flows from the head of the list so that freeing
                 * an entry never leaves the iterator pointing at freed
                 * memory, as STAILQ_FOREACH would.
                 */
                while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
                        struct bnxt_filter_info *filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
        }

        return ret;
}

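/* Flow ops table exposed to applications through the rte_flow API. */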
const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};
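
/*
 * Usage sketch (illustrative only, not part of this driver): an
 * application reaches the callbacks above through the generic
 * rte_flow API, assuming port_id, attr, pattern, and actions have
 * been set up by the caller:
 *
 *      struct rte_flow_error err;
 *      struct rte_flow *flow;
 *
 *      if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *              flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *      ...
 *      rte_flow_destroy(port_id, flow, &err);
 */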