/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

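/*
 * Pull a filter from the pre-allocated free_filter_list pool and
 * initialize it as an RX L2 MAC filter for the port's primary MAC
 * address. The caller owns the filter until it is returned with
 * bnxt_free_filter().
 */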
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        /* Default to L2 MAC Addr filter */
        filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
        memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
               ETHER_ADDR_LEN);
        memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        return filter;
}

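/*
 * Allocate a filter for a VF from heap memory (VF filters are not drawn
 * from the PF's free_filter_list pool) and queue it on that VF's filter
 * list. fw_l2_filter_id starts as UINT64_MAX, i.e. not yet programmed
 * into the firmware.
 */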
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
        struct bnxt_filter_info *filter;

        filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
        if (!filter) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
                        vf);
                return NULL;
        }

        filter->fw_l2_filter_id = UINT64_MAX;
        STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
        return filter;
}

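/*
 * Build the free_filter_list pool from the filter_info array allocated
 * by bnxt_alloc_filter_mem(). All firmware filter IDs start out as
 * UINT64_MAX, meaning "not programmed in hardware".
 */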
void bnxt_init_filters(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        int i, max_filters;

        max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                filter->fw_l2_filter_id = UINT64_MAX;
                filter->fw_em_filter_id = UINT64_MAX;
                filter->fw_ntuple_filter_id = UINT64_MAX;
                STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
        }
}

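/*
 * Return every filter attached to a VNIC to the free_filter_list pool
 * and ask the firmware to clear the L2 filters created on behalf of the
 * VFs. The pool entries themselves stay allocated; final teardown is in
 * bnxt_free_filter_mem().
 */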
void bnxt_free_all_filters(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                temp_filter = STAILQ_NEXT(filter, next);
                                STAILQ_REMOVE(&vnic->filter, filter,
                                              bnxt_filter_info, next);
                                STAILQ_INSERT_TAIL(&bp->free_filter_list,
                                                   filter, next);
                                filter = temp_filter;
                        }
                        STAILQ_INIT(&vnic->filter);
                }
        }

        for (i = 0; i < bp->pf.max_vfs; i++) {
                STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
                        bnxt_hwrm_clear_l2_filter(bp, filter);
                }
        }
}

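/*
 * Final teardown of the filter pool. Any filter still marked live in
 * firmware at this point is a leak, so log it and make a best-effort
 * attempt to clear it before the backing memory is released.
 */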
void bnxt_free_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter, *temp_filter;
        uint16_t max_filters, i;
        int rc = 0;

        if (bp->filter_info == NULL)
                return;

        /* Ensure that all filters are freed */
        max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
                        PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
                        /* Call HWRM to try to free filter again */
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                        if (rc)
                                PMD_DRV_LOG(ERR,
                                       "HWRM filter cannot be freed rc = %d\n",
                                        rc);
                }
                filter->fw_l2_filter_id = UINT64_MAX;
        }
        STAILQ_INIT(&bp->free_filter_list);

        rte_free(bp->filter_info);
        bp->filter_info = NULL;

        /* Unlink each VF filter before freeing it; STAILQ_FOREACH would
         * dereference a filter after rte_free() had released it.
         */
        for (i = 0; i < bp->pf.max_vfs; i++) {
                filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
                while (filter) {
                        temp_filter = STAILQ_NEXT(filter, next);
                        STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
                                      bnxt_filter_info, next);
                        rte_free(filter);
                        filter = temp_filter;
                }
        }
}

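/*
 * Allocate the backing array for the filter pool. The pool is sized by
 * max_l2_ctx, the number of L2 contexts the firmware granted this
 * function.
 */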
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;

        max_filters = bp->max_l2_ctx;
        /* Allocate memory for the filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
                                 0);
        if (filter_mem == NULL) {
                PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters\n",
                        max_filters);
                return -ENOMEM;
        }
        bp->filter_info = filter_mem;
        return 0;
}

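/*
 * bnxt_get_unused_filter()/bnxt_free_filter() check a raw pool entry out
 * of, and back into, free_filter_list. Unlike bnxt_alloc_filter(), no L2
 * defaults are applied; rte_flow parsing fills the entry in from scratch.
 */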
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the 1st unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                PMD_DRV_LOG(ERR, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        return filter;
}

void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}

static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

/* Skip VOID items; the caller guarantees the list ends with an END item. */
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
{
        int i;

        for (i = 0; i < len; i++)
                if (bytes[i] != 0x00)
                        return 0;
        return 1;
}

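/*
 * Decide which hardware filter type a pattern needs: an ntuple filter
 * (return value 1) or an exact-match (EM) filter (return value 0).
 * A VLAN item forces EM mode and cannot be combined with L3/L4 items,
 * which require ntuple mode.
 */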
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* FALLTHROUGH */
                        /* need ntuple match, reset exact match */
                        if (!use_ntuple) {
                                PMD_DRV_LOG(ERR,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        PMD_DRV_LOG(ERR, "Unknown Flow type\n");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}

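/*
 * Walk the pattern items and translate them into a bnxt_filter_info,
 * accumulating the "enables" flags for the HWRM ntuple or EM
 * filter-allocation request. Range matching (item->last) is not
 * supported, and every non-END item must carry both a spec and a mask.
 */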
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_attr *attr,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        uint32_t en_ethertype;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
        en_ethertype = use_ntuple ?
                NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = item->spec;
                        eth_mask = item->mask;

                        /* Source and destination MAC address masks cannot
                         * be partially set; each must be all 0's or all 1's.
                         */
                        if ((!is_zero_ether_addr(&eth_mask->src) &&
                             !is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Mask is not allowed. Only exact matches are */
                        if (eth_mask->type &&
                            eth_mask->type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }
                        if (is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }
                        if (eth_mask->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec = item->spec;
                        vlan_mask = item->mask;
                        if (en & en_ethertype) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN TPID matching is not"
                                                   " supported");
                                return -rte_errno;
                        }
                        if (vlan_mask->tci &&
                            vlan_mask->tci == RTE_BE16(0x0fff)) {
                                /* Only the VLAN ID can be matched. */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci &
                                                         RTE_BE16(0x0fff));
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else if (vlan_mask->tci) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type &&
                            vlan_mask->inner_type != RTE_BE16(0xffff)) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "inner ethertype mask not"
                                                   " valid");
                                return -rte_errno;
                        }
                        if (vlan_mask->inner_type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(vlan_spec->inner_type);
                                en |= en_ethertype;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec = item->spec;
                        ipv4_mask = item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return -rte_errno;
                        }
                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec = item->spec;
                        ipv6_mask = item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);
                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
                                                   16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
                                                   16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = item->spec;
                        tcp_mask = item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                return -rte_errno;
                        }
                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = item->spec;
                        udp_mask = item->mask;

                        /* Only DST & SRC ports are maskable. */
                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec = item->spec;
                        vxlan_mask = item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec = item->spec;
                        nvgre_mask = item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        /* c_k_s_rsvd0_ver and protocol are big-endian in
                         * the flow item; compare in network byte order.
                         */
                        if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
                            nvgre_spec->protocol != RTE_BE16(0x6558)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                       item,
                                                       "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Incorrect VF id!");
                                return -rte_errno;
                        }

                        if (!attr->transfer) {
                                rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Matching VF traffic without"
                                           " affecting it (transfer attribute)"
                                           " is unsupported");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* A negative value means no driver is loaded
                                 * on the VF, so no default VNIC exists yet.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }
                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "No support for group.");
                return -rte_errno;
        }

        return 0;
}

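/*
 * Every EM/ntuple flow must reference an L2 filter. Reuse the port's
 * default L2 filter (the first filter on VNIC 0) when the flow matches
 * the same destination MAC; otherwise create a dedicated L2 filter for
 * the flow's destination MAC on the given VNIC.
 */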
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs a DST MAC that differs from the port/L2 filter. */
        PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;
        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        return filter1;
}

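/*
 * Parse the pattern and the single non-VOID action into the filter, then
 * bind the filter to its destination: a VNIC for QUEUE, the drop/meter
 * flags for DROP/COUNT, or a VF's default VNIC for the VF action. Flows
 * cannot be created while RSS is enabled because RX queues do not map
 * 1:1 to VNICs in that mode.
 */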
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act = nxt_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
                                               filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;
        /* Only the ingress attribute is supported, so EM filters are
         * always RX path filters.
         */
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

                vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
                if (vnic == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                PMD_DRV_LOG(DEBUG, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;
                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* A negative value means no driver is loaded on the
                         * VF, so no default VNIC exists yet.
                         */
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        act = nxt_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

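/*
 * rte_flow validate op: run the full parse on a scratch filter from the
 * pool, then return the filter without programming anything into the
 * hardware.
 */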
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}

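/*
 * Look for an existing flow whose match criteria are identical to nf.
 * Returns -EEXIST for an exact duplicate (same destination), or -EXDEV
 * when only the destination differs; in the -EXDEV case the old hardware
 * filter is cleared and the flow is re-pointed at nf so the caller can
 * program the new destination.
 */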
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /* Same Flow, Different queue
                                 * Clear the old ntuple filter
                                 */
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter, update flow
                                 * with new filter
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}

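/*
 * rte_flow create op: validate and parse the flow, handle duplicates via
 * bnxt_match_filter(), program the EM or ntuple filter through HWRM, and
 * attach the resulting flow to its destination VNIC's flow list.
 */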
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                PMD_DRV_LOG(ERR, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
                ret = -ENOMEM;
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                PMD_DRV_LOG(DEBUG, "Flow with same pattern exists\n");
                PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        /* rte_flow_error_set() expects a positive errno value. */
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}

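/*
 * rte_flow destroy op: clear the flow's hardware filter (EM, ntuple, or
 * L2), then unlink the flow from its VNIC and free it.
 */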
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                PMD_DRV_LOG(ERR, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
        else
                ret = bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

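/*
 * rte_flow flush op: walk every VNIC's flow list, clear each flow's
 * hardware filter, and free the flows. Stops at the first HWRM failure.
 */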
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow, *temp_flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                /* Unlink each flow before freeing it; STAILQ_FOREACH would
                 * dereference a flow after rte_free() had released it.
                 */
                flow = STAILQ_FIRST(&vnic->flow_list);
                while (flow) {
                        struct bnxt_filter_info *filter = flow->filter;

                        temp_flow = STAILQ_NEXT(flow, next);
                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE(&vnic->flow_list, flow,
                                      rte_flow, next);
                        rte_free(flow);
                        flow = temp_flow;
                }
        }

        return ret;
}

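/*
 * These ops are returned from the driver's filter_ctrl handler for
 * RTE_ETH_FILTER_GENERIC, so applications reach them through the public
 * rte_flow API. A rough sketch of steering IPv4 traffic for 10.0.0.1 to
 * RX queue 1 follows; port_id, the queue index, and the address are
 * illustrative, not taken from this file. Note that this parser requires
 * both a spec and a mask on every non-END pattern item.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip_spec = {
 *		.hdr.dst_addr = RTE_BE32(0x0a000001), // 10.0.0.1
 *	};
 *	struct rte_flow_item_ipv4 ip_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
 *		  .spec = &ip_spec, .mask = &ip_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *						actions, &err);
 */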
const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};