/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/queue.h>

#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */

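/* Take a filter from the pre-allocated pool and initialize it as an L2
 * (destination MAC) filter for the port's own MAC address.
 */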
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the first unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                RTE_LOG(ERR, PMD, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        /* Default to L2 MAC Addr filter */
        filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
        memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
               ETHER_ADDR_LEN);
        memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        return filter;
}

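/* Allocate a filter for a VF from the heap (VF filters are not drawn from
 * the shared pool) and queue it on that VF's filter list.
 */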
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
        struct bnxt_filter_info *filter;

        filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
        if (!filter) {
                RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
                        vf);
                return NULL;
        }

        filter->fw_l2_filter_id = UINT64_MAX;
        STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
        return filter;
}

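/* Mark every filter in the pool as unused (firmware IDs set to all-ones)
 * and place them all on the free list.
 */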
void bnxt_init_filters(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        int i, max_filters;

        max_filters = bp->max_l2_ctx;
        STAILQ_INIT(&bp->free_filter_list);
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                filter->fw_l2_filter_id = -1;
                filter->fw_em_filter_id = -1;
                filter->fw_ntuple_filter_id = -1;
                STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
        }
}

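/* Detach every filter from every VNIC and return it to the free list, then
 * ask firmware to clear all VF L2 filters. The pool memory itself is
 * released later by bnxt_free_filter_mem().
 */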
void bnxt_free_all_filters(struct bnxt *bp)
{
        struct bnxt_vnic_info *vnic;
        struct bnxt_filter_info *filter, *temp_filter;
        int i;

        for (i = 0; i < MAX_FF_POOLS; i++) {
                STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
                        filter = STAILQ_FIRST(&vnic->filter);
                        while (filter) {
                                temp_filter = STAILQ_NEXT(filter, next);
                                STAILQ_REMOVE(&vnic->filter, filter,
                                              bnxt_filter_info, next);
                                STAILQ_INSERT_TAIL(&bp->free_filter_list,
                                                   filter, next);
                                filter = temp_filter;
                        }
                        STAILQ_INIT(&vnic->filter);
                }
        }

        for (i = 0; i < bp->pf.max_vfs; i++) {
                STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next) {
                        bnxt_hwrm_clear_l2_filter(bp, filter);
                }
        }
}

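/* Release the filter pool. Any filter still holding a firmware L2 filter ID
 * is cleared again as a safety net before the pool and the heap-allocated
 * VF filters are freed.
 */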
void bnxt_free_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;
        uint16_t max_filters, i;
        int rc = 0;

        if (bp->filter_info == NULL)
                return;

        /* Ensure that all filters are freed */
        max_filters = bp->max_l2_ctx;
        for (i = 0; i < max_filters; i++) {
                filter = &bp->filter_info[i];
                if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
                        RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
                        /* Call HWRM to try to free filter again */
                        rc = bnxt_hwrm_clear_l2_filter(bp, filter);
                        if (rc)
                                RTE_LOG(ERR, PMD,
                                       "HWRM filter cannot be freed rc = %d\n",
                                        rc);
                }
                filter->fw_l2_filter_id = UINT64_MAX;
        }
        STAILQ_INIT(&bp->free_filter_list);

        rte_free(bp->filter_info);
        bp->filter_info = NULL;

        /* Unlink each VF filter before freeing it; freeing inside
         * STAILQ_FOREACH would read the next pointer of freed memory.
         */
        for (i = 0; i < bp->pf.max_vfs; i++) {
                while ((filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter)) !=
                       NULL) {
                        STAILQ_REMOVE_HEAD(&bp->pf.vf_info[i].filter, next);
                        rte_free(filter);
                }
        }
}

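/* Allocate one zeroed bnxt_filter_info entry per L2 context supported by
 * the device; this array backs the free filter list.
 */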
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
        struct bnxt_filter_info *filter_mem;
        uint16_t max_filters;

        max_filters = bp->max_l2_ctx;
        /* Allocate memory for the filter pool */
        filter_mem = rte_zmalloc("bnxt_filter_info",
                                 max_filters * sizeof(struct bnxt_filter_info),
                                 0);
        if (filter_mem == NULL) {
                RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters\n",
                        max_filters);
                return -ENOMEM;
        }
        bp->filter_info = filter_mem;
        return 0;
}

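/* Pop a filter from the free list without initializing it; the caller
 * fills in the filter type and match fields.
 */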
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
        struct bnxt_filter_info *filter;

        /* Find the first unused filter from the free_filter_list pool */
        filter = STAILQ_FIRST(&bp->free_filter_list);
        if (!filter) {
                RTE_LOG(ERR, PMD, "No more free filter resources\n");
                return NULL;
        }
        STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

        return filter;
}

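/* Return a filter to the free list. */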
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
        STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}

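/* Reject rte_flow requests with a NULL attribute, pattern, or action array
 * before any parsing is attempted.
 */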
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
                        const struct rte_flow_item pattern[],
                        const struct rte_flow_action actions[],
                        struct rte_flow_error *error)
{
        if (!pattern) {
                rte_flow_error_set(error, EINVAL,
                        RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                        NULL, "NULL pattern.");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "NULL action.");
                return -rte_errno;
        }

        if (!attr) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "NULL attribute.");
                return -rte_errno;
        }

        return 0;
}

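/* Skip leading VOID entries; the rte_flow API guarantees the pattern and
 * action arrays are terminated with an END entry.
 */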
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
                        return cur;
                cur++;
        }
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
        while (1) {
                if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
                        return cur;
                cur++;
        }
}

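/* Return 1 if all 'len' bytes are zero, 0 otherwise. */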
static inline int check_zero_bytes(const uint8_t *bytes, int len)
{
        int i;

        for (i = 0; i < len; i++)
                if (bytes[i] != 0x00)
                        return 0;
        return 1;
}

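/* Walk the pattern once to choose between an n-tuple filter and an
 * exact-match (EM) filter. A VLAN item forces EM, and combining VLAN with
 * L3/L4 items is rejected because the n-tuple filter cannot express it.
 * Returns 1 for n-tuple, 0 for EM, negative on error.
 */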
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
                       struct rte_flow_error *error)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        int use_ntuple = 1;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        use_ntuple = 1;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        use_ntuple = 0;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                case RTE_FLOW_ITEM_TYPE_IPV6:
                case RTE_FLOW_ITEM_TYPE_TCP:
                case RTE_FLOW_ITEM_TYPE_UDP:
                        /* These need an ntuple match; reject flows that
                         * already demanded exact match (VLAN).
                         */
                        if (!use_ntuple) {
                                RTE_LOG(ERR, PMD,
                                        "VLAN flow cannot use NTUPLE filter\n");
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Cannot use VLAN with NTUPLE");
                                return -rte_errno;
                        }
                        use_ntuple |= 1;
                        break;
                default:
                        RTE_LOG(ERR, PMD, "Unknown Flow type\n");
                        use_ntuple |= 1;
                }
                item++;
        }
        return use_ntuple;
}

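/* Translate the rte_flow pattern into HWRM filter fields. The filter type
 * is chosen first, then each item populates its match fields and sets the
 * corresponding "enables" bits. Ranges (item->last) are not supported, and
 * every item must carry both a spec and a mask.
 */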
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
                                  const struct rte_flow_item pattern[],
                                  struct rte_flow_error *error,
                                  struct bnxt_filter_info *filter)
{
        const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
        const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
        const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
        const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
        const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
        const struct rte_flow_item_udp *udp_spec, *udp_mask;
        const struct rte_flow_item_eth *eth_spec, *eth_mask;
        const struct rte_flow_item_nvgre *nvgre_spec;
        const struct rte_flow_item_nvgre *nvgre_mask;
        const struct rte_flow_item_vxlan *vxlan_spec;
        const struct rte_flow_item_vxlan *vxlan_mask;
        uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
        uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
        const struct rte_flow_item_vf *vf_spec;
        uint32_t tenant_id_be = 0;
        bool vni_masked = 0;
        bool tni_masked = 0;
        uint32_t vf = 0;
        int use_ntuple;
        uint32_t en = 0;
        int dflt_vnic;

        use_ntuple = bnxt_filter_type_check(pattern, error);
        RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
        if (use_ntuple < 0)
                return use_ntuple;

        filter->filter_type = use_ntuple ?
                HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;

        while (item->type != RTE_FLOW_ITEM_TYPE_END) {
                if (item->last) {
                        /* last or range is NOT supported as match criteria */
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "No support for range");
                        return -rte_errno;
                }
                if (!item->spec || !item->mask) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "spec/mask is NULL");
                        return -rte_errno;
                }
                switch (item->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth_spec = (const struct rte_flow_item_eth *)item->spec;
                        eth_mask = (const struct rte_flow_item_eth *)item->mask;

                        /* Source MAC address mask cannot be partially set.
                         * Should be all 0's or all 1's.
                         * Destination MAC address mask must not be partially
                         * set. Should be all 1's or all 0's.
                         */
                        if ((!is_zero_ether_addr(&eth_mask->src) &&
                             !is_broadcast_ether_addr(&eth_mask->src)) ||
                            (!is_zero_ether_addr(&eth_mask->dst) &&
                             !is_broadcast_ether_addr(&eth_mask->dst))) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "MAC_addr mask not valid");
                                return -rte_errno;
                        }

                        /* Partial EtherType masks are not allowed.
                         * Only exact matches are.
                         */
                        if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "ethertype mask not valid");
                                return -rte_errno;
                        }

                        if (is_broadcast_ether_addr(&eth_mask->dst)) {
                                rte_memcpy(filter->dst_macaddr,
                                           &eth_spec->dst, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
                        }
                        if (is_broadcast_ether_addr(&eth_mask->src)) {
                                rte_memcpy(filter->src_macaddr,
                                           &eth_spec->src, ETHER_ADDR_LEN);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
                                        EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
                        }
                        if (eth_spec->type) {
                                filter->ethertype =
                                        rte_be_to_cpu_16(eth_spec->type);
                                en |= use_ntuple ?
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
                                        EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan_spec =
                                (const struct rte_flow_item_vlan *)item->spec;
                        vlan_mask =
                                (const struct rte_flow_item_vlan *)item->mask;
                        if (vlan_mask->tci && !vlan_mask->tpid) {
                                /* Only the VLAN ID can be matched. Convert
                                 * the big-endian TCI before masking off the
                                 * PCP/DEI bits.
                                 */
                                filter->l2_ovlan =
                                        rte_be_to_cpu_16(vlan_spec->tci) &
                                        0xFFF;
                                en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
                        } else {
                                rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "VLAN mask is invalid");
                                return -rte_errno;
                        }

                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        /* If mask is not involved, we could use EM filters. */
                        ipv4_spec =
                                (const struct rte_flow_item_ipv4 *)item->spec;
                        ipv4_mask =
                                (const struct rte_flow_item_ipv4 *)item->mask;
                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv4_mask->hdr.version_ihl ||
                            ipv4_mask->hdr.type_of_service ||
                            ipv4_mask->hdr.total_length ||
                            ipv4_mask->hdr.packet_id ||
                            ipv4_mask->hdr.fragment_offset ||
                            ipv4_mask->hdr.time_to_live ||
                            ipv4_mask->hdr.next_proto_id ||
                            ipv4_mask->hdr.hdr_checksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv4 mask.");
                                return -rte_errno;
                        }
                        filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
                        filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        if (ipv4_mask->hdr.src_addr) {
                                filter->src_ipaddr_mask[0] =
                                        ipv4_mask->hdr.src_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (ipv4_mask->hdr.dst_addr) {
                                filter->dst_ipaddr_mask[0] =
                                        ipv4_mask->hdr.dst_addr;
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                         HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
                         HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
                        if (ipv4_spec->hdr.next_proto_id) {
                                filter->ip_protocol =
                                        ipv4_spec->hdr.next_proto_id;
                                if (use_ntuple)
                                        en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
                                else
                                        en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6_spec =
                                (const struct rte_flow_item_ipv6 *)item->spec;
                        ipv6_mask =
                                (const struct rte_flow_item_ipv6 *)item->mask;

                        /* Only IP DST and SRC fields are maskable. */
                        if (ipv6_mask->hdr.vtc_flow ||
                            ipv6_mask->hdr.payload_len ||
                            ipv6_mask->hdr.proto ||
                            ipv6_mask->hdr.hop_limits) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid IPv6 mask.");
                                return -rte_errno;
                        }

                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
                        rte_memcpy(filter->src_ipaddr,
                                   ipv6_spec->hdr.src_addr, 16);
                        rte_memcpy(filter->dst_ipaddr,
                                   ipv6_spec->hdr.dst_addr, 16);
                        if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
                                rte_memcpy(filter->src_ipaddr_mask,
                                           ipv6_mask->hdr.src_addr, 16);
                                en |= !use_ntuple ? 0 :
                                    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
                        }
                        if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
                                rte_memcpy(filter->dst_ipaddr_mask,
                                           ipv6_mask->hdr.dst_addr, 16);
                                en |= !use_ntuple ? 0 :
                                     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
                        }
                        filter->ip_addr_type = use_ntuple ?
                                NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
                                EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
                        tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

                        /* Check TCP mask. Only DST & SRC ports are maskable */
                        if (tcp_mask->hdr.sent_seq ||
                            tcp_mask->hdr.recv_ack ||
                            tcp_mask->hdr.data_off ||
                            tcp_mask->hdr.tcp_flags ||
                            tcp_mask->hdr.rx_win ||
                            tcp_mask->hdr.cksum ||
                            tcp_mask->hdr.tcp_urp) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid TCP mask");
                                return -rte_errno;
                        }
                        filter->src_port = tcp_spec->hdr.src_port;
                        filter->dst_port = tcp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
                        if (tcp_mask->hdr.dst_port) {
                                filter->dst_port_mask = tcp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (tcp_mask->hdr.src_port) {
                                filter->src_port_mask = tcp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp_spec = (const struct rte_flow_item_udp *)item->spec;
                        udp_mask = (const struct rte_flow_item_udp *)item->mask;

                        if (udp_mask->hdr.dgram_len ||
                            udp_mask->hdr.dgram_cksum) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid UDP mask");
                                return -rte_errno;
                        }

                        filter->src_port = udp_spec->hdr.src_port;
                        filter->dst_port = udp_spec->hdr.dst_port;
                        if (use_ntuple)
                                en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
                                        NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
                        else
                                en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
                                        EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

                        if (udp_mask->hdr.dst_port) {
                                filter->dst_port_mask = udp_mask->hdr.dst_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
                        }
                        if (udp_mask->hdr.src_port) {
                                filter->src_port_mask = udp_mask->hdr.src_port;
                                en |= !use_ntuple ? 0 :
                                  NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan_spec =
                                (const struct rte_flow_item_vxlan *)item->spec;
                        vxlan_mask =
                                (const struct rte_flow_item_vxlan *)item->mask;
                        /* Check if VXLAN item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!vxlan_spec && vxlan_mask) ||
                            (vxlan_spec && !vxlan_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
                            vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
                            vxlan_spec->flags != 0x8) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid VXLAN item");
                                return -rte_errno;
                        }

                        /* Check if VNI is masked. */
                        if (vxlan_spec && vxlan_mask) {
                                vni_masked =
                                        !!memcmp(vxlan_mask->vni, vni_mask,
                                                 RTE_DIM(vni_mask));
                                if (vni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                   RTE_FLOW_ERROR_TYPE_ITEM,
                                                   item,
                                                   "Invalid VNI mask");
                                        return -rte_errno;
                                }

                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           vxlan_spec->vni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        nvgre_spec =
                                (const struct rte_flow_item_nvgre *)item->spec;
                        nvgre_mask =
                                (const struct rte_flow_item_nvgre *)item->mask;
                        /* Check if NVGRE item is used to describe protocol.
                         * If yes, both spec and mask should be NULL.
                         * If no, both spec and mask shouldn't be NULL.
                         */
                        if ((!nvgre_spec && nvgre_mask) ||
                            (nvgre_spec && !nvgre_mask)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        /* Both fields are big-endian on the wire; compare
                         * against byte-swapped constants.
                         */
                        if (nvgre_spec->c_k_s_rsvd0_ver !=
                            rte_cpu_to_be_16(0x2000) ||
                            nvgre_spec->protocol != rte_cpu_to_be_16(0x6558)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Invalid NVGRE item");
                                return -rte_errno;
                        }

                        if (nvgre_spec && nvgre_mask) {
                                tni_masked =
                                        !!memcmp(nvgre_mask->tni, tni_mask,
                                                 RTE_DIM(tni_mask));
                                if (tni_masked) {
                                        rte_flow_error_set(error, EINVAL,
                                                       RTE_FLOW_ERROR_TYPE_ITEM,
                                                       item,
                                                       "Invalid TNI mask");
                                        return -rte_errno;
                                }
                                rte_memcpy(((uint8_t *)&tenant_id_be + 1),
                                           nvgre_spec->tni, 3);
                                filter->vni =
                                        rte_be_to_cpu_32(tenant_id_be);
                                filter->tunnel_type =
                                 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_VF:
                        vf_spec = (const struct rte_flow_item_vf *)item->spec;
                        vf = vf_spec->id;
                        if (!BNXT_PF(bp)) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Configuring on a VF!");
                                return -rte_errno;
                        }

                        if (vf >= bp->pdev->max_vfs) {
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Incorrect VF id!");
                                return -rte_errno;
                        }

                        dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                        if (dflt_vnic < 0) {
                                /* This simply indicates there's no driver
                                 * loaded on the VF, so the flow cannot be
                                 * mirrored to it.
                                 */
                                rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ITEM,
                                           item,
                                           "Unable to get default VNIC for VF");
                                return -rte_errno;
                        }
                        filter->mirror_vnic_id = dflt_vnic;
                        en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
                        break;
                default:
                        break;
                }
                item++;
        }
        filter->enables = en;

        return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
                     struct rte_flow_error *error)
{
        /* Must be input direction */
        if (!attr->ingress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                   attr, "Only support ingress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->egress) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                   attr, "No support for egress.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->priority) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                   attr, "No support for priority.");
                return -rte_errno;
        }

        /* Not supported */
        if (attr->group) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                   attr, "No support for group.");
                return -rte_errno;
        }

        return 0;
}

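/* Find or create the L2 filter that an EM/n-tuple filter must reference.
 * If the flow's destination MAC matches the port's own L2 filter, reuse it;
 * otherwise allocate a new L2 filter for that MAC on the given VNIC.
 */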
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
                   struct bnxt_vnic_info *vnic)
{
        struct bnxt_filter_info *filter1, *f0;
        struct bnxt_vnic_info *vnic0;
        int rc;

        vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
        f0 = STAILQ_FIRST(&vnic0->filter);

        /* This flow has the same DST MAC as the port/L2 filter. */
        if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
                return f0;

        /* This flow needs a DST MAC which is not the same as the port/L2
         * filter; create a new L2 filter for it.
         */
        RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
        filter1 = bnxt_get_unused_filter(bp);
        if (filter1 == NULL)
                return NULL;
        filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
        filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                        L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
        memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
        memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
        rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
                                     filter1);
        if (rc) {
                bnxt_free_filter(bp, filter1);
                return NULL;
        }
        STAILQ_INSERT_TAIL(&vnic->filter, filter1, next);
        return filter1;
}

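/* Parse the pattern, attributes, and the single non-END action (QUEUE,
 * DROP, COUNT, or VF) into the HWRM filter, resolving the destination VNIC
 * and the L2 filter the new flow will reference.
 */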
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
                             const struct rte_flow_item pattern[],
                             const struct rte_flow_action actions[],
                             const struct rte_flow_attr *attr,
                             struct rte_flow_error *error,
                             struct bnxt_filter_info *filter)
{
        const struct rte_flow_action *act = nxt_non_void_action(actions);
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        const struct rte_flow_action_queue *act_q;
        const struct rte_flow_action_vf *act_vf;
        struct bnxt_vnic_info *vnic, *vnic0;
        struct bnxt_filter_info *filter1;
        uint32_t vf = 0;
        int dflt_vnic;
        int rc;

        if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
                RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "Cannot create flow on RSS queues");
                rc = -rte_errno;
                goto ret;
        }

        rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
        if (rc != 0)
                goto ret;

        rc = bnxt_flow_parse_attr(attr, error);
        if (rc != 0)
                goto ret;
        /* Only the ingress attribute is supported right now. */
        filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

        switch (act->type) {
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                /* Allow this flow. Redirect to a VNIC. */
                act_q = (const struct rte_flow_action_queue *)act->conf;
                if (act_q->index >= bp->rx_nr_rings) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "Invalid queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
                if (vnic == NULL) {
                        rte_flow_error_set(error, EINVAL,
                                           RTE_FLOW_ERROR_TYPE_ACTION, act,
                                           "No matching VNIC for queue ID.");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->dst_id = vnic->fw_vnic_id;
                filter1 = bnxt_get_l2_filter(bp, filter, vnic);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                RTE_LOG(DEBUG, PMD, "VNIC found\n");
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                if (filter->filter_type == HWRM_CFA_EM_FILTER)
                        filter->flags =
                                HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
                else
                        filter->flags =
                                HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_COUNT:
                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
                break;
        case RTE_FLOW_ACTION_TYPE_VF:
                act_vf = (const struct rte_flow_action_vf *)act->conf;
                vf = act_vf->id;
                if (!BNXT_PF(bp)) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Configuring on a VF!");
                        rc = -rte_errno;
                        goto ret;
                }

                if (vf >= bp->pdev->max_vfs) {
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Incorrect VF id!");
                        rc = -rte_errno;
                        goto ret;
                }

                dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
                if (dflt_vnic < 0) {
                        /* This simply indicates there's no driver loaded on
                         * the VF, so the flow cannot be redirected to it.
                         */
                        rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act,
                                   "Unable to get default VNIC for VF");
                        rc = -rte_errno;
                        goto ret;
                }
                filter->mirror_vnic_id = dflt_vnic;
                filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

                vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
                filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
                if (filter1 == NULL) {
                        rc = -ENOSPC;
                        goto ret;
                }
                filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
                break;

        default:
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION, act,
                                   "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }

        act = nxt_non_void_action(++act);
        if (act->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION,
                                   act, "Invalid action.");
                rc = -rte_errno;
                goto ret;
        }
ret:
        return rc;
}

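/* rte_flow validate callback: run the full parse against a scratch filter,
 * then return the filter to the pool.
 */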
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        int ret = 0;

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0)
                return ret;

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
                return -ENOMEM;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        /* No need to hold on to this filter if we are just validating flow */
        filter->fw_l2_filter_id = UINT64_MAX;
        bnxt_free_filter(bp, filter);

        return ret;
}

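/* Scan all flows for one whose match fields equal 'nf'. Returns -EEXIST if
 * an identical flow (same destination) exists, -EXDEV if the same match
 * exists with a different destination (the old HW filter is cleared and its
 * L2 filter ID reused), or 0 when nothing matches.
 */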
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
        struct bnxt_filter_info *mf;
        struct rte_flow *flow;
        int i;

        for (i = bp->nr_vnics - 1; i >= 0; i--) {
                struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

                STAILQ_FOREACH(flow, &vnic->flow_list, next) {
                        mf = flow->filter;

                        if (mf->filter_type == nf->filter_type &&
                            mf->flags == nf->flags &&
                            mf->src_port == nf->src_port &&
                            mf->src_port_mask == nf->src_port_mask &&
                            mf->dst_port == nf->dst_port &&
                            mf->dst_port_mask == nf->dst_port_mask &&
                            mf->ip_protocol == nf->ip_protocol &&
                            mf->ip_addr_type == nf->ip_addr_type &&
                            mf->ethertype == nf->ethertype &&
                            mf->vni == nf->vni &&
                            mf->tunnel_type == nf->tunnel_type &&
                            mf->l2_ovlan == nf->l2_ovlan &&
                            mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
                            mf->l2_ivlan == nf->l2_ivlan &&
                            mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
                            !memcmp(mf->l2_addr, nf->l2_addr, ETHER_ADDR_LEN) &&
                            !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_macaddr, nf->src_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->dst_macaddr, nf->dst_macaddr,
                                    ETHER_ADDR_LEN) &&
                            !memcmp(mf->src_ipaddr, nf->src_ipaddr,
                                    sizeof(nf->src_ipaddr)) &&
                            !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
                                    sizeof(nf->src_ipaddr_mask)) &&
                            !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
                                    sizeof(nf->dst_ipaddr)) &&
                            !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
                                    sizeof(nf->dst_ipaddr_mask))) {
                                if (mf->dst_id == nf->dst_id)
                                        return -EEXIST;
                                /*
                                 * Same Flow, Different queue
                                 * Clear the old ntuple filter
                                 * Reuse the matching L2 filter
                                 * ID for the new filter
                                 */
                                nf->fw_l2_filter_id = mf->fw_l2_filter_id;
                                if (nf->filter_type == HWRM_CFA_EM_FILTER)
                                        bnxt_hwrm_clear_em_filter(bp, mf);
                                if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                        bnxt_hwrm_clear_ntuple_filter(bp, mf);
                                /* Free the old filter, update flow
                                 * with new filter
                                 */
                                bnxt_free_filter(bp, mf);
                                flow->filter = nf;
                                return -EXDEV;
                        }
                }
        }
        return 0;
}

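/* rte_flow create callback: parse the flow, program the EM or n-tuple
 * filter in hardware, and attach the flow to its destination VNIC's list.
 */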
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter;
        struct bnxt_vnic_info *vnic = NULL;
        bool update_flow = false;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to allocate memory");
                return flow;
        }

        ret = bnxt_flow_args_validate(attr, pattern, actions, error);
        if (ret != 0) {
                RTE_LOG(ERR, PMD, "Not a valid flow.\n");
                goto free_flow;
        }

        filter = bnxt_get_unused_filter(bp);
        if (filter == NULL) {
                RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
                ret = -ENOMEM;
                goto free_flow;
        }

        ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
                                           error, filter);
        if (ret != 0)
                goto free_filter;

        ret = bnxt_match_filter(bp, filter);
        if (ret == -EEXIST) {
                RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
                /* Clear the filter that was created as part of
                 * validate_and_parse_flow() above.
                 */
                bnxt_hwrm_clear_l2_filter(bp, filter);
                goto free_filter;
        } else if (ret == -EXDEV) {
                RTE_LOG(DEBUG, PMD,
                        "Flow with same pattern exists; updating destination\n");
                update_flow = true;
        }

        if (filter->filter_type == HWRM_CFA_EM_FILTER) {
                filter->enables |=
                        HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
        }
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
                filter->enables |=
                        HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
                ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
        }

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                if (filter->dst_id == vnic->fw_vnic_id)
                        break;
        }

        if (!ret) {
                flow->filter = filter;
                flow->vnic = vnic;
                if (update_flow) {
                        ret = -EXDEV;
                        goto free_flow;
                }
                RTE_LOG(DEBUG, PMD, "Successfully created flow.\n");
                STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
                return flow;
        }
free_filter:
        bnxt_free_filter(bp, filter);
free_flow:
        if (ret == -EEXIST)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Matching Flow exists.");
        else if (ret == -EXDEV)
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Flow with pattern exists, updating destination queue");
        else
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to create flow.");
        rte_free(flow);
        flow = NULL;
        return flow;
}

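/* rte_flow destroy callback: clear the HW filters backing the flow and
 * unlink it from its VNIC.
 */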
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_filter_info *filter = flow->filter;
        struct bnxt_vnic_info *vnic = flow->vnic;
        int ret = 0;

        ret = bnxt_match_filter(bp, filter);
        if (ret == 0)
                RTE_LOG(ERR, PMD, "Could not find matching flow\n");
        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                ret = bnxt_hwrm_clear_em_filter(bp, filter);
        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

        bnxt_hwrm_clear_l2_filter(bp, filter);
        if (!ret) {
                STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
                rte_free(flow);
        } else {
                rte_flow_error_set(error, -ret,
                                   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                   "Failed to destroy flow.");
        }

        return ret;
}

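/* rte_flow flush callback: destroy every flow on every VNIC. */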
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
        struct bnxt_vnic_info *vnic;
        struct rte_flow *flow;
        unsigned int i;
        int ret = 0;

        for (i = 0; i < bp->nr_vnics; i++) {
                vnic = &bp->vnic_info[i];
                /* Pop each flow from the head of the list; iterating with
                 * STAILQ_FOREACH while freeing the current element would
                 * dereference freed memory.
                 */
                while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
                        struct bnxt_filter_info *filter = flow->filter;

                        if (filter->filter_type == HWRM_CFA_EM_FILTER)
                                ret = bnxt_hwrm_clear_em_filter(bp, filter);
                        if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
                                ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

                        if (ret) {
                                rte_flow_error_set(error, -ret,
                                                   RTE_FLOW_ERROR_TYPE_HANDLE,
                                                   NULL,
                                                   "Failed to flush flow in HW.");
                                return -rte_errno;
                        }

                        STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
                        rte_free(flow);
                }
        }

        return ret;
}

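/* rte_flow operations table exposed by the bnxt PMD. */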
const struct rte_flow_ops bnxt_flow_ops = {
        .validate = bnxt_flow_validate,
        .create = bnxt_flow_create,
        .destroy = bnxt_flow_destroy,
        .flush = bnxt_flow_flush,
};