/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        int (*copy_item)(const struct rte_flow_item *item,
                         struct filter_v2 *enic_filter, u8 *inner_ofst);
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/* functions for copying items into enic filters */
typedef int (enic_copy_item_fn)(const struct rte_flow_item *item,
                          struct filter_v2 *enic_filter, u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        int (*copy_fn)(const struct rte_flow_action actions[],
                       struct filter_action_v2 *enic_action);
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
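
/*
 * Illustrative example (editor's note, not part of the driver): with the
 * v1 table above, a pattern must start at IPv4 and may be followed by one
 * L4 item, e.g.
 *
 *      IPV4 -> UDP -> END      accepted
 *      UDP -> END              rejected: UDP has valid_start_item = 0 and
 *                              no preceding IPv4 item
 *
 * The END sentinel terminating each prev_items list is what stops the
 * scan in item_stacking_valid() below.
 */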

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * Layer 3 must be specified in this mode.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_V2_ALL] = {
                .actions = enic_supported_actions_v2,
                .copy_fn = enic_copy_action_v2,
        },
};

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}
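
/*
 * (Editor's note) mask_exact_match() is equivalent to
 * !memcmp(supported, supplied, size): v1 filters are exact-match only, so
 * the caller's mask must equal the supported mask byte for byte, not
 * merely be a subset of it.
 */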

/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_udp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        /* Don't support filtering on tpid */
        if (mask) {
                if (mask->tpid != 0)
                        return ENOTSUP;
        } else {
                mask = &rte_flow_item_vlan_mask;
                RTE_ASSERT(mask->tpid == 0);
        }

        if (*inner_ofst == 0) {
                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;
        }

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

/**
 * Copy VXLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VxLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
                        struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}
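
/*
 * Illustrative walk-through (editor's note, not part of the driver): for a
 * pattern like ETH / IPV4 / UDP / VXLAN / ETH, the copy functions above
 * fill the outer L2/L3/L4 layers, enic_copy_item_vxlan_v2() writes the
 * VXLAN header at the start of L5 and sets *inner_ofst to
 * sizeof(struct vxlan_hdr) (8 bytes), and the second ETH item is then
 * copied into the L5 key at that offset, advancing *inner_ofst again.
 */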

/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that's cool */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_items *items_info,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;

        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned the NIC does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &items_info[item->type];

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                ret = item_info->copy_item(item, enic_filter, &inner_ofst);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}
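
/*
 * Example trace (editor's sketch): for the pattern ETH / IPV4 / UDP / END
 * under enic_items_v2, the loop above checks
 *
 *      ETH:  first item, valid_start_item = 1         -> ok
 *      IPV4: prev_items of IPV4 contains ETH          -> ok
 *      UDP:  prev_items of UDP contains IPV4          -> ok
 *
 * and each copy_item callback fills the corresponding layer of the
 * filter_generic_1 key.
 */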

/**
 * Build the internal version 1 NIC action structure from the provided actions.
 * The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided actions.
 * The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
                         * in the range of allowed mark ids.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID)
                                return EINVAL;
                        enic_action->filter_id = mark->id;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
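
/*
 * Illustrative example (editor's note): a rule with the actions
 * "queue index 3 / mark id 42 / end" leaves enic_action with rq_idx set to
 * the SOP index for RQ 3, filter_id = 42, both
 * FILTER_ACTION_RQ_STEERING_FLAG and FILTER_ACTION_FILTER_ID_FLAG set in
 * flags, and type = FILTER_ACTION_V2.
 */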

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        static const struct enic_action_cap *ea;

        if (enic->filter_tags)
                ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
        const struct filter_generic_1 *gp;
        int i, j, mbyte;
        char buf[128], *bp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
                break;
        case FILTER_USNIC_IP:
        case FILTER_DPDK_1:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                       gp->val_vlan, gp->mask_vlan);

                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                        sprintf(ip4, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                else
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                        sprintf(udp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                else
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                        sprintf(tcp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                else
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                else
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                else
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                else
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                else
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);

                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                                mbyte--;
                        if (mbyte == 0)
                                continue;

                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].mask[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].val[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
                }
                break;
        default:
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
                break;
        }
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
        enic_dump_filter(filt);
        enic_dump_actions(ea);
}


/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attrs,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct filter_v2 *enic_filter,
                struct filter_action_v2 *enic_action)
{
        int ret = 0;
        struct enic *enic = pmd_priv(dev);
        const struct enic_filter_cap *enic_filter_cap;
        const struct enic_action_cap *enic_action_cap;
        const struct rte_flow_action *action;

        FLOW_TRACE();

        memset(enic_filter, 0, sizeof(*enic_filter));
        memset(enic_action, 0, sizeof(*enic_action));

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No pattern specified");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "No action specified");
                return -rte_errno;
        }

        if (attrs) {
                if (attrs->group) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                           NULL,
                                           "priority groups are not supported");
                        return -rte_errno;
                } else if (attrs->priority) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           NULL,
                                           "priorities are not supported");
                        return -rte_errno;
                } else if (attrs->egress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                           NULL,
                                           "egress is not supported");
                        return -rte_errno;
                } else if (!attrs->ingress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                           NULL,
                                           "only ingress is supported");
                        return -rte_errno;
                }

        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "No attribute specified");
                return -rte_errno;
        }

        /* Verify Actions. */
        enic_action_cap = enic_get_action_cap(enic);
        for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
             action++) {
                if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;
                else if (!enic_match_action(action, enic_action_cap->actions))
                        break;
        }
        if (action->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
                                   action, "Invalid action.");
                return -rte_errno;
        }
        ret = enic_action_cap->copy_fn(actions, enic_action);
        if (ret) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                           NULL, "Unsupported action.");
                return -rte_errno;
        }

        /* Verify Flow items. If copying the filter from flow format to enic
         * format fails, the flow is not supported
         */
        enic_filter_cap = enic_get_filter_cap(enic);
        if (enic_filter_cap == NULL) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                           NULL, "Flow API not available");
                return -rte_errno;
        }
        enic_filter->type = enic->flow_filter_mode;
        ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
                                       enic_filter, error);
        return ret;
}

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                   struct filter_action_v2 *enic_action,
                   struct rte_flow_error *error)
{
        struct rte_flow *flow;
        int ret;
        u16 entry;

        FLOW_TRACE();

        flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }

        /* entry[in] is the queue id, entry[out] is the filter Id for delete */
        entry = enic_action->rq_idx;
        ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
                                  enic_action);
        if (!ret) {
                flow->enic_filter_id = entry;
                flow->enic_filter = *enic_filter;
        } else {
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier error");
                rte_free(flow);
                return NULL;
        }
        return flow;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param filter_id[in]
 *   Id of NIC filter.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, u16 filter_id,
                   struct rte_flow_error *error)
{
        int ret;

        FLOW_TRACE();

        ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
        if (ret)
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier failed");
        return ret;
}

/*
 * The following functions are callbacks for Generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        int ret;

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                               &enic_filter, &enic_action);
        if (!ret)
                enic_dump_flow(&enic_action, &enic_filter);
        return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attrs,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
                              &enic_action);
        if (ret < 0)
                return NULL;

        rte_spinlock_lock(&enic->flows_lock);
        flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
                                    error);
        if (flow)
                LIST_INSERT_HEAD(&enic->flows, flow, next);
        rte_spinlock_unlock(&enic->flows_lock);

        return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);
        enic_flow_del_filter(enic, flow->enic_filter_id, error);
        LIST_REMOVE(flow, next);
        /* Free the flow allocated in enic_flow_add_filter() */
        rte_free(flow);
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);

        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                enic_flow_del_filter(enic, flow->enic_filter_id, error);
                LIST_REMOVE(flow, next);
                /* Free the flow allocated in enic_flow_add_filter() */
                rte_free(flow);
        }
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
        .validate = enic_flow_validate,
        .create = enic_flow_create,
        .destroy = enic_flow_destroy,
        .flush = enic_flow_flush,
};
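
/*
 * Usage sketch (editor's example, not part of the driver): an application
 * reaches these callbacks through the generic flow API. For instance,
 * steering UDP-over-IPv4 traffic to queue 1 on a port identified by the
 * application-supplied port_id:
 *
 *      struct rte_flow_error err;
 *      struct rte_flow_attr attr = { .ingress = 1 };
 *      struct rte_flow_action_queue queue = { .index = 1 };
 *      struct rte_flow_item pattern[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *      struct rte_flow *flow = rte_flow_create(port_id, &attr, pattern,
 *                                              actions, &err);
 */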