/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        int (*copy_item)(const struct rte_flow_item *item,
                         struct filter_v2 *enic_filter, u8 *inner_ofst);
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/* functions for copying items into enic filters */
typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
                          struct filter_v2 *enic_filter, u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        int (*copy_fn)(const struct rte_flow_action actions[],
                       struct filter_action_v2 *enic_action);
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
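
/*
 * Illustrative sketch (editor's addition, not driver code): a minimal
 * rte_flow pattern that the v1 table above accepts. IPv4 may start the
 * pattern and UDP must follow it; both items need fully-specified src/dst
 * fields because only 5-tuple perfect match is supported, and the default
 * item masks then satisfy the exact-match check. The ENIC_FLOW_EXAMPLES
 * guard and all addresses/ports are made-up values.
 */
#ifdef ENIC_FLOW_EXAMPLES
static const struct rte_flow_item_ipv4 example_ipv4_spec = {
        .hdr = {
                .src_addr = RTE_BE32(0x0a000001), /* 10.0.0.1 */
                .dst_addr = RTE_BE32(0x0a000002), /* 10.0.0.2 */
        },
};
static const struct rte_flow_item_udp example_udp_spec = {
        .hdr = {
                .src_port = RTE_BE16(5000),
                .dst_port = RTE_BE16(6000),
        },
};
static const struct rte_flow_item example_v1_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &example_ipv4_spec },
        { .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &example_udp_spec },
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* ENIC_FLOW_EXAMPLES */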

/**
 * NICs that have the Advanced Filters capability but have it disabled.
 * This means that layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
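
/*
 * Illustrative sketch (editor's addition, not driver code): a pattern the
 * v3 table accepts for VXLAN inner matching. Each item's predecessor obeys
 * prev_items above; once the VXLAN item is copied, *inner_ofst becomes
 * non-zero and trailing items are written into the L5 key instead of
 * L2/L3/L4. Empty specs simply match all traffic of that type.
 */
#ifdef ENIC_FLOW_EXAMPLES
static const struct rte_flow_item example_vxlan_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },   /* outer L2 */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },  /* outer L3 */
        { .type = RTE_FLOW_ITEM_TYPE_UDP },   /* outer L4 */
        { .type = RTE_FLOW_ITEM_TYPE_VXLAN }, /* starts inner matching */
        { .type = RTE_FLOW_ITEM_TYPE_ETH },   /* inner L2, goes into L5 key */
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* ENIC_FLOW_EXAMPLES */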

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
};
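
/*
 * Illustrative sketch (editor's addition, not driver code): an action list
 * that enic_copy_action_v2() below accepts on NICs with filter-id support.
 * QUEUE provides the mandatory fate action and MARK tags matched packets;
 * the queue index and mark id are made-up values.
 */
#ifdef ENIC_FLOW_EXAMPLES
static const struct rte_flow_action_queue example_queue = { .index = 0 };
static const struct rte_flow_action_mark example_mark = { .id = 1234 };
static const struct rte_flow_action example_actions_v2[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &example_queue },
        { .type = RTE_FLOW_ACTION_TYPE_MARK, .conf = &example_mark },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* ENIC_FLOW_EXAMPLES */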

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_udp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches the capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                             (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        if (*inner_ofst == 0) {
                struct ether_hdr *eth_mask =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
                struct ether_hdr *eth_val =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].val;

                /* Outer TPID cannot be matched */
                if (eth_mask->ether_type)
                        return ENOTSUP;
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;

                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                if (!spec)
                        return 0;

                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

/**
 * Copy VXLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VxLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
                        struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

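        /*
         * Inner headers that follow the VXLAN item are matched relative to
         * the start of L5, which begins with this 8-byte VXLAN header.
         */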
        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}

/**
 * Return 1 if current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern or RTE_FLOW_ITEM_TYPE_END if this
 *   is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that is allowed */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}
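
/*
 * Illustrative checks (editor's addition, not driver code): with the v3
 * table, VXLAN stacked on UDP is valid, while TCP directly on ETH is not
 * because ETH is not in TCP's prev_items list.
 */
#ifdef ENIC_FLOW_EXAMPLES
static void
example_stacking_checks(void)
{
        RTE_ASSERT(item_stacking_valid(RTE_FLOW_ITEM_TYPE_UDP,
                   &enic_items_v3[RTE_FLOW_ITEM_TYPE_VXLAN], 0));
        RTE_ASSERT(!item_stacking_valid(RTE_FLOW_ITEM_TYPE_ETH,
                   &enic_items_v3[RTE_FLOW_ITEM_TYPE_TCP], 0));
}
#endif /* ENIC_FLOW_EXAMPLES */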

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_items *items_info,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;

        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. An
                 * item the NIC does not support has no copy function in
                 * its table entry.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &items_info[item->type];
                if (!item_info->copy_item) {
                        ret = ENOTSUP;
                        goto item_not_supported;
                }

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                ret = item_info->copy_item(item, enic_filter, &inner_ofst);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The action list is validated as the actions are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The action list is validated as the actions are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;
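        /*
         * The overlap bits reject duplicates: FATE is set once a fate
         * action (queue or drop) is seen, MARK once a mark or flag action
         * is seen; a second action of either kind is unsupported.
         */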

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
                         * in the range of allowed mark ids.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID)
                                return EINVAL;
                        enic_action->filter_id = mark->id;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
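        /*
         * flow_filter_mode is the filter type negotiated at probe time;
         * a zero value means no usable type was found, and the caller then
         * reports the flow API as unavailable.
         */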
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
        if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
        const struct filter_generic_1 *gp;
        int i, j, mbyte;
        char buf[128], *bp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
                break;
        case FILTER_USNIC_IP:
        case FILTER_DPDK_1:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                       gp->val_vlan, gp->mask_vlan);

                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                        sprintf(ip4, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                else
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                        sprintf(udp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                else
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                        sprintf(tcp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                else
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                else
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                else
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                else
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                else
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);

                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                                mbyte--;
                        if (mbyte == 0)
                                continue;

                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].mask[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].val[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
                }
                break;
        default:
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
                break;
        }
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
        enic_dump_filter(filt);
        enic_dump_actions(ea);
}

/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attrs,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct filter_v2 *enic_filter,
                struct filter_action_v2 *enic_action)
{
        int ret = 0;
        struct enic *enic = pmd_priv(dev);
        const struct enic_filter_cap *enic_filter_cap;
        const struct enic_action_cap *enic_action_cap;
        const struct rte_flow_action *action;

        FLOW_TRACE();

        memset(enic_filter, 0, sizeof(*enic_filter));
        memset(enic_action, 0, sizeof(*enic_action));

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No pattern specified");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "No action specified");
                return -rte_errno;
        }

        if (attrs) {
                if (attrs->group) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                           NULL,
                                           "priority groups are not supported");
                        return -rte_errno;
                } else if (attrs->priority) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           NULL,
                                           "priorities are not supported");
                        return -rte_errno;
                } else if (attrs->egress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                           NULL,
                                           "egress is not supported");
                        return -rte_errno;
                } else if (attrs->transfer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           NULL,
                                           "transfer is not supported");
                        return -rte_errno;
                } else if (!attrs->ingress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                           NULL,
                                           "only ingress is supported");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "No attribute specified");
                return -rte_errno;
        }

        /* Verify Actions. */
        enic_action_cap = enic_get_action_cap(enic);
        for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
             action++) {
                if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;
                else if (!enic_match_action(action, enic_action_cap->actions))
                        break;
        }
        if (action->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
                                   action, "Invalid action.");
                return -rte_errno;
        }
        ret = enic_action_cap->copy_fn(actions, enic_action);
        if (ret) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                           NULL, "Unsupported action.");
                return -rte_errno;
        }

        /* Verify Flow items. If copying the filter from flow format to enic
         * format fails, the flow is not supported
         */
        enic_filter_cap = enic_get_filter_cap(enic);
        if (enic_filter_cap == NULL) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                           NULL, "Flow API not available");
                return -rte_errno;
        }
        enic_filter->type = enic->flow_filter_mode;
        ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
                               enic_filter, error);
        return ret;
}

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                   struct filter_action_v2 *enic_action,
                   struct rte_flow_error *error)
{
        struct rte_flow *flow;
        int ret;
        u16 entry;

        FLOW_TRACE();

        flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }

        /* entry[in] is the queue id, entry[out] is the filter Id for delete */
        entry = enic_action->rq_idx;
        ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
                                  enic_action);
        if (!ret) {
                flow->enic_filter_id = entry;
                flow->enic_filter = *enic_filter;
        } else {
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier error");
                rte_free(flow);
                return NULL;
        }
        return flow;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param filter_id[in]
 *   Id of NIC filter.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, u16 filter_id,
                   struct rte_flow_error *error)
{
        int ret;

        FLOW_TRACE();

        ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
        if (ret)
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier failed");
        return ret;
}

/*
 * The following functions are callbacks for Generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        int ret;

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                              &enic_filter, &enic_action);
        if (!ret)
                enic_dump_flow(&enic_action, &enic_filter);
        return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attrs,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
                              &enic_action);
        if (ret < 0)
                return NULL;

        rte_spinlock_lock(&enic->flows_lock);
        flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
                                    error);
        if (flow)
                LIST_INSERT_HEAD(&enic->flows, flow, next);
        rte_spinlock_unlock(&enic->flows_lock);

        return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  __rte_unused struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);
        enic_flow_del_filter(enic, flow->enic_filter_id, error);
        LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);

        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                enic_flow_del_filter(enic, flow->enic_filter_id, error);
                LIST_REMOVE(flow, next);
        }
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
        .validate = enic_flow_validate,
        .create = enic_flow_create,
        .destroy = enic_flow_destroy,
        .flush = enic_flow_flush,
};
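
/*
 * Illustrative sketch (editor's addition, not driver code): how an
 * application reaches the ops above through the generic rte_flow entry
 * points. port_id, pattern, and actions are assumed to be set up as in
 * the earlier examples.
 */
#ifdef ENIC_FLOW_EXAMPLES
static int
example_create_flow(uint16_t port_id, const struct rte_flow_item *pattern,
                    const struct rte_flow_action *actions)
{
        const struct rte_flow_attr attr = { .ingress = 1 };
        struct rte_flow_error err;
        struct rte_flow *flow;

        /* validate first; enic parses again inside create anyway */
        if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
                return -1;
        flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
        if (flow == NULL)
                return -1;
        return rte_flow_destroy(port_id, flow, &err);
}
#endif /* ENIC_FLOW_EXAMPLES */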