/*
 * Copyright (c) 2017, Cisco Systems, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in
 * the documentation and/or other materials provided with the
 * distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <errno.h>
#include <rte_log.h>
#include <rte_ethdev.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#ifdef RTE_LIBRTE_ENIC_DEBUG_FLOW
#define FLOW_TRACE() \
        RTE_LOG(DEBUG, PMD, "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        RTE_LOG(level, PMD, fmt, ## args)
#else
#define FLOW_TRACE() do { } while (0)
#define FLOW_LOG(level, fmt, args...) do { } while (0)
#endif
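
/*
 * Note: if RTE_LIBRTE_ENIC_DEBUG_FLOW is not enabled by your build
 * configuration, one way to turn the traces above on (an assumption,
 * not a documented switch) is to define it on the make command line:
 *
 *   make EXTRA_CFLAGS='-DRTE_LIBRTE_ENIC_DEBUG_FLOW'
 */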

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        int (*copy_item)(const struct rte_flow_item *item,
                         struct filter_v2 *enic_filter, u8 *inner_ofst);
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** list of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
};

/* functions for copying flow actions into enic actions */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/* functions for copying items into enic filters */
typedef int(enic_copy_item_fn)(const struct rte_flow_item *item,
                          struct filter_v2 *enic_filter, u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** list of valid actions */
        const enum rte_flow_action_type *actions;
        /** copy function for a particular NIC */
        int (*copy_fn)(const struct rte_flow_action actions[],
                       struct filter_action_v2 *enic_action);
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
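
#ifdef ENIC_FLOW_DOC_EXAMPLES
/*
 * Documentation-only sketch, compiled out (the guard macro, function
 * name, addresses, ports, and queue index are all hypothetical): one
 * pattern/action pair the v1 table above accepts. v1 requires the
 * pattern to start at IPv4, exact-match masks, and both addresses and
 * both ports to be set.
 */
static void
enic_flow_example_v1_5tuple(struct rte_eth_dev *dev,
                            struct rte_flow_error *error)
{
        struct rte_flow_item_ipv4 ip_spec;
        struct rte_flow_item_udp udp_spec;
        struct rte_flow_item pattern[3];
        struct rte_flow_action_queue queue = { .index = 1 };
        struct rte_flow_action actions[] = {
                { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
                { .type = RTE_FLOW_ACTION_TYPE_END },
        };
        struct rte_flow_attr attr = { .ingress = 1 };

        memset(&ip_spec, 0, sizeof(ip_spec));
        ip_spec.hdr.src_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 1));
        ip_spec.hdr.dst_addr = rte_cpu_to_be_32(IPv4(10, 0, 0, 2));
        memset(&udp_spec, 0, sizeof(udp_spec));
        udp_spec.hdr.src_port = rte_cpu_to_be_16(5000);
        udp_spec.hdr.dst_port = rte_cpu_to_be_16(5001);

        memset(pattern, 0, sizeof(pattern));
        pattern[0].type = RTE_FLOW_ITEM_TYPE_IPV4;
        pattern[0].spec = &ip_spec;
        pattern[0].mask = &rte_flow_item_ipv4_mask; /* full src/dst mask */
        pattern[1].type = RTE_FLOW_ITEM_TYPE_UDP;
        pattern[1].spec = &udp_spec;
        pattern[1].mask = &rte_flow_item_udp_mask;  /* full port mask */
        pattern[2].type = RTE_FLOW_ITEM_TYPE_END;

        rte_flow_validate(dev->data->port_id, &attr, pattern, actions, error);
}
#endif /* ENIC_FLOW_DOC_EXAMPLES */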

/**
 * NICs that have the Advanced Filters capability, but with it disabled.
 * In this mode, layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** NICs with Advanced filters enabled */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
        },
};

/** Supported actions for older NICs */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs */
static const enum rte_flow_action_type enic_supported_actions_v2[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_V2_ALL] = {
                .actions = enic_supported_actions_v2,
                .copy_fn = enic_copy_action_v2,
        },
};

static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability;
         * use the (possibly defaulted) mask, as item->mask may be NULL
         */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_udp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability;
         * use the (possibly defaulted) mask, as item->mask may be NULL
         */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability;
         * use the (possibly defaulted) mask, as item->mask may be NULL
         */
        if (!mask_exact_match((const u8 *)&supported_mask,
                             (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        /* Don't support filtering on TPID */
        if (mask) {
                if (mask->tpid != 0)
                        return ENOTSUP;
        } else {
                mask = &rte_flow_item_vlan_mask;
                RTE_ASSERT(mask->tpid == 0);
        }

        if (*inner_ofst == 0) {
                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                if (!spec)
                        return 0;

                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Match all if no spec; defaulting the mask also guards the
                 * memcpys below against a NULL app-supplied mask.
                 */
                if (!spec)
                        return 0;
                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Don't support inner SCTP filtering.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

/**
 * Copy VXLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VxLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
                        struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}
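
#ifdef ENIC_FLOW_DOC_EXAMPLES
/*
 * Documentation-only sketch, compiled out (guard macro, names, and VNI
 * value are hypothetical): an encapsulated pattern the v2/v3 item
 * tables accept. Items up to and including VXLAN match the outer
 * headers; the VXLAN copy function above then sets *inner_ofst, so the
 * trailing ETH item is matched inside L5. Note the VXLAN item needs a
 * spec here: with a NULL spec the copy function returns before setting
 * *inner_ofst and a following ETH item would be treated as outer.
 */
static const struct rte_flow_item_vxlan enic_flow_example_vxlan_spec = {
        .vni = { 0x00, 0x12, 0x34 },
};

static const struct rte_flow_item enic_flow_example_vxlan_pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* outer L2, match all */
        { .type = RTE_FLOW_ITEM_TYPE_IPV4 },    /* outer L3, match all */
        { .type = RTE_FLOW_ITEM_TYPE_UDP },     /* outer L4, match all */
        {
                .type = RTE_FLOW_ITEM_TYPE_VXLAN, /* starts L5 */
                .spec = &enic_flow_example_vxlan_spec,
                .mask = &rte_flow_item_vxlan_mask,
        },
        { .type = RTE_FLOW_ITEM_TYPE_ETH },     /* inner L2, match all */
        { .type = RTE_FLOW_ITEM_TYPE_END },
};
#endif /* ENIC_FLOW_DOC_EXAMPLES */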

/**
 * Return 1 if the current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern, or RTE_FLOW_ITEM_TYPE_END if
 *   this is the first item.
 * @param item_info[in]
 *   Info about this item, like valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that's allowed */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, like valid previous items.
 * @param enic_filter[out]
 *   NIC specific filter derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_items *items_info,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;

        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                /* Get info about how to validate and copy the item. If NULL
                 * is returned, the NIC does not support the item.
                 */
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                item_info = &items_info[item->type];

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                ret = item_info->copy_item(item, enic_filter, &inner_ofst);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The actions are validated as they are copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        /* ENIC_MAGIC_FILTER_ID is reserved for the FLAG
                         * action, so mark ids must stay below it.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID)
                                return EINVAL;
                        enic_action->filter_id = mark->id;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}
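
#ifdef ENIC_FLOW_DOC_EXAMPLES
/*
 * Documentation-only sketch, compiled out (guard macro, names, queue
 * index, and mark id are hypothetical): an action list the v2 copy
 * function above accepts -- steer matches to a queue and tag them with
 * a mark id, which is reported back in received mbufs as an FDIR id.
 */
static const struct rte_flow_action_queue enic_flow_example_queue = {
        .index = 1,
};

static const struct rte_flow_action_mark enic_flow_example_mark = {
        .id = 42,       /* must stay below ENIC_MAGIC_FILTER_ID */
};

static const struct rte_flow_action enic_flow_example_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE,
          .conf = &enic_flow_example_queue },
        { .type = RTE_FLOW_ACTION_TYPE_MARK,
          .conf = &enic_flow_example_mark },
        { .type = RTE_FLOW_ACTION_TYPE_END },
};
#endif /* ENIC_FLOW_DOC_EXAMPLES */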

/** Check if the action is supported */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;

        if (enic->filter_tags)
                ea = &enic_action_cap[FILTER_ACTION_V2_ALL];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
        const struct filter_generic_1 *gp;
        int i, j, mbyte;
        char buf[128], *bp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
                break;
        case FILTER_USNIC_IP:
        case FILTER_DPDK_1:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                       gp->val_vlan, gp->mask_vlan);

                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                        sprintf(ip4, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                else
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                        sprintf(udp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                else
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                        sprintf(tcp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                else
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                else
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                else
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                else
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                else
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);

                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                                mbyte--;
                        if (mbyte == 0)
                                continue;

                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].mask[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].val[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
                }
                break;
        default:
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
                break;
        }
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
        enic_dump_filter(filt);
        enic_dump_actions(ea);
}

/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attrs,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct filter_v2 *enic_filter,
                struct filter_action_v2 *enic_action)
{
        int ret = 0;
        struct enic *enic = pmd_priv(dev);
        const struct enic_filter_cap *enic_filter_cap;
        const struct enic_action_cap *enic_action_cap;
        const struct rte_flow_action *action;

        FLOW_TRACE();

        memset(enic_filter, 0, sizeof(*enic_filter));
        memset(enic_action, 0, sizeof(*enic_action));

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No pattern specified");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "No action specified");
                return -rte_errno;
        }

        if (attrs) {
                if (attrs->group) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                           NULL,
                                           "priority groups are not supported");
                        return -rte_errno;
                } else if (attrs->priority) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           NULL,
                                           "priorities are not supported");
                        return -rte_errno;
                } else if (attrs->egress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                           NULL,
                                           "egress is not supported");
                        return -rte_errno;
                } else if (!attrs->ingress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                           NULL,
                                           "only ingress is supported");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "No attribute specified");
                return -rte_errno;
        }

        /* Verify Actions. */
        enic_action_cap = enic_get_action_cap(enic);
        for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
             action++) {
                if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;
                else if (!enic_match_action(action, enic_action_cap->actions))
                        break;
        }
        if (action->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
                                   action, "Invalid action.");
                return -rte_errno;
        }
        ret = enic_action_cap->copy_fn(actions, enic_action);
        if (ret) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                           NULL, "Unsupported action.");
                return -rte_errno;
        }

        /* Verify Flow items. If copying the filter from flow format to enic
         * format fails, the flow is not supported.
         */
        enic_filter_cap = enic_get_filter_cap(enic);
        if (enic_filter_cap == NULL) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                           NULL, "Flow API not available");
                return -rte_errno;
        }
        enic_filter->type = enic->flow_filter_mode;
        ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
                                       enic_filter, error);
        return ret;
}

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
                   struct filter_action_v2 *enic_action,
                   struct rte_flow_error *error)
{
        struct rte_flow *flow;
        int ret;
        u16 entry;

        FLOW_TRACE();

        flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate flow memory");
                return NULL;
        }

        /* entry[in] is the queue id, entry[out] is the filter Id for delete */
        entry = enic_action->rq_idx;
        ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
                                  enic_action);
        if (!ret) {
                flow->enic_filter_id = entry;
                flow->enic_filter = *enic_filter;
        } else {
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier error");
                rte_free(flow);
                return NULL;
        }
        return flow;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param filter_id[in]
 *   Id of NIC filter.
 * @param error[out]
 */
static int
enic_flow_del_filter(struct enic *enic, u16 filter_id,
                   struct rte_flow_error *error)
{
        int ret;

        FLOW_TRACE();

        ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
        if (ret)
                rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "vnic_dev_classifier failed");
        return ret;
}

/*
 * The following functions are callbacks for Generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        int ret;

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error,
                               &enic_filter, &enic_action);
        if (!ret)
                enic_dump_flow(&enic_action, &enic_filter);
        return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attrs,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        struct filter_v2 enic_filter;
        struct filter_action_v2 enic_action;
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
                              &enic_action);
        if (ret < 0)
                return NULL;

        rte_spinlock_lock(&enic->flows_lock);
        flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
                                    error);
        if (flow)
                LIST_INSERT_HEAD(&enic->flows, flow, next);
        rte_spinlock_unlock(&enic->flows_lock);

        return flow;
}

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
                  __rte_unused struct rte_flow_error *error)
{
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);
        enic_flow_del_filter(enic, flow->enic_filter_id, error);
        LIST_REMOVE(flow, next);
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct rte_flow *flow;
        struct enic *enic = pmd_priv(dev);

        FLOW_TRACE();

        rte_spinlock_lock(&enic->flows_lock);

        while (!LIST_EMPTY(&enic->flows)) {
                flow = LIST_FIRST(&enic->flows);
                enic_flow_del_filter(enic, flow->enic_filter_id, error);
                LIST_REMOVE(flow, next);
        }
        rte_spinlock_unlock(&enic->flows_lock);
        return 0;
}

/**
 * Flow callback registration.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
        .validate = enic_flow_validate,
        .create = enic_flow_create,
        .destroy = enic_flow_destroy,
        .flush = enic_flow_flush,
};
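
#ifdef ENIC_FLOW_DOC_EXAMPLES
/*
 * Documentation-only sketch, compiled out (the guard macro and helper
 * name are hypothetical): how an application reaches the ops table
 * above. rte_flow_create() looks the table up through the PMD's
 * filter_ctrl hook (RTE_ETH_FILTER_GENERIC) and then invokes these
 * callbacks. The pattern and actions are the example arrays defined
 * earlier in this file; since the actions include MARK, this assumes a
 * NIC reporting the v2 action capability.
 */
static void
enic_flow_example_create(uint8_t port_id)
{
        struct rte_flow_attr attr = { .ingress = 1 }; /* enic is ingress-only */
        struct rte_flow_error err = { 0 };
        struct rte_flow *flow;

        flow = rte_flow_create(port_id, &attr,
                               enic_flow_example_vxlan_pattern,
                               enic_flow_example_actions, &err);
        if (!flow)
                RTE_LOG(INFO, PMD, "flow create failed: %s\n",
                        err.message ? err.message : "(no message)");
        else
                rte_flow_destroy(port_id, flow, &err);
}
#endif /* ENIC_FLOW_DOC_EXAMPLES */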