/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2008-2017 Cisco Systems, Inc.  All rights reserved.
 */

#include <errno.h>
#include <stdint.h>
#include <rte_log.h>
#include <rte_ethdev_driver.h>
#include <rte_flow_driver.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_udp.h>

#include "enic_compat.h"
#include "enic.h"
#include "vnic_dev.h"
#include "vnic_nic.h"

#define FLOW_TRACE() \
        rte_log(RTE_LOG_DEBUG, enicpmd_logtype_flow, \
                "%s()\n", __func__)
#define FLOW_LOG(level, fmt, args...) \
        rte_log(RTE_LOG_ ## level, enicpmd_logtype_flow, \
                fmt "\n", ##args)

/** Info about how to copy items into enic filters. */
struct enic_items {
        /** Function for copying and validating an item. */
        int (*copy_item)(const struct rte_flow_item *item,
                         struct filter_v2 *enic_filter, u8 *inner_ofst);
        /** List of valid previous items. */
        const enum rte_flow_item_type * const prev_items;
        /** True if it's OK for this item to be the first item. For some NIC
         * versions, it's invalid to start the stack above layer 3.
         */
        const u8 valid_start_item;
};

/** Filtering capabilities for various NIC and firmware versions. */
struct enic_filter_cap {
        /** List of valid items and their handlers and attributes. */
        const struct enic_items *item_info;
};

/* Functions for copying flow actions into enic actions. */
typedef int (copy_action_fn)(const struct rte_flow_action actions[],
                             struct filter_action_v2 *enic_action);

/* Functions for copying items into enic filters. */
typedef int (enic_copy_item_fn)(const struct rte_flow_item *item,
                                struct filter_v2 *enic_filter, u8 *inner_ofst);

/** Action capabilities for various NICs. */
struct enic_action_cap {
        /** List of valid actions. */
        const enum rte_flow_action_type *actions;
        /** Copy function for a particular NIC. */
        int (*copy_fn)(const struct rte_flow_action actions[],
                       struct filter_action_v2 *enic_action);
};

/* Forward declarations */
static enic_copy_item_fn enic_copy_item_ipv4_v1;
static enic_copy_item_fn enic_copy_item_udp_v1;
static enic_copy_item_fn enic_copy_item_tcp_v1;
static enic_copy_item_fn enic_copy_item_eth_v2;
static enic_copy_item_fn enic_copy_item_vlan_v2;
static enic_copy_item_fn enic_copy_item_ipv4_v2;
static enic_copy_item_fn enic_copy_item_ipv6_v2;
static enic_copy_item_fn enic_copy_item_udp_v2;
static enic_copy_item_fn enic_copy_item_tcp_v2;
static enic_copy_item_fn enic_copy_item_sctp_v2;
static enic_copy_item_fn enic_copy_item_vxlan_v2;
static copy_action_fn enic_copy_action_v1;
static copy_action_fn enic_copy_action_v2;

/**
 * Legacy NICs or NICs with outdated firmware. Only 5-tuple perfect match
 * is supported.
 */
static const struct enic_items enic_items_v1[] = {
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v1,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v1,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
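
/*
 * Example: with the v1 table above, the pattern "IPV4 / UDP / END" is valid
 * (IPv4 may start the stack, and UDP may follow IPv4), while "UDP / END" is
 * rejected because UDP has valid_start_item = 0.
 */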

/**
 * NICs that have the Advanced Filters capability, but it is disabled. In
 * this mode, layer 3 must be specified.
 */
static const struct enic_items enic_items_v2[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 0,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};

/** NICs with Advanced Filters enabled. */
static const struct enic_items enic_items_v3[] = {
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .copy_item = enic_copy_item_eth_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_VXLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .copy_item = enic_copy_item_vlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .copy_item = enic_copy_item_ipv4_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .copy_item = enic_copy_item_ipv6_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_ETH,
                               RTE_FLOW_ITEM_TYPE_VLAN,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .copy_item = enic_copy_item_udp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .copy_item = enic_copy_item_tcp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_SCTP] = {
                .copy_item = enic_copy_item_sctp_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
        [RTE_FLOW_ITEM_TYPE_VXLAN] = {
                .copy_item = enic_copy_item_vxlan_v2,
                .valid_start_item = 1,
                .prev_items = (const enum rte_flow_item_type[]) {
                               RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_END,
                },
        },
};
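
/*
 * Example: with Advanced Filters, an encapsulated match such as
 * "ETH / IPV4 / UDP / VXLAN / ETH / IPV4 / END" is accepted: VXLAN may
 * follow UDP, ETH lists VXLAN as a valid previous item, and the items after
 * VXLAN are copied into the L5 key at the offset tracked by inner_ofst.
 */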

/** Filtering capabilities indexed by the NIC's supported filter type. */
static const struct enic_filter_cap enic_filter_cap[] = {
        [FILTER_IPV4_5TUPLE] = {
                .item_info = enic_items_v1,
        },
        [FILTER_USNIC_IP] = {
                .item_info = enic_items_v2,
        },
        [FILTER_DPDK_1] = {
                .item_info = enic_items_v3,
        },
};
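
/*
 * This table is indexed by the filter type reported by the firmware;
 * enic_get_filter_cap() looks it up via enic->flow_filter_mode.
 */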

/** Supported actions for older NICs. */
static const enum rte_flow_action_type enic_supported_actions_v1[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Supported actions for newer NICs. */
static const enum rte_flow_action_type enic_supported_actions_v2_id[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_END,
};

static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
        RTE_FLOW_ACTION_TYPE_QUEUE,
        RTE_FLOW_ACTION_TYPE_MARK,
        RTE_FLOW_ACTION_TYPE_FLAG,
        RTE_FLOW_ACTION_TYPE_DROP,
        RTE_FLOW_ACTION_TYPE_COUNT,
        RTE_FLOW_ACTION_TYPE_END,
};

/** Action capabilities indexed by NIC version information. */
static const struct enic_action_cap enic_action_cap[] = {
        [FILTER_ACTION_RQ_STEERING_FLAG] = {
                .actions = enic_supported_actions_v1,
                .copy_fn = enic_copy_action_v1,
        },
        [FILTER_ACTION_FILTER_ID_FLAG] = {
                .actions = enic_supported_actions_v2_id,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_DROP_FLAG] = {
                .actions = enic_supported_actions_v2_drop,
                .copy_fn = enic_copy_action_v2,
        },
        [FILTER_ACTION_COUNTER_FLAG] = {
                .actions = enic_supported_actions_v2_count,
                .copy_fn = enic_copy_action_v2,
        },
};

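/**
 * Return 1 if the supplied mask is byte-for-byte identical to the supported
 * mask over the given size, 0 otherwise. The version 1 copy functions use
 * this because they only support exact-match filters.
 */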
static int
mask_exact_match(const u8 *supported, const u8 *supplied,
                 unsigned int size)
{
        unsigned int i;
        for (i = 0; i < size; i++) {
                if (supported[i] != supplied[i])
                        return 0;
        }
        return 1;
}

/**
 * Copy IPv4 item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_ipv4_v1(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct ipv4_hdr supported_mask = {
                .src_addr = 0xffffffff,
                .dst_addr = 0xffffffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_ipv4_mask;

        /* This is an exact match filter, both fields must be set */
        if (!spec || !spec->hdr.src_addr || !spec->hdr.dst_addr) {
                FLOW_LOG(ERR, "IPv4 exact match src/dst addr");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "IPv4 exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_addr = spec->hdr.src_addr;
        enic_5tup->dst_addr = spec->hdr.dst_addr;

        return 0;
}

/**
 * Copy UDP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_udp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct udp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "UDP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "UDP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_UDP;

        return 0;
}

/**
 * Copy TCP item into version 1 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Should always be 0 for version 1.
 */
static int
enic_copy_item_tcp_v1(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_ipv4_5tuple *enic_5tup = &enic_filter->u.ipv4;
        struct tcp_hdr supported_mask = {
                .src_port = 0xffff,
                .dst_port = 0xffff,
        };

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        if (!mask)
                mask = &rte_flow_item_tcp_mask;

        /* This is an exact match filter, both ports must be set */
        if (!spec || !spec->hdr.src_port || !spec->hdr.dst_port) {
                FLOW_LOG(ERR, "TCP exact match src/dst port");
                return ENOTSUP;
        }

        /* check that the supplied mask exactly matches capability */
        if (!mask_exact_match((const u8 *)&supported_mask,
                              (const u8 *)mask, sizeof(*mask))) {
                FLOW_LOG(ERR, "TCP exact match mask");
                return ENOTSUP;
        }

        enic_filter->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
        enic_5tup->src_port = spec->hdr.src_port;
        enic_5tup->dst_port = spec->hdr.dst_port;
        enic_5tup->protocol = PROTO_TCP;

        return 0;
}

/**
 * Copy ETH item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_eth_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        struct ether_hdr enic_spec;
        struct ether_hdr enic_mask;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_eth_mask;

        memcpy(enic_spec.d_addr.addr_bytes, spec->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_spec.s_addr.addr_bytes, spec->src.addr_bytes,
               ETHER_ADDR_LEN);

        memcpy(enic_mask.d_addr.addr_bytes, mask->dst.addr_bytes,
               ETHER_ADDR_LEN);
        memcpy(enic_mask.s_addr.addr_bytes, mask->src.addr_bytes,
               ETHER_ADDR_LEN);
        enic_spec.ether_type = spec->type;
        enic_mask.ether_type = mask->type;

        if (*inner_ofst == 0) {
                /* outer header */
                memcpy(gp->layer[FILTER_GENERIC_1_L2].mask, &enic_mask,
                       sizeof(struct ether_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L2].val, &enic_spec,
                       sizeof(struct ether_hdr));
        } else {
                /* inner header */
                if ((*inner_ofst + sizeof(struct ether_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                /* Offset into L5 where inner Ethernet header goes */
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       &enic_mask, sizeof(struct ether_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       &enic_spec, sizeof(struct ether_hdr));
                *inner_ofst += sizeof(struct ether_hdr);
        }
        return 0;
}

/**
 * Copy VLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_vlan_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vlan_mask;

        if (*inner_ofst == 0) {
                struct ether_hdr *eth_mask =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].mask;
                struct ether_hdr *eth_val =
                        (void *)gp->layer[FILTER_GENERIC_1_L2].val;

                /* Outer TPID cannot be matched */
                if (eth_mask->ether_type)
                        return ENOTSUP;
                eth_mask->ether_type = mask->inner_type;
                eth_val->ether_type = spec->inner_type;

                /* Outer header. Use the vlan mask/val fields */
                gp->mask_vlan = mask->tci;
                gp->val_vlan = spec->tci;
        } else {
                /* Inner header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct vlan_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct vlan_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct vlan_hdr));
                *inner_ofst += sizeof(struct vlan_hdr);
        }
        return 0;
}

/**
 * Copy IPv4 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv4_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst == 0) {
                /* Match IPv4 */
                gp->mask_flags |= FILTER_GENERIC_1_IPV4;
                gp->val_flags |= FILTER_GENERIC_1_IPV4;

                /* Match all if no spec */
                if (!spec)
                        return 0;

                if (!mask)
                        mask = &rte_flow_item_ipv4_mask;

                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv4_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv4_hdr));
        } else {
                /* Inner IPv4 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv4_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv4_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv4_hdr));
                *inner_ofst += sizeof(struct ipv4_hdr);
        }
        return 0;
}

/**
 * Copy IPv6 item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_ipv6_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match IPv6 */
        gp->mask_flags |= FILTER_GENERIC_1_IPV6;
        gp->val_flags |= FILTER_GENERIC_1_IPV6;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_ipv6_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L3].mask, &mask->hdr,
                       sizeof(struct ipv6_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L3].val, &spec->hdr,
                       sizeof(struct ipv6_hdr));
        } else {
                /* Inner IPv6 header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct ipv6_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct ipv6_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct ipv6_hdr));
                *inner_ofst += sizeof(struct ipv6_hdr);
        }
        return 0;
}

/**
 * Copy UDP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_udp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match UDP */
        gp->mask_flags |= FILTER_GENERIC_1_UDP;
        gp->val_flags |= FILTER_GENERIC_1_UDP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_udp_mask;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct udp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct udp_hdr));
        } else {
                /* Inner UDP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct udp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct udp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct udp_hdr));
                *inner_ofst += sizeof(struct udp_hdr);
        }
        return 0;
}

/**
 * Copy TCP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   If zero, this is an outer header. If non-zero, this is the offset into L5
 *   where the header begins.
 */
static int
enic_copy_item_tcp_v2(const struct rte_flow_item *item,
                      struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        /* Match TCP */
        gp->mask_flags |= FILTER_GENERIC_1_TCP;
        gp->val_flags |= FILTER_GENERIC_1_TCP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        /* Unlike the other v2 copy functions, an explicit mask is required */
        if (!mask)
                return ENOTSUP;

        if (*inner_ofst == 0) {
                memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
                       sizeof(struct tcp_hdr));
                memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
                       sizeof(struct tcp_hdr));
        } else {
                /* Inner TCP header. Mask/Val start at *inner_ofst into L5 */
                if ((*inner_ofst + sizeof(struct tcp_hdr)) >
                     FILTER_GENERIC_1_KEY_LEN)
                        return ENOTSUP;
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].mask[*inner_ofst],
                       mask, sizeof(struct tcp_hdr));
                memcpy(&gp->layer[FILTER_GENERIC_1_L5].val[*inner_ofst],
                       spec, sizeof(struct tcp_hdr));
                *inner_ofst += sizeof(struct tcp_hdr);
        }
        return 0;
}

/**
 * Copy SCTP item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. Inner SCTP filtering is not supported.
 */
static int
enic_copy_item_sctp_v2(const struct rte_flow_item *item,
                       struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_sctp *spec = item->spec;
        const struct rte_flow_item_sctp *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return ENOTSUP;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_sctp_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L4].mask, &mask->hdr,
               sizeof(struct sctp_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L4].val, &spec->hdr,
               sizeof(struct sctp_hdr));
        return 0;
}

/**
 * Copy VXLAN item into version 2 NIC filter.
 *
 * @param item[in]
 *   Item specification.
 * @param enic_filter[out]
 *   Partially filled in NIC filter structure.
 * @param inner_ofst[in]
 *   Must be 0. VXLAN headers always start at the beginning of L5.
 */
static int
enic_copy_item_vxlan_v2(const struct rte_flow_item *item,
                        struct filter_v2 *enic_filter, u8 *inner_ofst)
{
        const struct rte_flow_item_vxlan *spec = item->spec;
        const struct rte_flow_item_vxlan *mask = item->mask;
        struct filter_generic_1 *gp = &enic_filter->u.generic_1;

        FLOW_TRACE();

        if (*inner_ofst)
                return EINVAL;

        /* Match all if no spec */
        if (!spec)
                return 0;

        if (!mask)
                mask = &rte_flow_item_vxlan_mask;

        memcpy(gp->layer[FILTER_GENERIC_1_L5].mask, mask,
               sizeof(struct vxlan_hdr));
        memcpy(gp->layer[FILTER_GENERIC_1_L5].val, spec,
               sizeof(struct vxlan_hdr));

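        /*
         * Items that follow VXLAN in the pattern are inner headers; setting
         * inner_ofst makes the remaining copy functions place them in the L5
         * key just past the VXLAN header.
         */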
        *inner_ofst = sizeof(struct vxlan_hdr);
        return 0;
}

/**
 * Return 1 if the current item is valid on top of the previous one.
 *
 * @param prev_item[in]
 *   The item before this one in the pattern, or RTE_FLOW_ITEM_TYPE_END if
 *   this is the first item.
 * @param item_info[in]
 *   Info about this item, such as the valid previous items.
 * @param is_first_item[in]
 *   True if this is the first item in the pattern.
 */
static int
item_stacking_valid(enum rte_flow_item_type prev_item,
                    const struct enic_items *item_info, u8 is_first_item)
{
        enum rte_flow_item_type const *allowed_items = item_info->prev_items;

        FLOW_TRACE();

        for (; *allowed_items != RTE_FLOW_ITEM_TYPE_END; allowed_items++) {
                if (prev_item == *allowed_items)
                        return 1;
        }

        /* This is the first item in the stack. Check if that is allowed */
        if (is_first_item && item_info->valid_start_item)
                return 1;

        return 0;
}

/**
 * Build the internal enic filter structure from the provided pattern. The
 * pattern is validated as the items are copied.
 *
 * @param pattern[in]
 * @param items_info[in]
 *   Info about this NIC's item support, such as the valid previous items.
 * @param enic_filter[out]
 *   NIC specific filters derived from the pattern.
 * @param error[out]
 */
static int
enic_copy_filter(const struct rte_flow_item pattern[],
                 const struct enic_items *items_info,
                 struct filter_v2 *enic_filter,
                 struct rte_flow_error *error)
{
        int ret;
        const struct rte_flow_item *item = pattern;
        u8 inner_ofst = 0; /* If encapsulated, ofst into L5 */
        enum rte_flow_item_type prev_item;
        const struct enic_items *item_info;
        u8 is_first_item = 1;

        FLOW_TRACE();

        prev_item = 0;

        for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;

                /* Get info about how to validate and copy the item. If the
                 * item's copy function is NULL, the NIC does not support it.
                 */
                item_info = &items_info[item->type];
                if (!item_info->copy_item) {
                        ret = ENOTSUP;
                        goto item_not_supported;
                }

                /* check to see if item stacking is valid */
                if (!item_stacking_valid(prev_item, item_info, is_first_item))
                        goto stacking_error;

                ret = item_info->copy_item(item, enic_filter, &inner_ofst);
                if (ret)
                        goto item_not_supported;
                prev_item = item->type;
                is_first_item = 0;
        }
        return 0;

item_not_supported:
        rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_ITEM,
                           NULL, "enic type error");
        return -rte_errno;

stacking_error:
        rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM,
                           item, "stacking error");
        return -rte_errno;
}

/**
 * Build the internal version 1 NIC action structure from the provided
 * actions. The action list is validated as it is copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v1(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
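        /*
         * 'overlap' records which kinds of actions have been seen; at most
         * one fate-deciding action (QUEUE here) is allowed per flow.
         */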
        enum { FATE = 1, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;

                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        break;
                }
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_RQ_STEERING;
        return 0;
}

/**
 * Build the internal version 2 NIC action structure from the provided
 * actions. The action list is validated as it is copied.
 *
 * @param actions[in]
 * @param enic_action[out]
 *   NIC specific actions derived from the actions.
 */
static int
enic_copy_action_v2(const struct rte_flow_action actions[],
                    struct filter_action_v2 *enic_action)
{
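        /*
         * 'overlap' records which kinds of actions have been seen: at most
         * one fate action (QUEUE or DROP) and at most one mark-type action
         * (MARK or FLAG) are allowed per flow.
         */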
        enum { FATE = 1, MARK = 2, };
        uint32_t overlap = 0;

        FLOW_TRACE();

        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_QUEUE: {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->rq_idx =
                                enic_rte_rq_idx_to_sop_idx(queue->index);
                        enic_action->flags |= FILTER_ACTION_RQ_STEERING_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_MARK: {
                        const struct rte_flow_action_mark *mark =
                                (const struct rte_flow_action_mark *)
                                actions->conf;

                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        /* ENIC_MAGIC_FILTER_ID is reserved and is the highest
                         * in the range of allowed mark ids.
                         */
                        if (mark->id >= ENIC_MAGIC_FILTER_ID)
                                return EINVAL;
                        enic_action->filter_id = mark->id;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_FLAG: {
                        if (overlap & MARK)
                                return ENOTSUP;
                        overlap |= MARK;
                        enic_action->filter_id = ENIC_MAGIC_FILTER_ID;
                        enic_action->flags |= FILTER_ACTION_FILTER_ID_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_DROP: {
                        if (overlap & FATE)
                                return ENOTSUP;
                        overlap |= FATE;
                        enic_action->flags |= FILTER_ACTION_DROP_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_COUNT: {
                        enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
                        break;
                }
                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                default:
                        RTE_ASSERT(0);
                        break;
                }
        }
        if (!(overlap & FATE))
                return ENOTSUP;
        enic_action->type = FILTER_ACTION_V2;
        return 0;
}

/** Check if the action is supported. */
static int
enic_match_action(const struct rte_flow_action *action,
                  const enum rte_flow_action_type *supported_actions)
{
        for (; *supported_actions != RTE_FLOW_ACTION_TYPE_END;
             supported_actions++) {
                if (action->type == *supported_actions)
                        return 1;
        }
        return 0;
}

/** Get the NIC filter capabilities structure. */
static const struct enic_filter_cap *
enic_get_filter_cap(struct enic *enic)
{
        if (enic->flow_filter_mode)
                return &enic_filter_cap[enic->flow_filter_mode];

        return NULL;
}

/** Get the actions for this NIC version. */
static const struct enic_action_cap *
enic_get_action_cap(struct enic *enic)
{
        const struct enic_action_cap *ea;
        uint8_t actions;

        actions = enic->filter_actions;
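        /*
         * Pick the most capable action set the firmware advertises. The
         * supported-action lists above are nested supersets, so checking the
         * flags from most to least capable is sufficient.
         */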
        if (actions & FILTER_ACTION_COUNTER_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
        else if (actions & FILTER_ACTION_DROP_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
        else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
                ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
        else
                ea = &enic_action_cap[FILTER_ACTION_RQ_STEERING_FLAG];
        return ea;
}

/* Debug function to dump internal NIC action structure. */
static void
enic_dump_actions(const struct filter_action_v2 *ea)
{
        if (ea->type == FILTER_ACTION_RQ_STEERING) {
                FLOW_LOG(INFO, "Action(V1), queue: %u\n", ea->rq_idx);
        } else if (ea->type == FILTER_ACTION_V2) {
                FLOW_LOG(INFO, "Actions(V2)\n");
                if (ea->flags & FILTER_ACTION_RQ_STEERING_FLAG)
                        FLOW_LOG(INFO, "\tqueue: %u\n",
                               enic_sop_rq_idx_to_rte_idx(ea->rq_idx));
                if (ea->flags & FILTER_ACTION_FILTER_ID_FLAG)
                        FLOW_LOG(INFO, "\tfilter_id: %u\n", ea->filter_id);
        }
}

/* Debug function to dump internal NIC filter structure. */
static void
enic_dump_filter(const struct filter_v2 *filt)
{
        const struct filter_generic_1 *gp;
        int i, j, mbyte;
        char buf[128], *bp;
        char ip4[16], ip6[16], udp[16], tcp[16], tcpudp[16], ip4csum[16];
        char l4csum[16], ipfrag[16];

        switch (filt->type) {
        case FILTER_IPV4_5TUPLE:
                FLOW_LOG(INFO, "FILTER_IPV4_5TUPLE\n");
                break;
        case FILTER_USNIC_IP:
        case FILTER_DPDK_1:
                /* FIXME: this should be a loop */
                gp = &filt->u.generic_1;
                FLOW_LOG(INFO, "Filter: vlan: 0x%04x, mask: 0x%04x\n",
                       gp->val_vlan, gp->mask_vlan);

                if (gp->mask_flags & FILTER_GENERIC_1_IPV4)
                        sprintf(ip4, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV4)
                                 ? "ip4(y)" : "ip4(n)");
                else
                        sprintf(ip4, "%s ", "ip4(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPV6)
                        sprintf(ip6, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPV6)
                                 ? "ip6(y)" : "ip6(n)");
                else
                        sprintf(ip6, "%s ", "ip6(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_UDP)
                        sprintf(udp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_UDP)
                                 ? "udp(y)" : "udp(n)");
                else
                        sprintf(udp, "%s ", "udp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP)
                        sprintf(tcp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP)
                                 ? "tcp(y)" : "tcp(n)");
                else
                        sprintf(tcp, "%s ", "tcp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                        sprintf(tcpudp, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_TCP_OR_UDP)
                                 ? "tcpudp(y)" : "tcpudp(n)");
                else
                        sprintf(tcpudp, "%s ", "tcpudp(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IP4SUM_OK)
                        sprintf(ip4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IP4SUM_OK)
                                 ? "ip4csum(y)" : "ip4csum(n)");
                else
                        sprintf(ip4csum, "%s ", "ip4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_L4SUM_OK)
                        sprintf(l4csum, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_L4SUM_OK)
                                 ? "l4csum(y)" : "l4csum(n)");
                else
                        sprintf(l4csum, "%s ", "l4csum(x)");

                if (gp->mask_flags & FILTER_GENERIC_1_IPFRAG)
                        sprintf(ipfrag, "%s ",
                                (gp->val_flags & FILTER_GENERIC_1_IPFRAG)
                                 ? "ipfrag(y)" : "ipfrag(n)");
                else
                        sprintf(ipfrag, "%s ", "ipfrag(x)");
                FLOW_LOG(INFO, "\tFlags: %s%s%s%s%s%s%s%s\n", ip4, ip6, udp,
                         tcp, tcpudp, ip4csum, l4csum, ipfrag);

                for (i = 0; i < FILTER_GENERIC_1_NUM_LAYERS; i++) {
                        mbyte = FILTER_GENERIC_1_KEY_LEN - 1;
                        while (mbyte && !gp->layer[i].mask[mbyte])
                                mbyte--;
                        if (mbyte == 0)
                                continue;

                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].mask[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u mask: %s\n", i + 2, buf);
                        bp = buf;
                        for (j = 0; j <= mbyte; j++) {
                                sprintf(bp, "%02x",
                                        gp->layer[i].val[j]);
                                bp += 2;
                        }
                        *bp = '\0';
                        FLOW_LOG(INFO, "\tL%u  val: %s\n", i + 2, buf);
                }
                break;
        default:
                FLOW_LOG(INFO, "FILTER UNKNOWN\n");
                break;
        }
}

/* Debug function to dump internal NIC flow structures. */
static void
enic_dump_flow(const struct filter_action_v2 *ea, const struct filter_v2 *filt)
{
        enic_dump_filter(filt);
        enic_dump_actions(ea);
}

/**
 * Internal flow parse/validate function.
 *
 * @param dev[in]
 *   This device pointer.
 * @param attrs[in]
 *   Flow attributes (only ingress is supported).
 * @param pattern[in]
 * @param actions[in]
 * @param error[out]
 * @param enic_filter[out]
 *   Internal NIC filter structure pointer.
 * @param enic_action[out]
 *   Internal NIC action structure pointer.
 */
static int
enic_flow_parse(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attrs,
                const struct rte_flow_item pattern[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error,
                struct filter_v2 *enic_filter,
                struct filter_action_v2 *enic_action)
{
        int ret = 0;
        struct enic *enic = pmd_priv(dev);
        const struct enic_filter_cap *enic_filter_cap;
        const struct enic_action_cap *enic_action_cap;
        const struct rte_flow_action *action;

        FLOW_TRACE();

        memset(enic_filter, 0, sizeof(*enic_filter));
        memset(enic_action, 0, sizeof(*enic_action));

        if (!pattern) {
                rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_NUM,
                                   NULL, "No pattern specified");
                return -rte_errno;
        }

        if (!actions) {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
                                   NULL, "No action specified");
                return -rte_errno;
        }

        if (attrs) {
                if (attrs->group) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                           NULL,
                                           "priority groups are not supported");
                        return -rte_errno;
                } else if (attrs->priority) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                           NULL,
                                           "priorities are not supported");
                        return -rte_errno;
                } else if (attrs->egress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                           NULL,
                                           "egress is not supported");
                        return -rte_errno;
                } else if (attrs->transfer) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                           NULL,
                                           "transfer is not supported");
                        return -rte_errno;
                } else if (!attrs->ingress) {
                        rte_flow_error_set(error, ENOTSUP,
                                           RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                           NULL,
                                           "only ingress is supported");
                        return -rte_errno;
                }
        } else {
                rte_flow_error_set(error, EINVAL,
                                   RTE_FLOW_ERROR_TYPE_ATTR,
                                   NULL, "No attribute specified");
                return -rte_errno;
        }

        /* Verify Actions. */
        enic_action_cap = enic_get_action_cap(enic);
        for (action = &actions[0]; action->type != RTE_FLOW_ACTION_TYPE_END;
             action++) {
                if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
                        continue;
                else if (!enic_match_action(action, enic_action_cap->actions))
                        break;
        }
        if (action->type != RTE_FLOW_ACTION_TYPE_END) {
                rte_flow_error_set(error, EPERM, RTE_FLOW_ERROR_TYPE_ACTION,
                                   action, "Invalid action.");
                return -rte_errno;
        }
        ret = enic_action_cap->copy_fn(actions, enic_action);
        if (ret) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Unsupported action.");
                return -rte_errno;
        }

        /* Verify Flow items. If copying the filter from flow format to enic
         * format fails, the flow is not supported.
         */
        enic_filter_cap = enic_get_filter_cap(enic);
        if (enic_filter_cap == NULL) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "Flow API not available");
                return -rte_errno;
        }
        enic_filter->type = enic->flow_filter_mode;
        ret = enic_copy_filter(pattern, enic_filter_cap->item_info,
                               enic_filter, error);
        return ret;
}

/**
 * Push filter/action to the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param enic_filter[in]
 *   Internal NIC filter structure pointer.
 * @param enic_action[in]
 *   Internal NIC action structure pointer.
 * @param error[out]
 */
static struct rte_flow *
enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
		     struct filter_action_v2 *enic_action,
		     struct rte_flow_error *error)
{
	struct rte_flow *flow;
	int err;
	uint16_t entry;
	int ctr_idx;
	int last_max_flow_ctr;

	FLOW_TRACE();

	flow = rte_calloc(__func__, 1, sizeof(*flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "cannot allocate flow memory");
		return NULL;
	}

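	/* Counter bookkeeping: counter_idx == -1 means the flow carries no
	 * COUNT action, and last_max_flow_ctr remembers the previous DMA
	 * high-water mark so a failed classifier add can be unwound.
	 */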
	flow->counter_idx = -1;
	last_max_flow_ctr = -1;
	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
			rte_flow_error_set(error, ENOMEM,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "cannot allocate counter");
			goto unwind_flow_alloc;
		}
		flow->counter_idx = ctr_idx;
		enic_action->counter_index = ctr_idx;

		/* If this is the largest counter index seen so far, grow
		 * the counter DMA region to cover it.
		 */
		if (ctr_idx > enic->max_flow_counter) {
			err = vnic_dev_counter_dma_cfg(enic->vdev,
						 VNIC_FLOW_COUNTER_UPDATE_MSECS,
						 ctr_idx + 1);
			if (err) {
				rte_flow_error_set(error, -err,
					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					   NULL, "counter DMA config failed");
				goto unwind_ctr_alloc;
			}
			last_max_flow_ctr = enic->max_flow_counter;
			enic->max_flow_counter = ctr_idx;
		}
	}

	/* entry[in] is the queue id, entry[out] is the filter ID for delete */
	entry = enic_action->rq_idx;
	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
		goto unwind_ctr_dma_cfg;
	}

	flow->enic_filter_id = entry;
	flow->enic_filter = *enic_filter;

	return flow;

/* unwind if there are errors */
unwind_ctr_dma_cfg:
	if (last_max_flow_ctr != -1) {
		/* shrink the counter DMA region back to its previous size */
		vnic_dev_counter_dma_cfg(enic->vdev,
					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
					 last_max_flow_ctr + 1);
		enic->max_flow_counter = last_max_flow_ctr;
	}
unwind_ctr_alloc:
	if (flow->counter_idx != -1)
		vnic_dev_counter_free(enic->vdev, ctr_idx);
unwind_flow_alloc:
	rte_free(flow);
	return NULL;
}

/**
 * Remove filter/action from the NIC.
 *
 * @param enic[in]
 *   Device structure pointer.
 * @param flow[in]
 *   Flow whose filter and counter (if any) should be removed.
 * @param error[out]
 *   Flow error structure, filled in on failure.
 */
static int
enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		     struct rte_flow_error *error)
{
	u16 filter_id;
	int err;

	FLOW_TRACE();

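	/* The filter ID handed back by CLSF_ADD at create time identifies
	 * the classifier entry to delete.
	 */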
	filter_id = flow->enic_filter_id;
	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
	if (err) {
		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier failed");
		return -err;
	}

	if (flow->counter_idx != -1) {
		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
			dev_err(enic, "counter free failed, idx: %d\n",
				flow->counter_idx);
		flow->counter_idx = -1;
	}
	return 0;
}

/*
 * The following functions are callbacks for the Generic flow API.
 */

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
enic_flow_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attrs,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	int ret;

	FLOW_TRACE();

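	/* Validation is a dry run: the flow is fully parsed into the NIC
	 * filter and action structures, but nothing is programmed into
	 * hardware.
	 */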
	ret = enic_flow_parse(dev, attrs, pattern, actions, error,
			      &enic_filter, &enic_action);
	if (!ret)
		enic_dump_flow(&enic_action, &enic_filter);
	return ret;
}

/**
 * Create a flow supported by the NIC.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
enic_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attrs,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	int ret;
	struct filter_v2 enic_filter;
	struct filter_action_v2 enic_action;
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	ret = enic_flow_parse(dev, attrs, pattern, actions, error, &enic_filter,
			      &enic_action);
	if (ret < 0)
		return NULL;

	rte_spinlock_lock(&enic->flows_lock);
	flow = enic_flow_add_filter(enic, &enic_filter, &enic_action,
				    error);
	if (flow)
		LIST_INSERT_HEAD(&enic->flows, flow, next);
	rte_spinlock_unlock(&enic->flows_lock);

	return flow;
}
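
/*
 * Illustrative sketch only (not compiled into the driver): how an
 * application might reach enic_flow_create() through the public API. The
 * IPv4/UDP pattern and the queue index are assumptions made for the
 * example; which actions a given VIC accepts depends on its
 * enic_action_cap.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ip4 = {
 *		.hdr.dst_addr = rte_cpu_to_be_32(0x0a000001), // 10.0.0.1
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip4,
 *		  .mask = &rte_flow_item_ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &err);
 */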

/**
 * Destroy a flow supported by the NIC.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);
	enic_flow_del_filter(enic, flow, error);
	LIST_REMOVE(flow, next);
	rte_spinlock_unlock(&enic->flows_lock);
	rte_free(flow);
	return 0;
}

/**
 * Flush all flows on the device.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct rte_flow *flow;
	struct enic *enic = pmd_priv(dev);

	FLOW_TRACE();

	rte_spinlock_lock(&enic->flows_lock);

	while (!LIST_EMPTY(&enic->flows)) {
		flow = LIST_FIRST(&enic->flows);
		enic_flow_del_filter(enic, flow, error);
		LIST_REMOVE(flow, next);
		rte_free(flow);
	}
	rte_spinlock_unlock(&enic->flows_lock);
	return 0;
}

static int
enic_flow_query_count(struct rte_eth_dev *dev,
		      struct rte_flow *flow, void *data,
		      struct rte_flow_error *error)
{
	struct enic *enic = pmd_priv(dev);
	struct rte_flow_query_count *query;
	uint64_t packets, bytes;

	FLOW_TRACE();

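	/* Only flows created with a COUNT action own a hardware counter;
	 * counter_idx stays -1 otherwise.
	 */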
	if (flow->counter_idx == -1) {
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL,
					  "flow does not have counter");
	}
	query = (struct rte_flow_query_count *)data;
	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
				    !!query->reset, &packets, &bytes)) {
		return rte_flow_error_set
			(error, EINVAL,
			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL,
			 "cannot read counter");
	}
	query->hits_set = 1;
	query->bytes_set = 1;
	query->hits = packets;
	query->bytes = bytes;
	return 0;
}

static int
enic_flow_query(struct rte_eth_dev *dev,
		struct rte_flow *flow,
		const struct rte_flow_action *actions,
		void *data,
		struct rte_flow_error *error)
{
	int ret = 0;

	FLOW_TRACE();

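	/* Walk the requested actions: COUNT is the only action this PMD can
	 * report on, and VOID entries are skipped.
	 */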
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
		switch (actions->type) {
		case RTE_FLOW_ACTION_TYPE_VOID:
			break;
		case RTE_FLOW_ACTION_TYPE_COUNT:
			ret = enic_flow_query_count(dev, flow, data, error);
			break;
		default:
			return rte_flow_error_set(error, ENOTSUP,
						  RTE_FLOW_ERROR_TYPE_ACTION,
						  actions,
						  "action not supported");
		}
		if (ret < 0)
			return ret;
	}
	return 0;
}

/**
 * Flow callbacks exported by this PMD.
 *
 * @see rte_flow_ops
 */
const struct rte_flow_ops enic_flow_ops = {
	.validate = enic_flow_validate,
	.create = enic_flow_create,
	.destroy = enic_flow_destroy,
	.flush = enic_flow_flush,
	.query = enic_flow_query,
};
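
/*
 * Illustrative sketch only (the real handler lives in enic_ethdev.c): in
 * this DPDK release a PMD typically hands the table above to applications
 * from its filter_ctrl callback when queried for the generic flow ops. The
 * snippet below merely shows the shape of that hook.
 *
 *	case RTE_ETH_FILTER_GENERIC:
 *		if (filter_op != RTE_ETH_FILTER_GET)
 *			return -EINVAL;
 *		*(const void **)arg = &enic_flow_ops;
 *		break;
 */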