New upstream version 18.02
[deb_dpdk.git] / drivers / net / tap / tap_flow.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017 6WIND S.A.
3  * Copyright 2017 Mellanox.
4  */
5
6 #include <errno.h>
7 #include <string.h>
8 #include <unistd.h>
9 #include <sys/queue.h>
10 #include <sys/resource.h>
11
12 #include <rte_byteorder.h>
13 #include <rte_jhash.h>
14 #include <rte_malloc.h>
15 #include <rte_eth_tap.h>
16 #include <tap_flow.h>
17 #include <tap_autoconf.h>
18 #include <tap_tcmsgs.h>
19 #include <tap_rss.h>
20
21 #ifndef HAVE_TC_FLOWER
22 /*
23  * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
24  * avoid sending TC messages the kernel cannot understand.
25  */
26 enum {
27         TCA_FLOWER_UNSPEC,
28         TCA_FLOWER_CLASSID,
29         TCA_FLOWER_INDEV,
30         TCA_FLOWER_ACT,
31         TCA_FLOWER_KEY_ETH_DST,         /* ETH_ALEN */
32         TCA_FLOWER_KEY_ETH_DST_MASK,    /* ETH_ALEN */
33         TCA_FLOWER_KEY_ETH_SRC,         /* ETH_ALEN */
34         TCA_FLOWER_KEY_ETH_SRC_MASK,    /* ETH_ALEN */
35         TCA_FLOWER_KEY_ETH_TYPE,        /* be16 */
36         TCA_FLOWER_KEY_IP_PROTO,        /* u8 */
37         TCA_FLOWER_KEY_IPV4_SRC,        /* be32 */
38         TCA_FLOWER_KEY_IPV4_SRC_MASK,   /* be32 */
39         TCA_FLOWER_KEY_IPV4_DST,        /* be32 */
40         TCA_FLOWER_KEY_IPV4_DST_MASK,   /* be32 */
41         TCA_FLOWER_KEY_IPV6_SRC,        /* struct in6_addr */
42         TCA_FLOWER_KEY_IPV6_SRC_MASK,   /* struct in6_addr */
43         TCA_FLOWER_KEY_IPV6_DST,        /* struct in6_addr */
44         TCA_FLOWER_KEY_IPV6_DST_MASK,   /* struct in6_addr */
45         TCA_FLOWER_KEY_TCP_SRC,         /* be16 */
46         TCA_FLOWER_KEY_TCP_DST,         /* be16 */
47         TCA_FLOWER_KEY_UDP_SRC,         /* be16 */
48         TCA_FLOWER_KEY_UDP_DST,         /* be16 */
49 };
50 #endif
51 #ifndef HAVE_TC_VLAN_ID
52 enum {
53         /* TCA_FLOWER_FLAGS, */
54         TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
55         TCA_FLOWER_KEY_VLAN_PRIO,       /* u8   */
56         TCA_FLOWER_KEY_VLAN_ETH_TYPE,   /* be16 */
57 };
58 #endif
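/*
 * Illustration of the "+ 2" offset above (a sketch assuming the kernel's
 * enum layout, where TCA_FLOWER_FLAGS sits between the two):
 *
 *   TCA_FLOWER_KEY_UDP_DST  = N
 *   TCA_FLOWER_FLAGS        = N + 1   (commented out above, unused here)
 *   TCA_FLOWER_KEY_VLAN_ID  = N + 2
 */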
59 /*
60  * For kernels < 4.2, BPF-related enums may not be defined.
61  * Runtime checks will be carried out to gracefully report on TC messages that
62  * are rejected by the kernel. Rejection may be due to:
63  * 1. the enum not being defined, or
64  * 2. the enum being defined but the kernel not being configured to support
65  *    BPF system calls, BPF classifiers or BPF actions.
66  */
67 #ifndef HAVE_TC_BPF
68 enum {
69         TCA_BPF_UNSPEC,
70         TCA_BPF_ACT,
71         TCA_BPF_POLICE,
72         TCA_BPF_CLASSID,
73         TCA_BPF_OPS_LEN,
74         TCA_BPF_OPS,
75 };
76 #endif
77 #ifndef HAVE_TC_BPF_FD
78 enum {
79         TCA_BPF_FD = TCA_BPF_OPS + 1,
80         TCA_BPF_NAME,
81 };
82 #endif
83 #ifndef HAVE_TC_ACT_BPF
84 #define tc_gen \
85         __u32                 index; \
86         __u32                 capab; \
87         int                   action; \
88         int                   refcnt; \
89         int                   bindcnt
90
91 struct tc_act_bpf {
92         tc_gen;
93 };
94
95 enum {
96         TCA_ACT_BPF_UNSPEC,
97         TCA_ACT_BPF_TM,
98         TCA_ACT_BPF_PARMS,
99         TCA_ACT_BPF_OPS_LEN,
100         TCA_ACT_BPF_OPS,
101 };
102
103 #endif
104 #ifndef HAVE_TC_ACT_BPF_FD
105 enum {
106         TCA_ACT_BPF_FD = TCA_ACT_BPF_OPS + 1,
107         TCA_ACT_BPF_NAME,
108 };
109 #endif
110
111 /* RSS key management */
112 enum bpf_rss_key_e {
113         KEY_CMD_GET = 1,
114         KEY_CMD_RELEASE,
115         KEY_CMD_INIT,
116         KEY_CMD_DEINIT,
117 };
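/*
 * Hypothetical life cycle of an RSS map key (a sketch inferred from the
 * bpf_rss_key() prototype below and its use in tap_flow_free(); the exact
 * call sites live in the RSS code further down):
 *
 *   __u32 key_idx;
 *   bpf_rss_key(KEY_CMD_INIT, NULL);          once, when RSS is enabled
 *   bpf_rss_key(KEY_CMD_GET, &key_idx);       reserve a key for a new rule
 *   bpf_rss_key(KEY_CMD_RELEASE, &key_idx);   return it on rule destruction
 */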
118
119 enum key_status_e {
120         KEY_STAT_UNSPEC,
121         KEY_STAT_USED,
122         KEY_STAT_AVAILABLE,
123 };
124
125 #define ISOLATE_HANDLE 1
126 #define REMOTE_PROMISCUOUS_HANDLE 2
127
128 struct rte_flow {
129         LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
130         struct rte_flow *remote_flow; /* associated remote flow */
131         int bpf_fd[SEC_MAX]; /* list of BPF fds per ELF section */
132         uint32_t key_idx; /* RSS rule key index into BPF map */
133         struct nlmsg msg;
134 };
135
136 struct convert_data {
137         uint16_t eth_type;
138         uint16_t ip_proto;
139         uint8_t vlan;
140         struct rte_flow *flow;
141 };
142
143 struct remote_rule {
144         struct rte_flow_attr attr;
145         struct rte_flow_item items[2];
146         struct rte_flow_action actions[2];
147         int mirred;
148 };
149
150 struct action_data {
151         char id[16];
152
153         union {
154                 struct tc_gact gact;
155                 struct tc_mirred mirred;
156                 struct skbedit {
157                         struct tc_skbedit skbedit;
158                         uint16_t queue;
159                 } skbedit;
160                 struct bpf {
161                         struct tc_act_bpf bpf;
162                         int bpf_fd;
163                         const char *annotation;
164                 } bpf;
165         };
166 };
167
168 static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
169 static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
170 static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
171 static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
172 static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
173 static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
174 static int
175 tap_flow_validate(struct rte_eth_dev *dev,
176                   const struct rte_flow_attr *attr,
177                   const struct rte_flow_item items[],
178                   const struct rte_flow_action actions[],
179                   struct rte_flow_error *error);
180
181 static struct rte_flow *
182 tap_flow_create(struct rte_eth_dev *dev,
183                 const struct rte_flow_attr *attr,
184                 const struct rte_flow_item items[],
185                 const struct rte_flow_action actions[],
186                 struct rte_flow_error *error);
187
188 static void
189 tap_flow_free(struct pmd_internals *pmd,
190         struct rte_flow *flow);
191
192 static int
193 tap_flow_destroy(struct rte_eth_dev *dev,
194                  struct rte_flow *flow,
195                  struct rte_flow_error *error);
196
197 static int
198 tap_flow_isolate(struct rte_eth_dev *dev,
199                  int set,
200                  struct rte_flow_error *error);
201
202 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
203 static int rss_enable(struct pmd_internals *pmd,
204                         const struct rte_flow_attr *attr,
205                         struct rte_flow_error *error);
206 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
207                         const struct rte_flow_action_rss *rss,
208                         struct rte_flow_error *error);
209
210 static const struct rte_flow_ops tap_flow_ops = {
211         .validate = tap_flow_validate,
212         .create = tap_flow_create,
213         .destroy = tap_flow_destroy,
214         .flush = tap_flow_flush,
215         .isolate = tap_flow_isolate,
216 };
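/*
 * Application-side sketch: these callbacks are reached through the generic
 * rte_flow API, e.g.:
 *
 *   struct rte_flow_error err;
 *   struct rte_flow *f =
 *           rte_flow_create(port_id, &attr, pattern, actions, &err);
 *
 * rte_flow_create() resolves the port's rte_flow_ops (tap_flow_ops here)
 * and dispatches to the .create callback, i.e. tap_flow_create().
 */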
217
218 /* Static initializer for items. */
219 #define ITEMS(...) \
220         (const enum rte_flow_item_type []){ \
221                 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
222         }
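/*
 * Expansion example: ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * yields the compound literal
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 */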
223
224 /* Structure to generate a simple graph of layers supported by the NIC. */
225 struct tap_flow_items {
226         /* Bit-mask corresponding to what is supported for this item. */
227         const void *mask;
228         const unsigned int mask_sz; /* Bit-mask size in bytes. */
229         /*
230          * Bit-mask corresponding to the default mask, if none is provided
231          * along with the item.
232          */
233         const void *default_mask;
234         /**
235          * Conversion function from rte_flow to netlink attributes.
236          *
237          * @param item
238          *   rte_flow item to convert.
239          * @param data
240          *   Internal structure to store the conversion.
241          *
242          * @return
243          *   0 on success, negative value otherwise.
244          */
245         int (*convert)(const struct rte_flow_item *item, void *data);
246         /** List of possible following items.  */
247         const enum rte_flow_item_type *const items;
248 };
249
250 /* Graph of supported items and associated actions. */
251 static const struct tap_flow_items tap_flow_items[] = {
252         [RTE_FLOW_ITEM_TYPE_END] = {
253                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
254         },
255         [RTE_FLOW_ITEM_TYPE_ETH] = {
256                 .items = ITEMS(
257                         RTE_FLOW_ITEM_TYPE_VLAN,
258                         RTE_FLOW_ITEM_TYPE_IPV4,
259                         RTE_FLOW_ITEM_TYPE_IPV6),
260                 .mask = &(const struct rte_flow_item_eth){
261                         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
262                         .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
263                         .type = -1,
264                 },
265                 .mask_sz = sizeof(struct rte_flow_item_eth),
266                 .default_mask = &rte_flow_item_eth_mask,
267                 .convert = tap_flow_create_eth,
268         },
269         [RTE_FLOW_ITEM_TYPE_VLAN] = {
270                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
271                                RTE_FLOW_ITEM_TYPE_IPV6),
272                 .mask = &(const struct rte_flow_item_vlan){
273                         .tpid = -1,
274                         /* DEI matching is not supported */
275 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
276                         .tci = 0xffef,
277 #else
278                         .tci = 0xefff,
279 #endif
280                 },
281                 .mask_sz = sizeof(struct rte_flow_item_vlan),
282                 .default_mask = &rte_flow_item_vlan_mask,
283                 .convert = tap_flow_create_vlan,
284         },
285         [RTE_FLOW_ITEM_TYPE_IPV4] = {
286                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
287                                RTE_FLOW_ITEM_TYPE_TCP),
288                 .mask = &(const struct rte_flow_item_ipv4){
289                         .hdr = {
290                                 .src_addr = -1,
291                                 .dst_addr = -1,
292                                 .next_proto_id = -1,
293                         },
294                 },
295                 .mask_sz = sizeof(struct rte_flow_item_ipv4),
296                 .default_mask = &rte_flow_item_ipv4_mask,
297                 .convert = tap_flow_create_ipv4,
298         },
299         [RTE_FLOW_ITEM_TYPE_IPV6] = {
300                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
301                                RTE_FLOW_ITEM_TYPE_TCP),
302                 .mask = &(const struct rte_flow_item_ipv6){
303                         .hdr = {
304                                 .src_addr = {
305                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
306                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
307                                 },
308                                 .dst_addr = {
309                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
310                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
311                                 },
312                                 .proto = -1,
313                         },
314                 },
315                 .mask_sz = sizeof(struct rte_flow_item_ipv6),
316                 .default_mask = &rte_flow_item_ipv6_mask,
317                 .convert = tap_flow_create_ipv6,
318         },
319         [RTE_FLOW_ITEM_TYPE_UDP] = {
320                 .mask = &(const struct rte_flow_item_udp){
321                         .hdr = {
322                                 .src_port = -1,
323                                 .dst_port = -1,
324                         },
325                 },
326                 .mask_sz = sizeof(struct rte_flow_item_udp),
327                 .default_mask = &rte_flow_item_udp_mask,
328                 .convert = tap_flow_create_udp,
329         },
330         [RTE_FLOW_ITEM_TYPE_TCP] = {
331                 .mask = &(const struct rte_flow_item_tcp){
332                         .hdr = {
333                                 .src_port = -1,
334                                 .dst_port = -1,
335                         },
336                 },
337                 .mask_sz = sizeof(struct rte_flow_item_tcp),
338                 .default_mask = &rte_flow_item_tcp_mask,
339                 .convert = tap_flow_create_tcp,
340         },
341 };
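/*
 * Traversal example: a pattern ETH -> IPV4 -> UDP is accepted because each
 * item type appears in the .items list of its predecessor, starting from
 * RTE_FLOW_ITEM_TYPE_END, which acts as the implicit root of the graph.
 */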
342
343 /*
344  *                TC rules, by growing priority
345  *
346  *        Remote netdevice                  Tap netdevice
347  * +-------------+-------------+  +-------------+-------------+
348  * |   Ingress   |   Egress    |  |   Ingress   |   Egress    |
349  * |-------------|-------------|  |-------------|-------------|
350  * |             |  \       /  |  |             |  REMOTE TX  | prio 1
351  * |             |   \     /   |  |             |   \     /   | prio 2
352  * |  EXPLICIT   |    \   /    |  |  EXPLICIT   |    \   /    |   .
353  * |             |     \ /     |  |             |     \ /     |   .
354  * |    RULES    |      X      |  |    RULES    |      X      |   .
355  * |      .      |     / \     |  |      .      |     / \     |   .
356  * |      .      |    /   \    |  |      .      |    /   \    |   .
357  * |      .      |   /     \   |  |      .      |   /     \   |   .
358  * |      .      |  /       \  |  |      .      |  /       \  |   .
359  *
360  *      ....           ....           ....           ....
361  *
362  * |      .      |  \       /  |  |      .      |  \       /  |   .
363  * |      .      |   \     /   |  |      .      |   \     /   |   .
364  * |             |    \   /    |  |             |    \   /    |
365  * |  LOCAL_MAC  |     \ /     |  |    \   /    |     \ /     | last prio - 5
366  * |   PROMISC   |      X      |  |     \ /     |      X      | last prio - 4
367  * |   ALLMULTI  |     / \     |  |      X      |     / \     | last prio - 3
368  * |  BROADCAST  |    /   \    |  |     / \     |    /   \    | last prio - 2
369  * | BROADCASTV6 |   /     \   |  |    /   \    |   /     \   | last prio - 1
370  * |     xx      |  /       \  |  |   ISOLATE   |  /       \  | last prio
371  * +-------------+-------------+  +-------------+-------------+
372  *
373  * The implicit flow rules are stored in a list, with the last two
374  * mandatorily being the REMOTE_TX and ISOLATE rules, e.g.:
375  *
376  * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
377  *
378  * This enables tap_flow_isolate() to remove implicit rules by popping the
379  * list head, as long as the popped rule applies to the remote netdevice. The
380  * implicit rule for TX redirection is not removed, as isolation concerns
381  * only incoming traffic.
382  */
383
384 static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
385         [TAP_REMOTE_LOCAL_MAC] = {
386                 .attr = {
387                         .group = MAX_GROUP,
388                         .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
389                         .ingress = 1,
390                 },
391                 .items[0] = {
392                         .type = RTE_FLOW_ITEM_TYPE_ETH,
393                         .mask =  &(const struct rte_flow_item_eth){
394                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
395                         },
396                 },
397                 .items[1] = {
398                         .type = RTE_FLOW_ITEM_TYPE_END,
399                 },
400                 .mirred = TCA_EGRESS_REDIR,
401         },
402         [TAP_REMOTE_BROADCAST] = {
403                 .attr = {
404                         .group = MAX_GROUP,
405                         .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
406                         .ingress = 1,
407                 },
408                 .items[0] = {
409                         .type = RTE_FLOW_ITEM_TYPE_ETH,
410                         .mask =  &(const struct rte_flow_item_eth){
411                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
412                         },
413                         .spec = &(const struct rte_flow_item_eth){
414                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
415                         },
416                 },
417                 .items[1] = {
418                         .type = RTE_FLOW_ITEM_TYPE_END,
419                 },
420                 .mirred = TCA_EGRESS_MIRROR,
421         },
422         [TAP_REMOTE_BROADCASTV6] = {
423                 .attr = {
424                         .group = MAX_GROUP,
425                         .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
426                         .ingress = 1,
427                 },
428                 .items[0] = {
429                         .type = RTE_FLOW_ITEM_TYPE_ETH,
430                         .mask =  &(const struct rte_flow_item_eth){
431                                 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
432                         },
433                         .spec = &(const struct rte_flow_item_eth){
434                                 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
435                         },
436                 },
437                 .items[1] = {
438                         .type = RTE_FLOW_ITEM_TYPE_END,
439                 },
440                 .mirred = TCA_EGRESS_MIRROR,
441         },
442         [TAP_REMOTE_PROMISC] = {
443                 .attr = {
444                         .group = MAX_GROUP,
445                         .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
446                         .ingress = 1,
447                 },
448                 .items[0] = {
449                         .type = RTE_FLOW_ITEM_TYPE_VOID,
450                 },
451                 .items[1] = {
452                         .type = RTE_FLOW_ITEM_TYPE_END,
453                 },
454                 .mirred = TCA_EGRESS_MIRROR,
455         },
456         [TAP_REMOTE_ALLMULTI] = {
457                 .attr = {
458                         .group = MAX_GROUP,
459                         .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
460                         .ingress = 1,
461                 },
462                 .items[0] = {
463                         .type = RTE_FLOW_ITEM_TYPE_ETH,
464                         .mask =  &(const struct rte_flow_item_eth){
465                                 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
466                         },
467                         .spec = &(const struct rte_flow_item_eth){
468                                 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
469                         },
470                 },
471                 .items[1] = {
472                         .type = RTE_FLOW_ITEM_TYPE_END,
473                 },
474                 .mirred = TCA_EGRESS_MIRROR,
475         },
476         [TAP_REMOTE_TX] = {
477                 .attr = {
478                         .group = 0,
479                         .priority = TAP_REMOTE_TX,
480                         .egress = 1,
481                 },
482                 .items[0] = {
483                         .type = RTE_FLOW_ITEM_TYPE_VOID,
484                 },
485                 .items[1] = {
486                         .type = RTE_FLOW_ITEM_TYPE_END,
487                 },
488                 .mirred = TCA_EGRESS_MIRROR,
489         },
490         [TAP_ISOLATE] = {
491                 .attr = {
492                         .group = MAX_GROUP,
493                         .priority = PRIORITY_MASK - TAP_ISOLATE,
494                         .ingress = 1,
495                 },
496                 .items[0] = {
497                         .type = RTE_FLOW_ITEM_TYPE_VOID,
498                 },
499                 .items[1] = {
500                         .type = RTE_FLOW_ITEM_TYPE_END,
501                 },
502         },
503 };
504
505 /**
506  * Make as many checks as possible on an Ethernet item, and if a flow is
507  * provided, fill it appropriately with Ethernet info.
508  *
509  * @param[in] item
510  *   Item specification.
511  * @param[in, out] data
512  *   Additional data structure to tell next layers we've been here.
513  *
514  * @return
515  *   0 if checks are alright, -1 otherwise.
516  */
517 static int
518 tap_flow_create_eth(const struct rte_flow_item *item, void *data)
519 {
520         struct convert_data *info = (struct convert_data *)data;
521         const struct rte_flow_item_eth *spec = item->spec;
522         const struct rte_flow_item_eth *mask = item->mask;
523         struct rte_flow *flow = info->flow;
524         struct nlmsg *msg;
525
526         /* use default mask if none provided */
527         if (!mask)
528                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
529         /* TC does not support eth_type masking. Only accept if exact match. */
530         if (mask->type && mask->type != 0xffff)
531                 return -1;
532         if (!spec)
533                 return 0;
534         /* store eth_type for consistency if ipv4/6 pattern item comes next */
535         if (spec->type & mask->type)
536                 info->eth_type = spec->type;
537         if (!flow)
538                 return 0;
539         msg = &flow->msg;
540         if (!is_zero_ether_addr(&mask->dst)) {
541                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
542                            &spec->dst.addr_bytes);
543                 tap_nlattr_add(&msg->nh,
544                            TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
545                            &mask->dst.addr_bytes);
546         }
547         if (!is_zero_ether_addr(&mask->src)) {
548                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
549                            &spec->src.addr_bytes);
550                 tap_nlattr_add(&msg->nh,
551                            TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
552                            &mask->src.addr_bytes);
553         }
554         return 0;
555 }
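/*
 * Illustrative result: matching destination MAC 01:23:45:67:89:ab with a
 * full mask adds two flower attributes to the netlink message:
 *
 *   TCA_FLOWER_KEY_ETH_DST      = 01:23:45:67:89:ab
 *   TCA_FLOWER_KEY_ETH_DST_MASK = ff:ff:ff:ff:ff:ff
 */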
556
557 /**
558  * Make as many checks as possible on a VLAN item, and if a flow is provided,
559  * fill it appropriately with VLAN info.
560  *
561  * @param[in] item
562  *   Item specification.
563  * @param[in, out] data
564  *   Additional data structure to tell next layers we've been here.
565  *
566  * @return
567  *   0 if checks are alright, -1 otherwise.
568  */
569 static int
570 tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
571 {
572         struct convert_data *info = (struct convert_data *)data;
573         const struct rte_flow_item_vlan *spec = item->spec;
574         const struct rte_flow_item_vlan *mask = item->mask;
575         struct rte_flow *flow = info->flow;
576         struct nlmsg *msg;
577
578         /* use default mask if none provided */
579         if (!mask)
580                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
581         /* TC does not support tpid masking. Only accept if exact match. */
582         if (mask->tpid && mask->tpid != 0xffff)
583                 return -1;
584         /* Double-tagging not supported. */
585         if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
586                 return -1;
587         info->vlan = 1;
588         if (!flow)
589                 return 0;
590         msg = &flow->msg;
591         msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
592 #define VLAN_PRIO(tci) ((tci) >> 13)
593 #define VLAN_ID(tci) ((tci) & 0xfff)
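        /*
         * Example: a host-order TCI of 0xa123 gives VLAN_PRIO() = 5 (top
         * three bits) and VLAN_ID() = 0x123 (low 12 bits); the DEI bit
         * (0x1000) is covered by neither macro, in line with the 0xefff
         * mask above.
         */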
594         if (!spec)
595                 return 0;
596         if (spec->tci) {
597                 uint16_t tci = ntohs(spec->tci) & mask->tci;
598                 uint16_t prio = VLAN_PRIO(tci);
599                 uint16_t vid = VLAN_ID(tci);
600
601                 if (prio)
602                         tap_nlattr_add8(&msg->nh,
603                                         TCA_FLOWER_KEY_VLAN_PRIO, prio);
604                 if (vid)
605                         tap_nlattr_add16(&msg->nh,
606                                          TCA_FLOWER_KEY_VLAN_ID, vid);
607         }
608         return 0;
609 }
610
611 /**
612  * Make as many checks as possible on an IPv4 item, and if a flow is provided,
613  * fill it appropriately with IPv4 info.
614  *
615  * @param[in] item
616  *   Item specification.
617  * @param[in, out] data
618  *   Additional data structure to tell next layers we've been here.
619  *
620  * @return
621  *   0 if checks are alright, -1 otherwise.
622  */
623 static int
624 tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
625 {
626         struct convert_data *info = (struct convert_data *)data;
627         const struct rte_flow_item_ipv4 *spec = item->spec;
628         const struct rte_flow_item_ipv4 *mask = item->mask;
629         struct rte_flow *flow = info->flow;
630         struct nlmsg *msg;
631
632         /* use default mask if none provided */
633         if (!mask)
634                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
635         /* check that previous eth type is compatible with ipv4 */
636         if (info->eth_type && info->eth_type != htons(ETH_P_IP))
637                 return -1;
638         /* store ip_proto for consistency if udp/tcp pattern item comes next */
639         if (spec)
640                 info->ip_proto = spec->hdr.next_proto_id;
641         if (!flow)
642                 return 0;
643         msg = &flow->msg;
644         if (!info->eth_type)
645                 info->eth_type = htons(ETH_P_IP);
646         if (!spec)
647                 return 0;
648         if (spec->hdr.dst_addr) {
649                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
650                              spec->hdr.dst_addr);
651                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
652                              mask->hdr.dst_addr);
653         }
654         if (spec->hdr.src_addr) {
655                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
656                              spec->hdr.src_addr);
657                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
658                              mask->hdr.src_addr);
659         }
660         if (spec->hdr.next_proto_id)
661                 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
662                             spec->hdr.next_proto_id);
663         return 0;
664 }
665
666 /**
667  * Make as many checks as possible on an IPv6 item, and if a flow is provided,
668  * fill it appropriately with IPv6 info.
669  *
670  * @param[in] item
671  *   Item specification.
672  * @param[in, out] data
673  *   Additional data structure to tell next layers we've been here.
674  *
675  * @return
676  *   0 if checks are alright, -1 otherwise.
677  */
678 static int
679 tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
680 {
681         struct convert_data *info = (struct convert_data *)data;
682         const struct rte_flow_item_ipv6 *spec = item->spec;
683         const struct rte_flow_item_ipv6 *mask = item->mask;
684         struct rte_flow *flow = info->flow;
685         uint8_t empty_addr[16] = { 0 };
686         struct nlmsg *msg;
687
688         /* use default mask if none provided */
689         if (!mask)
690                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
691         /* check that previous eth type is compatible with ipv6 */
692         if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
693                 return -1;
694         /* store ip_proto for consistency if udp/tcp pattern item comes next */
695         if (spec)
696                 info->ip_proto = spec->hdr.proto;
697         if (!flow)
698                 return 0;
699         msg = &flow->msg;
700         if (!info->eth_type)
701                 info->eth_type = htons(ETH_P_IPV6);
702         if (!spec)
703                 return 0;
704         if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
705                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
706                            sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
707                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
708                            sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
709         }
710         if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
711                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
712                            sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
713                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
714                            sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
715         }
716         if (spec->hdr.proto)
717                 tap_nlattr_add8(&msg->nh,
718                                 TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
719         return 0;
720 }
721
722 /**
723  * Make as many checks as possible on a UDP item, and if a flow is provided,
724  * fill it appropriately with UDP info.
725  *
726  * @param[in] item
727  *   Item specification.
728  * @param[in, out] data
729  *   Additional data structure to tell next layers we've been here.
730  *
731  * @return
732  *   0 if checks are alright, -1 otherwise.
733  */
734 static int
735 tap_flow_create_udp(const struct rte_flow_item *item, void *data)
736 {
737         struct convert_data *info = (struct convert_data *)data;
738         const struct rte_flow_item_udp *spec = item->spec;
739         const struct rte_flow_item_udp *mask = item->mask;
740         struct rte_flow *flow = info->flow;
741         struct nlmsg *msg;
742
743         /* use default mask if none provided */
744         if (!mask)
745                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
746         /* check that previous ip_proto is compatible with udp */
747         if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
748                 return -1;
749         /* TC does not support UDP port masking. Only accept if exact match. */
750         if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
751             (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
752                 return -1;
753         if (!flow)
754                 return 0;
755         msg = &flow->msg;
756         tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
757         if (!spec)
758                 return 0;
759         if (spec->hdr.dst_port & mask->hdr.dst_port)
760                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
761                              spec->hdr.dst_port);
762         if (spec->hdr.src_port & mask->hdr.src_port)
763                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
764                              spec->hdr.src_port);
765         return 0;
766 }
767
768 /**
769  * Make as many checks as possible on a TCP item, and if a flow is provided,
770  * fill it appropriately with TCP info.
771  *
772  * @param[in] item
773  *   Item specification.
774  * @param[in, out] data
775  *   Additional data structure to tell next layers we've been here.
776  *
777  * @return
778  *   0 if checks are alright, -1 otherwise.
779  */
780 static int
781 tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
782 {
783         struct convert_data *info = (struct convert_data *)data;
784         const struct rte_flow_item_tcp *spec = item->spec;
785         const struct rte_flow_item_tcp *mask = item->mask;
786         struct rte_flow *flow = info->flow;
787         struct nlmsg *msg;
788
789         /* use default mask if none provided */
790         if (!mask)
791                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
792         /* check that previous ip_proto is compatible with tcp */
793         if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
794                 return -1;
795         /* TC does not support TCP port masking. Only accept if exact match. */
796         if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
797             (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
798                 return -1;
799         if (!flow)
800                 return 0;
801         msg = &flow->msg;
802         tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
803         if (!spec)
804                 return 0;
805         if (spec->hdr.dst_port & mask->hdr.dst_port)
806                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
807                              spec->hdr.dst_port);
808         if (spec->hdr.src_port & mask->hdr.src_port)
809                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
810                              spec->hdr.src_port);
811         return 0;
812 }
813
814 /**
815  * Check support for a given item.
816  *
817  * @param[in] item
818  *   Item specification.
819  * @param size
820  *   Bit-mask size in bytes.
821  * @param[in] supported_mask
822  *   Bit-mask covering supported fields to compare with spec, last and mask in
823  *   \item.
824  * @param[in] default_mask
825  *   Default bit-mask to use if none is provided in \item.
826  *
827  * @return
828  *   0 on success, nonzero otherwise.
829  */
830 static int
831 tap_flow_item_validate(const struct rte_flow_item *item,
832                        unsigned int size,
833                        const uint8_t *supported_mask,
834                        const uint8_t *default_mask)
835 {
836         int ret = 0;
837
838         /* An empty layer is allowed, as long as all fields are NULL */
839         if (!item->spec && (item->mask || item->last))
840                 return -1;
841         /* Is the item spec compatible with what the NIC supports? */
842         if (item->spec && !item->mask) {
843                 unsigned int i;
844                 const uint8_t *spec = item->spec;
845
846                 for (i = 0; i < size; ++i)
847                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
848                                 return -1;
849                 /* Is the default mask compatible with what the NIC supports? */
850                 for (i = 0; i < size; i++)
851                         if ((default_mask[i] | supported_mask[i]) !=
852                             supported_mask[i])
853                                 return -1;
854         }
855         /* Is the item last compatible with what the NIC supports? */
856         if (item->last && !item->mask) {
857                 unsigned int i;
858                 const uint8_t *spec = item->last;
859
860                 for (i = 0; i < size; ++i)
861                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
862                                 return -1;
863         }
864         /* Is the item mask compatible with what the NIC supports? */
865         if (item->mask) {
866                 unsigned int i;
867                 const uint8_t *spec = item->mask;
868
869                 for (i = 0; i < size; ++i)
870                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
871                                 return -1;
872         }
873         /*
874          * Once masked, are item spec and item last equal?
875          * TC does not support ranges, so anything else is invalid.
876          */
877         if (item->spec && item->last) {
878                 uint8_t spec[size];
879                 uint8_t last[size];
880                 const uint8_t *apply = default_mask;
881                 unsigned int i;
882
883                 if (item->mask)
884                         apply = item->mask;
885                 for (i = 0; i < size; ++i) {
886                         spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
887                         last[i] = ((const uint8_t *)item->last)[i] & apply[i];
888                 }
889                 ret = memcmp(spec, last, size);
890         }
891         return ret;
892 }
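/*
 * Example: an IPv4 item with spec->hdr.dst_addr = 10.0.0.1 and
 * last->hdr.dst_addr = 10.0.0.2 fails this check: once masked, spec and
 * last differ, and TC flower cannot express such a range.
 */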
893
894 /**
895  * Configure the kernel with a TC action and its configured parameters.
896  * Handled actions: "gact", "mirred", "skbedit", "bpf".
897  *
898  * @param[in] flow
899  *   Pointer to rte flow containing the netlink message
900  *
901  * @param[in, out] act_index
902  *   Pointer to action sequence number in the TC command
903  *
904  * @param[in] adata
905  *  Pointer to struct holding the action parameters
906  *
907  * @return
908  *   -1 on failure, 0 on success
909  */
910 static int
911 add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
912 {
913         struct nlmsg *msg = &flow->msg;
914
915         if (tap_nlattr_nested_start(msg, (*act_index)++) < 0)
916                 return -1;
917
918         tap_nlattr_add(&msg->nh, TCA_ACT_KIND,
919                                 strlen(adata->id) + 1, adata->id);
920         if (tap_nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
921                 return -1;
922         if (strcmp("gact", adata->id) == 0) {
923                 tap_nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(adata->gact),
924                            &adata->gact);
925         } else if (strcmp("mirred", adata->id) == 0) {
926                 if (adata->mirred.eaction == TCA_EGRESS_MIRROR)
927                         adata->mirred.action = TC_ACT_PIPE;
928                 else /* REDIRECT */
929                         adata->mirred.action = TC_ACT_STOLEN;
930                 tap_nlattr_add(&msg->nh, TCA_MIRRED_PARMS,
931                            sizeof(adata->mirred),
932                            &adata->mirred);
933         } else if (strcmp("skbedit", adata->id) == 0) {
934                 tap_nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS,
935                            sizeof(adata->skbedit.skbedit),
936                            &adata->skbedit.skbedit);
937                 tap_nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING,
938                              adata->skbedit.queue);
939         } else if (strcmp("bpf", adata->id) == 0) {
940                 tap_nlattr_add32(&msg->nh, TCA_ACT_BPF_FD, adata->bpf.bpf_fd);
941                 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
942                            strlen(adata->bpf.annotation) + 1,
943                            adata->bpf.annotation);
944                 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
945                            sizeof(adata->bpf.bpf),
946                            &adata->bpf.bpf);
947         } else {
948                 return -1;
949         }
950         tap_nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
951         tap_nlattr_nested_finish(msg); /* nested act_index */
952         return 0;
953 }
954
955 /**
956  * Helper function to send a series of TC actions to the kernel.
957  *
958  * @param[in] flow
959  *   Pointer to rte flow containing the netlink message
960  *
961  * @param[in] nb_actions
962  *   Number of actions in an array of action structs
963  *
964  * @param[in] data
965  *   Pointer to an array of action structs
966  *
967  * @param[in] classifier_action
968  *   The classifier on behalf of which the actions are configured
969  *
970  * @return
971  *   -1 on failure, 0 on success
972  */
973 static int
974 add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
975             int classifier_action)
976 {
977         struct nlmsg *msg = &flow->msg;
978         size_t act_index = 1;
979         int i;
980
981         if (tap_nlattr_nested_start(msg, classifier_action) < 0)
982                 return -1;
983         for (i = 0; i < nb_actions; i++)
984                 if (add_action(flow, &act_index, data + i) < 0)
985                         return -1;
986         tap_nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
987         return 0;
988 }
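/*
 * Usage sketch (this mirrors the DROP case in priv_flow_process() below):
 * a packet-dropping rule is built from a single "gact" action:
 *
 *   struct action_data adata = {
 *           .id = "gact",
 *           .gact = { .action = TC_ACT_SHOT },
 *   };
 *   if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
 *           return -1;
 */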
989
990 /**
991  * Validate a flow supported by TC.
992  * If flow param is not NULL, then also fill the netlink message inside.
993  *
994  * @param pmd
995  *   Pointer to private structure.
996  * @param[in] attr
997  *   Flow rule attributes.
998  * @param[in] pattern
999  *   Pattern specification (list terminated by the END pattern item).
1000  * @param[in] actions
1001  *   Associated actions (list terminated by the END action).
1002  * @param[out] error
1003  *   Perform verbose error reporting if not NULL.
1004  * @param[in, out] flow
1005  *   Flow structure to update.
1006  * @param[in] mirred
1007  *   If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
1008  *   redirection to the tap netdevice, and the TC rule will be configured
1009  *   on the remote netdevice in pmd.
1010  *   If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
1011  *   mirroring to the tap netdevice, and the TC rule will be configured
1012  *   on the remote netdevice in pmd. Matching packets will thus be duplicated.
1013  *   If set to 0, the standard behavior is to be used: set correct actions for
1014  *   the TC rule, and apply it on the tap netdevice.
1015  *
1016  * @return
1017  *   0 on success, a negative errno value otherwise and rte_errno is set.
1018  */
1019 static int
1020 priv_flow_process(struct pmd_internals *pmd,
1021                   const struct rte_flow_attr *attr,
1022                   const struct rte_flow_item items[],
1023                   const struct rte_flow_action actions[],
1024                   struct rte_flow_error *error,
1025                   struct rte_flow *flow,
1026                   int mirred)
1027 {
1028         const struct tap_flow_items *cur_item = tap_flow_items;
1029         struct convert_data data = {
1030                 .eth_type = 0,
1031                 .ip_proto = 0,
1032                 .flow = flow,
1033         };
1034         int action = 0; /* Only one action authorized for now */
1035
1036         if (attr->group > MAX_GROUP) {
1037                 rte_flow_error_set(
1038                         error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1039                         NULL, "group value too big: cannot exceed 15");
1040                 return -rte_errno;
1041         }
1042         if (attr->priority > MAX_PRIORITY) {
1043                 rte_flow_error_set(
1044                         error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1045                         NULL, "priority value too big");
1046                 return -rte_errno;
1047         } else if (flow) {
1048                 uint16_t group = attr->group << GROUP_SHIFT;
1049                 uint16_t prio = group | (attr->priority +
1050                                 RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
1051                 flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
1052                                                  flow->msg.t.tcm_info);
1053         }
1054         if (flow) {
1055                 if (mirred) {
1056                         /*
1057                          * If attr->ingress, the rule applies on remote ingress
1058                          * to match incoming packets
1059                          * If attr->egress, the rule applies on tap ingress (as
1060                          * seen from the kernel) to deal with packets going out
1061                          * from the DPDK app.
1062                          */
1063                         flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
1064                 } else {
1065                         /* Standard rule on tap egress (kernel standpoint). */
1066                         flow->msg.t.tcm_parent =
1067                                 TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1068                 }
1069                 /* use flower filter type */
1070                 tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
1071                 if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
1072                         goto exit_item_not_supported;
1073         }
1074         for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
1075                 const struct tap_flow_items *token = NULL;
1076                 unsigned int i;
1077                 int err = 0;
1078
1079                 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
1080                         continue;
1081                 for (i = 0;
1082                      cur_item->items &&
1083                      cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
1084                      ++i) {
1085                         if (cur_item->items[i] == items->type) {
1086                                 token = &tap_flow_items[items->type];
1087                                 break;
1088                         }
1089                 }
1090                 if (!token)
1091                         goto exit_item_not_supported;
1092                 cur_item = token;
1093                 err = tap_flow_item_validate(
1094                         items, cur_item->mask_sz,
1095                         (const uint8_t *)cur_item->mask,
1096                         (const uint8_t *)cur_item->default_mask);
1097                 if (err)
1098                         goto exit_item_not_supported;
1099                 if (flow && cur_item->convert) {
1100                         err = cur_item->convert(items, &data);
1101                         if (err)
1102                                 goto exit_item_not_supported;
1103                 }
1104         }
1105         if (flow) {
1106                 if (data.vlan) {
1107                         tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1108                                      htons(ETH_P_8021Q));
1109                         tap_nlattr_add16(&flow->msg.nh,
1110                                      TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1111                                      data.eth_type ?
1112                                      data.eth_type : htons(ETH_P_ALL));
1113                 } else if (data.eth_type) {
1114                         tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1115                                      data.eth_type);
1116                 }
1117         }
1118         if (mirred && flow) {
1119                 struct action_data adata = {
1120                         .id = "mirred",
1121                         .mirred = {
1122                                 .eaction = mirred,
1123                         },
1124                 };
1125
1126                 /*
1127                  * If attr->egress && mirred, then this is a special
1128                  * case where the rule must be applied on the tap, to
1129                  * redirect packets coming from the DPDK App, out
1130                  * through the remote netdevice.
1131                  */
1132                 adata.mirred.ifindex = attr->ingress ? pmd->if_index :
1133                         pmd->remote_if_index;
1134                 if (mirred == TCA_EGRESS_MIRROR)
1135                         adata.mirred.action = TC_ACT_PIPE;
1136                 else
1137                         adata.mirred.action = TC_ACT_STOLEN;
1138                 if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
1139                         goto exit_action_not_supported;
1140                 else
1141                         goto end;
1142         }
1143         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
1144                 int err = 0;
1145
1146                 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
1147                         continue;
1148                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
1149                         if (action)
1150                                 goto exit_action_not_supported;
1151                         action = 1;
1152                         if (flow) {
1153                                 struct action_data adata = {
1154                                         .id = "gact",
1155                                         .gact = {
1156                                                 .action = TC_ACT_SHOT,
1157                                         },
1158                                 };
1159
1160                                 err = add_actions(flow, 1, &adata,
1161                                                   TCA_FLOWER_ACT);
1162                         }
1163                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
1164                         if (action)
1165                                 goto exit_action_not_supported;
1166                         action = 1;
1167                         if (flow) {
1168                                 struct action_data adata = {
1169                                         .id = "gact",
1170                                         .gact = {
1171                                                 /* continue */
1172                                                 .action = TC_ACT_UNSPEC,
1173                                         },
1174                                 };
1175
1176                                 err = add_actions(flow, 1, &adata,
1177                                                   TCA_FLOWER_ACT);
1178                         }
1179                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1180                         const struct rte_flow_action_queue *queue =
1181                                 (const struct rte_flow_action_queue *)
1182                                 actions->conf;
1183
1184                         if (action)
1185                                 goto exit_action_not_supported;
1186                         action = 1;
1187                         if (!queue ||
1188                             (queue->index > pmd->dev->data->nb_rx_queues - 1))
1189                                 goto exit_action_not_supported;
1190                         if (flow) {
1191                                 struct action_data adata = {
1192                                         .id = "skbedit",
1193                                         .skbedit = {
1194                                                 .skbedit = {
1195                                                         .action = TC_ACT_PIPE,
1196                                                 },
1197                                                 .queue = queue->index,
1198                                         },
1199                                 };
1200
1201                                 err = add_actions(flow, 1, &adata,
1202                                         TCA_FLOWER_ACT);
1203                         }
1204                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
1205                         const struct rte_flow_action_rss *rss =
1206                                 (const struct rte_flow_action_rss *)
1207                                 actions->conf;
1208
1209                         if (action++)
1210                                 goto exit_action_not_supported;
1211
1212                         if (!pmd->rss_enabled) {
1213                                 err = rss_enable(pmd, attr, error);
1214                                 if (err)
1215                                         goto exit_action_not_supported;
1216                         }
1217                         if (flow && rss)
1218                                 err = rss_add_actions(flow, pmd, rss, error);
1219                 } else {
1220                         goto exit_action_not_supported;
1221                 }
1222                 if (err)
1223                         goto exit_action_not_supported;
1224         }
1225 end:
1226         if (flow)
1227                 tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
1228         return 0;
1229 exit_item_not_supported:
1230         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1231                            items, "item not supported");
1232         return -rte_errno;
1233 exit_action_not_supported:
1234         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
1235                            actions, "action not supported");
1236         return -rte_errno;
1237 }
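/*
 * Resulting netlink attribute layout (a sketch; nesting shown by
 * indentation) for a rule built by priv_flow_process():
 *
 *   TCA_KIND = "flower"
 *   TCA_OPTIONS
 *     TCA_FLOWER_KEY_*, TCA_FLOWER_KEY_*_MASK   (from the pattern items)
 *     TCA_FLOWER_ACT
 *       1                                       (action index)
 *         TCA_ACT_KIND = "gact" | "mirred" | "skbedit" | "bpf"
 *         TCA_ACT_OPTIONS
 *           TCA_*_PARMS ...                     (action parameters)
 */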
1238
1239
1240
1241 /**
1242  * Validate a flow.
1243  *
1244  * @see rte_flow_validate()
1245  * @see rte_flow_ops
1246  */
1247 static int
1248 tap_flow_validate(struct rte_eth_dev *dev,
1249                   const struct rte_flow_attr *attr,
1250                   const struct rte_flow_item items[],
1251                   const struct rte_flow_action actions[],
1252                   struct rte_flow_error *error)
1253 {
1254         struct pmd_internals *pmd = dev->data->dev_private;
1255
1256         return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
1257 }
1258
1259 /**
1260  * Set a unique handle in a flow.
1261  *
1262  * The kernel supports TC rules with equal priority, as long as they use the
1263  * same matching fields (e.g.: dst mac and ipv4) with different values (and
1264  * full mask to ensure no collision is possible).
1265  * In those rules, the handle (uint32_t) is the part that would identify
1266  * specifically each rule.
1267  *
1268  * On 32-bit architectures, the handle can simply be the flow's pointer address.
1269  * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
1270  * unique handle.
1271  *
1272  * @param[in, out] flow
1273  *   The flow that needs its handle set.
1274  */
1275 static void
1276 tap_flow_set_handle(struct rte_flow *flow)
1277 {
1278         uint32_t handle = 0;
1279
1280         if (sizeof(flow) > 4)
1281                 handle = rte_jhash(&flow, sizeof(flow), 1);
1282         else
1283                 handle = (uintptr_t)flow;
1284         /* must be at least 1 to avoid letting the kernel choose one for us */
1285         if (!handle)
1286                 handle = 1;
1287         flow->msg.t.tcm_handle = handle;
1288 }
1289
1290 /**
1291  * Free the flow opened file descriptors and allocated memory
1292  *
1293  * @param[in] flow
1294  *   Pointer to the flow to free
1295  *
1296  */
1297 static void
1298 tap_flow_free(struct pmd_internals *pmd, struct rte_flow *flow)
1299 {
1300         int i;
1301
1302         if (!flow)
1303                 return;
1304
1305         if (pmd->rss_enabled) {
1306                 /* Close flow BPF file descriptors */
1307                 for (i = 0; i < SEC_MAX; i++)
1308                         if (flow->bpf_fd[i] != 0) {
1309                                 close(flow->bpf_fd[i]);
1310                                 flow->bpf_fd[i] = 0;
1311                         }
1312
1313                 /* Release the map key for this RSS rule */
1314                 bpf_rss_key(KEY_CMD_RELEASE, &flow->key_idx);
1315                 flow->key_idx = 0;
1316         }
1317
1318         /* Free flow allocated memory */
1319         rte_free(flow);
1320 }
1321
1322 /**
1323  * Create a flow.
1324  *
1325  * @see rte_flow_create()
1326  * @see rte_flow_ops
1327  */
1328 static struct rte_flow *
1329 tap_flow_create(struct rte_eth_dev *dev,
1330                 const struct rte_flow_attr *attr,
1331                 const struct rte_flow_item items[],
1332                 const struct rte_flow_action actions[],
1333                 struct rte_flow_error *error)
1334 {
1335         struct pmd_internals *pmd = dev->data->dev_private;
1336         struct rte_flow *remote_flow = NULL;
1337         struct rte_flow *flow = NULL;
1338         struct nlmsg *msg = NULL;
1339         int err;
1340
1341         if (!pmd->if_index) {
1342                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1343                                    NULL,
1344                                    "can't create rule, ifindex not found");
1345                 goto fail;
1346         }
1347         /*
1348          * No rules configured through standard rte_flow should be set on the
1349          * priorities used by implicit rules.
1350          */
1351         if ((attr->group == MAX_GROUP) &&
1352             attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
1353                 rte_flow_error_set(
1354                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1355                         NULL, "priority value too big");
1356                 goto fail;
1357         }
1358         flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1359         if (!flow) {
1360                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1361                                    NULL, "cannot allocate memory for rte_flow");
1362                 goto fail;
1363         }
1364         msg = &flow->msg;
1365         tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
1366                     NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1367         msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1368         tap_flow_set_handle(flow);
1369         if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
1370                 goto fail;
1371         err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1372         if (err < 0) {
1373                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1374                                    NULL, "couldn't send request to kernel");
1375                 goto fail;
1376         }
1377         err = tap_nl_recv_ack(pmd->nlsk_fd);
1378         if (err < 0) {
1379                 RTE_LOG(ERR, PMD,
1380                         "Kernel refused TC filter rule creation (%d): %s\n",
1381                         errno, strerror(errno));
1382                 rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1383                                    NULL,
1384                                    "overlapping rules or Kernel too old for flower support");
1385                 goto fail;
1386         }
1387         LIST_INSERT_HEAD(&pmd->flows, flow, next);
1388         /*
1389          * If a remote device is configured, a TC rule with identical items for
1390          * matching must be set on that device, with a single action: redirect
1391          * to the local pmd->if_index.
1392          */
1393         if (pmd->remote_if_index) {
1394                 remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1395                 if (!remote_flow) {
1396                         rte_flow_error_set(
1397                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1398                                 "cannot allocate memory for rte_flow");
1399                         goto fail;
1400                 }
1401                 msg = &remote_flow->msg;
1402                 /* set the rule if_index for the remote netdevice */
1403                 tc_init_msg(
1404                         msg, pmd->remote_if_index, RTM_NEWTFILTER,
1405                         NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1406                 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1407                 tap_flow_set_handle(remote_flow);
1408                 if (priv_flow_process(pmd, attr, items, NULL,
1409                                       error, remote_flow, TCA_EGRESS_REDIR)) {
1410                         rte_flow_error_set(
1411                                 error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE,
1412                                 NULL, "rte flow rule validation failed");
1413                         goto fail;
1414                 }
1415                 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1416                 if (err < 0) {
1417                         rte_flow_error_set(
1418                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1419                                 NULL, "Failure sending nl request");
1420                         goto fail;
1421                 }
1422                 err = tap_nl_recv_ack(pmd->nlsk_fd);
1423                 if (err < 0) {
1424                         RTE_LOG(ERR, PMD,
1425                                 "Kernel refused TC filter rule creation (%d): %s\n",
1426                                 errno, strerror(errno));
1427                         rte_flow_error_set(
1428                                 error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1429                                 NULL,
1430                                 "overlapping rules or Kernel too old for flower support");
1431                         goto fail;
1432                 }
1433                 flow->remote_flow = remote_flow;
1434         }
1435         return flow;
1436 fail:
1437         if (remote_flow)
1438                 rte_free(remote_flow);
1439         if (flow)
1440                 tap_flow_free(pmd, flow);
1441         return NULL;
1442 }
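/*
 * Usage sketch from an application's point of view (hypothetical values;
 * port_id is assumed to designate a tap port). rte_flow_create() dispatches
 * here and the rule is translated into a TC flower filter:
 *
 *     struct rte_flow_attr attr = { .ingress = 1 };
 *     struct rte_flow_item pattern[] = {
 *             { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *             { .type = RTE_FLOW_ITEM_TYPE_END },
 *     };
 *     struct rte_flow_action_queue queue = { .index = 0 };
 *     struct rte_flow_action actions[] = {
 *             { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *             { .type = RTE_FLOW_ACTION_TYPE_END },
 *     };
 *     struct rte_flow_error err;
 *     struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                          actions, &err);
 */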
1443
1444 /**
1445  * Destroy a flow using pointer to pmd_internal.
1446  *
1447  * @param[in, out] pmd
1448  *   Pointer to private structure.
1449  * @param[in] flow
1450  *   Pointer to the flow to destroy.
1451  * @param[in, out] error
1452  *   Pointer to the flow error handler
1453  *
1454  * @return 0 if the flow could be destroyed, -1 otherwise.
1455  */
1456 static int
1457 tap_flow_destroy_pmd(struct pmd_internals *pmd,
1458                      struct rte_flow *flow,
1459                      struct rte_flow_error *error)
1460 {
1461         struct rte_flow *remote_flow = flow->remote_flow;
1462         int ret = 0;
1463
1464         LIST_REMOVE(flow, next);
1465         flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1466         flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1467
1468         ret = tap_nl_send(pmd->nlsk_fd, &flow->msg.nh);
1469         if (ret < 0) {
1470                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1471                                    NULL, "couldn't send request to kernel");
1472                 goto end;
1473         }
1474         ret = tap_nl_recv_ack(pmd->nlsk_fd);
1475         /* If errno is ENOENT, the rule is already gone from the kernel. */
1476         if (ret < 0 && errno == ENOENT)
1477                 ret = 0;
1478         if (ret < 0) {
1479                 RTE_LOG(ERR, PMD,
1480                         "Kernel refused TC filter rule deletion (%d): %s\n",
1481                         errno, strerror(errno));
1482                 rte_flow_error_set(
1483                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1484                         "couldn't receive kernel ack to our request");
1485                 goto end;
1486         }
1487
1488         if (remote_flow) {
1489                 remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1490                 remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1491
1492                 ret = tap_nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
1493                 if (ret < 0) {
1494                         rte_flow_error_set(
1495                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1496                                 NULL, "Failure sending nl request");
1497                         goto end;
1498                 }
1499                 ret = tap_nl_recv_ack(pmd->nlsk_fd);
1500                 if (ret < 0 && errno == ENOENT)
1501                         ret = 0;
1502                 if (ret < 0) {
1503                         RTE_LOG(ERR, PMD,
1504                                 "Kernel refused TC filter rule deletion (%d): %s\n",
1505                                 errno, strerror(errno));
1506                         rte_flow_error_set(
1507                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1508                                 NULL, "Failure trying to receive nl ack");
1509                         goto end;
1510                 }
1511         }
1512 end:
1513         if (remote_flow)
1514                 rte_free(remote_flow);
1515         tap_flow_free(pmd, flow);
1516         return ret;
1517 }
1518
1519 /**
1520  * Destroy a flow.
1521  *
1522  * @see rte_flow_destroy()
1523  * @see rte_flow_ops
1524  */
1525 static int
1526 tap_flow_destroy(struct rte_eth_dev *dev,
1527                  struct rte_flow *flow,
1528                  struct rte_flow_error *error)
1529 {
1530         struct pmd_internals *pmd = dev->data->dev_private;
1531
1532         return tap_flow_destroy_pmd(pmd, flow, error);
1533 }
1534
1535 /**
1536  * Enable/disable flow isolation.
1537  *
1538  * @see rte_flow_isolate()
1539  * @see rte_flow_ops
1540  */
1541 static int
1542 tap_flow_isolate(struct rte_eth_dev *dev,
1543                  int set,
1544                  struct rte_flow_error *error __rte_unused)
1545 {
1546         struct pmd_internals *pmd = dev->data->dev_private;
1547
1548         if (set)
1549                 pmd->flow_isolate = 1;
1550         else
1551                 pmd->flow_isolate = 0;
1552         /*
1553          * If the netdevice is there, set up appropriate flow rules now.
1554          * Otherwise they will be set up when it is brought up (tun_alloc).
1555          */
1556         if (!pmd->rxq[0].fd)
1557                 return 0;
1558         if (set) {
1559                 struct rte_flow *flow;
1560
1561                 while (1) {
1562                         flow = LIST_FIRST(&pmd->implicit_flows);
1563                         if (!flow)
1564                                 break;
1565                         /*
1566                          * Remove all implicit rules on the remote.
1567                          * Keep the local rule to redirect packets on TX.
1568                          * Also keep the last implicit local rule: ISOLATE.
1569                          */
1570                         if (flow->msg.t.tcm_ifindex == pmd->if_index)
1571                                 break;
1572                         if (tap_flow_destroy_pmd(pmd, flow, NULL) < 0)
1573                                 goto error;
1574                 }
1575                 /* Switch the TC rule according to pmd->flow_isolate */
1576                 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1577                         goto error;
1578         } else {
1579                 /* Switch the TC rule according to pmd->flow_isolate */
1580                 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1581                         goto error;
1582                 if (!pmd->remote_if_index)
1583                         return 0;
1584                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
1585                         goto error;
1586                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
1587                         goto error;
1588                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
1589                         goto error;
1590                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
1591                         goto error;
1592                 if (dev->data->promiscuous &&
1593                     tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
1594                         goto error;
1595                 if (dev->data->all_multicast &&
1596                     tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
1597                         goto error;
1598         }
1599         return 0;
1600 error:
1601         pmd->flow_isolate = 0;
1602         return rte_flow_error_set(
1603                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1604                 "TC rule creation failed");
1605 }
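/*
 * Usage sketch (application side, hypothetical port_id): isolated mode is
 * normally requested before configuring the device and reaches the handler
 * above through rte_flow_isolate():
 *
 *     struct rte_flow_error err;
 *     if (rte_flow_isolate(port_id, 1, &err) < 0)
 *             printf("isolation failed: %s\n", err.message);
 */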
1606
1607 /**
1608  * Destroy all flows.
1609  *
1610  * @see rte_flow_flush()
1611  * @see rte_flow_ops
1612  */
1613 int
1614 tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1615 {
1616         struct pmd_internals *pmd = dev->data->dev_private;
1617         struct rte_flow *flow;
1618
1619         while (!LIST_EMPTY(&pmd->flows)) {
1620                 flow = LIST_FIRST(&pmd->flows);
1621                 if (tap_flow_destroy(dev, flow, error) < 0)
1622                         return -1;
1623         }
1624         return 0;
1625 }
1626
1627 /**
1628  * Add an implicit flow rule on the remote device to make sure traffic gets to
1629  * the tap netdevice from there.
1630  *
1631  * @param pmd
1632  *   Pointer to private structure.
1633  * @param[in] idx
1634  *   The idx in the implicit_rte_flows array specifying which rule to apply.
1635  *
1636  * @return -1 if the rule couldn't be applied, 0 otherwise.
1637  */
1638 int tap_flow_implicit_create(struct pmd_internals *pmd,
1639                              enum implicit_rule_index idx)
1640 {
1641         uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
1642         struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
1643         struct rte_flow_action isolate_actions[2] = {
1644                 [1] = {
1645                         .type = RTE_FLOW_ACTION_TYPE_END,
1646                 },
1647         };
1648         struct rte_flow_item *items = implicit_rte_flows[idx].items;
1649         struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
1650         struct rte_flow_item_eth eth_local = { .type = 0 };
1651         uint16_t if_index = pmd->remote_if_index;
1652         struct rte_flow *remote_flow = NULL;
1653         struct nlmsg *msg = NULL;
1654         int err = 0;
1655         struct rte_flow_item items_local[2] = {
1656                 [0] = {
1657                         .type = items[0].type,
1658                         .spec = &eth_local,
1659                         .mask = items[0].mask,
1660                 },
1661                 [1] = {
1662                         .type = items[1].type,
1663                 }
1664         };
1665
1666         remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1667         if (!remote_flow) {
1668                 RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
1669                 goto fail;
1670         }
1671         msg = &remote_flow->msg;
1672         if (idx == TAP_REMOTE_TX) {
1673                 if_index = pmd->if_index;
1674         } else if (idx == TAP_ISOLATE) {
1675                 if_index = pmd->if_index;
1676                 /* Don't be exclusive for this rule; it can be changed later. */
1677                 flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
1678                 isolate_actions[0].type = pmd->flow_isolate ?
1679                         RTE_FLOW_ACTION_TYPE_DROP :
1680                         RTE_FLOW_ACTION_TYPE_PASSTHRU;
1681                 actions = isolate_actions;
1682         } else if (idx == TAP_REMOTE_LOCAL_MAC) {
1683                 /*
1684                  * The Ethernet address cannot be set in implicit_rte_flows[]
1685                  * as it is not known at compile time.
1686                  */
1687                 memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
1688                 items = items_local;
1689         }
1690         tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
1691         msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1692          * The ISOLATE rule is always present and must have a static
1693          * handle, as its action changes depending on whether the feature
1694          * is enabled (DROP) or disabled (PASSTHRU).
1695          * disabled (PASSTHRU).
1696          * There is just one REMOTE_PROMISCUOUS rule in all cases. It should
1697          * have a static handle such that adding it twice will fail with EEXIST
1698          * with any kernel version. Remark: old kernels may falsely accept the
1699          * same REMOTE_PROMISCUOUS rules if they had different handles.
1700          */
1701         if (idx == TAP_ISOLATE)
1702                 remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
1703         else if (idx == TAP_REMOTE_PROMISC)
1704                 remote_flow->msg.t.tcm_handle = REMOTE_PROMISCUOUS_HANDLE;
1705         else
1706                 tap_flow_set_handle(remote_flow);
1707         if (priv_flow_process(pmd, attr, items, actions, NULL,
1708                               remote_flow, implicit_rte_flows[idx].mirred)) {
1709                 RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
1710                 goto fail;
1711         }
1712         err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1713         if (err < 0) {
1714                 RTE_LOG(ERR, PMD, "Failure sending nl request\n");
1715                 goto fail;
1716         }
1717         err = tap_nl_recv_ack(pmd->nlsk_fd);
1718         if (err < 0) {
1719                 /* Silently ignore re-adding the remote promiscuous rule */
1720                 if (errno == EEXIST && idx == TAP_REMOTE_PROMISC)
1721                         goto success;
1722                 RTE_LOG(ERR, PMD,
1723                         "Kernel refused TC filter rule creation (%d): %s\n",
1724                         errno, strerror(errno));
1725                 goto fail;
1726         }
1727         LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
1728 success:
1729         return 0;
1730 fail:
1731         if (remote_flow)
1732                 rte_free(remote_flow);
1733         return -1;
1734 }
1735
1736 /**
1737  * Remove a specific implicit flow rule on the remote device.
1738  *
1739  * @param[in, out] pmd
1740  *   Pointer to private structure.
1741  * @param[in] idx
1742  *   The idx in the implicit_rte_flows array specifying which rule to remove.
1743  *
1744  * @return -1 if the implicit rule couldn't be destroyed, 0 otherwise.
1745  */
1746 int tap_flow_implicit_destroy(struct pmd_internals *pmd,
1747                               enum implicit_rule_index idx)
1748 {
1749         struct rte_flow *remote_flow;
1750         int cur_prio = -1;
1751         int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
1752
1753         for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
1754              remote_flow;
1755              remote_flow = LIST_NEXT(remote_flow, next)) {
1756                 cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
1757                 if (cur_prio != idx_prio)
1758                         continue;
1759                 return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
1760         }
1761         return 0;
1762 }
1763
1764 /**
1765  * Destroy all implicit flows.
1766  *
1767  * @see rte_flow_flush()
1768  */
1769 int
1770 tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
1771 {
1772         struct rte_flow *remote_flow;
1773
1774         while (!LIST_EMPTY(&pmd->implicit_flows)) {
1775                 remote_flow = LIST_FIRST(&pmd->implicit_flows);
1776                 if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
1777                         return -1;
1778         }
1779         return 0;
1780 }
1781
1782 #define MAX_RSS_KEYS 256
1783 #define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
1784 #define SEC_NAME_CLS_Q "cls_q"
1785
1786 const char *sec_name[SEC_MAX] = {
1787         [SEC_L3_L4] = "l3_l4",
1788 };
1789
1790 /**
1791  * Enable RSS on tap: create TC rules for queuing.
1792  *
1793  * @param[in, out] pmd
1794  *   Pointer to private structure.
1795  *
1796  * @param[in] attr
1797  *   Pointer to the flow attributes, used to get the flow group.
1798  *
1799  * @param[out] error
1800  *   Pointer to error reporting if not NULL.
1801  *
1802  * @return 0 on success, negative value on failure.
1803  */
1804 static int rss_enable(struct pmd_internals *pmd,
1805                         const struct rte_flow_attr *attr,
1806                         struct rte_flow_error *error)
1807 {
1808         struct rte_flow *rss_flow = NULL;
1809         struct nlmsg *msg = NULL;
1810         /* buffer for the per-queue BPF classifier name annotation */
1811         char annotation[64];
1812         int i;
1813         int err = 0;
1814
1815         /* remove the locked memory limit: BPF maps are charged against it */
1816         struct rlimit memlock_limit = {
1817                 .rlim_cur = RLIM_INFINITY,
1818                 .rlim_max = RLIM_INFINITY,
1819         };
1820         setrlimit(RLIMIT_MEMLOCK, &memlock_limit);
1821
1822         /* Initialize the BPF RSS key repository */
1823         err = bpf_rss_key(KEY_CMD_INIT, NULL);
1824         if (err < 0) {
1825                 rte_flow_error_set(
1826                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1827                         "Failed to initialize BPF RSS keys");
1828
1829                 return -1;
1830         }
1831
1832         /*
1833          * Create the BPF RSS map.
1834          */
1835         pmd->map_fd = tap_flow_bpf_rss_map_create(sizeof(__u32), /* key size */
1836                                 sizeof(struct rss_key),
1837                                 MAX_RSS_KEYS);
1838         if (pmd->map_fd < 0) {
1839                 RTE_LOG(ERR, PMD,
1840                         "Failed to create BPF map (%d): %s\n",
1841                                 errno, strerror(errno));
1842                 rte_flow_error_set(
1843                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1844                         "Kernel too old or not configured "
1845                         "to support BPF maps");
1846
1847                 return -ENOTSUP;
1848         }
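        /*
         * Sketch of what the helper does (simplified, and assuming the map
         * is a BPF hash map as suggested by the key/value arguments): it
         * boils down to a bpf(2) BPF_MAP_CREATE call:
         *
         *     union bpf_attr a = {
         *             .map_type = BPF_MAP_TYPE_HASH,
         *             .key_size = sizeof(__u32),
         *             .value_size = sizeof(struct rss_key),
         *             .max_entries = MAX_RSS_KEYS,
         *     };
         *     int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &a, sizeof(a));
         */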
1849
1850         /*
1851          * Add a rule per queue to match reclassified packets and direct them to
1852          * the correct queue.
1853          */
1854         for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
1855                 pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
1856                 if (pmd->bpf_fd[i] < 0) {
1857                         RTE_LOG(ERR, PMD,
1858                                 "Failed to load BPF section %s for queue %d\n",
1859                                 SEC_NAME_CLS_Q, i);
1860                         rte_flow_error_set(
1861                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1862                                 NULL,
1863                                 "Kernel too old or not configured "
1864                                 "to support BPF programs loading");
1865
1866                         return -ENOTSUP;
1867                 }
1868
1869                 rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1870                 if (!rss_flow) {
1871                         RTE_LOG(ERR, PMD,
1872                                 "Cannot allocate memory for rte_flow\n");
1873                         return -1;
1874                 }
1875                 msg = &rss_flow->msg;
1876                 tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER, NLM_F_REQUEST |
1877                             NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1878                 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1879                 tap_flow_set_handle(rss_flow);
1880                 uint16_t group = attr->group << GROUP_SHIFT;
1881                 uint16_t prio = group | (i + PRIORITY_OFFSET);
1882                 msg->t.tcm_info = TC_H_MAKE(prio << 16, msg->t.tcm_info);
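                /*
                 * Illustration (symbolic; GROUP_SHIFT and PRIORITY_OFFSET
                 * are defined earlier in this file): for flow group g and
                 * queue i the filter priority is
                 * (g << GROUP_SHIFT) | (i + PRIORITY_OFFSET), and
                 * TC_H_MAKE() places it in the upper 16 bits of tcm_info
                 * while ETH_P_ALL stays in the lower 16 bits.
                 */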
1883                 msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1884
1885                 tap_nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
1886                 if (tap_nlattr_nested_start(msg, TCA_OPTIONS) < 0)
1887                         return -1;
1888                 tap_nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
1889                 snprintf(annotation, sizeof(annotation), "[%s%d]",
1890                         SEC_NAME_CLS_Q, i);
1891                 tap_nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation) + 1,
1892                            annotation);
1893                 /* Actions */
1894                 {
1895                         struct action_data adata = {
1896                                 .id = "skbedit",
1897                                 .skbedit = {
1898                                         .skbedit = {
1899                                                 .action = TC_ACT_PIPE,
1900                                         },
1901                                         .queue = i,
1902                                 },
1903                         };
1904                         if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
1905                                 return -1;
1906                 }
1907                 tap_nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
1908
1909                 /* Netlink message is now ready to be sent */
1910                 if (tap_nl_send(pmd->nlsk_fd, &msg->nh) < 0)
1911                         return -1;
1912                 err = tap_nl_recv_ack(pmd->nlsk_fd);
1913                 if (err < 0) {
1914                         RTE_LOG(ERR, PMD,
1915                                 "Kernel refused TC filter rule creation (%d): %s\n",
1916                                 errno, strerror(errno));
1917                         return err;
1918                 }
1919                 LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
1920         }
1921
1922         pmd->rss_enabled = 1;
1923         return err;
1924 }
1925
1926 /**
1927  * Manage the BPF RSS key repository: init, get, release, deinit.
1928  *
1929  * @param[in] cmd
1930  *   Command on the RSS keys: init, get, release or deinit.
1931  *
1932  * @param[in, out] key_idx
1933  *   Pointer to the RSS key index (out for the get command, in for release).
1934  *
1935  * @return -1 on failure to init the keys or get a free key, 0 otherwise.
1936  */
1937 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
1938 {
1939         __u32 i;
1940         int err = 0;
1941         static __u32 num_used_keys;
1942         static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
1943         static __u32 rss_keys_initialized;
1944         __u32 key;
1945
1946         switch (cmd) {
1947         case KEY_CMD_GET:
1948                 if (!rss_keys_initialized) {
1949                         err = -1;
1950                         break;
1951                 }
1952
1953                 if (num_used_keys == RTE_DIM(rss_keys)) {
1954                         err = -1;
1955                         break;
1956                 }
1957
1958                 *key_idx = num_used_keys % RTE_DIM(rss_keys);
1959                 while (rss_keys[*key_idx] == KEY_STAT_USED)
1960                         *key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
1961
1962                 rss_keys[*key_idx] = KEY_STAT_USED;
1963
1964                 /*
1965                  * Add an offset to key_idx to handle a mix of RSS and
1966                  * non-RSS flows.
1967                  * A non-RSS flow keeps the eBPF map index 0 assigned at
1968                  * flow creation, so destroying it could unintentionally
1969                  * remove RSS entry 0 from the eBPF map.
1970                  * To avoid this, add an offset to the real index during
1971                  * a KEY_CMD_GET operation and subtract that offset
1972                  * during a KEY_CMD_RELEASE operation in order to
1973                  * restore the real index.
1974                  */
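                /*
                 * Worked example (with MAX_RSS_KEYS = 256, KEY_IDX_OFFSET is
                 * 3 * 256 = 768): a GET that lands on free slot 5 hands out
                 * 5 + 768 = 773; a RELEASE with a stale key_idx of 0
                 * computes 0 - 768, which wraps (unsigned) far above 256 and
                 * is silently ignored in the release path below.
                 */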
1975                 *key_idx += KEY_IDX_OFFSET;
1976                 num_used_keys++;
1977         break;
1978
1979         case KEY_CMD_RELEASE:
1980                 if (!rss_keys_initialized)
1981                         break;
1982
1983                 /*
1984                  * Subtract the offset to restore the real key index.
1985                  * If a non-RSS flow wrongly tries to release map entry 0,
1986                  * the subtraction turns the real map index into an
1987                  * out-of-range value and the release operation is
1988                  * silently ignored.
1989                  */
1990                 key = *key_idx - KEY_IDX_OFFSET;
1991                 if (key >= RTE_DIM(rss_keys))
1992                         break;
1993
1994                 if (rss_keys[key] == KEY_STAT_USED) {
1995                         rss_keys[key] = KEY_STAT_AVAILABLE;
1996                         num_used_keys--;
1997                 }
1998         break;
1999
2000         case KEY_CMD_INIT:
2001                 for (i = 0; i < RTE_DIM(rss_keys); i++)
2002                         rss_keys[i] = KEY_STAT_AVAILABLE;
2003
2004                 rss_keys_initialized = 1;
2005                 num_used_keys = 0;
2006         break;
2007
2008         case KEY_CMD_DEINIT:
2009                 for (i = 0; i < RTE_DIM(rss_keys); i++)
2010                         rss_keys[i] = KEY_STAT_UNSPEC;
2011
2012                 rss_keys_initialized = 0;
2013                 num_used_keys = 0;
2014         break;
2015
2016         default:
2017                 break;
2018         }
2019
2020         return err;
2021 }
2022
2023 /**
2024  * Add RSS hash calculations and queue selection.
2025  *
2026  * @param[in, out] flow
2027  *   Pointer to the flow to which the RSS actions are added.
2028  * @param[in, out] pmd
2029  *   Pointer to internal structure. Used to set/get the RSS map fd.
2030  * @param[in] rss
2031  *   Pointer to the RSS flow actions.
2032  * @param[out] error
2033  *   Pointer to error reporting if not NULL.
2034  *
2035  * @return 0 on success, negative value on failure
2036  */
2037 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
2038                            const struct rte_flow_action_rss *rss,
2039                            struct rte_flow_error *error)
2040 {
2042         int i;
2043         int err;
2044         struct rss_key rss_entry = { .hash_fields = 0,
2045                                      .key_size = 0 };
2046
2047         /* Get a new map key for a new RSS rule */
2048         err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
2049         if (err < 0) {
2050                 rte_flow_error_set(
2051                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2052                         "Failed to get BPF RSS key");
2053
2054                 return -1;
2055         }
2056
2057         /* Update RSS map entry with queues */
2058         rss_entry.nb_queues = rss->num;
2059         for (i = 0; i < rss->num; i++)
2060                 rss_entry.queues[i] = rss->queue[i];
2061         rss_entry.hash_fields =
2062                 (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
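        /*
         * Illustration (hypothetical action): an RSS action spreading
         * packets over queues 0 and 1 yields a map entry equivalent to:
         *
         *     struct rss_key e = {
         *             .hash_fields = (1 << HASH_FIELD_IPV4_L3_L4) |
         *                            (1 << HASH_FIELD_IPV6_L3_L4),
         *             .nb_queues = 2,
         *             .queues = { 0, 1 },
         *     };
         *
         * keyed in the eBPF map by flow->key_idx.
         */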
2063
2064         /* Add this RSS entry to map */
2065         err = tap_flow_bpf_update_rss_elem(pmd->map_fd,
2066                                 &flow->key_idx, &rss_entry);
2067
2068         if (err) {
2069                 RTE_LOG(ERR, PMD,
2070                         "Failed to update BPF map entry #%u (%d): %s\n",
2071                         flow->key_idx, errno, strerror(errno));
2072                 rte_flow_error_set(
2073                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2074                         "Kernel too old or not configured "
2075                         "to support BPF maps updates");
2076
2077                 return -ENOTSUP;
2078         }
2079
2081         /*
2082          * Load bpf rules to calculate hash for this key_idx
2083          */
2084
2085         flow->bpf_fd[SEC_L3_L4] =
2086                 tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
2087         if (flow->bpf_fd[SEC_L3_L4] < 0) {
2088                 RTE_LOG(ERR, PMD,
2089                         "Failed to load BPF section %s (%d): %s\n",
2090                                 sec_name[SEC_L3_L4], errno, strerror(errno));
2091                 rte_flow_error_set(
2092                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2093                         "Kernel too old or not configured "
2094                         "to support BPF program loading");
2095
2096                 return -ENOTSUP;
2097         }
2098
2099         /* Actions */
2100         {
2101                 struct action_data adata[] = {
2102                         {
2103                                 .id = "bpf",
2104                                 .bpf = {
2105                                         .bpf_fd = flow->bpf_fd[SEC_L3_L4],
2106                                         .annotation = sec_name[SEC_L3_L4],
2107                                         .bpf = {
2108                                                 .action = TC_ACT_PIPE,
2109                                         },
2110                                 },
2111                         },
2112                 };
2113
2114                 if (add_actions(flow, RTE_DIM(adata), adata,
2115                         TCA_FLOWER_ACT) < 0)
2116                         return -1;
2117         }
2118
2119         return 0;
2120 }
2121
2122 /**
2123  * Manage filter operations.
2124  *
2125  * @param dev
2126  *   Pointer to Ethernet device structure.
2127  * @param filter_type
2128  *   Filter type.
2129  * @param filter_op
2130  *   Operation to perform.
2131  * @param arg
2132  *   Pointer to operation-specific structure.
2133  *
2134  * @return
2135  *   0 on success, negative errno value on failure.
2136  */
2137 int
2138 tap_dev_filter_ctrl(struct rte_eth_dev *dev,
2139                     enum rte_filter_type filter_type,
2140                     enum rte_filter_op filter_op,
2141                     void *arg)
2142 {
2143         switch (filter_type) {
2144         case RTE_ETH_FILTER_GENERIC:
2145                 if (filter_op != RTE_ETH_FILTER_GET)
2146                         return -EINVAL;
2147                 *(const void **)arg = &tap_flow_ops;
2148                 return 0;
2149         default:
2150                 RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n",
2151                         (void *)dev, filter_type);
2152         }
2153         return -EINVAL;
2154 }
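/*
 * Sketch of how the generic flow API reaches this hook (simplified; the
 * actual plumbing lives in librte_ether): rte_flow asks the PMD for its
 * flow ops through the GENERIC filter type:
 *
 *     const struct rte_flow_ops *ops = NULL;
 *     tap_dev_filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *                         RTE_ETH_FILTER_GET, &ops);
 *     // ops now points to tap_flow_ops
 */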
2155