deb_dpdk.git: drivers/net/tap/tap_flow.c (new upstream version 18.08)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2017 6WIND S.A.
3  * Copyright 2017 Mellanox Technologies, Ltd
4  */
5
6 #include <errno.h>
7 #include <string.h>
8 #include <unistd.h>
9 #include <sys/queue.h>
10 #include <sys/resource.h>
11
12 #include <rte_byteorder.h>
13 #include <rte_jhash.h>
14 #include <rte_malloc.h>
15 #include <rte_eth_tap.h>
16 #include <tap_flow.h>
17 #include <tap_autoconf.h>
18 #include <tap_tcmsgs.h>
19 #include <tap_rss.h>
20
21 #ifndef HAVE_TC_FLOWER
22 /*
23  * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
24  * avoid sending TC messages the kernel cannot understand.
25  */
26 enum {
27         TCA_FLOWER_UNSPEC,
28         TCA_FLOWER_CLASSID,
29         TCA_FLOWER_INDEV,
30         TCA_FLOWER_ACT,
31         TCA_FLOWER_KEY_ETH_DST,         /* ETH_ALEN */
32         TCA_FLOWER_KEY_ETH_DST_MASK,    /* ETH_ALEN */
33         TCA_FLOWER_KEY_ETH_SRC,         /* ETH_ALEN */
34         TCA_FLOWER_KEY_ETH_SRC_MASK,    /* ETH_ALEN */
35         TCA_FLOWER_KEY_ETH_TYPE,        /* be16 */
36         TCA_FLOWER_KEY_IP_PROTO,        /* u8 */
37         TCA_FLOWER_KEY_IPV4_SRC,        /* be32 */
38         TCA_FLOWER_KEY_IPV4_SRC_MASK,   /* be32 */
39         TCA_FLOWER_KEY_IPV4_DST,        /* be32 */
40         TCA_FLOWER_KEY_IPV4_DST_MASK,   /* be32 */
41         TCA_FLOWER_KEY_IPV6_SRC,        /* struct in6_addr */
42         TCA_FLOWER_KEY_IPV6_SRC_MASK,   /* struct in6_addr */
43         TCA_FLOWER_KEY_IPV6_DST,        /* struct in6_addr */
44         TCA_FLOWER_KEY_IPV6_DST_MASK,   /* struct in6_addr */
45         TCA_FLOWER_KEY_TCP_SRC,         /* be16 */
46         TCA_FLOWER_KEY_TCP_DST,         /* be16 */
47         TCA_FLOWER_KEY_UDP_SRC,         /* be16 */
48         TCA_FLOWER_KEY_UDP_DST,         /* be16 */
49 };
50 #endif
51 #ifndef HAVE_TC_VLAN_ID
52 enum {
53         /* TCA_FLOWER_FLAGS, */
54         TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
55         TCA_FLOWER_KEY_VLAN_PRIO,       /* u8   */
56         TCA_FLOWER_KEY_VLAN_ETH_TYPE,   /* be16 */
57 };
58 #endif
59 /*
60  * For kernels < 4.2, BPF-related enums may not be defined.
61  * Runtime checks will be carried out to gracefully report on TC messages that
62  * are rejected by the kernel. Rejection may be due to:
63  * 1. the enum not being defined, or
64  * 2. the enum being defined, but the kernel not being configured to support
65  *    BPF system calls, BPF classifications or BPF actions.
66  */
67 #ifndef HAVE_TC_BPF
68 enum {
69         TCA_BPF_UNSPEC,
70         TCA_BPF_ACT,
71         TCA_BPF_POLICE,
72         TCA_BPF_CLASSID,
73         TCA_BPF_OPS_LEN,
74         TCA_BPF_OPS,
75 };
76 #endif
77 #ifndef HAVE_TC_BPF_FD
78 enum {
79         TCA_BPF_FD = TCA_BPF_OPS + 1,
80         TCA_BPF_NAME,
81 };
82 #endif
83 #ifndef HAVE_TC_ACT_BPF
84 #define tc_gen \
85         __u32                 index; \
86         __u32                 capab; \
87         int                   action; \
88         int                   refcnt; \
89         int                   bindcnt
90
91 struct tc_act_bpf {
92         tc_gen;
93 };
94
95 enum {
96         TCA_ACT_BPF_UNSPEC,
97         TCA_ACT_BPF_TM,
98         TCA_ACT_BPF_PARMS,
99         TCA_ACT_BPF_OPS_LEN,
100         TCA_ACT_BPF_OPS,
101 };
102
103 #endif
104 #ifndef HAVE_TC_ACT_BPF_FD
105 enum {
106         TCA_ACT_BPF_FD = TCA_ACT_BPF_OPS + 1,
107         TCA_ACT_BPF_NAME,
108 };
109 #endif
110
111 /* RSS key management */
112 enum bpf_rss_key_e {
113         KEY_CMD_GET = 1,
114         KEY_CMD_RELEASE,
115         KEY_CMD_INIT,
116         KEY_CMD_DEINIT,
117 };
118
119 enum key_status_e {
120         KEY_STAT_UNSPEC,
121         KEY_STAT_USED,
122         KEY_STAT_AVAILABLE,
123 };
124
125 #define ISOLATE_HANDLE 1
126 #define REMOTE_PROMISCUOUS_HANDLE 2
127
128 struct rte_flow {
129         LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
130         struct rte_flow *remote_flow; /* associated remote flow */
131         int bpf_fd[SEC_MAX]; /* list of BPF fds per ELF section */
132         uint32_t key_idx; /* RSS rule key index into BPF map */
133         struct nlmsg msg;
134 };
135
136 struct convert_data {
137         uint16_t eth_type;
138         uint16_t ip_proto;
139         uint8_t vlan;
140         struct rte_flow *flow;
141 };
142
143 struct remote_rule {
144         struct rte_flow_attr attr;
145         struct rte_flow_item items[2];
146         struct rte_flow_action actions[2];
147         int mirred;
148 };
149
150 struct action_data {
151         char id[16];
152
153         union {
154                 struct tc_gact gact;
155                 struct tc_mirred mirred;
156                 struct skbedit {
157                         struct tc_skbedit skbedit;
158                         uint16_t queue;
159                 } skbedit;
160                 struct bpf {
161                         struct tc_act_bpf bpf;
162                         int bpf_fd;
163                         const char *annotation;
164                 } bpf;
165         };
166 };
167
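/*
 * For illustration, a drop action is expressed by filling such a struct as
 *
 *	struct action_data adata = {
 *		.id = "gact",
 *		.gact = { .action = TC_ACT_SHOT },
 *	};
 *
 * and handing it to add_actions(), which serializes it as netlink attributes
 * (see priv_flow_process() below).
 */
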
168 static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
169 static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
170 static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
171 static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
172 static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
173 static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
174 static int
175 tap_flow_validate(struct rte_eth_dev *dev,
176                   const struct rte_flow_attr *attr,
177                   const struct rte_flow_item items[],
178                   const struct rte_flow_action actions[],
179                   struct rte_flow_error *error);
180
181 static struct rte_flow *
182 tap_flow_create(struct rte_eth_dev *dev,
183                 const struct rte_flow_attr *attr,
184                 const struct rte_flow_item items[],
185                 const struct rte_flow_action actions[],
186                 struct rte_flow_error *error);
187
188 static void
189 tap_flow_free(struct pmd_internals *pmd,
190         struct rte_flow *flow);
191
192 static int
193 tap_flow_destroy(struct rte_eth_dev *dev,
194                  struct rte_flow *flow,
195                  struct rte_flow_error *error);
196
197 static int
198 tap_flow_isolate(struct rte_eth_dev *dev,
199                  int set,
200                  struct rte_flow_error *error);
201
202 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx);
203 static int rss_enable(struct pmd_internals *pmd,
204                         const struct rte_flow_attr *attr,
205                         struct rte_flow_error *error);
206 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
207                         const struct rte_flow_action_rss *rss,
208                         struct rte_flow_error *error);
209
210 static const struct rte_flow_ops tap_flow_ops = {
211         .validate = tap_flow_validate,
212         .create = tap_flow_create,
213         .destroy = tap_flow_destroy,
214         .flush = tap_flow_flush,
215         .isolate = tap_flow_isolate,
216 };
217
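/*
 * This table is what the PMD hands back to the generic rte_flow layer, so
 * that e.g. rte_flow_create() on a tap port ends up in tap_flow_create()
 * below.
 */
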
218 /* Static initializer for items. */
219 #define ITEMS(...) \
220         (const enum rte_flow_item_type []){ \
221                 __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
222         }
223
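/*
 * For example, ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4)
 * expands to the anonymous array
 * { RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4, RTE_FLOW_ITEM_TYPE_END }.
 */
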
224 /* Structure to generate a simple graph of layers supported by the NIC. */
225 struct tap_flow_items {
226         /* Bit-mask corresponding to what is supported for this item. */
227         const void *mask;
228         const unsigned int mask_sz; /* Bit-mask size in bytes. */
229         /*
230          * Default bit-mask to use when none is provided along with the
231          * item.
232          */
233         const void *default_mask;
234         /**
235          * Conversion function from rte_flow to netlink attributes.
236          *
237          * @param item
238          *   rte_flow item to convert.
239          * @param data
240          *   Internal structure to store the conversion.
241          *
242          * @return
243          *   0 on success, negative value otherwise.
244          */
245         int (*convert)(const struct rte_flow_item *item, void *data);
246         /** List of possible following items.  */
247         const enum rte_flow_item_type *const items;
248 };
249
250 /* Graph of supported items and associated actions. */
251 static const struct tap_flow_items tap_flow_items[] = {
252         [RTE_FLOW_ITEM_TYPE_END] = {
253                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
254         },
255         [RTE_FLOW_ITEM_TYPE_ETH] = {
256                 .items = ITEMS(
257                         RTE_FLOW_ITEM_TYPE_VLAN,
258                         RTE_FLOW_ITEM_TYPE_IPV4,
259                         RTE_FLOW_ITEM_TYPE_IPV6),
260                 .mask = &(const struct rte_flow_item_eth){
261                         .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
262                         .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
263                         .type = -1,
264                 },
265                 .mask_sz = sizeof(struct rte_flow_item_eth),
266                 .default_mask = &rte_flow_item_eth_mask,
267                 .convert = tap_flow_create_eth,
268         },
269         [RTE_FLOW_ITEM_TYPE_VLAN] = {
270                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
271                                RTE_FLOW_ITEM_TYPE_IPV6),
272                 .mask = &(const struct rte_flow_item_vlan){
273                         /* DEI matching is not supported */
274 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
275                         .tci = 0xffef,
276 #else
277                         .tci = 0xefff,
278 #endif
279                         .inner_type = -1,
280                 },
281                 .mask_sz = sizeof(struct rte_flow_item_vlan),
282                 .default_mask = &rte_flow_item_vlan_mask,
283                 .convert = tap_flow_create_vlan,
284         },
285         [RTE_FLOW_ITEM_TYPE_IPV4] = {
286                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
287                                RTE_FLOW_ITEM_TYPE_TCP),
288                 .mask = &(const struct rte_flow_item_ipv4){
289                         .hdr = {
290                                 .src_addr = -1,
291                                 .dst_addr = -1,
292                                 .next_proto_id = -1,
293                         },
294                 },
295                 .mask_sz = sizeof(struct rte_flow_item_ipv4),
296                 .default_mask = &rte_flow_item_ipv4_mask,
297                 .convert = tap_flow_create_ipv4,
298         },
299         [RTE_FLOW_ITEM_TYPE_IPV6] = {
300                 .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
301                                RTE_FLOW_ITEM_TYPE_TCP),
302                 .mask = &(const struct rte_flow_item_ipv6){
303                         .hdr = {
304                                 .src_addr = {
305                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
306                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
307                                 },
308                                 .dst_addr = {
309                                         "\xff\xff\xff\xff\xff\xff\xff\xff"
310                                         "\xff\xff\xff\xff\xff\xff\xff\xff",
311                                 },
312                                 .proto = -1,
313                         },
314                 },
315                 .mask_sz = sizeof(struct rte_flow_item_ipv6),
316                 .default_mask = &rte_flow_item_ipv6_mask,
317                 .convert = tap_flow_create_ipv6,
318         },
319         [RTE_FLOW_ITEM_TYPE_UDP] = {
320                 .mask = &(const struct rte_flow_item_udp){
321                         .hdr = {
322                                 .src_port = -1,
323                                 .dst_port = -1,
324                         },
325                 },
326                 .mask_sz = sizeof(struct rte_flow_item_udp),
327                 .default_mask = &rte_flow_item_udp_mask,
328                 .convert = tap_flow_create_udp,
329         },
330         [RTE_FLOW_ITEM_TYPE_TCP] = {
331                 .mask = &(const struct rte_flow_item_tcp){
332                         .hdr = {
333                                 .src_port = -1,
334                                 .dst_port = -1,
335                         },
336                 },
337                 .mask_sz = sizeof(struct rte_flow_item_tcp),
338                 .default_mask = &rte_flow_item_tcp_mask,
339                 .convert = tap_flow_create_tcp,
340         },
341 };
342
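/*
 * The graph above constrains which item sequences are accepted: for example
 * ETH -> VLAN -> IPV4 -> UDP is a valid pattern, while UDP directly after
 * ETH is rejected by priv_flow_process() because UDP is not listed among
 * the possible followers of ETH.
 */
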
343 /*
344  *                TC rules, by growing priority
345  *
346  *        Remote netdevice                  Tap netdevice
347  * +-------------+-------------+  +-------------+-------------+
348  * |   Ingress   |   Egress    |  |   Ingress   |   Egress    |
349  * |-------------|-------------|  |-------------|-------------|
350  * |             |  \       /  |  |             |  REMOTE TX  | prio 1
351  * |             |   \     /   |  |             |   \     /   | prio 2
352  * |  EXPLICIT   |    \   /    |  |  EXPLICIT   |    \   /    |   .
353  * |             |     \ /     |  |             |     \ /     |   .
354  * |    RULES    |      X      |  |    RULES    |      X      |   .
355  * |      .      |     / \     |  |      .      |     / \     |   .
356  * |      .      |    /   \    |  |      .      |    /   \    |   .
357  * |      .      |   /     \   |  |      .      |   /     \   |   .
358  * |      .      |  /       \  |  |      .      |  /       \  |   .
359  *
360  *      ....           ....           ....           ....
361  *
362  * |      .      |  \       /  |  |      .      |  \       /  |   .
363  * |      .      |   \     /   |  |      .      |   \     /   |   .
364  * |             |    \   /    |  |             |    \   /    |
365  * |  LOCAL_MAC  |     \ /     |  |    \   /    |     \ /     | last prio - 5
366  * |   PROMISC   |      X      |  |     \ /     |      X      | last prio - 4
367  * |   ALLMULTI  |     / \     |  |      X      |     / \     | last prio - 3
368  * |  BROADCAST  |    /   \    |  |     / \     |    /   \    | last prio - 2
369  * | BROADCASTV6 |   /     \   |  |    /   \    |   /     \   | last prio - 1
370  * |     xx      |  /       \  |  |   ISOLATE   |  /       \  | last prio
371  * +-------------+-------------+  +-------------+-------------+
372  *
373  * The implicit flow rules are stored in a list, with the last two mandatorily
374  * being the ISOLATE and REMOTE_TX rules, e.g.:
375  *
376  * LOCAL_MAC -> BROADCAST -> BROADCASTV6 -> REMOTE_TX -> ISOLATE -> NULL
377  *
378  * That enables tap_flow_isolate() to remove implicit rules by popping the list
379  * head and removing each rule as long as it applies to the remote netdevice.
380  * The implicit rule for TX redirection is not removed, as isolation concerns
381  * only incoming traffic.
382  */
383
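/*
 * As a rough guide to the priorities pictured above: priv_flow_process()
 * composes the TC priority as
 *
 *	prio = (attr->group << GROUP_SHIFT) |
 *	       (attr->priority + RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
 *
 * while the implicit rules below use group MAX_GROUP and priority
 * PRIORITY_MASK - <rule index>, which, as pictured above, keeps them at the
 * low-precedence end of the table (in TC, a higher priority value is
 * matched later).
 */
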
384 static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
385         [TAP_REMOTE_LOCAL_MAC] = {
386                 .attr = {
387                         .group = MAX_GROUP,
388                         .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
389                         .ingress = 1,
390                 },
391                 .items[0] = {
392                         .type = RTE_FLOW_ITEM_TYPE_ETH,
393                         .mask =  &(const struct rte_flow_item_eth){
394                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
395                         },
396                 },
397                 .items[1] = {
398                         .type = RTE_FLOW_ITEM_TYPE_END,
399                 },
400                 .mirred = TCA_EGRESS_REDIR,
401         },
402         [TAP_REMOTE_BROADCAST] = {
403                 .attr = {
404                         .group = MAX_GROUP,
405                         .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
406                         .ingress = 1,
407                 },
408                 .items[0] = {
409                         .type = RTE_FLOW_ITEM_TYPE_ETH,
410                         .mask =  &(const struct rte_flow_item_eth){
411                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
412                         },
413                         .spec = &(const struct rte_flow_item_eth){
414                                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
415                         },
416                 },
417                 .items[1] = {
418                         .type = RTE_FLOW_ITEM_TYPE_END,
419                 },
420                 .mirred = TCA_EGRESS_MIRROR,
421         },
422         [TAP_REMOTE_BROADCASTV6] = {
423                 .attr = {
424                         .group = MAX_GROUP,
425                         .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
426                         .ingress = 1,
427                 },
428                 .items[0] = {
429                         .type = RTE_FLOW_ITEM_TYPE_ETH,
430                         .mask =  &(const struct rte_flow_item_eth){
431                                 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
432                         },
433                         .spec = &(const struct rte_flow_item_eth){
434                                 .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
435                         },
436                 },
437                 .items[1] = {
438                         .type = RTE_FLOW_ITEM_TYPE_END,
439                 },
440                 .mirred = TCA_EGRESS_MIRROR,
441         },
442         [TAP_REMOTE_PROMISC] = {
443                 .attr = {
444                         .group = MAX_GROUP,
445                         .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
446                         .ingress = 1,
447                 },
448                 .items[0] = {
449                         .type = RTE_FLOW_ITEM_TYPE_VOID,
450                 },
451                 .items[1] = {
452                         .type = RTE_FLOW_ITEM_TYPE_END,
453                 },
454                 .mirred = TCA_EGRESS_MIRROR,
455         },
456         [TAP_REMOTE_ALLMULTI] = {
457                 .attr = {
458                         .group = MAX_GROUP,
459                         .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
460                         .ingress = 1,
461                 },
462                 .items[0] = {
463                         .type = RTE_FLOW_ITEM_TYPE_ETH,
464                         .mask =  &(const struct rte_flow_item_eth){
465                                 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
466                         },
467                         .spec = &(const struct rte_flow_item_eth){
468                                 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
469                         },
470                 },
471                 .items[1] = {
472                         .type = RTE_FLOW_ITEM_TYPE_END,
473                 },
474                 .mirred = TCA_EGRESS_MIRROR,
475         },
476         [TAP_REMOTE_TX] = {
477                 .attr = {
478                         .group = 0,
479                         .priority = TAP_REMOTE_TX,
480                         .egress = 1,
481                 },
482                 .items[0] = {
483                         .type = RTE_FLOW_ITEM_TYPE_VOID,
484                 },
485                 .items[1] = {
486                         .type = RTE_FLOW_ITEM_TYPE_END,
487                 },
488                 .mirred = TCA_EGRESS_MIRROR,
489         },
490         [TAP_ISOLATE] = {
491                 .attr = {
492                         .group = MAX_GROUP,
493                         .priority = PRIORITY_MASK - TAP_ISOLATE,
494                         .ingress = 1,
495                 },
496                 .items[0] = {
497                         .type = RTE_FLOW_ITEM_TYPE_VOID,
498                 },
499                 .items[1] = {
500                         .type = RTE_FLOW_ITEM_TYPE_END,
501                 },
502         },
503 };
504
505 /**
506  * Make as many checks as possible on an Ethernet item, and if a flow is
507  * provided, fill it appropriately with Ethernet info.
508  *
509  * @param[in] item
510  *   Item specification.
511  * @param[in, out] data
512  *   Additional data structure to tell next layers we've been here.
513  *
514  * @return
515  *   0 if checks are alright, -1 otherwise.
516  */
517 static int
518 tap_flow_create_eth(const struct rte_flow_item *item, void *data)
519 {
520         struct convert_data *info = (struct convert_data *)data;
521         const struct rte_flow_item_eth *spec = item->spec;
522         const struct rte_flow_item_eth *mask = item->mask;
523         struct rte_flow *flow = info->flow;
524         struct nlmsg *msg;
525
526         /* use default mask if none provided */
527         if (!mask)
528                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
529         /* TC does not support eth_type masking. Only accept if exact match. */
530         if (mask->type && mask->type != 0xffff)
531                 return -1;
532         if (!spec)
533                 return 0;
534         /* store eth_type for consistency if ipv4/6 pattern item comes next */
535         if (spec->type & mask->type)
536                 info->eth_type = spec->type;
537         if (!flow)
538                 return 0;
539         msg = &flow->msg;
540         if (!is_zero_ether_addr(&mask->dst)) {
541                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
542                            &spec->dst.addr_bytes);
543                 tap_nlattr_add(&msg->nh,
544                            TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
545                            &mask->dst.addr_bytes);
546         }
547         if (!is_zero_ether_addr(&mask->src)) {
548                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
549                            &spec->src.addr_bytes);
550                 tap_nlattr_add(&msg->nh,
551                            TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
552                            &mask->src.addr_bytes);
553         }
554         return 0;
555 }
556
557 /**
558  * Make as many checks as possible on a VLAN item, and if a flow is provided,
559  * fill it appropriately with VLAN info.
560  *
561  * @param[in] item
562  *   Item specification.
563  * @param[in, out] data
564  *   Additional data structure to tell next layers we've been here.
565  *
566  * @return
567  *   0 if checks are alright, -1 otherwise.
568  */
569 static int
570 tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
571 {
572         struct convert_data *info = (struct convert_data *)data;
573         const struct rte_flow_item_vlan *spec = item->spec;
574         const struct rte_flow_item_vlan *mask = item->mask;
575         struct rte_flow *flow = info->flow;
576         struct nlmsg *msg;
577
578         /* use default mask if none provided */
579         if (!mask)
580                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
581         /* Outer TPID cannot be matched. */
582         if (info->eth_type)
583                 return -1;
584         /* Double-tagging not supported. */
585         if (info->vlan)
586                 return -1;
587         info->vlan = 1;
588         if (mask->inner_type) {
589                 /* TC does not support partial eth_type masking */
590                 if (mask->inner_type != RTE_BE16(0xffff))
591                         return -1;
592                 info->eth_type = spec->inner_type;
593         }
594         if (!flow)
595                 return 0;
596         msg = &flow->msg;
597         msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
598 #define VLAN_PRIO(tci) ((tci) >> 13)
599 #define VLAN_ID(tci) ((tci) & 0xfff)
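	/* e.g. a TCI of 0x600a gives VLAN_PRIO() == 3 and VLAN_ID() == 10 */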
600         if (!spec)
601                 return 0;
602         if (spec->tci) {
603                 uint16_t tci = ntohs(spec->tci) & mask->tci;
604                 uint16_t prio = VLAN_PRIO(tci);
605                 uint16_t vid = VLAN_ID(tci);
606
607                 if (prio)
608                         tap_nlattr_add8(&msg->nh,
609                                         TCA_FLOWER_KEY_VLAN_PRIO, prio);
610                 if (vid)
611                         tap_nlattr_add16(&msg->nh,
612                                          TCA_FLOWER_KEY_VLAN_ID, vid);
613         }
614         return 0;
615 }
616
617 /**
618  * Make as many checks as possible on an IPv4 item, and if a flow is provided,
619  * fill it appropriately with IPv4 info.
620  *
621  * @param[in] item
622  *   Item specification.
623  * @param[in, out] data
624  *   Additional data structure to tell next layers we've been here.
625  *
626  * @return
627  *   0 if checks are alright, -1 otherwise.
628  */
629 static int
630 tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
631 {
632         struct convert_data *info = (struct convert_data *)data;
633         const struct rte_flow_item_ipv4 *spec = item->spec;
634         const struct rte_flow_item_ipv4 *mask = item->mask;
635         struct rte_flow *flow = info->flow;
636         struct nlmsg *msg;
637
638         /* use default mask if none provided */
639         if (!mask)
640                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
641         /* check that previous eth type is compatible with ipv4 */
642         if (info->eth_type && info->eth_type != htons(ETH_P_IP))
643                 return -1;
644         /* store ip_proto for consistency if udp/tcp pattern item comes next */
645         if (spec)
646                 info->ip_proto = spec->hdr.next_proto_id;
647         if (!flow)
648                 return 0;
649         msg = &flow->msg;
650         if (!info->eth_type)
651                 info->eth_type = htons(ETH_P_IP);
652         if (!spec)
653                 return 0;
654         if (mask->hdr.dst_addr) {
655                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
656                              spec->hdr.dst_addr);
657                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
658                              mask->hdr.dst_addr);
659         }
660         if (mask->hdr.src_addr) {
661                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
662                              spec->hdr.src_addr);
663                 tap_nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
664                              mask->hdr.src_addr);
665         }
666         if (spec->hdr.next_proto_id)
667                 tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
668                             spec->hdr.next_proto_id);
669         return 0;
670 }
671
672 /**
673  * Make as many checks as possible on an IPv6 item, and if a flow is provided,
674  * fill it appropriately with IPv6 info.
675  *
676  * @param[in] item
677  *   Item specification.
678  * @param[in, out] data
679  *   Additional data structure to tell next layers we've been here.
680  *
681  * @return
682  *   0 if checks are alright, -1 otherwise.
683  */
684 static int
685 tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
686 {
687         struct convert_data *info = (struct convert_data *)data;
688         const struct rte_flow_item_ipv6 *spec = item->spec;
689         const struct rte_flow_item_ipv6 *mask = item->mask;
690         struct rte_flow *flow = info->flow;
691         uint8_t empty_addr[16] = { 0 };
692         struct nlmsg *msg;
693
694         /* use default mask if none provided */
695         if (!mask)
696                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
697         /* check that previous eth type is compatible with ipv6 */
698         if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
699                 return -1;
700         /* store ip_proto for consistency if udp/tcp pattern item comes next */
701         if (spec)
702                 info->ip_proto = spec->hdr.proto;
703         if (!flow)
704                 return 0;
705         msg = &flow->msg;
706         if (!info->eth_type)
707                 info->eth_type = htons(ETH_P_IPV6);
708         if (!spec)
709                 return 0;
710         if (memcmp(mask->hdr.dst_addr, empty_addr, 16)) {
711                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
712                            sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
713                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
714                            sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
715         }
716         if (memcmp(mask->hdr.src_addr, empty_addr, 16)) {
717                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
718                            sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
719                 tap_nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
720                            sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
721         }
722         if (spec->hdr.proto)
723                 tap_nlattr_add8(&msg->nh,
724                                 TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
725         return 0;
726 }
727
728 /**
729  * Make as many checks as possible on a UDP item, and if a flow is provided,
730  * fill it appropriately with UDP info.
731  *
732  * @param[in] item
733  *   Item specification.
734  * @param[in, out] data
735  *   Additional data structure to tell next layers we've been here.
736  *
737  * @return
738  *   0 if checks are alright, -1 otherwise.
739  */
740 static int
741 tap_flow_create_udp(const struct rte_flow_item *item, void *data)
742 {
743         struct convert_data *info = (struct convert_data *)data;
744         const struct rte_flow_item_udp *spec = item->spec;
745         const struct rte_flow_item_udp *mask = item->mask;
746         struct rte_flow *flow = info->flow;
747         struct nlmsg *msg;
748
749         /* use default mask if none provided */
750         if (!mask)
751                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
752         /* check that previous ip_proto is compatible with udp */
753         if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
754                 return -1;
755         /* TC does not support UDP port masking. Only accept if exact match. */
756         if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
757             (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
758                 return -1;
759         if (!flow)
760                 return 0;
761         msg = &flow->msg;
762         tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
763         if (!spec)
764                 return 0;
765         if (mask->hdr.dst_port)
766                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
767                              spec->hdr.dst_port);
768         if (mask->hdr.src_port)
769                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
770                              spec->hdr.src_port);
771         return 0;
772 }
773
774 /**
775  * Make as many checks as possible on a TCP item, and if a flow is provided,
776  * fill it appropriately with TCP info.
777  *
778  * @param[in] item
779  *   Item specification.
780  * @param[in, out] data
781  *   Additional data structure to tell next layers we've been here.
782  *
783  * @return
784  *   0 if checks are alright, -1 otherwise.
785  */
786 static int
787 tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
788 {
789         struct convert_data *info = (struct convert_data *)data;
790         const struct rte_flow_item_tcp *spec = item->spec;
791         const struct rte_flow_item_tcp *mask = item->mask;
792         struct rte_flow *flow = info->flow;
793         struct nlmsg *msg;
794
795         /* use default mask if none provided */
796         if (!mask)
797                 mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
798         /* check that previous ip_proto is compatible with tcp */
799         if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
800                 return -1;
801         /* TC does not support TCP port masking. Only accept if exact match. */
802         if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
803             (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
804                 return -1;
805         if (!flow)
806                 return 0;
807         msg = &flow->msg;
808         tap_nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
809         if (!spec)
810                 return 0;
811         if (mask->hdr.dst_port)
812                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
813                              spec->hdr.dst_port);
814         if (mask->hdr.src_port)
815                 tap_nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
816                              spec->hdr.src_port);
817         return 0;
818 }
819
820 /**
821  * Check support for a given item.
822  *
823  * @param[in] item
824  *   Item specification.
825  * @param size
826  *   Bit-Mask size in bytes.
827  * @param[in] supported_mask
828  *   Bit-mask covering supported fields to compare with spec, last and mask in
829  *   \item.
830  * @param[in] default_mask
831  *   Bit-mask to use by default if none is provided in \item.
832  *
833  * @return
834  *   0 on success.
835  */
836 static int
837 tap_flow_item_validate(const struct rte_flow_item *item,
838                        unsigned int size,
839                        const uint8_t *supported_mask,
840                        const uint8_t *default_mask)
841 {
842         int ret = 0;
843
844         /* An empty layer is allowed, as long as all fields are NULL */
845         if (!item->spec && (item->mask || item->last))
846                 return -1;
847         /* Is the item spec compatible with what the NIC supports? */
848         if (item->spec && !item->mask) {
849                 unsigned int i;
850                 const uint8_t *spec = item->spec;
851
852                 for (i = 0; i < size; ++i)
853                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
854                                 return -1;
855                 /* Is the default mask compatible with what the NIC supports? */
856                 for (i = 0; i < size; i++)
857                         if ((default_mask[i] | supported_mask[i]) !=
858                             supported_mask[i])
859                                 return -1;
860         }
861         /* Is the item last compatible with what the NIC supports? */
862         if (item->last && !item->mask) {
863                 unsigned int i;
864                 const uint8_t *spec = item->last;
865
866                 for (i = 0; i < size; ++i)
867                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
868                                 return -1;
869         }
870         /* Is the item mask compatible with what the NIC supports? */
871         if (item->mask) {
872                 unsigned int i;
873                 const uint8_t *spec = item->mask;
874
875                 for (i = 0; i < size; ++i)
876                         if ((spec[i] | supported_mask[i]) != supported_mask[i])
877                                 return -1;
878         }
879         /**
880          * Once masked, are item spec and item last equal?
881          * TC does not support ranges, so anything else is invalid.
882          */
883         if (item->spec && item->last) {
884                 uint8_t spec[size];
885                 uint8_t last[size];
886                 const uint8_t *apply = default_mask;
887                 unsigned int i;
888
889                 if (item->mask)
890                         apply = item->mask;
891                 for (i = 0; i < size; ++i) {
892                         spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
893                         last[i] = ((const uint8_t *)item->last)[i] & apply[i];
894                 }
895                 ret = memcmp(spec, last, size);
896         }
897         return ret;
898 }
899
900 /**
901  * Configure the kernel with a TC action and its parameters.
902  * Handled actions: "gact", "mirred", "skbedit", "bpf".
903  *
904  * @param[in] flow
905  *   Pointer to rte flow containing the netlink message
906  *
907  * @param[in, out] act_index
908  *   Pointer to action sequence number in the TC command
909  *
910  * @param[in] adata
911  *  Pointer to struct holding the action parameters
912  *
913  * @return
914  *   -1 on failure, 0 on success
915  */
916 static int
917 add_action(struct rte_flow *flow, size_t *act_index, struct action_data *adata)
918 {
919         struct nlmsg *msg = &flow->msg;
920
921         if (tap_nlattr_nested_start(msg, (*act_index)++) < 0)
922                 return -1;
923
924         tap_nlattr_add(&msg->nh, TCA_ACT_KIND,
925                                 strlen(adata->id) + 1, adata->id);
926         if (tap_nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
927                 return -1;
928         if (strcmp("gact", adata->id) == 0) {
929                 tap_nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(adata->gact),
930                            &adata->gact);
931         } else if (strcmp("mirred", adata->id) == 0) {
932                 if (adata->mirred.eaction == TCA_EGRESS_MIRROR)
933                         adata->mirred.action = TC_ACT_PIPE;
934                 else /* REDIRECT */
935                         adata->mirred.action = TC_ACT_STOLEN;
936                 tap_nlattr_add(&msg->nh, TCA_MIRRED_PARMS,
937                            sizeof(adata->mirred),
938                            &adata->mirred);
939         } else if (strcmp("skbedit", adata->id) == 0) {
940                 tap_nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS,
941                            sizeof(adata->skbedit.skbedit),
942                            &adata->skbedit.skbedit);
943                 tap_nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING,
944                              adata->skbedit.queue);
945         } else if (strcmp("bpf", adata->id) == 0) {
946                 tap_nlattr_add32(&msg->nh, TCA_ACT_BPF_FD, adata->bpf.bpf_fd);
947                 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_NAME,
948                            strlen(adata->bpf.annotation) + 1,
949                            adata->bpf.annotation);
950                 tap_nlattr_add(&msg->nh, TCA_ACT_BPF_PARMS,
951                            sizeof(adata->bpf.bpf),
952                            &adata->bpf.bpf);
953         } else {
954                 return -1;
955         }
956         tap_nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
957         tap_nlattr_nested_finish(msg); /* nested act_index */
958         return 0;
959 }
960
961 /**
962  * Helper function to send a series of TC actions to the kernel
963  *
964  * @param[in] flow
965  *   Pointer to rte flow containing the netlink message
966  *
967  * @param[in] nb_actions
968  *   Number of actions in an array of action structs
969  *
970  * @param[in] data
971  *   Pointer to an array of action structs
972  *
973  * @param[in] classifier_action
974  *   The classifier on behalf of which the actions are configured
975  *
976  * @return
977  *   -1 on failure, 0 on success
978  */
979 static int
980 add_actions(struct rte_flow *flow, int nb_actions, struct action_data *data,
981             int classifier_action)
982 {
983         struct nlmsg *msg = &flow->msg;
984         size_t act_index = 1;
985         int i;
986
987         if (tap_nlattr_nested_start(msg, classifier_action) < 0)
988                 return -1;
989         for (i = 0; i < nb_actions; i++)
990                 if (add_action(flow, &act_index, data + i) < 0)
991                         return -1;
992         tap_nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
993         return 0;
994 }
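
/*
 * For instance, the QUEUE action handling in priv_flow_process() below
 * builds a single "skbedit" action_data entry and serializes it with
 * add_actions(flow, 1, &adata, TCA_FLOWER_ACT).
 */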
995
996 /**
997  * Validate a flow supported by TC.
998  * If flow param is not NULL, then also fill the netlink message inside.
999  *
1000  * @param pmd
1001  *   Pointer to private structure.
1002  * @param[in] attr
1003  *   Flow rule attributes.
1004  * @param[in] pattern
1005  *   Pattern specification (list terminated by the END pattern item).
1006  * @param[in] actions
1007  *   Associated actions (list terminated by the END action).
1008  * @param[out] error
1009  *   Perform verbose error reporting if not NULL.
1010  * @param[in, out] flow
1011  *   Flow structure to update.
1012  * @param[in] mirred
1013  *   If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
1014  *   redirection to the tap netdevice, and the TC rule will be configured
1015  *   on the remote netdevice in pmd.
1016  *   If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
1017  *   mirroring to the tap netdevice, and the TC rule will be configured
1018  *   on the remote netdevice in pmd. Matching packets will thus be duplicated.
1019  *   If set to 0, the standard behavior is to be used: set correct actions for
1020  *   the TC rule, and apply it on the tap netdevice.
1021  *
1022  * @return
1023  *   0 on success, a negative errno value otherwise and rte_errno is set.
1024  */
1025 static int
1026 priv_flow_process(struct pmd_internals *pmd,
1027                   const struct rte_flow_attr *attr,
1028                   const struct rte_flow_item items[],
1029                   const struct rte_flow_action actions[],
1030                   struct rte_flow_error *error,
1031                   struct rte_flow *flow,
1032                   int mirred)
1033 {
1034         const struct tap_flow_items *cur_item = tap_flow_items;
1035         struct convert_data data = {
1036                 .eth_type = 0,
1037                 .ip_proto = 0,
1038                 .flow = flow,
1039         };
1040         int action = 0; /* Only one action authorized for now */
1041
1042         if (attr->transfer) {
1043                 rte_flow_error_set(
1044                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
1045                         NULL, "transfer is not supported");
1046                 return -rte_errno;
1047         }
1048         if (attr->group > MAX_GROUP) {
1049                 rte_flow_error_set(
1050                         error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
1051                         NULL, "group value too big: cannot exceed 15");
1052                 return -rte_errno;
1053         }
1054         if (attr->priority > MAX_PRIORITY) {
1055                 rte_flow_error_set(
1056                         error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1057                         NULL, "priority value too big");
1058                 return -rte_errno;
1059         } else if (flow) {
1060                 uint16_t group = attr->group << GROUP_SHIFT;
1061                 uint16_t prio = group | (attr->priority +
1062                                 RSS_PRIORITY_OFFSET + PRIORITY_OFFSET);
1063                 flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
1064                                                  flow->msg.t.tcm_info);
1065         }
1066         if (flow) {
1067                 if (mirred) {
1068                         /*
1069                          * If attr->ingress, the rule applies on remote ingress
1070                          * to match incoming packets.
1071                          * If attr->egress, the rule applies on tap ingress (as
1072                          * seen from the kernel) to deal with packets going out
1073                          * from the DPDK app.
1074                          */
1075                         flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
1076                 } else {
1077                         /* Standard rule on tap egress (kernel standpoint). */
1078                         flow->msg.t.tcm_parent =
1079                                 TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1080                 }
1081                 /* use flower filter type */
1082                 tap_nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
1083                 if (tap_nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
1084                         goto exit_item_not_supported;
1085         }
1086         for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
1087                 const struct tap_flow_items *token = NULL;
1088                 unsigned int i;
1089                 int err = 0;
1090
1091                 if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
1092                         continue;
1093                 for (i = 0;
1094                      cur_item->items &&
1095                      cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
1096                      ++i) {
1097                         if (cur_item->items[i] == items->type) {
1098                                 token = &tap_flow_items[items->type];
1099                                 break;
1100                         }
1101                 }
1102                 if (!token)
1103                         goto exit_item_not_supported;
1104                 cur_item = token;
1105                 err = tap_flow_item_validate(
1106                         items, cur_item->mask_sz,
1107                         (const uint8_t *)cur_item->mask,
1108                         (const uint8_t *)cur_item->default_mask);
1109                 if (err)
1110                         goto exit_item_not_supported;
1111                 if (flow && cur_item->convert) {
1112                         err = cur_item->convert(items, &data);
1113                         if (err)
1114                                 goto exit_item_not_supported;
1115                 }
1116         }
1117         if (flow) {
1118                 if (data.vlan) {
1119                         tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1120                                      htons(ETH_P_8021Q));
1121                         tap_nlattr_add16(&flow->msg.nh,
1122                                      TCA_FLOWER_KEY_VLAN_ETH_TYPE,
1123                                      data.eth_type ?
1124                                      data.eth_type : htons(ETH_P_ALL));
1125                 } else if (data.eth_type) {
1126                         tap_nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
1127                                      data.eth_type);
1128                 }
1129         }
1130         if (mirred && flow) {
1131                 struct action_data adata = {
1132                         .id = "mirred",
1133                         .mirred = {
1134                                 .eaction = mirred,
1135                         },
1136                 };
1137
1138                 /*
1139                  * If attr->egress && mirred, then this is a special
1140                  * case where the rule must be applied on the tap, to
1141                  * redirect packets coming from the DPDK App, out
1142                  * through the remote netdevice.
1143                  */
1144                 adata.mirred.ifindex = attr->ingress ? pmd->if_index :
1145                         pmd->remote_if_index;
1146                 if (mirred == TCA_EGRESS_MIRROR)
1147                         adata.mirred.action = TC_ACT_PIPE;
1148                 else
1149                         adata.mirred.action = TC_ACT_STOLEN;
1150                 if (add_actions(flow, 1, &adata, TCA_FLOWER_ACT) < 0)
1151                         goto exit_action_not_supported;
1152                 else
1153                         goto end;
1154         }
1155 actions:
1156         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
1157                 int err = 0;
1158
1159                 if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
1160                         continue;
1161                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
1162                         if (action)
1163                                 goto exit_action_not_supported;
1164                         action = 1;
1165                         if (flow) {
1166                                 struct action_data adata = {
1167                                         .id = "gact",
1168                                         .gact = {
1169                                                 .action = TC_ACT_SHOT,
1170                                         },
1171                                 };
1172
1173                                 err = add_actions(flow, 1, &adata,
1174                                                   TCA_FLOWER_ACT);
1175                         }
1176                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
1177                         if (action)
1178                                 goto exit_action_not_supported;
1179                         action = 1;
1180                         if (flow) {
1181                                 struct action_data adata = {
1182                                         .id = "gact",
1183                                         .gact = {
1184                                                 /* continue */
1185                                                 .action = TC_ACT_UNSPEC,
1186                                         },
1187                                 };
1188
1189                                 err = add_actions(flow, 1, &adata,
1190                                                   TCA_FLOWER_ACT);
1191                         }
1192                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
1193                         const struct rte_flow_action_queue *queue =
1194                                 (const struct rte_flow_action_queue *)
1195                                 actions->conf;
1196
1197                         if (action)
1198                                 goto exit_action_not_supported;
1199                         action = 1;
1200                         if (!queue ||
1201                             (queue->index > pmd->dev->data->nb_rx_queues - 1))
1202                                 goto exit_action_not_supported;
1203                         if (flow) {
1204                                 struct action_data adata = {
1205                                         .id = "skbedit",
1206                                         .skbedit = {
1207                                                 .skbedit = {
1208                                                         .action = TC_ACT_PIPE,
1209                                                 },
1210                                                 .queue = queue->index,
1211                                         },
1212                                 };
1213
1214                                 err = add_actions(flow, 1, &adata,
1215                                         TCA_FLOWER_ACT);
1216                         }
1217                 } else if (actions->type == RTE_FLOW_ACTION_TYPE_RSS) {
1218                         const struct rte_flow_action_rss *rss =
1219                                 (const struct rte_flow_action_rss *)
1220                                 actions->conf;
1221
1222                         if (action++)
1223                                 goto exit_action_not_supported;
1224
1225                         if (!pmd->rss_enabled) {
1226                                 err = rss_enable(pmd, attr, error);
1227                                 if (err)
1228                                         goto exit_action_not_supported;
1229                         }
1230                         if (flow)
1231                                 err = rss_add_actions(flow, pmd, rss, error);
1232                 } else {
1233                         goto exit_action_not_supported;
1234                 }
1235                 if (err)
1236                         goto exit_action_not_supported;
1237         }
1238         /* When fate is unknown, drop traffic. */
1239         if (!action) {
1240                 static const struct rte_flow_action drop[] = {
1241                         { .type = RTE_FLOW_ACTION_TYPE_DROP, },
1242                         { .type = RTE_FLOW_ACTION_TYPE_END, },
1243                 };
1244
1245                 actions = drop;
1246                 goto actions;
1247         }
1248 end:
1249         if (flow)
1250                 tap_nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
1251         return 0;
1252 exit_item_not_supported:
1253         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
1254                            items, "item not supported");
1255         return -rte_errno;
1256 exit_action_not_supported:
1257         rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
1258                            actions, "action not supported");
1259         return -rte_errno;
1260 }
1261
1262
1263
1264 /**
1265  * Validate a flow.
1266  *
1267  * @see rte_flow_validate()
1268  * @see rte_flow_ops
1269  */
1270 static int
1271 tap_flow_validate(struct rte_eth_dev *dev,
1272                   const struct rte_flow_attr *attr,
1273                   const struct rte_flow_item items[],
1274                   const struct rte_flow_action actions[],
1275                   struct rte_flow_error *error)
1276 {
1277         struct pmd_internals *pmd = dev->data->dev_private;
1278
1279         return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
1280 }
1281
1282 /**
1283  * Set a unique handle in a flow.
1284  *
1285  * The kernel supports TC rules with equal priority, as long as they use the
1286  * same matching fields (e.g.: dst mac and ipv4) with different values (and
1287  * full mask to ensure no collision is possible).
1288  * In those rules, the handle (uint32_t) is the part that would identify
1289  * specifically each rule.
1290  *
1291  * On 32-bit architectures, the handle can simply be the flow's pointer address.
1292  * On 64-bit architectures, we rely on jhash(flow) to find a (sufficiently)
1293  * unique handle.
1294  *
1295  * @param[in, out] flow
1296  *   The flow that needs its handle set.
1297  */
1298 static void
1299 tap_flow_set_handle(struct rte_flow *flow)
1300 {
1301         uint32_t handle = 0;
1302
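	/* sizeof(flow) is the size of a pointer: 8 bytes on 64-bit targets. */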
1303         if (sizeof(flow) > 4)
1304                 handle = rte_jhash(&flow, sizeof(flow), 1);
1305         else
1306                 handle = (uintptr_t)flow;
1307         /* must be at least 1 to avoid letting the kernel choose one for us */
1308         if (!handle)
1309                 handle = 1;
1310         flow->msg.t.tcm_handle = handle;
1311 }
1312
1313 /**
1314  * Free the flow's opened file descriptors and allocated memory.
1315  * @param[in] pmd
1316  *   Pointer to private structure.
1317  * @param[in] flow
1318  *   Pointer to the flow to free.
1319  */
1320 static void
1321 tap_flow_free(struct pmd_internals *pmd, struct rte_flow *flow)
1322 {
1323         int i;
1324
1325         if (!flow)
1326                 return;
1327
1328         if (pmd->rss_enabled) {
1329                 /* Close flow BPF file descriptors */
1330                 for (i = 0; i < SEC_MAX; i++)
1331                         if (flow->bpf_fd[i] != 0) {
1332                                 close(flow->bpf_fd[i]);
1333                                 flow->bpf_fd[i] = 0;
1334                         }
1335
1336                 /* Release the map key for this RSS rule */
1337                 bpf_rss_key(KEY_CMD_RELEASE, &flow->key_idx);
1338                 flow->key_idx = 0;
1339         }
1340
1341         /* Free flow allocated memory */
1342         rte_free(flow);
1343 }
1344
1345 /**
1346  * Create a flow.
1347  *
1348  * @see rte_flow_create()
1349  * @see rte_flow_ops
1350  */
1351 static struct rte_flow *
1352 tap_flow_create(struct rte_eth_dev *dev,
1353                 const struct rte_flow_attr *attr,
1354                 const struct rte_flow_item items[],
1355                 const struct rte_flow_action actions[],
1356                 struct rte_flow_error *error)
1357 {
1358         struct pmd_internals *pmd = dev->data->dev_private;
1359         struct rte_flow *remote_flow = NULL;
1360         struct rte_flow *flow = NULL;
1361         struct nlmsg *msg = NULL;
1362         int err;
1363
1364         if (!pmd->if_index) {
1365                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1366                                    NULL,
1367                                    "can't create rule, ifindex not found");
1368                 goto fail;
1369         }
1370         /*
1371          * No rules configured through standard rte_flow should be set on the
1372          * priorities used by implicit rules.
1373          */
1374         if ((attr->group == MAX_GROUP) &&
1375             attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
1376                 rte_flow_error_set(
1377                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
1378                         NULL, "priority value too big");
1379                 goto fail;
1380         }
1381         flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1382         if (!flow) {
1383                 rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1384                                    NULL, "cannot allocate memory for rte_flow");
1385                 goto fail;
1386         }
1387         msg = &flow->msg;
1388         tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
1389                     NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
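        /*
         * tcm_info packs the TC filter priority in its upper 16 bits and the
         * matched protocol in its lower 16 bits; ETH_P_ALL means the rule is
         * not restricted to a single ethertype. The priority part is filled
         * in later from the flow attributes by priv_flow_process().
         */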
1390         msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1391         tap_flow_set_handle(flow);
1392         if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
1393                 goto fail;
1394         err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1395         if (err < 0) {
1396                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1397                                    NULL, "couldn't send request to kernel");
1398                 goto fail;
1399         }
1400         err = tap_nl_recv_ack(pmd->nlsk_fd);
1401         if (err < 0) {
1402                 TAP_LOG(ERR,
1403                         "Kernel refused TC filter rule creation (%d): %s",
1404                         errno, strerror(errno));
1405                 rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
1406                                    NULL,
1407                                    "overlapping rules or Kernel too old for flower support");
1408                 goto fail;
1409         }
1410         LIST_INSERT_HEAD(&pmd->flows, flow, next);
1411         /**
1412          * If a remote device is configured, a TC rule with identical items for
1413          * matching must be set on that device, with a single action: redirect
1414          * to the local pmd->if_index.
1415          */
1416         if (pmd->remote_if_index) {
1417                 remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1418                 if (!remote_flow) {
1419                         rte_flow_error_set(
1420                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1421                                 "cannot allocate memory for rte_flow");
1422                         goto fail;
1423                 }
1424                 msg = &remote_flow->msg;
1425                 /* set the rule if_index for the remote netdevice */
1426                 tc_init_msg(
1427                         msg, pmd->remote_if_index, RTM_NEWTFILTER,
1428                         NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1429                 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1430                 tap_flow_set_handle(remote_flow);
1431                 if (priv_flow_process(pmd, attr, items, NULL,
1432                                       error, remote_flow, TCA_EGRESS_REDIR)) {
1433                         rte_flow_error_set(
1434                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1435                                 NULL, "rte flow rule validation failed");
1436                         goto fail;
1437                 }
1438                 err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1439                 if (err < 0) {
1440                         rte_flow_error_set(
1441                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1442                                 NULL, "Failure sending nl request");
1443                         goto fail;
1444                 }
1445                 err = tap_nl_recv_ack(pmd->nlsk_fd);
1446                 if (err < 0) {
1447                         TAP_LOG(ERR,
1448                                 "Kernel refused TC filter rule creation (%d): %s",
1449                                 errno, strerror(errno));
1450                         rte_flow_error_set(
1451                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1452                                 NULL,
1453                                 "overlapping rules or Kernel too old for flower support");
1454                         goto fail;
1455                 }
1456                 flow->remote_flow = remote_flow;
1457         }
1458         return flow;
1459 fail:
1460         if (remote_flow)
1461                 rte_free(remote_flow);
1462         if (flow)
1463                 tap_flow_free(pmd, flow);
1464         return NULL;
1465 }
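/*
 * Illustrative only (not part of the driver): a minimal sketch of how an
 * application reaches tap_flow_create() through the generic rte_flow API,
 * assuming port_id is a started tap port and Rx queue 1 exists:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error flow_error;
 *	struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *					     actions, &flow_error);
 */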
1466
1467 /**
1468  * Destroy a flow using pointer to pmd_internal.
1469  *
1470  * @param[in, out] pmd
1471  *   Pointer to private structure.
1472  * @param[in] flow
1473  *   Pointer to the flow to destroy.
1474  * @param[in, out] error
1475  *   Pointer to the flow error handler
1476  *
1477  * @return 0 if the flow could be destroyed, -1 otherwise.
1478  */
1479 static int
1480 tap_flow_destroy_pmd(struct pmd_internals *pmd,
1481                      struct rte_flow *flow,
1482                      struct rte_flow_error *error)
1483 {
1484         struct rte_flow *remote_flow = flow->remote_flow;
1485         int ret = 0;
1486
1487         LIST_REMOVE(flow, next);
1488         flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1489         flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1490
1491         ret = tap_nl_send(pmd->nlsk_fd, &flow->msg.nh);
1492         if (ret < 0) {
1493                 rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1494                                    NULL, "couldn't send request to kernel");
1495                 goto end;
1496         }
1497         ret = tap_nl_recv_ack(pmd->nlsk_fd);
1498         /* If errno is ENOENT, the rule is already gone from the kernel. */
1499         if (ret < 0 && errno == ENOENT)
1500                 ret = 0;
1501         if (ret < 0) {
1502                 TAP_LOG(ERR,
1503                         "Kernel refused TC filter rule deletion (%d): %s",
1504                         errno, strerror(errno));
1505                 rte_flow_error_set(
1506                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1507                         "couldn't receive kernel ack to our request");
1508                 goto end;
1509         }
1510
1511         if (remote_flow) {
1512                 remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
1513                 remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;
1514
1515                 ret = tap_nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
1516                 if (ret < 0) {
1517                         rte_flow_error_set(
1518                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1519                                 NULL, "Failure sending nl request");
1520                         goto end;
1521                 }
1522                 ret = tap_nl_recv_ack(pmd->nlsk_fd);
1523                 if (ret < 0 && errno == ENOENT)
1524                         ret = 0;
1525                 if (ret < 0) {
1526                         TAP_LOG(ERR,
1527                                 "Kernel refused TC filter rule deletion (%d): %s",
1528                                 errno, strerror(errno));
1529                         rte_flow_error_set(
1530                                 error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
1531                                 NULL, "Failure trying to receive nl ack");
1532                         goto end;
1533                 }
1534         }
1535 end:
1536         if (remote_flow)
1537                 rte_free(remote_flow);
1538         tap_flow_free(pmd, flow);
1539         return ret;
1540 }
1541
1542 /**
1543  * Destroy a flow.
1544  *
1545  * @see rte_flow_destroy()
1546  * @see rte_flow_ops
1547  */
1548 static int
1549 tap_flow_destroy(struct rte_eth_dev *dev,
1550                  struct rte_flow *flow,
1551                  struct rte_flow_error *error)
1552 {
1553         struct pmd_internals *pmd = dev->data->dev_private;
1554
1555         return tap_flow_destroy_pmd(pmd, flow, error);
1556 }
1557
1558 /**
1559  * Enable/disable flow isolation.
1560  *
1561  * @see rte_flow_isolate()
1562  * @see rte_flow_ops
1563  */
1564 static int
1565 tap_flow_isolate(struct rte_eth_dev *dev,
1566                  int set,
1567                  struct rte_flow_error *error __rte_unused)
1568 {
1569         struct pmd_internals *pmd = dev->data->dev_private;
1570
1571         /* normalize 'set' variable to contain 0 or 1 values */
1572         if (set)
1573                 set = 1;
1574         /* if already in the right isolation mode - nothing to do */
1575         if ((set ^ pmd->flow_isolate) == 0)
1576                 return 0;
1577         /* mark the isolation mode for tap_flow_implicit_create() */
1578         pmd->flow_isolate = set;
1579         /*
1580          * If the netdevice is present, set up the appropriate flow rules now.
1581          * Otherwise they will be set when the netdevice is brought up (tun_alloc).
1582          */
1583         if (!pmd->rxq[0].fd)
1584                 return 0;
1585         if (set) {
1586                 struct rte_flow *remote_flow;
1587
1588                 while (1) {
1589                         remote_flow = LIST_FIRST(&pmd->implicit_flows);
1590                         if (!remote_flow)
1591                                 break;
1592                         /*
1593                          * Remove all implicit rules on the remote.
1594                          * Keep the local rule to redirect packets on TX.
1595                          * Also keep the last implicit local rule: ISOLATE.
1596                          */
1597                         if (remote_flow->msg.t.tcm_ifindex == pmd->if_index)
1598                                 break;
1599                         if (tap_flow_destroy_pmd(pmd, remote_flow, NULL) < 0)
1600                                 goto error;
1601                 }
1602                 /* Switch the TC rule according to pmd->flow_isolate */
1603                 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1604                         goto error;
1605         } else {
1606                 /* Switch the TC rule according to pmd->flow_isolate */
1607                 if (tap_flow_implicit_create(pmd, TAP_ISOLATE) == -1)
1608                         goto error;
1609                 if (!pmd->remote_if_index)
1610                         return 0;
1611                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_TX) < 0)
1612                         goto error;
1613                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_LOCAL_MAC) < 0)
1614                         goto error;
1615                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCAST) < 0)
1616                         goto error;
1617                 if (tap_flow_implicit_create(pmd, TAP_REMOTE_BROADCASTV6) < 0)
1618                         goto error;
1619                 if (dev->data->promiscuous &&
1620                     tap_flow_implicit_create(pmd, TAP_REMOTE_PROMISC) < 0)
1621                         goto error;
1622                 if (dev->data->all_multicast &&
1623                     tap_flow_implicit_create(pmd, TAP_REMOTE_ALLMULTI) < 0)
1624                         goto error;
1625         }
1626         return 0;
1627 error:
1628         pmd->flow_isolate = 0;
1629         return rte_flow_error_set(
1630                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1631                 "TC rule creation failed");
1632 }
1633
1634 /**
1635  * Destroy all flows.
1636  *
1637  * @see rte_flow_flush()
1638  * @see rte_flow_ops
1639  */
1640 int
1641 tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
1642 {
1643         struct pmd_internals *pmd = dev->data->dev_private;
1644         struct rte_flow *flow;
1645
1646         while (!LIST_EMPTY(&pmd->flows)) {
1647                 flow = LIST_FIRST(&pmd->flows);
1648                 if (tap_flow_destroy(dev, flow, error) < 0)
1649                         return -1;
1650         }
1651         return 0;
1652 }
1653
1654 /**
1655  * Add an implicit flow rule on the remote device to make sure traffic gets to
1656  * the tap netdevice from there.
1657  *
1658  * @param pmd
1659  *   Pointer to private structure.
1660  * @param[in] idx
1661  *   The idx in the implicit_rte_flows array specifying which rule to apply.
1662  *
1663  * @return -1 if the rule couldn't be applied, 0 otherwise.
1664  */
1665 int tap_flow_implicit_create(struct pmd_internals *pmd,
1666                              enum implicit_rule_index idx)
1667 {
1668         uint16_t flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE;
1669         struct rte_flow_action *actions = implicit_rte_flows[idx].actions;
1670         struct rte_flow_action isolate_actions[2] = {
1671                 [1] = {
1672                         .type = RTE_FLOW_ACTION_TYPE_END,
1673                 },
1674         };
1675         struct rte_flow_item *items = implicit_rte_flows[idx].items;
1676         struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
1677         struct rte_flow_item_eth eth_local = { .type = 0 };
1678         uint16_t if_index = pmd->remote_if_index;
1679         struct rte_flow *remote_flow = NULL;
1680         struct nlmsg *msg = NULL;
1681         int err = 0;
1682         struct rte_flow_item items_local[2] = {
1683                 [0] = {
1684                         .type = items[0].type,
1685                         .spec = &eth_local,
1686                         .mask = items[0].mask,
1687                 },
1688                 [1] = {
1689                         .type = items[1].type,
1690                 }
1691         };
1692
1693         remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1694         if (!remote_flow) {
1695                 TAP_LOG(ERR, "Cannot allocate memory for rte_flow");
1696                 goto fail;
1697         }
1698         msg = &remote_flow->msg;
1699         if (idx == TAP_REMOTE_TX) {
1700                 if_index = pmd->if_index;
1701         } else if (idx == TAP_ISOLATE) {
1702                 if_index = pmd->if_index;
1703                 /* Don't be exclusive for this rule; it can be changed later. */
1704                 flags = NLM_F_REQUEST | NLM_F_ACK | NLM_F_CREATE;
1705                 isolate_actions[0].type = pmd->flow_isolate ?
1706                         RTE_FLOW_ACTION_TYPE_DROP :
1707                         RTE_FLOW_ACTION_TYPE_PASSTHRU;
1708                 actions = isolate_actions;
1709         } else if (idx == TAP_REMOTE_LOCAL_MAC) {
1710                 /*
1711                  * eth addr couldn't be set in implicit_rte_flows[] as it is not
1712                  * known at compile time.
1713                  */
1714                 memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
1715                 items = items_local;
1716         }
1717         tc_init_msg(msg, if_index, RTM_NEWTFILTER, flags);
1718         msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1719         /*
1720          * The ISOLATE rule is always present and must have a static handle, as
1721          * the action is changed depending on whether the feature is enabled
1722          * (DROP) or disabled (PASSTHRU).
1723          * There is just one REMOTE_PROMISCUOUS rule in all cases. It should
1724          * have a static handle such that adding it twice will fail with EEXIST
1725          * with any kernel version. Remark: old kernels may falsely accept the
1726          * same REMOTE_PROMISCUOUS rules if they had different handles.
1727          */
1728         if (idx == TAP_ISOLATE)
1729                 remote_flow->msg.t.tcm_handle = ISOLATE_HANDLE;
1730         else if (idx == TAP_REMOTE_PROMISC)
1731                 remote_flow->msg.t.tcm_handle = REMOTE_PROMISCUOUS_HANDLE;
1732         else
1733                 tap_flow_set_handle(remote_flow);
1734         if (priv_flow_process(pmd, attr, items, actions, NULL,
1735                               remote_flow, implicit_rte_flows[idx].mirred)) {
1736                 TAP_LOG(ERR, "rte flow rule validation failed");
1737                 goto fail;
1738         }
1739         err = tap_nl_send(pmd->nlsk_fd, &msg->nh);
1740         if (err < 0) {
1741                 TAP_LOG(ERR, "Failure sending nl request");
1742                 goto fail;
1743         }
1744         err = tap_nl_recv_ack(pmd->nlsk_fd);
1745         if (err < 0) {
1746                 /* Silently ignore re-entering existing rule */
1747                 if (errno == EEXIST)
1748                         goto success;
1749                 TAP_LOG(ERR,
1750                         "Kernel refused TC filter rule creation (%d): %s",
1751                         errno, strerror(errno));
1752                 goto fail;
1753         }
1754         LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
1755 success:
1756         return 0;
1757 fail:
1758         if (remote_flow)
1759                 rte_free(remote_flow);
1760         return -1;
1761 }
1762
1763 /**
1764  * Remove specific implicit flow rule on the remote device.
1765  *
1766  * @param[in, out] pmd
1767  *   Pointer to private structure.
1768  * @param[in] idx
1769  *   The idx in the implicit_rte_flows array specifying which rule to remove.
1770  *
1771  * @return -1 if the implicit rule couldn't be removed, 0 otherwise.
1772  */
1773 int tap_flow_implicit_destroy(struct pmd_internals *pmd,
1774                               enum implicit_rule_index idx)
1775 {
1776         struct rte_flow *remote_flow;
1777         int cur_prio = -1;
1778         int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;
1779
1780         for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
1781              remote_flow;
1782              remote_flow = LIST_NEXT(remote_flow, next)) {
1783                 cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
1784                 if (cur_prio != idx_prio)
1785                         continue;
1786                 return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
1787         }
1788         return 0;
1789 }
1790
1791 /**
1792  * Destroy all implicit flows.
1793  *
1794  * @see rte_flow_flush()
1795  */
1796 int
1797 tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
1798 {
1799         struct rte_flow *remote_flow;
1800
1801         while (!LIST_EMPTY(&pmd->implicit_flows)) {
1802                 remote_flow = LIST_FIRST(&pmd->implicit_flows);
1803                 if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
1804                         return -1;
1805         }
1806         return 0;
1807 }
1808
1809 #define MAX_RSS_KEYS 256
1810 #define KEY_IDX_OFFSET (3 * MAX_RSS_KEYS)
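/*
 * RSS key indices returned to flows are offset by KEY_IDX_OFFSET so that a
 * non-RSS flow, whose key_idx stays at its initial value of 0, can never
 * release a live map entry by mistake; see bpf_rss_key() below.
 */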
1811 #define SEC_NAME_CLS_Q "cls_q"
1812
1813 const char *sec_name[SEC_MAX] = {
1814         [SEC_L3_L4] = "l3_l4",
1815 };
1816
1817 /**
1818  * Enable RSS on tap: create TC rules for queuing.
1819  *
1820  * @param[in, out] pmd
1821  *   Pointer to private structure.
1822  *
1823  * @param[in] attr
1824  *   Pointer to the flow attributes; used to get the flow group.
1825  *
1826  * @param[out] error
1827  *   Pointer to error reporting if not NULL.
1828  *
1829  * @return 0 on success, negative value on failure.
1830  */
1831 static int rss_enable(struct pmd_internals *pmd,
1832                         const struct rte_flow_attr *attr,
1833                         struct rte_flow_error *error)
1834 {
1835         struct rte_flow *rss_flow = NULL;
1836         struct nlmsg *msg = NULL;
1837         /* 4096 is the maximum number of instructions for a BPF program */
1838         char annotation[64];
1839         int i;
1840         int err = 0;
1841
1842         /* lift the locked memory limit (BPF resources are charged against it) */
1843         struct rlimit memlock_limit = {
1844                 .rlim_cur = RLIM_INFINITY,
1845                 .rlim_max = RLIM_INFINITY,
1846         };
1847         setrlimit(RLIMIT_MEMLOCK, &memlock_limit);
1848
1849         /* Initialize the BPF RSS key repository */
1850         err = bpf_rss_key(KEY_CMD_INIT, NULL);
1851         if (err < 0) {
1852                 rte_flow_error_set(
1853                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1854                         "Failed to initialize BPF RSS keys");
1855
1856                 return -1;
1857         }
1858
1859         /*
1860          *  Create BPF RSS MAP
1861          */
1862         pmd->map_fd = tap_flow_bpf_rss_map_create(sizeof(__u32), /* key size */
1863                                 sizeof(struct rss_key),
1864                                 MAX_RSS_KEYS);
1865         if (pmd->map_fd < 0) {
1866                 TAP_LOG(ERR,
1867                         "Failed to create BPF map (%d): %s",
1868                                 errno, strerror(errno));
1869                 rte_flow_error_set(
1870                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
1871                         "Kernel too old or not configured "
1872                         "to support BPF maps");
1873
1874                 return -ENOTSUP;
1875         }
1876
1877         /*
1878          * Add a rule per queue to match reclassified packets and direct them to
1879          * the correct queue.
1880          */
1881         for (i = 0; i < pmd->dev->data->nb_rx_queues; i++) {
1882                 pmd->bpf_fd[i] = tap_flow_bpf_cls_q(i);
1883                 if (pmd->bpf_fd[i] < 0) {
1884                         TAP_LOG(ERR,
1885                                 "Failed to load BPF section %s for queue %d",
1886                                 SEC_NAME_CLS_Q, i);
1887                         rte_flow_error_set(
1888                                 error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
1889                                 NULL,
1890                                 "Kernel too old or not configured "
1891                                 "to support BPF programs loading");
1892
1893                         return -ENOTSUP;
1894                 }
1895
1896                 rss_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
1897                 if (!rss_flow) {
1898                         TAP_LOG(ERR,
1899                                 "Cannot allocate memory for rte_flow");
1900                         return -1;
1901                 }
1902                 msg = &rss_flow->msg;
1903                 tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER, NLM_F_REQUEST |
1904                             NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
1905                 msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
1906                 tap_flow_set_handle(rss_flow);
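                /*
                 * Give each per-queue cls_bpf filter its own TC priority,
                 * derived from the flow group and the queue index, so the
                 * rules do not collide with one another.
                 */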
1907                 uint16_t group = attr->group << GROUP_SHIFT;
1908                 uint16_t prio = group | (i + PRIORITY_OFFSET);
1909                 msg->t.tcm_info = TC_H_MAKE(prio << 16, msg->t.tcm_info);
1910                 msg->t.tcm_parent = TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
1911
1912                 tap_nlattr_add(&msg->nh, TCA_KIND, sizeof("bpf"), "bpf");
1913                 if (tap_nlattr_nested_start(msg, TCA_OPTIONS) < 0)
1914                         return -1;
1915                 tap_nlattr_add32(&msg->nh, TCA_BPF_FD, pmd->bpf_fd[i]);
1916                 snprintf(annotation, sizeof(annotation), "[%s%d]",
1917                         SEC_NAME_CLS_Q, i);
1918                 tap_nlattr_add(&msg->nh, TCA_BPF_NAME, strlen(annotation) + 1,
1919                            annotation);
1920                 /* Actions */
1921                 {
1922                         struct action_data adata = {
1923                                 .id = "skbedit",
1924                                 .skbedit = {
1925                                         .skbedit = {
1926                                                 .action = TC_ACT_PIPE,
1927                                         },
1928                                         .queue = i,
1929                                 },
1930                         };
1931                         if (add_actions(rss_flow, 1, &adata, TCA_BPF_ACT) < 0)
1932                                 return -1;
1933                 }
1934                 tap_nlattr_nested_finish(msg); /* nested TCA_OPTIONS */
1935
1936                 /* Netlink message is now ready to be sent */
1937                 if (tap_nl_send(pmd->nlsk_fd, &msg->nh) < 0)
1938                         return -1;
1939                 err = tap_nl_recv_ack(pmd->nlsk_fd);
1940                 if (err < 0) {
1941                         TAP_LOG(ERR,
1942                                 "Kernel refused TC filter rule creation (%d): %s",
1943                                 errno, strerror(errno));
1944                         return err;
1945                 }
1946                 LIST_INSERT_HEAD(&pmd->rss_flows, rss_flow, next);
1947         }
1948
1949         pmd->rss_enabled = 1;
1950         return err;
1951 }
1952
1953 /**
1954  * Manage bpf RSS keys repository with operations: init, get, release
1955  * Manage the BPF RSS key repository with operations: init, get, release
1956  * @param[in] cmd
1957  *   Command on RSS keys: init, get, release
1958  *
1959  * @param[in, out] key_idx
1960  *   Pointer to RSS Key index (out for get command, in for release command)
1961  *
1962  * @return -1 if the keys couldn't be initialized, obtained or released, 0 otherwise.
1963  */
1964 static int bpf_rss_key(enum bpf_rss_key_e cmd, __u32 *key_idx)
1965 {
1966         __u32 i;
1967         int err = 0;
1968         static __u32 num_used_keys;
1969         static __u32 rss_keys[MAX_RSS_KEYS] = {KEY_STAT_UNSPEC};
1970         static __u32 rss_keys_initialized;
1971         __u32 key;
1972
1973         switch (cmd) {
1974         case KEY_CMD_GET:
1975                 if (!rss_keys_initialized) {
1976                         err = -1;
1977                         break;
1978                 }
1979
1980                 if (num_used_keys == RTE_DIM(rss_keys)) {
1981                         err = -1;
1982                         break;
1983                 }
1984
1985                 *key_idx = num_used_keys % RTE_DIM(rss_keys);
1986                 while (rss_keys[*key_idx] == KEY_STAT_USED)
1987                         *key_idx = (*key_idx + 1) % RTE_DIM(rss_keys);
1988
1989                 rss_keys[*key_idx] = KEY_STAT_USED;
1990
1991                 /*
1992                  * Add an offset to key_idx in order to handle a case of
1993                  * RSS and non RSS flows mixture.
1994                  * If a non RSS flow is destroyed it has an eBPF map
1995                  * index 0 (initialized on flow creation) and might
1996                  * unintentionally remove RSS entry 0 from eBPF map.
1997                  * To avoid this issue, add an offset to the real index
1998                  * during a KEY_CMD_GET operation and subtract this offset
1999                  * during a KEY_CMD_RELEASE operation in order to restore
2000                  * the real index.
2001                  */
2002                 *key_idx += KEY_IDX_OFFSET;
2003                 num_used_keys++;
2004         break;
2005
2006         case KEY_CMD_RELEASE:
2007                 if (!rss_keys_initialized)
2008                         break;
2009
2010                 /*
2011                  * Subtract the offset to restore the real key index.
2012                  * If a non RSS flow is falsely trying to release map
2013                  * entry 0 - the offset subtraction will calculate the real
2014                  * map index as an out-of-range value and the release operation
2015                  * will be silently ignored.
2016                  */
2017                 key = *key_idx - KEY_IDX_OFFSET;
2018                 if (key >= RTE_DIM(rss_keys))
2019                         break;
2020
2021                 if (rss_keys[key] == KEY_STAT_USED) {
2022                         rss_keys[key] = KEY_STAT_AVAILABLE;
2023                         num_used_keys--;
2024                 }
2025         break;
2026
2027         case KEY_CMD_INIT:
2028                 for (i = 0; i < RTE_DIM(rss_keys); i++)
2029                         rss_keys[i] = KEY_STAT_AVAILABLE;
2030
2031                 rss_keys_initialized = 1;
2032                 num_used_keys = 0;
2033         break;
2034
2035         case KEY_CMD_DEINIT:
2036                 for (i = 0; i < RTE_DIM(rss_keys); i++)
2037                         rss_keys[i] = KEY_STAT_UNSPEC;
2038
2039                 rss_keys_initialized = 0;
2040                 num_used_keys = 0;
2041         break;
2042
2043         default:
2044                 break;
2045         }
2046
2047         return err;
2048 }
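/*
 * Worked example of the offset scheme above, using the constants defined in
 * this file (MAX_RSS_KEYS = 256, hence KEY_IDX_OFFSET = 768): KEY_CMD_GET
 * turns internal index 5 into key_idx = 773, and KEY_CMD_RELEASE maps 773
 * back to 5. A stale key_idx of 0 (non-RSS flow) yields 0 - 768, which wraps
 * to a huge __u32 value and fails the bounds check, so map entry 0 is never
 * freed by mistake.
 */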
2049
2050 /**
2051  * Add RSS hash calculations and queue selection
2052  *
2053  * @param[in, out] flow  Pointer to the flow the RSS actions are added to.
2054  * @param[in, out] pmd   Pointer to internal structure; used to set/get the RSS map fd.
2055  *
2056  * @param[in] rss
2057  *   Pointer to RSS flow actions
2058  *
2059  * @param[out] error
2060  *   Pointer to error reporting if not NULL.
2061  *
2062  * @return 0 on success, negative value on failure
2063  */
2064 static int rss_add_actions(struct rte_flow *flow, struct pmd_internals *pmd,
2065                            const struct rte_flow_action_rss *rss,
2066                            struct rte_flow_error *error)
2067 {
2068         /* 4096 is the maximum number of instructions for a BPF program */
2069         unsigned int i;
2070         int err;
2071         struct rss_key rss_entry = { .hash_fields = 0,
2072                                      .key_size = 0 };
2073
2074         /* Check supported RSS features */
2075         if (rss->func != RTE_ETH_HASH_FUNCTION_DEFAULT)
2076                 return rte_flow_error_set
2077                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2078                          "non-default RSS hash functions are not supported");
2079         if (rss->level)
2080                 return rte_flow_error_set
2081                         (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2082                          "a nonzero RSS encapsulation level is not supported");
2083
2084         /* Get a new map key for a new RSS rule */
2085         err = bpf_rss_key(KEY_CMD_GET, &flow->key_idx);
2086         if (err < 0) {
2087                 rte_flow_error_set(
2088                         error, EINVAL, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2089                         "Failed to get BPF RSS key");
2090
2091                 return -1;
2092         }
2093
2094         /* Update RSS map entry with queues */
2095         rss_entry.nb_queues = rss->queue_num;
2096         for (i = 0; i < rss->queue_num; i++)
2097                 rss_entry.queues[i] = rss->queue[i];
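        /*
         * Only the combined L3/L4 hash over IPv4 and IPv6 is implemented
         * here; the rss->types field requested by the application is not
         * consulted at this point.
         */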
2098         rss_entry.hash_fields =
2099                 (1 << HASH_FIELD_IPV4_L3_L4) | (1 << HASH_FIELD_IPV6_L3_L4);
2100
2101         /* Add this RSS entry to map */
2102         err = tap_flow_bpf_update_rss_elem(pmd->map_fd,
2103                                 &flow->key_idx, &rss_entry);
2104
2105         if (err) {
2106                 TAP_LOG(ERR,
2107                         "Failed to update BPF map entry #%u (%d): %s",
2108                         flow->key_idx, errno, strerror(errno));
2109                 rte_flow_error_set(
2110                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2111                         "Kernel too old or not configured "
2112                         "to support BPF maps updates");
2113
2114                 return -ENOTSUP;
2115         }
2116
2118         /*
2119          * Load bpf rules to calculate hash for this key_idx
2120          */
2121
2122         flow->bpf_fd[SEC_L3_L4] =
2123                 tap_flow_bpf_calc_l3_l4_hash(flow->key_idx, pmd->map_fd);
2124         if (flow->bpf_fd[SEC_L3_L4] < 0) {
2125                 TAP_LOG(ERR,
2126                         "Failed to load BPF section %s (%d): %s",
2127                                 sec_name[SEC_L3_L4], errno, strerror(errno));
2128                 rte_flow_error_set(
2129                         error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
2130                         "Kernel too old or not configured "
2131                         "to support BPF program loading");
2132
2133                 return -ENOTSUP;
2134         }
2135
2136         /* Actions */
2137         {
2138                 struct action_data adata[] = {
2139                         {
2140                                 .id = "bpf",
2141                                 .bpf = {
2142                                         .bpf_fd = flow->bpf_fd[SEC_L3_L4],
2143                                         .annotation = sec_name[SEC_L3_L4],
2144                                         .bpf = {
2145                                                 .action = TC_ACT_PIPE,
2146                                         },
2147                                 },
2148                         },
2149                 };
2150
2151                 if (add_actions(flow, RTE_DIM(adata), adata,
2152                         TCA_FLOWER_ACT) < 0)
2153                         return -1;
2154         }
2155
2156         return 0;
2157 }
2158
2159 /**
2160  * Manage filter operations.
2161  *
2162  * @param dev
2163  *   Pointer to Ethernet device structure.
2164  * @param filter_type
2165  *   Filter type.
2166  * @param filter_op
2167  *   Operation to perform.
2168  * @param arg
2169  *   Pointer to operation-specific structure.
2170  *
2171  * @return
2172  *   0 on success, negative errno value on failure.
2173  */
2174 int
2175 tap_dev_filter_ctrl(struct rte_eth_dev *dev,
2176                     enum rte_filter_type filter_type,
2177                     enum rte_filter_op filter_op,
2178                     void *arg)
2179 {
2180         switch (filter_type) {
2181         case RTE_ETH_FILTER_GENERIC:
2182                 if (filter_op != RTE_ETH_FILTER_GET)
2183                         return -EINVAL;
2184                 *(const void **)arg = &tap_flow_ops;
2185                 return 0;
2186         default:
2187                 TAP_LOG(ERR, "%p: filter type (%d) not supported",
2188                         dev, filter_type);
2189         }
2190         return -EINVAL;
2191 }
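
/*
 * Illustrative note (not part of the driver): the rte_flow library obtains
 * tap_flow_ops through this hook. Roughly what ethdev does internally,
 * assuming dev points to this port:
 *
 *	const struct rte_flow_ops *ops;
 *
 *	dev->dev_ops->filter_ctrl(dev, RTE_ETH_FILTER_GENERIC,
 *				  RTE_ETH_FILTER_GET, &ops);
 *
 * ops then points to tap_flow_ops, and rte_flow_create()/validate()/destroy()
 * dispatch through it.
 */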