drivers/net/tap/tap_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <errno.h>
#include <string.h>
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_jhash.h>
#include <rte_malloc.h>
#include <rte_eth_tap.h>
#include <tap_flow.h>
#include <tap_autoconf.h>
#include <tap_tcmsgs.h>

#ifndef HAVE_TC_FLOWER
/*
 * For kernels < 4.2, this enum is not defined. Runtime checks will be made to
 * avoid sending TC messages the kernel cannot understand.
 */
enum {
        TCA_FLOWER_UNSPEC,
        TCA_FLOWER_CLASSID,
        TCA_FLOWER_INDEV,
        TCA_FLOWER_ACT,
        TCA_FLOWER_KEY_ETH_DST,         /* ETH_ALEN */
        TCA_FLOWER_KEY_ETH_DST_MASK,    /* ETH_ALEN */
        TCA_FLOWER_KEY_ETH_SRC,         /* ETH_ALEN */
        TCA_FLOWER_KEY_ETH_SRC_MASK,    /* ETH_ALEN */
        TCA_FLOWER_KEY_ETH_TYPE,        /* be16 */
        TCA_FLOWER_KEY_IP_PROTO,        /* u8 */
        TCA_FLOWER_KEY_IPV4_SRC,        /* be32 */
        TCA_FLOWER_KEY_IPV4_SRC_MASK,   /* be32 */
        TCA_FLOWER_KEY_IPV4_DST,        /* be32 */
        TCA_FLOWER_KEY_IPV4_DST_MASK,   /* be32 */
        TCA_FLOWER_KEY_IPV6_SRC,        /* struct in6_addr */
        TCA_FLOWER_KEY_IPV6_SRC_MASK,   /* struct in6_addr */
        TCA_FLOWER_KEY_IPV6_DST,        /* struct in6_addr */
        TCA_FLOWER_KEY_IPV6_DST_MASK,   /* struct in6_addr */
        TCA_FLOWER_KEY_TCP_SRC,         /* be16 */
        TCA_FLOWER_KEY_TCP_DST,         /* be16 */
        TCA_FLOWER_KEY_UDP_SRC,         /* be16 */
        TCA_FLOWER_KEY_UDP_DST,         /* be16 */
};
#endif
#ifndef HAVE_TC_VLAN_ID
enum {
        /* TCA_FLOWER_FLAGS, */
        TCA_FLOWER_KEY_VLAN_ID = TCA_FLOWER_KEY_UDP_DST + 2, /* be16 */
        TCA_FLOWER_KEY_VLAN_PRIO,       /* u8   */
        TCA_FLOWER_KEY_VLAN_ETH_TYPE,   /* be16 */
};
#endif
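
/*
 * Note (added for clarity): the "+ 2" above skips one enum slot relative to
 * TCA_FLOWER_KEY_UDP_DST, because the kernel's own enum in linux/pkt_cls.h
 * places TCA_FLOWER_FLAGS in between. Keeping the numbering identical to the
 * kernel's is what makes these fallback definitions safe to send.
 */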

struct rte_flow {
        LIST_ENTRY(rte_flow) next; /* Pointer to the next rte_flow structure */
        struct rte_flow *remote_flow; /* associated remote flow */
        struct nlmsg msg;
};

struct convert_data {
        uint16_t eth_type;
        uint16_t ip_proto;
        uint8_t vlan;
        struct rte_flow *flow;
};

struct remote_rule {
        struct rte_flow_attr attr;
        struct rte_flow_item items[2];
        int mirred;
};

static int tap_flow_create_eth(const struct rte_flow_item *item, void *data);
static int tap_flow_create_vlan(const struct rte_flow_item *item, void *data);
static int tap_flow_create_ipv4(const struct rte_flow_item *item, void *data);
static int tap_flow_create_ipv6(const struct rte_flow_item *item, void *data);
static int tap_flow_create_udp(const struct rte_flow_item *item, void *data);
static int tap_flow_create_tcp(const struct rte_flow_item *item, void *data);
static int
tap_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error);

static struct rte_flow *
tap_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item items[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error);

static int
tap_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error);

static const struct rte_flow_ops tap_flow_ops = {
        .validate = tap_flow_validate,
        .create = tap_flow_create,
        .destroy = tap_flow_destroy,
        .flush = tap_flow_flush,
};

/* Static initializer for items. */
#define ITEMS(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }
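
/*
 * For illustration (not part of the original sources): an invocation such as
 * ITEMS(RTE_FLOW_ITEM_TYPE_VLAN, RTE_FLOW_ITEM_TYPE_IPV4) expands to the
 * compound literal
 *
 *   (const enum rte_flow_item_type []){
 *           RTE_FLOW_ITEM_TYPE_VLAN,
 *           RTE_FLOW_ITEM_TYPE_IPV4,
 *           RTE_FLOW_ITEM_TYPE_END,
 *   }
 *
 * i.e. an anonymous END-terminated array suitable for the .items field of
 * struct tap_flow_items below.
 */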

/* Structure to generate a simple graph of layers supported by the NIC. */
struct tap_flow_items {
        /* Bit-mask corresponding to what is supported for this item. */
        const void *mask;
        const unsigned int mask_sz; /* Bit-mask size in bytes. */
        /*
         * Bit-mask corresponding to the default mask, if none is provided
         * along with the item.
         */
        const void *default_mask;
        /**
         * Conversion function from rte_flow to netlink attributes.
         *
         * @param item
         *   rte_flow item to convert.
         * @param data
         *   Internal structure to store the conversion.
         *
         * @return
         *   0 on success, negative value otherwise.
         */
        int (*convert)(const struct rte_flow_item *item, void *data);
        /** List of possible following items. */
        const enum rte_flow_item_type *const items;
};

/* Graph of supported items and associated actions. */
static const struct tap_flow_items tap_flow_items[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .items = ITEMS(
                        RTE_FLOW_ITEM_TYPE_VLAN,
                        RTE_FLOW_ITEM_TYPE_IPV4,
                        RTE_FLOW_ITEM_TYPE_IPV6),
                .mask = &(const struct rte_flow_item_eth){
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        .type = -1,
                },
                .mask_sz = sizeof(struct rte_flow_item_eth),
                .default_mask = &rte_flow_item_eth_mask,
                .convert = tap_flow_create_eth,
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_IPV4,
                               RTE_FLOW_ITEM_TYPE_IPV6),
                .mask = &(const struct rte_flow_item_vlan){
                        .tpid = -1,
                        /* DEI matching is not supported */
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN
                        .tci = 0xffef,
#else
                        .tci = 0xefff,
#endif
                },
                .mask_sz = sizeof(struct rte_flow_item_vlan),
                .default_mask = &rte_flow_item_vlan_mask,
                .convert = tap_flow_create_vlan,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_TCP),
                .mask = &(const struct rte_flow_item_ipv4){
                        .hdr = {
                                .src_addr = -1,
                                .dst_addr = -1,
                                .next_proto_id = -1,
                        },
                },
                .mask_sz = sizeof(struct rte_flow_item_ipv4),
                .default_mask = &rte_flow_item_ipv4_mask,
                .convert = tap_flow_create_ipv4,
        },
        [RTE_FLOW_ITEM_TYPE_IPV6] = {
                .items = ITEMS(RTE_FLOW_ITEM_TYPE_UDP,
                               RTE_FLOW_ITEM_TYPE_TCP),
                .mask = &(const struct rte_flow_item_ipv6){
                        .hdr = {
                                .src_addr = {
                                        "\xff\xff\xff\xff\xff\xff\xff\xff"
                                        "\xff\xff\xff\xff\xff\xff\xff\xff",
                                },
                                .dst_addr = {
                                        "\xff\xff\xff\xff\xff\xff\xff\xff"
                                        "\xff\xff\xff\xff\xff\xff\xff\xff",
                                },
                                .proto = -1,
                        },
                },
                .mask_sz = sizeof(struct rte_flow_item_ipv6),
                .default_mask = &rte_flow_item_ipv6_mask,
                .convert = tap_flow_create_ipv6,
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .mask = &(const struct rte_flow_item_udp){
                        .hdr = {
                                .src_port = -1,
                                .dst_port = -1,
                        },
                },
                .mask_sz = sizeof(struct rte_flow_item_udp),
                .default_mask = &rte_flow_item_udp_mask,
                .convert = tap_flow_create_udp,
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .mask = &(const struct rte_flow_item_tcp){
                        .hdr = {
                                .src_port = -1,
                                .dst_port = -1,
                        },
                },
                .mask_sz = sizeof(struct rte_flow_item_tcp),
                .default_mask = &rte_flow_item_tcp_mask,
                .convert = tap_flow_create_tcp,
        },
};
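
/*
 * Reading the graph above (added for clarity): pattern validation starts at
 * the RTE_FLOW_ITEM_TYPE_END entry and follows the .items links, so the only
 * accepted patterns are ETH, optionally followed by VLAN, then IPV4 or IPV6,
 * then UDP or TCP. For example:
 *
 *   ETH / IPV4 / UDP / END   is accepted;
 *   ETH / UDP / END          is rejected (UDP cannot directly follow ETH).
 */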

static struct remote_rule implicit_rte_flows[TAP_REMOTE_MAX_IDX] = {
        [TAP_REMOTE_LOCAL_MAC] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_LOCAL_MAC,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .mask = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        },
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_REDIR,
        },
        [TAP_REMOTE_BROADCAST] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_BROADCAST,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .mask = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        },
                        .spec = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                        },
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_REMOTE_BROADCASTV6] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_BROADCASTV6,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .mask = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
                        },
                        .spec = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\x33\x33\x00\x00\x00\x00",
                        },
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_REMOTE_PROMISC] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_PROMISC,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_VOID,
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_REMOTE_ALLMULTI] = {
                .attr = {
                        .group = MAX_GROUP,
                        .priority = PRIORITY_MASK - TAP_REMOTE_ALLMULTI,
                        .ingress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_ETH,
                        .mask = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        },
                        .spec = &(const struct rte_flow_item_eth){
                                .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
                        },
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
        [TAP_REMOTE_TX] = {
                .attr = {
                        .group = 0,
                        .priority = TAP_REMOTE_TX,
                        .egress = 1,
                },
                .items[0] = {
                        .type = RTE_FLOW_ITEM_TYPE_VOID,
                },
                .items[1] = {
                        .type = RTE_FLOW_ITEM_TYPE_END,
                },
                .mirred = TCA_EGRESS_MIRROR,
        },
};
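
/*
 * Note on priorities (added for clarity): implicit rules live in group
 * MAX_GROUP at priority PRIORITY_MASK - idx, i.e. the largest priority
 * values (checked last by TC) of the last group. tap_flow_create() below
 * refuses user rules on those priorities so the two sets cannot collide.
 */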

/**
 * Make as many checks as possible on an Ethernet item, and if a flow is
 * provided, fill it appropriately with Ethernet info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_eth(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_ETH].default_mask;
        /* TC does not support eth_type masking. Only accept if exact match. */
        if (mask->type && mask->type != 0xffff)
                return -1;
        if (!spec)
                return 0;
        /* store eth_type for consistency if ipv4/6 pattern item comes next */
        if (spec->type & mask->type)
                info->eth_type = spec->type;
        if (!flow)
                return 0;
        msg = &flow->msg;
        if (spec->type & mask->type)
                msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info,
                                            (spec->type & mask->type));
        /*
         * The mask, not the spec, decides whether an address key is emitted:
         * a zero spec with a non-zero mask is a valid match on a zero MAC.
         */
        if (!is_zero_ether_addr(&mask->dst)) {
                nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_DST, ETHER_ADDR_LEN,
                           &spec->dst.addr_bytes);
                nlattr_add(&msg->nh,
                           TCA_FLOWER_KEY_ETH_DST_MASK, ETHER_ADDR_LEN,
                           &mask->dst.addr_bytes);
        }
        if (!is_zero_ether_addr(&mask->src)) {
                nlattr_add(&msg->nh, TCA_FLOWER_KEY_ETH_SRC, ETHER_ADDR_LEN,
                           &spec->src.addr_bytes);
                nlattr_add(&msg->nh,
                           TCA_FLOWER_KEY_ETH_SRC_MASK, ETHER_ADDR_LEN,
                           &mask->src.addr_bytes);
        }
        return 0;
}

/**
 * Make as many checks as possible on a VLAN item, and if a flow is provided,
 * fill it appropriately with VLAN info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_vlan(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_VLAN].default_mask;
        /* TC does not support tpid masking. Only accept if exact match. */
        if (mask->tpid && mask->tpid != 0xffff)
                return -1;
        /* Double-tagging not supported. */
        if (spec && mask->tpid && spec->tpid != htons(ETH_P_8021Q))
                return -1;
        info->vlan = 1;
        if (!flow)
                return 0;
        msg = &flow->msg;
        msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_8021Q));
#define VLAN_PRIO(tci) ((tci) >> 13)
#define VLAN_ID(tci) ((tci) & 0xfff)
        if (!spec)
                return 0;
        if (spec->tci) {
                uint16_t tci = ntohs(spec->tci) & mask->tci;
                uint16_t prio = VLAN_PRIO(tci);
                uint16_t vid = VLAN_ID(tci); /* 12-bit ID, doesn't fit a u8 */

                if (prio)
                        nlattr_add8(&msg->nh, TCA_FLOWER_KEY_VLAN_PRIO, prio);
                if (vid)
                        nlattr_add16(&msg->nh, TCA_FLOWER_KEY_VLAN_ID, vid);
        }
        return 0;
}
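
/*
 * Worked example (added for clarity): for a host-order TCI of 0x6007,
 * VLAN_PRIO(0x6007) = 0x6007 >> 13 = 3 and VLAN_ID(0x6007) = 0x6007 & 0xfff
 * = 7, i.e. the rule matches PCP 3 and VLAN 7. DEI matching (bit 0x1000) is
 * rejected earlier by tap_flow_item_validate() against the supported .tci
 * mask above.
 */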

/**
 * Make as many checks as possible on an IPv4 item, and if a flow is provided,
 * fill it appropriately with IPv4 info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_ipv4(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV4].default_mask;
        /* check that previous eth type is compatible with ipv4 */
        if (info->eth_type && info->eth_type != htons(ETH_P_IP))
                return -1;
        /* store ip_proto for consistency if udp/tcp pattern item comes next */
        if (spec)
                info->ip_proto = spec->hdr.next_proto_id;
        if (!flow)
                return 0;
        msg = &flow->msg;
        if (!info->eth_type)
                info->eth_type = htons(ETH_P_IP);
        if (!info->vlan)
                msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_IP));
        if (!spec)
                return 0;
        if (spec->hdr.dst_addr) {
                nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST,
                             spec->hdr.dst_addr);
                nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_DST_MASK,
                             mask->hdr.dst_addr);
        }
        if (spec->hdr.src_addr) {
                nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC,
                             spec->hdr.src_addr);
                nlattr_add32(&msg->nh, TCA_FLOWER_KEY_IPV4_SRC_MASK,
                             mask->hdr.src_addr);
        }
        if (spec->hdr.next_proto_id)
                nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO,
                            spec->hdr.next_proto_id);
        return 0;
}

/**
 * Make as many checks as possible on an IPv6 item, and if a flow is provided,
 * fill it appropriately with IPv6 info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_ipv6(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_ipv6 *spec = item->spec;
        const struct rte_flow_item_ipv6 *mask = item->mask;
        struct rte_flow *flow = info->flow;
        uint8_t empty_addr[16] = { 0 };
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_IPV6].default_mask;
        /* check that previous eth type is compatible with ipv6 */
        if (info->eth_type && info->eth_type != htons(ETH_P_IPV6))
                return -1;
        /* store ip_proto for consistency if udp/tcp pattern item comes next */
        if (spec)
                info->ip_proto = spec->hdr.proto;
        if (!flow)
                return 0;
        msg = &flow->msg;
        if (!info->eth_type)
                info->eth_type = htons(ETH_P_IPV6);
        if (!info->vlan)
                msg->t.tcm_info = TC_H_MAKE(msg->t.tcm_info, htons(ETH_P_IPV6));
        if (!spec)
                return 0;
        if (memcmp(spec->hdr.dst_addr, empty_addr, 16)) {
                nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST,
                           sizeof(spec->hdr.dst_addr), &spec->hdr.dst_addr);
                nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_DST_MASK,
                           sizeof(mask->hdr.dst_addr), &mask->hdr.dst_addr);
        }
        if (memcmp(spec->hdr.src_addr, empty_addr, 16)) {
                nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC,
                           sizeof(spec->hdr.src_addr), &spec->hdr.src_addr);
                nlattr_add(&msg->nh, TCA_FLOWER_KEY_IPV6_SRC_MASK,
                           sizeof(mask->hdr.src_addr), &mask->hdr.src_addr);
        }
        if (spec->hdr.proto)
                nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, spec->hdr.proto);
        return 0;
}

/**
 * Make as many checks as possible on a UDP item, and if a flow is provided,
 * fill it appropriately with UDP info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_udp(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_UDP].default_mask;
        /* check that previous ip_proto is compatible with udp */
        if (info->ip_proto && info->ip_proto != IPPROTO_UDP)
                return -1;
        /* TC does not support UDP port masking. Only accept if exact match. */
        if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
            (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
                return -1;
        if (!flow)
                return 0;
        msg = &flow->msg;
        nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_UDP);
        if (!spec)
                return 0;
        if (spec->hdr.dst_port & mask->hdr.dst_port)
                nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_DST,
                             spec->hdr.dst_port);
        if (spec->hdr.src_port & mask->hdr.src_port)
                nlattr_add16(&msg->nh, TCA_FLOWER_KEY_UDP_SRC,
                             spec->hdr.src_port);
        return 0;
}

/**
 * Make as many checks as possible on a TCP item, and if a flow is provided,
 * fill it appropriately with TCP info.
 *
 * @param[in] item
 *   Item specification.
 * @param[in, out] data
 *   Additional data structure to tell next layers we've been here.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
tap_flow_create_tcp(const struct rte_flow_item *item, void *data)
{
        struct convert_data *info = (struct convert_data *)data;
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask = item->mask;
        struct rte_flow *flow = info->flow;
        struct nlmsg *msg;

        /* use default mask if none provided */
        if (!mask)
                mask = tap_flow_items[RTE_FLOW_ITEM_TYPE_TCP].default_mask;
        /* check that previous ip_proto is compatible with tcp */
        if (info->ip_proto && info->ip_proto != IPPROTO_TCP)
                return -1;
        /* TC does not support TCP port masking. Only accept if exact match. */
        if ((mask->hdr.src_port && mask->hdr.src_port != 0xffff) ||
            (mask->hdr.dst_port && mask->hdr.dst_port != 0xffff))
                return -1;
        if (!flow)
                return 0;
        msg = &flow->msg;
        nlattr_add8(&msg->nh, TCA_FLOWER_KEY_IP_PROTO, IPPROTO_TCP);
        if (!spec)
                return 0;
        if (spec->hdr.dst_port & mask->hdr.dst_port)
                nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_DST,
                             spec->hdr.dst_port);
        if (spec->hdr.src_port & mask->hdr.src_port)
                nlattr_add16(&msg->nh, TCA_FLOWER_KEY_TCP_SRC,
                             spec->hdr.src_port);
        return 0;
}

/**
 * Check support for a given item.
 *
 * @param[in] item
 *   Item specification.
 * @param size
 *   Bit-mask size in bytes.
 * @param[in] supported_mask
 *   Bit-mask covering supported fields to compare with spec, last and mask in
 *   \item.
 * @param[in] default_mask
 *   Bit-mask default mask if none is provided in \item.
 *
 * @return
 *   0 on success.
 */
static int
tap_flow_item_validate(const struct rte_flow_item *item,
                       unsigned int size,
                       const uint8_t *supported_mask,
                       const uint8_t *default_mask)
{
        int ret = 0;

        /* An empty layer is allowed, as long as all fields are NULL */
        if (!item->spec && (item->mask || item->last))
                return -1;
        /* Is the item spec compatible with what the NIC supports? */
        if (item->spec && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->spec;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | supported_mask[i]) != supported_mask[i])
                                return -1;
                /* Is the default mask compatible with what the NIC supports? */
                for (i = 0; i < size; i++)
                        if ((default_mask[i] | supported_mask[i]) !=
                            supported_mask[i])
                                return -1;
        }
        /* Is the item last compatible with what the NIC supports? */
        if (item->last && !item->mask) {
                unsigned int i;
                const uint8_t *spec = item->last;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | supported_mask[i]) != supported_mask[i])
                                return -1;
        }
        /* Is the item mask compatible with what the NIC supports? */
        if (item->mask) {
                unsigned int i;
                const uint8_t *spec = item->mask;

                for (i = 0; i < size; ++i)
                        if ((spec[i] | supported_mask[i]) != supported_mask[i])
                                return -1;
        }
        /*
         * Once masked, are item spec and item last equal?
         * TC does not support ranges, so anything else is invalid.
         */
        if (item->spec && item->last) {
                uint8_t spec[size];
                uint8_t last[size];
                const uint8_t *apply = default_mask;
                unsigned int i;

                if (item->mask)
                        apply = item->mask;
                for (i = 0; i < size; ++i) {
                        spec[i] = ((const uint8_t *)item->spec)[i] & apply[i];
                        last[i] = ((const uint8_t *)item->last)[i] & apply[i];
                }
                ret = memcmp(spec, last, size);
        }
        return ret;
}
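
/*
 * Worked example (added for clarity): with a supported_mask byte of 0x0f, a
 * spec byte of 0x07 passes since (0x07 | 0x0f) == 0x0f, while 0x17 fails
 * since (0x17 | 0x0f) == 0x1f != 0x0f, i.e. it asks to match a bit TC cannot
 * inspect for this item.
 */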

/**
 * Transform a DROP/PASSTHRU action item in the provided flow for TC.
 *
 * @param[in, out] flow
 *   Flow to be filled.
 * @param[in] action
 *   Appropriate action to be set in the TCA_GACT_PARMS structure.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
add_action_gact(struct rte_flow *flow, int action)
{
        struct nlmsg *msg = &flow->msg;
        size_t act_index = 1;
        struct tc_gact p = {
                .action = action
        };

        if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
                return -1;
        if (nlattr_nested_start(msg, act_index++) < 0)
                return -1;
        nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("gact"), "gact");
        if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
                return -1;
        nlattr_add(&msg->nh, TCA_GACT_PARMS, sizeof(p), &p);
        nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
        nlattr_nested_finish(msg); /* nested act_index */
        nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
        return 0;
}
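
/*
 * Resulting netlink attribute layout (added for clarity), as the kernel
 * parses it:
 *
 *   TCA_FLOWER_ACT
 *   `-- 1                      (act_index)
 *       |-- TCA_ACT_KIND       = "gact"
 *       `-- TCA_ACT_OPTIONS
 *           `-- TCA_GACT_PARMS = { .action = TC_ACT_SHOT or TC_ACT_UNSPEC }
 *
 * add_action_mirred() and add_action_skbedit() below build the same
 * three-level nesting with their own KIND and PARMS attributes.
 */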

/**
 * Transform a MIRRED action item in the provided flow for TC.
 *
 * @param[in, out] flow
 *   Flow to be filled.
 * @param[in] ifindex
 *   Netdevice ifindex, where to mirror/redirect packet to.
 * @param[in] action_type
 *   Either TCA_EGRESS_REDIR for redirection or TCA_EGRESS_MIRROR for mirroring.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
add_action_mirred(struct rte_flow *flow, uint16_t ifindex, uint16_t action_type)
{
        struct nlmsg *msg = &flow->msg;
        size_t act_index = 1;
        struct tc_mirred p = {
                .eaction = action_type,
                .ifindex = ifindex,
        };

        if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
                return -1;
        if (nlattr_nested_start(msg, act_index++) < 0)
                return -1;
        nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("mirred"), "mirred");
        if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
                return -1;
        if (action_type == TCA_EGRESS_MIRROR)
                p.action = TC_ACT_PIPE;
        else /* REDIRECT */
                p.action = TC_ACT_STOLEN;
        nlattr_add(&msg->nh, TCA_MIRRED_PARMS, sizeof(p), &p);
        nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
        nlattr_nested_finish(msg); /* nested act_index */
        nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
        return 0;
}

/**
 * Transform a QUEUE action item in the provided flow for TC.
 *
 * @param[in, out] flow
 *   Flow to be filled.
 * @param[in] queue
 *   Queue id to use.
 *
 * @return
 *   0 if checks are alright, -1 otherwise.
 */
static int
add_action_skbedit(struct rte_flow *flow, uint16_t queue)
{
        struct nlmsg *msg = &flow->msg;
        size_t act_index = 1;
        struct tc_skbedit p = {
                .action = TC_ACT_PIPE
        };

        if (nlattr_nested_start(msg, TCA_FLOWER_ACT) < 0)
                return -1;
        if (nlattr_nested_start(msg, act_index++) < 0)
                return -1;
        nlattr_add(&msg->nh, TCA_ACT_KIND, sizeof("skbedit"), "skbedit");
        if (nlattr_nested_start(msg, TCA_ACT_OPTIONS) < 0)
                return -1;
        nlattr_add(&msg->nh, TCA_SKBEDIT_PARMS, sizeof(p), &p);
        nlattr_add16(&msg->nh, TCA_SKBEDIT_QUEUE_MAPPING, queue);
        nlattr_nested_finish(msg); /* nested TCA_ACT_OPTIONS */
        nlattr_nested_finish(msg); /* nested act_index */
        nlattr_nested_finish(msg); /* nested TCA_FLOWER_ACT */
        return 0;
}
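
/*
 * Roughly equivalent tc(8) commands (illustrative only): the three helpers
 * above correspond to the flower filter actions "drop"/"pass" (gact),
 * "mirred egress redirect dev <tap>" (or "mirror"), and
 * "skbedit queue_mapping <queue>".
 */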

/**
 * Validate a flow supported by TC.
 * If flow param is not NULL, then also fill the netlink message inside.
 *
 * @param pmd
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] flow
 *   Flow structure to update.
 * @param[in] mirred
 *   If set to TCA_EGRESS_REDIR, provided actions will be replaced with a
 *   redirection to the tap netdevice, and the TC rule will be configured
 *   on the remote netdevice in pmd.
 *   If set to TCA_EGRESS_MIRROR, provided actions will be replaced with a
 *   mirroring to the tap netdevice, and the TC rule will be configured
 *   on the remote netdevice in pmd. Matching packets will thus be duplicated.
 *   If set to 0, the standard behavior is to be used: set correct actions for
 *   the TC rule, and apply it on the tap netdevice.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
priv_flow_process(struct pmd_internals *pmd,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error,
                  struct rte_flow *flow,
                  int mirred)
{
        const struct tap_flow_items *cur_item = tap_flow_items;
        struct convert_data data = {
                .eth_type = 0,
                .ip_proto = 0,
                .flow = flow,
        };
        int action = 0; /* Only one action authorized for now */

        if (attr->group > MAX_GROUP) {
                rte_flow_error_set(
                        error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                        NULL, "group value too big: cannot exceed 15");
                return -rte_errno;
        }
        if (attr->priority > MAX_PRIORITY) {
                rte_flow_error_set(
                        error, EINVAL, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        NULL, "priority value too big");
                return -rte_errno;
        } else if (flow) {
                uint16_t group = attr->group << GROUP_SHIFT;
                uint16_t prio = group | (attr->priority + PRIORITY_OFFSET);

                flow->msg.t.tcm_info = TC_H_MAKE(prio << 16,
                                                 flow->msg.t.tcm_info);
        }
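        /*
         * At this point (added for clarity) tcm_info is TC_H_MAKE(prio << 16,
         * ethertype): the upper 16 bits carry group | priority, the lower 16
         * bits the matched protocol. tap_flow_implicit_destroy() relies on
         * this layout to recover a rule's priority via
         * (tcm_info >> 16) & PRIORITY_MASK.
         */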
        if (flow) {
                if (mirred) {
                        /*
                         * If attr->ingress, the rule applies on remote ingress
                         * to match incoming packets.
                         * If attr->egress, the rule applies on tap ingress (as
                         * seen from the kernel) to deal with packets going out
                         * from the DPDK app.
                         */
                        flow->msg.t.tcm_parent = TC_H_MAKE(TC_H_INGRESS, 0);
                } else {
                        /* Standard rule on tap egress (kernel standpoint). */
                        flow->msg.t.tcm_parent =
                                TC_H_MAKE(MULTIQ_MAJOR_HANDLE, 0);
                }
                /* use flower filter type */
                nlattr_add(&flow->msg.nh, TCA_KIND, sizeof("flower"), "flower");
                if (nlattr_nested_start(&flow->msg, TCA_OPTIONS) < 0)
                        goto exit_item_not_supported;
        }
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; ++items) {
                const struct tap_flow_items *token = NULL;
                unsigned int i;
                int err = 0;

                if (items->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                for (i = 0;
                     cur_item->items &&
                     cur_item->items[i] != RTE_FLOW_ITEM_TYPE_END;
                     ++i) {
                        if (cur_item->items[i] == items->type) {
                                token = &tap_flow_items[items->type];
                                break;
                        }
                }
                if (!token)
                        goto exit_item_not_supported;
                cur_item = token;
                err = tap_flow_item_validate(
                        items, cur_item->mask_sz,
                        (const uint8_t *)cur_item->mask,
                        (const uint8_t *)cur_item->default_mask);
                if (err)
                        goto exit_item_not_supported;
                if (flow && cur_item->convert) {
                        if (!pmd->flower_vlan_support &&
                            cur_item->convert == tap_flow_create_vlan)
                                goto exit_item_not_supported;
                        err = cur_item->convert(items, &data);
                        if (err)
                                goto exit_item_not_supported;
                }
        }
        if (flow) {
                if (pmd->flower_vlan_support && data.vlan) {
                        nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
                                     htons(ETH_P_8021Q));
                        nlattr_add16(&flow->msg.nh,
                                     TCA_FLOWER_KEY_VLAN_ETH_TYPE,
                                     data.eth_type ?
                                     data.eth_type : htons(ETH_P_ALL));
                } else if (data.eth_type) {
                        nlattr_add16(&flow->msg.nh, TCA_FLOWER_KEY_ETH_TYPE,
                                     data.eth_type);
                }
        }
        if (mirred && flow) {
                uint16_t if_index = pmd->if_index;

                /*
                 * If attr->egress && mirred, then this is a special
                 * case where the rule must be applied on the tap, to
                 * redirect packets coming from the DPDK App, out
                 * through the remote netdevice.
                 */
                if (attr->egress)
                        if_index = pmd->remote_if_index;
                if (add_action_mirred(flow, if_index, mirred) < 0)
                        goto exit_action_not_supported;
                else
                        goto end;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; ++actions) {
                int err = 0;

                if (actions->type == RTE_FLOW_ACTION_TYPE_VOID) {
                        continue;
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_DROP) {
                        if (action)
                                goto exit_action_not_supported;
                        action = 1;
                        if (flow)
                                err = add_action_gact(flow, TC_ACT_SHOT);
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_PASSTHRU) {
                        if (action)
                                goto exit_action_not_supported;
                        action = 1;
                        if (flow)
                                err = add_action_gact(flow, TC_ACT_UNSPEC);
                } else if (actions->type == RTE_FLOW_ACTION_TYPE_QUEUE) {
                        const struct rte_flow_action_queue *queue =
                                (const struct rte_flow_action_queue *)
                                actions->conf;

                        if (action)
                                goto exit_action_not_supported;
                        action = 1;
                        if (!queue || (queue->index >= pmd->nb_queues))
                                goto exit_action_not_supported;
                        if (flow)
                                err = add_action_skbedit(flow, queue->index);
                } else {
                        goto exit_action_not_supported;
                }
                if (err)
                        goto exit_action_not_supported;
        }
end:
        if (flow)
                nlattr_nested_finish(&flow->msg); /* nested TCA_OPTIONS */
        return 0;
exit_item_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                           items, "item not supported");
        return -rte_errno;
exit_action_not_supported:
        rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                           actions, "action not supported");
        return -rte_errno;
}

/**
 * Validate a flow.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
tap_flow_validate(struct rte_eth_dev *dev,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item items[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error)
{
        struct pmd_internals *pmd = dev->data->dev_private;

        return priv_flow_process(pmd, attr, items, actions, error, NULL, 0);
}

/**
 * Set a unique handle in a flow.
 *
 * The kernel supports TC rules with equal priority, as long as they use the
 * same matching fields (e.g.: dst mac and ipv4) with different values (and
 * full mask to ensure no collision is possible).
 * In those rules, the handle (uint32_t) is the part that specifically
 * identifies each rule.
 *
 * On 32-bit architectures, the handle can simply be the flow's pointer
 * address. On 64-bit architectures, we rely on jhash(flow) to find a
 * (sufficiently) unique handle.
 *
 * @param[in, out] flow
 *   The flow that needs its handle set.
 */
static void
tap_flow_set_handle(struct rte_flow *flow)
{
        uint32_t handle = 0;

        if (sizeof(flow) > 4)
                handle = rte_jhash(&flow, sizeof(flow), 1);
        else
                handle = (uintptr_t)flow;
        /* must be at least 1 to avoid letting the kernel choose one for us */
        if (!handle)
                handle = 1;
        flow->msg.t.tcm_handle = handle;
}
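
/*
 * Note (added for clarity): rte_jhash() above hashes the pointer's own bytes,
 * not the flow contents, so distinct flows feed distinct inputs; the 32-bit
 * result is only probabilistically unique, and a genuine collision would
 * surface as a kernel refusal (NLM_F_EXCL) when the rule is created.
 */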

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
tap_flow_create(struct rte_eth_dev *dev,
                const struct rte_flow_attr *attr,
                const struct rte_flow_item items[],
                const struct rte_flow_action actions[],
                struct rte_flow_error *error)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct rte_flow *remote_flow = NULL;
        struct rte_flow *flow = NULL;
        struct nlmsg *msg = NULL;
        int err;

        if (!pmd->if_index) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL,
                                   "can't create rule, ifindex not found");
                goto fail;
        }
        /*
         * No rules configured through standard rte_flow should be set on the
         * priorities used by implicit rules.
         */
        if ((attr->group == MAX_GROUP) &&
            attr->priority > (MAX_PRIORITY - TAP_REMOTE_MAX_IDX)) {
                rte_flow_error_set(
                        error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                        NULL, "priority value too big");
                goto fail;
        }
        flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "cannot allocate memory for rte_flow");
                goto fail;
        }
        msg = &flow->msg;
        tc_init_msg(msg, pmd->if_index, RTM_NEWTFILTER,
                    NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
        msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
        tap_flow_set_handle(flow);
        if (priv_flow_process(pmd, attr, items, actions, error, flow, 0))
                goto fail;
        err = nl_send(pmd->nlsk_fd, &msg->nh);
        if (err < 0) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "couldn't send request to kernel");
                goto fail;
        }
        err = nl_recv_ack(pmd->nlsk_fd);
        if (err < 0) {
                RTE_LOG(ERR, PMD,
                        "Kernel refused TC filter rule creation (%d): %s\n",
                        errno, strerror(errno));
                rte_flow_error_set(error, EEXIST, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "overlapping rules");
                goto fail;
        }
        LIST_INSERT_HEAD(&pmd->flows, flow, next);
        /*
         * If a remote device is configured, a TC rule with identical items
         * for matching must be set on that device, with a single action:
         * redirect to the local pmd->if_index.
         */
        if (pmd->remote_if_index) {
                remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
                if (!remote_flow) {
                        rte_flow_error_set(
                                error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                                "cannot allocate memory for rte_flow");
                        goto fail;
                }
                msg = &remote_flow->msg;
                /* set the rule if_index for the remote netdevice */
                tc_init_msg(
                        msg, pmd->remote_if_index, RTM_NEWTFILTER,
                        NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
                msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
                tap_flow_set_handle(remote_flow);
                if (priv_flow_process(pmd, attr, items, NULL,
                                      error, remote_flow, TCA_EGRESS_REDIR)) {
                        rte_flow_error_set(
                                error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "rte flow rule validation failed");
                        goto fail;
                }
                err = nl_send(pmd->nlsk_fd, &msg->nh);
                if (err < 0) {
                        rte_flow_error_set(
                                error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Failure sending nl request");
                        goto fail;
                }
                err = nl_recv_ack(pmd->nlsk_fd);
                if (err < 0) {
                        RTE_LOG(ERR, PMD,
                                "Kernel refused TC filter rule creation (%d): %s\n",
                                errno, strerror(errno));
                        rte_flow_error_set(
                                error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "overlapping rules");
                        goto fail;
                }
                flow->remote_flow = remote_flow;
        }
        return flow;
fail:
        if (remote_flow)
                rte_free(remote_flow);
        if (flow)
                rte_free(flow);
        return NULL;
}
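
/*
 * Illustrative usage from the application side (a sketch, not part of this
 * driver; the port_id variable and queue index are arbitrary): steering TCP
 * over IPv4 to Rx queue 1 ends up in tap_flow_create() above.
 *
 *   struct rte_flow_attr attr = { .ingress = 1 };
 *   struct rte_flow_item pattern[] = {
 *           { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *           { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *           { .type = RTE_FLOW_ITEM_TYPE_TCP },
 *           { .type = RTE_FLOW_ITEM_TYPE_END },
 *   };
 *   struct rte_flow_action_queue queue = { .index = 1 };
 *   struct rte_flow_action actions[] = {
 *           { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *           { .type = RTE_FLOW_ACTION_TYPE_END },
 *   };
 *   struct rte_flow_error err;
 *   struct rte_flow *f = rte_flow_create(port_id, &attr, pattern,
 *                                        actions, &err);
 */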

/**
 * Destroy a flow using pointer to pmd_internal.
 *
 * @param[in, out] pmd
 *   Pointer to private structure.
 * @param[in] flow
 *   Pointer to the flow to destroy.
 * @param[in, out] error
 *   Pointer to the flow error handler.
 *
 * @return 0 if the flow could be destroyed, -1 otherwise.
 */
static int
tap_flow_destroy_pmd(struct pmd_internals *pmd,
                     struct rte_flow *flow,
                     struct rte_flow_error *error)
{
        struct rte_flow *remote_flow = flow->remote_flow;
        int ret = 0;

        LIST_REMOVE(flow, next);
        flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
        flow->msg.nh.nlmsg_type = RTM_DELTFILTER;

        ret = nl_send(pmd->nlsk_fd, &flow->msg.nh);
        if (ret < 0) {
                rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE,
                                   NULL, "couldn't send request to kernel");
                goto end;
        }
        ret = nl_recv_ack(pmd->nlsk_fd);
        /* If errno is ENOENT, the rule is already no longer in the kernel. */
        if (ret < 0 && errno == ENOENT)
                ret = 0;
        if (ret < 0) {
                RTE_LOG(ERR, PMD,
                        "Kernel refused TC filter rule deletion (%d): %s\n",
                        errno, strerror(errno));
                rte_flow_error_set(
                        error, ENOTSUP, RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
                        "couldn't receive kernel ack to our request");
                goto end;
        }
        if (remote_flow) {
                remote_flow->msg.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
                remote_flow->msg.nh.nlmsg_type = RTM_DELTFILTER;

                ret = nl_send(pmd->nlsk_fd, &remote_flow->msg.nh);
                if (ret < 0) {
                        rte_flow_error_set(
                                error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Failure sending nl request");
                        goto end;
                }
                ret = nl_recv_ack(pmd->nlsk_fd);
                if (ret < 0 && errno == ENOENT)
                        ret = 0;
                if (ret < 0) {
                        RTE_LOG(ERR, PMD,
                                "Kernel refused TC filter rule deletion (%d): %s\n",
                                errno, strerror(errno));
                        rte_flow_error_set(
                                error, ENOMEM, RTE_FLOW_ERROR_TYPE_HANDLE,
                                NULL, "Failure trying to receive nl ack");
                        goto end;
                }
        }
end:
        if (remote_flow)
                rte_free(remote_flow);
        rte_free(flow);
        return ret;
}

/**
 * Destroy a flow.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
tap_flow_destroy(struct rte_eth_dev *dev,
                 struct rte_flow *flow,
                 struct rte_flow_error *error)
{
        struct pmd_internals *pmd = dev->data->dev_private;

        return tap_flow_destroy_pmd(pmd, flow, error);
}

/**
 * Destroy all flows.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
int
tap_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
        struct pmd_internals *pmd = dev->data->dev_private;
        struct rte_flow *flow;

        while (!LIST_EMPTY(&pmd->flows)) {
                flow = LIST_FIRST(&pmd->flows);
                if (tap_flow_destroy(dev, flow, error) < 0)
                        return -1;
        }
        return 0;
}

/**
 * Add an implicit flow rule on the remote device to make sure traffic gets to
 * the tap netdevice from there.
 *
 * @param pmd
 *   Pointer to private structure.
 * @param[in] idx
 *   The idx in the implicit_rte_flows array specifying which rule to apply.
 *
 * @return -1 if the rule couldn't be applied, 0 otherwise.
 */
int tap_flow_implicit_create(struct pmd_internals *pmd,
                             enum implicit_rule_index idx)
{
        struct rte_flow_item *items = implicit_rte_flows[idx].items;
        struct rte_flow_attr *attr = &implicit_rte_flows[idx].attr;
        struct rte_flow_item_eth eth_local = { .type = 0 };
        uint16_t if_index = pmd->remote_if_index;
        struct rte_flow *remote_flow = NULL;
        struct nlmsg *msg = NULL;
        int err = 0;
        struct rte_flow_item items_local[2] = {
                [0] = {
                        .type = items[0].type,
                        .spec = &eth_local,
                        .mask = items[0].mask,
                },
                [1] = {
                        .type = items[1].type,
                }
        };

        remote_flow = rte_malloc(__func__, sizeof(struct rte_flow), 0);
        if (!remote_flow) {
                RTE_LOG(ERR, PMD, "Cannot allocate memory for rte_flow\n");
                goto fail;
        }
        msg = &remote_flow->msg;
        if (idx == TAP_REMOTE_TX) {
                if_index = pmd->if_index;
        } else if (idx == TAP_REMOTE_LOCAL_MAC) {
                /*
                 * eth addr couldn't be set in implicit_rte_flows[] as it is not
                 * known at compile time.
                 */
                memcpy(&eth_local.dst, &pmd->eth_addr, sizeof(pmd->eth_addr));
                items = items_local;
        }
        tc_init_msg(msg, if_index, RTM_NEWTFILTER,
                    NLM_F_REQUEST | NLM_F_ACK | NLM_F_EXCL | NLM_F_CREATE);
        msg->t.tcm_info = TC_H_MAKE(0, htons(ETH_P_ALL));
        tap_flow_set_handle(remote_flow);
        if (priv_flow_process(pmd, attr, items, NULL, NULL,
                              remote_flow, implicit_rte_flows[idx].mirred)) {
                RTE_LOG(ERR, PMD, "rte flow rule validation failed\n");
                goto fail;
        }
        err = nl_send(pmd->nlsk_fd, &msg->nh);
        if (err < 0) {
                RTE_LOG(ERR, PMD, "Failure sending nl request\n");
                goto fail;
        }
        err = nl_recv_ack(pmd->nlsk_fd);
        if (err < 0) {
                RTE_LOG(ERR, PMD,
                        "Kernel refused TC filter rule creation (%d): %s\n",
                        errno, strerror(errno));
                goto fail;
        }
        LIST_INSERT_HEAD(&pmd->implicit_flows, remote_flow, next);
        return 0;
fail:
        if (remote_flow)
                rte_free(remote_flow);
        return -1;
}

/**
 * Remove specific implicit flow rule on the remote device.
 *
 * @param[in, out] pmd
 *   Pointer to private structure.
 * @param[in] idx
 *   The idx in the implicit_rte_flows array specifying which rule to remove.
 *
 * @return -1 if the implicit rule couldn't be destroyed, 0 otherwise.
 */
int tap_flow_implicit_destroy(struct pmd_internals *pmd,
                              enum implicit_rule_index idx)
{
        struct rte_flow *remote_flow;
        int cur_prio = -1;
        int idx_prio = implicit_rte_flows[idx].attr.priority + PRIORITY_OFFSET;

        for (remote_flow = LIST_FIRST(&pmd->implicit_flows);
             remote_flow;
             remote_flow = LIST_NEXT(remote_flow, next)) {
                cur_prio = (remote_flow->msg.t.tcm_info >> 16) & PRIORITY_MASK;
                if (cur_prio != idx_prio)
                        continue;
                return tap_flow_destroy_pmd(pmd, remote_flow, NULL);
        }
        return 0;
}

/**
 * Destroy all implicit flows.
 *
 * @see rte_flow_flush()
 */
int
tap_flow_implicit_flush(struct pmd_internals *pmd, struct rte_flow_error *error)
{
        struct rte_flow *remote_flow;

        while (!LIST_EMPTY(&pmd->implicit_flows)) {
                remote_flow = LIST_FIRST(&pmd->implicit_flows);
                if (tap_flow_destroy_pmd(pmd, remote_flow, error) < 0)
                        return -1;
        }
        return 0;
}

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, negative errno value on failure.
 */
int
tap_dev_filter_ctrl(struct rte_eth_dev *dev,
                    enum rte_filter_type filter_type,
                    enum rte_filter_op filter_op,
                    void *arg)
{
        struct pmd_internals *pmd = dev->data->dev_private;

        if (!pmd->flower_support)
                return -ENOTSUP;
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        return -EINVAL;
                *(const void **)arg = &tap_flow_ops;
                return 0;
        default:
                RTE_LOG(ERR, PMD, "%p: filter type (%d) not supported\n",
                        (void *)dev, filter_type);
        }
        return -EINVAL;
}

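/*
 * Illustration (added for clarity): rte_flow resolves the tap_flow_ops
 * callbacks through the generic filter API, conceptually:
 *
 *   const struct rte_flow_ops *ops;
 *   rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                           RTE_ETH_FILTER_GET, &ops);
 *
 * which lands in tap_dev_filter_ctrl() above and hands back &tap_flow_ops.
 */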