New upstream version 17.11-rc3
[deb_dpdk.git] drivers/net/mlx4/mlx4_flow.c
/*-
 *   BSD LICENSE
 *
 *   Copyright 2017 6WIND S.A.
 *   Copyright 2017 Mellanox
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of 6WIND S.A. nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/**
 * @file
 * Flow API operations for mlx4 driver.
 */

#include <arpa/inet.h>
#include <assert.h>
#include <errno.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/queue.h>

/* Verbs headers do not support -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>

/* PMD headers. */
#include "mlx4.h"
#include "mlx4_flow.h"
#include "mlx4_rxtx.h"
#include "mlx4_utils.h"

/** Static initializer for a list of subsequent item types. */
#define NEXT_ITEM(...) \
        (const enum rte_flow_item_type []){ \
                __VA_ARGS__, RTE_FLOW_ITEM_TYPE_END, \
        }

/** Processor structure associated with a flow item. */
struct mlx4_flow_proc_item {
        /** Bit-mask for fields supported by this PMD. */
        const void *mask_support;
        /** Bit-mask to use when @p item->mask is not provided. */
        const void *mask_default;
        /** Size in bytes for @p mask_support and @p mask_default. */
        const unsigned int mask_sz;
        /** Merge a pattern item into a flow rule handle. */
        int (*merge)(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error);
        /** Size in bytes of the destination structure. */
        const unsigned int dst_sz;
        /** List of possible subsequent items. */
        const enum rte_flow_item_type *const next_item;
};

/** Shared resources for drop flow rules. */
struct mlx4_drop {
        struct ibv_qp *qp; /**< QP target. */
        struct ibv_cq *cq; /**< CQ associated with above QP. */
        struct priv *priv; /**< Back pointer to private data. */
        uint32_t refcnt; /**< Reference count. */
};

/**
 * Convert DPDK RSS hash fields to their Verbs equivalent.
 *
 * @param rss_hf
 *   Hash fields in DPDK format (see struct rte_eth_rss_conf).
 *
 * @return
 *   A valid Verbs RSS hash fields mask for mlx4 on success, (uint64_t)-1
 *   otherwise and rte_errno is set.
 */
static uint64_t
mlx4_conv_rss_hf(uint64_t rss_hf)
{
        enum { IPV4, IPV6, TCP, UDP, };
        const uint64_t in[] = {
                [IPV4] = (ETH_RSS_IPV4 |
                          ETH_RSS_FRAG_IPV4 |
                          ETH_RSS_NONFRAG_IPV4_TCP |
                          ETH_RSS_NONFRAG_IPV4_UDP |
                          ETH_RSS_NONFRAG_IPV4_OTHER),
                [IPV6] = (ETH_RSS_IPV6 |
                          ETH_RSS_FRAG_IPV6 |
                          ETH_RSS_NONFRAG_IPV6_TCP |
                          ETH_RSS_NONFRAG_IPV6_UDP |
                          ETH_RSS_NONFRAG_IPV6_OTHER |
                          ETH_RSS_IPV6_EX |
                          ETH_RSS_IPV6_TCP_EX |
                          ETH_RSS_IPV6_UDP_EX),
                [TCP] = (ETH_RSS_NONFRAG_IPV4_TCP |
                         ETH_RSS_NONFRAG_IPV6_TCP |
                         ETH_RSS_IPV6_TCP_EX),
                /*
                 * UDP support is temporarily disabled due to an
                 * implementation issue in the kernel.
                 */
                [UDP] = 0,
        };
        const uint64_t out[RTE_DIM(in)] = {
                [IPV4] = IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4,
                [IPV6] = IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6,
                [TCP] = IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP,
                [UDP] = IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP,
        };
        uint64_t seen = 0;
        uint64_t conv = 0;
        unsigned int i;

        for (i = 0; i != RTE_DIM(in); ++i)
                if (rss_hf & in[i]) {
                        seen |= rss_hf & in[i];
                        conv |= out[i];
                }
        if (!(rss_hf & ~seen))
                return conv;
        rte_errno = ENOTSUP;
        return (uint64_t)-1;
}
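
/*
 * Usage sketch for mlx4_conv_rss_hf() (illustrative only, derived from
 * the in[]/out[] tables above):
 *
 *     uint64_t fields = mlx4_conv_rss_hf(ETH_RSS_IPV4 |
 *                                        ETH_RSS_NONFRAG_IPV4_TCP);
 *     // fields == IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4 |
 *     //           IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP
 *
 *     // A request with an unsupported field, e.g. ETH_RSS_L2_PAYLOAD,
 *     // returns (uint64_t)-1 and sets rte_errno to ENOTSUP.
 */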

/**
 * Merge Ethernet pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks, except in the specific case of matching
 *   all multicast traffic (@p spec->dst and @p mask->dst equal to
 *   01:00:00:00:00:00).
 * - Not providing @p item->spec or providing an empty @p mask->dst is
 *   *only* supported if the rule doesn't specify additional matching
 *   criteria (i.e. rule is promiscuous-like).
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_eth(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_eth *spec = item->spec;
        const struct rte_flow_item_eth *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_eth *eth;
        const char *msg;
        unsigned int i;

        if (!mask) {
                flow->promisc = 1;
        } else {
                uint32_t sum_dst = 0;
                uint32_t sum_src = 0;

                for (i = 0; i != sizeof(mask->dst.addr_bytes); ++i) {
                        sum_dst += mask->dst.addr_bytes[i];
                        sum_src += mask->src.addr_bytes[i];
                }
                if (sum_src) {
                        msg = "mlx4 does not support source MAC matching";
                        goto error;
                } else if (!sum_dst) {
                        flow->promisc = 1;
                } else if (sum_dst == 1 && mask->dst.addr_bytes[0] == 1) {
                        if (!(spec->dst.addr_bytes[0] & 1)) {
                                msg = "mlx4 does not support the explicit"
                                        " exclusion of all multicast traffic";
                                goto error;
                        }
                        flow->allmulti = 1;
                } else if (sum_dst != (UINT8_C(0xff) * ETHER_ADDR_LEN)) {
                        msg = "mlx4 does not support matching partial"
                                " Ethernet fields";
                        goto error;
                }
        }
        if (!flow->ibv_attr)
                return 0;
        if (flow->promisc) {
                flow->ibv_attr->type = IBV_FLOW_ATTR_ALL_DEFAULT;
                return 0;
        }
        if (flow->allmulti) {
                flow->ibv_attr->type = IBV_FLOW_ATTR_MC_DEFAULT;
                return 0;
        }
        ++flow->ibv_attr->num_of_specs;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *eth = (struct ibv_flow_spec_eth) {
                .type = IBV_FLOW_SPEC_ETH,
                .size = sizeof(*eth),
        };
        memcpy(eth->val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
        memcpy(eth->mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
        /* Remove unwanted bits from values. */
        for (i = 0; i < ETHER_ADDR_LEN; ++i)
                eth->val.dst_mac[i] &= eth->mask.dst_mac[i];
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}
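
/*
 * Examples for mlx4_flow_merge_eth() (illustrative only), following the
 * constraints documented above:
 *
 *     // Accepted: exact match on a unicast destination MAC.
 *     struct rte_flow_item_eth spec = {
 *             .dst.addr_bytes = "\x00\x0a\x0b\x0c\x0d\x0e",
 *     };
 *     struct rte_flow_item_eth mask = {
 *             .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
 *     };
 *
 *     // Accepted: catch-all multicast, dst and mask 01:00:00:00:00:00.
 *     // Rejected (ENOTSUP): any source MAC mask, any other partial mask.
 */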

/**
 * Merge VLAN pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - Matching *all* VLAN traffic by omitting @p item->spec or providing an
 *   empty @p item->mask would also include non-VLAN traffic. Doing so is
 *   therefore unsupported.
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_vlan(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const struct rte_flow_item_vlan *spec = item->spec;
        const struct rte_flow_item_vlan *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_eth *eth;
        const char *msg;

        if (!mask || !mask->tci) {
                msg = "mlx4 cannot match all VLAN traffic while excluding"
                        " non-VLAN traffic, TCI VID must be specified";
                goto error;
        }
        if (mask->tci != RTE_BE16(0x0fff)) {
                msg = "mlx4 does not support partial TCI VID matching";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        eth = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size -
                       sizeof(*eth));
        eth->val.vlan_tag = spec->tci;
        eth->mask.vlan_tag = mask->tci;
        eth->val.vlan_tag &= eth->mask.vlan_tag;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}
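
/*
 * Example for mlx4_flow_merge_vlan() (illustrative only): a VLAN item
 * is assumed to need a full TCI VID mask to be accepted.
 *
 *     struct rte_flow_item_vlan spec = { .tci = RTE_BE16(42) };
 *     struct rte_flow_item_vlan mask = { .tci = RTE_BE16(0x0fff) };
 *
 *     // Omitting spec/mask or masking fewer VID bits fails (ENOTSUP).
 */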

/**
 * Merge IPv4 pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_ipv4(struct rte_flow *flow,
                     const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const struct rte_flow_item_ipv4 *spec = item->spec;
        const struct rte_flow_item_ipv4 *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_ipv4 *ipv4;
        const char *msg;

        if (mask &&
            ((uint32_t)(mask->hdr.src_addr + 1) > UINT32_C(1) ||
             (uint32_t)(mask->hdr.dst_addr + 1) > UINT32_C(1))) {
                msg = "mlx4 does not support matching partial IPv4 fields";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        ipv4 = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *ipv4 = (struct ibv_flow_spec_ipv4) {
                .type = IBV_FLOW_SPEC_IPV4,
                .size = sizeof(*ipv4),
        };
        if (!spec)
                return 0;
        ipv4->val = (struct ibv_flow_ipv4_filter) {
                .src_ip = spec->hdr.src_addr,
                .dst_ip = spec->hdr.dst_addr,
        };
        ipv4->mask = (struct ibv_flow_ipv4_filter) {
                .src_ip = mask->hdr.src_addr,
                .dst_ip = mask->hdr.dst_addr,
        };
        /* Remove unwanted bits from values. */
        ipv4->val.src_ip &= ipv4->mask.src_ip;
        ipv4->val.dst_ip &= ipv4->mask.dst_ip;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}
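
/*
 * Note on the partial-mask test above: for an unsigned mask m, the
 * expression (m + 1) > 1 is true unless m is empty (m == 0) or full
 * (all-ones, where m + 1 wraps to 0). With a 32-bit address mask:
 *
 *     0x00000000 + 1 == 1          // empty mask, accepted
 *     0xffffffff + 1 == 0 (wraps)  // full mask, accepted
 *     0xffffff00 + 1 == 0xffffff01 // partial mask, rejected
 *
 * The same test guards the 16-bit port masks in the UDP and TCP
 * handlers below.
 */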

/**
 * Merge UDP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_udp(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_udp *spec = item->spec;
        const struct rte_flow_item_udp *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_tcp_udp *udp;
        const char *msg;

        if (mask &&
            ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
             (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
                msg = "mlx4 does not support matching partial UDP fields";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        udp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *udp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_UDP,
                .size = sizeof(*udp),
        };
        if (!spec)
                return 0;
        udp->val.dst_port = spec->hdr.dst_port;
        udp->val.src_port = spec->hdr.src_port;
        udp->mask.dst_port = mask->hdr.dst_port;
        udp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        udp->val.src_port &= udp->mask.src_port;
        udp->val.dst_port &= udp->mask.dst_port;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}

/**
 * Merge TCP pattern item into flow rule handle.
 *
 * Additional mlx4-specific constraints on supported fields:
 *
 * - No support for partial masks.
 *
 * @param[in, out] flow
 *   Flow rule handle to update.
 * @param[in] item
 *   Pattern item to merge.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_merge_tcp(struct rte_flow *flow,
                    const struct rte_flow_item *item,
                    const struct mlx4_flow_proc_item *proc,
                    struct rte_flow_error *error)
{
        const struct rte_flow_item_tcp *spec = item->spec;
        const struct rte_flow_item_tcp *mask =
                spec ? (item->mask ? item->mask : proc->mask_default) : NULL;
        struct ibv_flow_spec_tcp_udp *tcp;
        const char *msg;

        if (mask &&
            ((uint16_t)(mask->hdr.src_port + 1) > UINT16_C(1) ||
             (uint16_t)(mask->hdr.dst_port + 1) > UINT16_C(1))) {
                msg = "mlx4 does not support matching partial TCP fields";
                goto error;
        }
        if (!flow->ibv_attr)
                return 0;
        ++flow->ibv_attr->num_of_specs;
        tcp = (void *)((uintptr_t)flow->ibv_attr + flow->ibv_attr_size);
        *tcp = (struct ibv_flow_spec_tcp_udp) {
                .type = IBV_FLOW_SPEC_TCP,
                .size = sizeof(*tcp),
        };
        if (!spec)
                return 0;
        tcp->val.dst_port = spec->hdr.dst_port;
        tcp->val.src_port = spec->hdr.src_port;
        tcp->mask.dst_port = mask->hdr.dst_port;
        tcp->mask.src_port = mask->hdr.src_port;
        /* Remove unwanted bits from values. */
        tcp->val.src_port &= tcp->mask.src_port;
        tcp->val.dst_port &= tcp->mask.dst_port;
        return 0;
error:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg);
}

/**
 * Perform basic sanity checks on a pattern item.
 *
 * @param[in] item
 *   Item specification.
 * @param[in] proc
 *   Associated item-processing object.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_item_check(const struct rte_flow_item *item,
                     const struct mlx4_flow_proc_item *proc,
                     struct rte_flow_error *error)
{
        const uint8_t *mask;
        unsigned int i;

        /* item->last and item->mask cannot exist without item->spec. */
        if (!item->spec && (item->mask || item->last))
                return rte_flow_error_set
                        (error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM, item,
                         "\"mask\" or \"last\" field provided without a"
                         " corresponding \"spec\"");
        /* No spec, no mask, no problem. */
        if (!item->spec)
                return 0;
        mask = item->mask ?
                (const uint8_t *)item->mask :
                (const uint8_t *)proc->mask_default;
        assert(mask);
        /*
         * Single-pass check to make sure that:
         * - Mask is supported, no bits are set outside proc->mask_support.
         * - Both item->spec and item->last are included in mask.
         */
        for (i = 0; i != proc->mask_sz; ++i) {
                if (!mask[i])
                        continue;
                if ((mask[i] | ((const uint8_t *)proc->mask_support)[i]) !=
                    ((const uint8_t *)proc->mask_support)[i])
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                 item, "unsupported field found in \"mask\"");
                if (item->last &&
                    (((const uint8_t *)item->spec)[i] & mask[i]) !=
                    (((const uint8_t *)item->last)[i] & mask[i]))
                        return rte_flow_error_set
                                (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                 item,
                                 "range between \"spec\" and \"last\""
                                 " is larger than \"mask\"");
        }
        return 0;
}
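
/*
 * Example for mlx4_flow_item_check() (illustrative only): with the ETH
 * entry below, whose mask_support covers the destination MAC only, an
 * item whose mask sets eth.type bits is rejected, and so is a spec/last
 * range wider than the mask, e.g.:
 *
 *     spec.dst = 00:0a:0b:0c:0d:00
 *     last.dst = 00:0a:0b:0c:0d:0f
 *     mask.dst = ff:ff:ff:ff:ff:ff  // spec & mask != last & mask
 */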

/** Graph of supported items and associated actions. */
static const struct mlx4_flow_proc_item mlx4_flow_proc_item_list[] = {
        [RTE_FLOW_ITEM_TYPE_END] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_ETH),
        },
        [RTE_FLOW_ITEM_TYPE_ETH] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_VLAN,
                                       RTE_FLOW_ITEM_TYPE_IPV4),
                .mask_support = &(const struct rte_flow_item_eth){
                        /* Only destination MAC can be matched. */
                        .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                },
                .mask_default = &rte_flow_item_eth_mask,
                .mask_sz = sizeof(struct rte_flow_item_eth),
                .merge = mlx4_flow_merge_eth,
                .dst_sz = sizeof(struct ibv_flow_spec_eth),
        },
        [RTE_FLOW_ITEM_TYPE_VLAN] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_IPV4),
                .mask_support = &(const struct rte_flow_item_vlan){
                        /* Only TCI VID matching is supported. */
                        .tci = RTE_BE16(0x0fff),
                },
                .mask_default = &rte_flow_item_vlan_mask,
                .mask_sz = sizeof(struct rte_flow_item_vlan),
                .merge = mlx4_flow_merge_vlan,
                .dst_sz = 0,
        },
        [RTE_FLOW_ITEM_TYPE_IPV4] = {
                .next_item = NEXT_ITEM(RTE_FLOW_ITEM_TYPE_UDP,
                                       RTE_FLOW_ITEM_TYPE_TCP),
                .mask_support = &(const struct rte_flow_item_ipv4){
                        .hdr = {
                                .src_addr = RTE_BE32(0xffffffff),
                                .dst_addr = RTE_BE32(0xffffffff),
                        },
                },
                .mask_default = &rte_flow_item_ipv4_mask,
                .mask_sz = sizeof(struct rte_flow_item_ipv4),
                .merge = mlx4_flow_merge_ipv4,
                .dst_sz = sizeof(struct ibv_flow_spec_ipv4),
        },
        [RTE_FLOW_ITEM_TYPE_UDP] = {
                .mask_support = &(const struct rte_flow_item_udp){
                        .hdr = {
                                .src_port = RTE_BE16(0xffff),
                                .dst_port = RTE_BE16(0xffff),
                        },
                },
                .mask_default = &rte_flow_item_udp_mask,
                .mask_sz = sizeof(struct rte_flow_item_udp),
                .merge = mlx4_flow_merge_udp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
        [RTE_FLOW_ITEM_TYPE_TCP] = {
                .mask_support = &(const struct rte_flow_item_tcp){
                        .hdr = {
                                .src_port = RTE_BE16(0xffff),
                                .dst_port = RTE_BE16(0xffff),
                        },
                },
                .mask_default = &rte_flow_item_tcp_mask,
                .mask_sz = sizeof(struct rte_flow_item_tcp),
                .merge = mlx4_flow_merge_tcp,
                .dst_sz = sizeof(struct ibv_flow_spec_tcp_udp),
        },
};
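
/*
 * The graph above encodes the only pattern sequences accepted by this
 * PMD; leaving VOID and INTERNAL items aside, a valid pattern is thus
 * assumed to follow (and may stop after any item):
 *
 *     ETH [-> VLAN] [-> IPV4 [-> UDP | TCP]]
 *
 * e.g. ETH/IPV4/UDP is accepted while IPV4 alone or ETH/UDP is not.
 */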

/**
 * Make sure a flow rule is supported and initialize associated structure.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[in] attr
 *   Flow rule attributes.
 * @param[in] pattern
 *   Pattern specification (list terminated by the END pattern item).
 * @param[in] actions
 *   Associated actions (list terminated by the END action).
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 * @param[in, out] addr
 *   Buffer where the resulting flow rule handle pointer must be stored.
 *   If NULL, stop processing after validation stage.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_prepare(struct priv *priv,
                  const struct rte_flow_attr *attr,
                  const struct rte_flow_item pattern[],
                  const struct rte_flow_action actions[],
                  struct rte_flow_error *error,
                  struct rte_flow **addr)
{
        const struct rte_flow_item *item;
        const struct rte_flow_action *action;
        const struct mlx4_flow_proc_item *proc;
        struct rte_flow temp = { .ibv_attr_size = sizeof(*temp.ibv_attr) };
        struct rte_flow *flow = &temp;
        const char *msg = NULL;

        if (attr->group)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                         NULL, "groups are not supported");
        if (attr->priority > MLX4_FLOW_PRIORITY_LAST)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                         NULL, "maximum priority level is "
                         MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST));
        if (attr->egress)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                         NULL, "egress is not supported");
        if (!attr->ingress)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                         NULL, "only ingress is supported");
fill:
        proc = mlx4_flow_proc_item_list;
        /* Go over pattern. */
        for (item = pattern; item->type; ++item) {
                const struct mlx4_flow_proc_item *next = NULL;
                unsigned int i;
                int err;

                if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
                        continue;
                if (item->type == MLX4_FLOW_ITEM_TYPE_INTERNAL) {
                        flow->internal = 1;
                        continue;
                }
                if (flow->promisc || flow->allmulti) {
                        msg = "mlx4 does not support additional matching"
                                " criteria combined with indiscriminate"
                                " matching on Ethernet headers";
                        goto exit_item_not_supported;
                }
                for (i = 0; proc->next_item && proc->next_item[i]; ++i) {
                        if (proc->next_item[i] == item->type) {
                                next = &mlx4_flow_proc_item_list[item->type];
                                break;
                        }
                }
                if (!next)
                        goto exit_item_not_supported;
                proc = next;
                /*
                 * Perform basic sanity checks only once, while handle is
                 * not allocated.
                 */
                if (flow == &temp) {
                        err = mlx4_flow_item_check(item, proc, error);
                        if (err)
                                return err;
                }
                if (proc->merge) {
                        err = proc->merge(flow, item, proc, error);
                        if (err)
                                return err;
                }
                flow->ibv_attr_size += proc->dst_sz;
        }
        /* Go over actions list. */
        for (action = actions; action->type; ++action) {
                switch (action->type) {
                        const struct rte_flow_action_queue *queue;
                        const struct rte_flow_action_rss *rss;
                        const struct rte_eth_rss_conf *rss_conf;
                        unsigned int i;

                case RTE_FLOW_ACTION_TYPE_VOID:
                        continue;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        flow->drop = 1;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        if (flow->rss)
                                break;
                        queue = action->conf;
                        if (queue->index >= priv->dev->data->nb_rx_queues) {
                                msg = "queue target index beyond number of"
                                        " configured Rx queues";
                                goto exit_action_not_supported;
                        }
                        flow->rss = mlx4_rss_get
                                (priv, 0, mlx4_rss_hash_key_default, 1,
                                 &queue->index);
                        if (!flow->rss) {
                                msg = "not enough resources for additional"
                                        " single-queue RSS context";
                                goto exit_action_not_supported;
                        }
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        if (flow->rss)
                                break;
                        rss = action->conf;
                        /* Default RSS configuration if none is provided. */
                        rss_conf =
                                rss->rss_conf ?
                                rss->rss_conf :
                                &(struct rte_eth_rss_conf){
                                        .rss_key = mlx4_rss_hash_key_default,
                                        .rss_key_len = MLX4_RSS_HASH_KEY_SIZE,
                                        .rss_hf = (ETH_RSS_IPV4 |
                                                   ETH_RSS_NONFRAG_IPV4_TCP |
                                                   ETH_RSS_IPV6 |
                                                   ETH_RSS_NONFRAG_IPV6_TCP),
                                };
                        /* Sanity checks. */
                        for (i = 0; i < rss->num; ++i)
                                if (rss->queue[i] >=
                                    priv->dev->data->nb_rx_queues)
                                        break;
                        if (i != rss->num) {
                                msg = "queue target index beyond number of"
                                        " configured Rx queues";
                                goto exit_action_not_supported;
                        }
                        if (!rte_is_power_of_2(rss->num)) {
                                msg = "for RSS, mlx4 requires the number of"
                                        " queues to be a power of two";
                                goto exit_action_not_supported;
                        }
                        if (rss_conf->rss_key_len !=
                            sizeof(flow->rss->key)) {
                                msg = "mlx4 supports exactly one RSS hash key"
                                        " length: "
                                        MLX4_STR_EXPAND(MLX4_RSS_HASH_KEY_SIZE);
                                goto exit_action_not_supported;
                        }
                        for (i = 1; i < rss->num; ++i)
                                if (rss->queue[i] - rss->queue[i - 1] != 1)
                                        break;
                        if (i != rss->num) {
                                msg = "mlx4 requires RSS contexts to use"
                                        " consecutive queue indices only";
                                goto exit_action_not_supported;
                        }
                        if (rss->queue[0] % rss->num) {
                                msg = "mlx4 requires the first queue of an RSS"
                                        " context to be aligned on a multiple"
                                        " of the context size";
                                goto exit_action_not_supported;
                        }
                        flow->rss = mlx4_rss_get
                                (priv, mlx4_conv_rss_hf(rss_conf->rss_hf),
                                 rss_conf->rss_key, rss->num, rss->queue);
                        if (!flow->rss) {
                                msg = "either invalid parameters or not enough"
                                        " resources for additional multi-queue"
                                        " RSS context";
                                goto exit_action_not_supported;
                        }
                        break;
                default:
                        goto exit_action_not_supported;
                }
        }
        if (!flow->rss && !flow->drop)
                return rte_flow_error_set
                        (error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                         NULL, "no valid action");
        /* Validation ends here. */
        if (!addr) {
                if (flow->rss)
                        mlx4_rss_put(flow->rss);
                return 0;
        }
        if (flow == &temp) {
                /* Allocate proper handle based on collected data. */
                const struct mlx4_malloc_vec vec[] = {
                        {
                                .align = alignof(struct rte_flow),
                                .size = sizeof(*flow),
                                .addr = (void **)&flow,
                        },
                        {
                                .align = alignof(struct ibv_flow_attr),
                                .size = temp.ibv_attr_size,
                                .addr = (void **)&temp.ibv_attr,
                        },
                };

                if (!mlx4_zmallocv(__func__, vec, RTE_DIM(vec)))
                        return rte_flow_error_set
                                (error, -rte_errno,
                                 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                 "flow rule handle allocation failure");
                /* Most fields will be updated by second pass. */
                *flow = (struct rte_flow){
                        .ibv_attr = temp.ibv_attr,
                        .ibv_attr_size = sizeof(*flow->ibv_attr),
                        .rss = temp.rss,
                };
                *flow->ibv_attr = (struct ibv_flow_attr){
                        .type = IBV_FLOW_ATTR_NORMAL,
                        .size = sizeof(*flow->ibv_attr),
                        .priority = attr->priority,
                        .port = priv->port,
                };
                goto fill;
        }
        *addr = flow;
        return 0;
exit_item_not_supported:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
                                  item, msg ? msg : "item not supported");
exit_action_not_supported:
        return rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
                                  action, msg ? msg : "action not supported");
}
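
/*
 * mlx4_flow_prepare() is deliberately two-pass: the first pass (while
 * flow == &temp) only validates and accumulates ibv_attr_size, and once
 * the real handle is allocated, "goto fill" walks the pattern again to
 * emit the Verbs specs. A minimal validation-only sketch (illustrative,
 * assuming attr/pattern/actions are already set up):
 *
 *     struct rte_flow_error error;
 *
 *     if (mlx4_flow_prepare(priv, &attr, pattern, actions, &error, NULL))
 *             return -rte_errno; // unsupported rule, details in error
 */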

/**
 * Validate a flow supported by the NIC.
 *
 * @see rte_flow_validate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_validate(struct rte_eth_dev *dev,
                   const struct rte_flow_attr *attr,
                   const struct rte_flow_item pattern[],
                   const struct rte_flow_action actions[],
                   struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        return mlx4_flow_prepare(priv, attr, pattern, actions, error, NULL);
}

/**
 * Get a drop flow rule resources instance.
 *
 * @param priv
 *   Pointer to private structure.
 *
 * @return
 *   Pointer to drop flow resources on success, NULL otherwise and rte_errno
 *   is set.
 */
static struct mlx4_drop *
mlx4_drop_get(struct priv *priv)
{
        struct mlx4_drop *drop = priv->drop;

        if (drop) {
                assert(drop->refcnt);
                assert(drop->priv == priv);
                ++drop->refcnt;
                return drop;
        }
        drop = rte_malloc(__func__, sizeof(*drop), 0);
        if (!drop)
                goto error;
        *drop = (struct mlx4_drop){
                .priv = priv,
                .refcnt = 1,
        };
        drop->cq = ibv_create_cq(priv->ctx, 1, NULL, NULL, 0);
        if (!drop->cq)
                goto error;
        drop->qp = ibv_create_qp(priv->pd,
                                 &(struct ibv_qp_init_attr){
                                        .send_cq = drop->cq,
                                        .recv_cq = drop->cq,
                                        .qp_type = IBV_QPT_RAW_PACKET,
                                 });
        if (!drop->qp)
                goto error;
        priv->drop = drop;
        return drop;
error:
        /* Do not dereference drop when rte_malloc() itself failed. */
        if (drop) {
                if (drop->qp)
                        claim_zero(ibv_destroy_qp(drop->qp));
                if (drop->cq)
                        claim_zero(ibv_destroy_cq(drop->cq));
                rte_free(drop);
        }
        rte_errno = ENOMEM;
        return NULL;
}

/**
 * Give back a drop flow rule resources instance.
 *
 * @param drop
 *   Pointer to drop flow rule resources.
 */
static void
mlx4_drop_put(struct mlx4_drop *drop)
{
        assert(drop->refcnt);
        if (--drop->refcnt)
                return;
        drop->priv->drop = NULL;
        claim_zero(ibv_destroy_qp(drop->qp));
        claim_zero(ibv_destroy_cq(drop->cq));
        rte_free(drop);
}
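
/*
 * Usage sketch for the shared drop resources (illustrative only):
 * mlx4_drop_get() and mlx4_drop_put() pair like any reference counter,
 * with the QP/CQ destroyed on the last release.
 *
 *     if (!mlx4_drop_get(priv))
 *             return -rte_errno;        // ENOMEM
 *     qp = priv->drop->qp;              // QP that silently drops traffic
 *     ...
 *     mlx4_drop_put(priv->drop);
 */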

/**
 * Toggle a configured flow rule.
 *
 * @param priv
 *   Pointer to private structure.
 * @param flow
 *   Flow rule handle to toggle.
 * @param enable
 *   Whether associated Verbs flow must be created or removed.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
mlx4_flow_toggle(struct priv *priv,
                 struct rte_flow *flow,
                 int enable,
                 struct rte_flow_error *error)
{
        struct ibv_qp *qp = NULL;
        const char *msg;
        int err;

        if (!enable) {
                if (!flow->ibv_flow)
                        return 0;
                claim_zero(ibv_destroy_flow(flow->ibv_flow));
                flow->ibv_flow = NULL;
                if (flow->drop)
                        mlx4_drop_put(priv->drop);
                else if (flow->rss)
                        mlx4_rss_detach(flow->rss);
                return 0;
        }
        assert(flow->ibv_attr);
        if (!flow->internal &&
            !priv->isolated &&
            flow->ibv_attr->priority == MLX4_FLOW_PRIORITY_LAST) {
                if (flow->ibv_flow) {
                        claim_zero(ibv_destroy_flow(flow->ibv_flow));
                        flow->ibv_flow = NULL;
                        if (flow->drop)
                                mlx4_drop_put(priv->drop);
                        else if (flow->rss)
                                mlx4_rss_detach(flow->rss);
                }
                err = EACCES;
                msg = ("priority level "
                       MLX4_STR_EXPAND(MLX4_FLOW_PRIORITY_LAST)
                       " is reserved when not in isolated mode");
                goto error;
        }
        if (flow->rss) {
                struct mlx4_rss *rss = flow->rss;
                int missing = 0;
                unsigned int i;

                /* Stop at the first nonexistent target queue. */
                for (i = 0; i != rss->queues; ++i)
                        if (rss->queue_id[i] >=
                            priv->dev->data->nb_rx_queues ||
                            !priv->dev->data->rx_queues[rss->queue_id[i]]) {
                                missing = 1;
                                break;
                        }
                if (flow->ibv_flow) {
                        if (missing ^ !flow->drop)
                                return 0;
                        /* Verbs flow needs updating. */
                        claim_zero(ibv_destroy_flow(flow->ibv_flow));
                        flow->ibv_flow = NULL;
                        if (flow->drop)
                                mlx4_drop_put(priv->drop);
                        else
                                mlx4_rss_detach(rss);
                }
                if (!missing) {
                        err = mlx4_rss_attach(rss);
                        if (err) {
                                err = -err;
                                msg = "cannot create indirection table or hash"
                                        " QP to associate flow rule with";
                                goto error;
                        }
                        qp = rss->qp;
                }
                /* A missing target queue drops traffic implicitly. */
                flow->drop = missing;
        }
        if (flow->drop) {
                mlx4_drop_get(priv);
                if (!priv->drop) {
                        err = rte_errno;
                        msg = "resources for drop flow rule cannot be created";
                        goto error;
                }
                qp = priv->drop->qp;
        }
        assert(qp);
        if (flow->ibv_flow)
                return 0;
        flow->ibv_flow = ibv_create_flow(qp, flow->ibv_attr);
        if (flow->ibv_flow)
                return 0;
        if (flow->drop)
                mlx4_drop_put(priv->drop);
        else if (flow->rss)
                mlx4_rss_detach(flow->rss);
        err = errno;
        msg = "flow rule rejected by device";
error:
        return rte_flow_error_set
                (error, err, RTE_FLOW_ERROR_TYPE_HANDLE, flow, msg);
}
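
/*
 * Note that mlx4_flow_toggle() is idempotent in both directions:
 * disabling a rule without a Verbs flow and re-enabling one that is
 * already attached are both no-ops. This property is assumed to let
 * callers such as mlx4_flow_sync() replay the entire rule list on
 * device start/stop without tracking per-rule state.
 */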

/**
 * Create a flow.
 *
 * @see rte_flow_create()
 * @see rte_flow_ops
 */
static struct rte_flow *
mlx4_flow_create(struct rte_eth_dev *dev,
                 const struct rte_flow_attr *attr,
                 const struct rte_flow_item pattern[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow;
        int err;

        err = mlx4_flow_prepare(priv, attr, pattern, actions, error, &flow);
        if (err)
                return NULL;
        err = mlx4_flow_toggle(priv, flow, priv->started, error);
        if (!err) {
                struct rte_flow *curr = LIST_FIRST(&priv->flows);

                /* New rules are inserted after internal ones. */
                if (!curr || !curr->internal) {
                        LIST_INSERT_HEAD(&priv->flows, flow, next);
                } else {
                        while (LIST_NEXT(curr, next) &&
                               LIST_NEXT(curr, next)->internal)
                                curr = LIST_NEXT(curr, next);
                        LIST_INSERT_AFTER(curr, flow, next);
                }
                return flow;
        }
        if (flow->rss)
                mlx4_rss_put(flow->rss);
        rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                           error->message);
        rte_free(flow);
        return NULL;
}

/**
 * Configure isolated mode.
 *
 * @see rte_flow_isolate()
 * @see rte_flow_ops
 */
static int
mlx4_flow_isolate(struct rte_eth_dev *dev,
                  int enable,
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;

        if (!!enable == !!priv->isolated)
                return 0;
        priv->isolated = !!enable;
        if (mlx4_flow_sync(priv, error)) {
                priv->isolated = !enable;
                return -rte_errno;
        }
        return 0;
}

/**
 * Destroy a flow rule.
 *
 * @see rte_flow_destroy()
 * @see rte_flow_ops
 */
static int
mlx4_flow_destroy(struct rte_eth_dev *dev,
                  struct rte_flow *flow,
                  struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        int err = mlx4_flow_toggle(priv, flow, 0, error);

        if (err)
                return err;
        LIST_REMOVE(flow, next);
        if (flow->rss)
                mlx4_rss_put(flow->rss);
        rte_free(flow);
        return 0;
}

/**
 * Destroy user-configured flow rules.
 *
 * This function skips internal flow rules.
 *
 * @see rte_flow_flush()
 * @see rte_flow_ops
 */
static int
mlx4_flow_flush(struct rte_eth_dev *dev,
                struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct rte_flow *flow = LIST_FIRST(&priv->flows);

        while (flow) {
                struct rte_flow *next = LIST_NEXT(flow, next);

                if (!flow->internal)
                        mlx4_flow_destroy(dev, flow, error);
                flow = next;
        }
        return 0;
}

/**
 * Helper function to determine the next configured VLAN filter.
 *
 * @param priv
 *   Pointer to private structure.
 * @param vlan
 *   VLAN ID to use as a starting point.
 *
 * @return
 *   Next configured VLAN ID or a high value (>= 4096) if there is none.
 */
static uint16_t
mlx4_flow_internal_next_vlan(struct priv *priv, uint16_t vlan)
{
        while (vlan < 4096) {
                if (priv->dev->data->vlan_filter_conf.ids[vlan / 64] &
                    (UINT64_C(1) << (vlan % 64)))
                        return vlan;
                ++vlan;
        }
        return vlan;
}
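
/*
 * Iteration sketch for mlx4_flow_internal_next_vlan() (illustrative
 * only), matching how mlx4_flow_internal() below steps through the
 * configured VLAN filters:
 *
 *     for (vlan = mlx4_flow_internal_next_vlan(priv, 0);
 *          vlan < 4096;
 *          vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1))
 *             ...; // one set of MAC rules per configured VLAN
 */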
1212
1213 /**
1214  * Generate internal flow rules.
1215  *
1216  * Various flow rules are created depending on the mode the device is in:
1217  *
1218  * 1. Promiscuous: port MAC + catch-all (VLAN filtering is ignored).
1219  * 2. All multicast: port MAC/VLAN + catch-all multicast.
1220  * 3. Otherwise: port MAC/VLAN + broadcast MAC/VLAN.
1221  *
1222  * About MAC flow rules:
1223  *
1224  * - MAC flow rules are generated from @p dev->data->mac_addrs
1225  *   (@p priv->mac array).
1226  * - An additional flow rule for Ethernet broadcasts is also generated.
1227  * - All these are per-VLAN if @p dev->data->dev_conf.rxmode.hw_vlan_filter
1228  *   is enabled and VLAN filters are configured.
1229  *
1230  * @param priv
1231  *   Pointer to private structure.
1232  * @param[out] error
1233  *   Perform verbose error reporting if not NULL.
1234  *
1235  * @return
1236  *   0 on success, a negative errno value otherwise and rte_errno is set.
1237  */
1238 static int
1239 mlx4_flow_internal(struct priv *priv, struct rte_flow_error *error)
1240 {
1241         struct rte_flow_attr attr = {
1242                 .priority = MLX4_FLOW_PRIORITY_LAST,
1243                 .ingress = 1,
1244         };
1245         struct rte_flow_item_eth eth_spec;
1246         const struct rte_flow_item_eth eth_mask = {
1247                 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1248         };
1249         const struct rte_flow_item_eth eth_allmulti = {
1250                 .dst.addr_bytes = "\x01\x00\x00\x00\x00\x00",
1251         };
1252         struct rte_flow_item_vlan vlan_spec;
1253         const struct rte_flow_item_vlan vlan_mask = {
1254                 .tci = RTE_BE16(0x0fff),
1255         };
1256         struct rte_flow_item pattern[] = {
1257                 {
1258                         .type = MLX4_FLOW_ITEM_TYPE_INTERNAL,
1259                 },
1260                 {
1261                         .type = RTE_FLOW_ITEM_TYPE_ETH,
1262                         .spec = &eth_spec,
1263                         .mask = &eth_mask,
1264                 },
1265                 {
1266                         /* Replaced with VLAN if filtering is enabled. */
1267                         .type = RTE_FLOW_ITEM_TYPE_END,
1268                 },
1269                 {
1270                         .type = RTE_FLOW_ITEM_TYPE_END,
1271                 },
1272         };
1273         /*
1274          * Round number of queues down to their previous power of 2 to
1275          * comply with RSS context limitations. Extra queues silently do not
1276          * get RSS by default.
1277          */
1278         uint32_t queues =
1279                 rte_align32pow2(priv->dev->data->nb_rx_queues + 1) >> 1;
1280         alignas(struct rte_flow_action_rss) uint8_t rss_conf_data
1281                 [offsetof(struct rte_flow_action_rss, queue) +
1282                  sizeof(((struct rte_flow_action_rss *)0)->queue[0]) * queues];
1283         struct rte_flow_action_rss *rss_conf = (void *)rss_conf_data;
1284         struct rte_flow_action actions[] = {
1285                 {
1286                         .type = RTE_FLOW_ACTION_TYPE_RSS,
1287                         .conf = rss_conf,
1288                 },
1289                 {
1290                         .type = RTE_FLOW_ACTION_TYPE_END,
1291                 },
1292         };
1293         struct ether_addr *rule_mac = &eth_spec.dst;
1294         rte_be16_t *rule_vlan =
1295                 priv->dev->data->dev_conf.rxmode.hw_vlan_filter &&
1296                 !priv->dev->data->promiscuous ?
1297                 &vlan_spec.tci :
1298                 NULL;
1299         int broadcast =
1300                 !priv->dev->data->promiscuous &&
1301                 !priv->dev->data->all_multicast;
1302         uint16_t vlan = 0;
1303         struct rte_flow *flow;
1304         unsigned int i;
1305         int err = 0;
1306
1307         /* Nothing to be done if there are no Rx queues. */
1308         if (!queues)
1309                 goto error;
1310         /* Prepare default RSS configuration. */
1311         *rss_conf = (struct rte_flow_action_rss){
1312                 .rss_conf = NULL, /* Rely on default fallback settings. */
1313                 .num = queues,
1314         };
1315         for (i = 0; i != queues; ++i)
1316                 rss_conf->queue[i] = i;
1317         /*
1318          * Set up VLAN item if filtering is enabled and at least one VLAN
1319          * filter is configured.
1320          */
1321         if (rule_vlan) {
1322                 vlan = mlx4_flow_internal_next_vlan(priv, 0);
1323                 if (vlan < 4096) {
1324                         pattern[2] = (struct rte_flow_item){
1325                                 .type = RTE_FLOW_ITEM_TYPE_VLAN,
1326                                 .spec = &vlan_spec,
1327                                 .mask = &vlan_mask,
1328                         };
1329 next_vlan:
1330                         *rule_vlan = rte_cpu_to_be_16(vlan);
1331                 } else {
1332                         rule_vlan = NULL;
1333                 }
1334         }
        for (i = 0; i != RTE_DIM(priv->mac) + broadcast; ++i) {
                const struct ether_addr *mac;

                /* Broadcasts are handled by an extra iteration. */
                if (i < RTE_DIM(priv->mac))
                        mac = &priv->mac[i];
                else
                        /* The all-ones dst mask doubles as broadcast MAC. */
                        mac = &eth_mask.dst;
                /* Skip unused (zeroed) MAC table entries. */
                if (is_zero_ether_addr(mac))
                        continue;
                /* Check if MAC flow rule is already present. */
                for (flow = LIST_FIRST(&priv->flows);
                     flow && flow->internal;
                     flow = LIST_NEXT(flow, next)) {
                        const struct ibv_flow_spec_eth *eth =
                                (const void *)((uintptr_t)flow->ibv_attr +
                                               sizeof(*flow->ibv_attr));
                        unsigned int j;

                        if (!flow->mac)
                                continue;
                        assert(flow->ibv_attr->type == IBV_FLOW_ATTR_NORMAL);
                        assert(flow->ibv_attr->num_of_specs == 1);
                        assert(eth->type == IBV_FLOW_SPEC_ETH);
                        assert(flow->rss);
                        if (rule_vlan &&
                            (eth->val.vlan_tag != *rule_vlan ||
                             eth->mask.vlan_tag != RTE_BE16(0x0fff)))
                                continue;
                        if (!rule_vlan && eth->mask.vlan_tag)
                                continue;
                        for (j = 0; j != sizeof(mac->addr_bytes); ++j)
                                if (eth->val.dst_mac[j] != mac->addr_bytes[j] ||
                                    eth->mask.dst_mac[j] != UINT8_C(0xff) ||
                                    eth->val.src_mac[j] != UINT8_C(0x00) ||
                                    eth->mask.src_mac[j] != UINT8_C(0x00))
                                        break;
                        if (j != sizeof(mac->addr_bytes))
                                continue;
                        if (flow->rss->queues != queues ||
                            memcmp(flow->rss->queue_id, rss_conf->queue,
                                   queues * sizeof(flow->rss->queue_id[0])))
                                continue;
                        break;
                }
                if (!flow || !flow->internal) {
                        /* Not found, create a new flow rule. */
                        memcpy(rule_mac, mac, sizeof(*mac));
                        flow = mlx4_flow_create(priv->dev, &attr, pattern,
                                                actions, error);
                        if (!flow) {
                                err = -rte_errno;
                                goto error;
                        }
                }
                /* Select the rule so the final cleanup pass keeps it. */
                flow->select = 1;
                flow->mac = 1;
        }
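        /*
         * When VLAN filtering is in effect (rule_vlan != NULL), repeat the
         * MAC loop above for every configured VLAN ID by jumping back to
         * next_vlan.
         */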
        if (rule_vlan) {
                vlan = mlx4_flow_internal_next_vlan(priv, vlan + 1);
                if (vlan < 4096)
                        goto next_vlan;
        }
        /*
         * Take care of promiscuous and all multicast flow rules. These are
         * only needed when at least one of those modes is enabled, that
         * is, when broadcast rules are not (!broadcast).
         */
        if (!broadcast) {
                for (flow = LIST_FIRST(&priv->flows);
                     flow && flow->internal;
                     flow = LIST_NEXT(flow, next)) {
                        if (priv->dev->data->promiscuous) {
                                if (flow->promisc)
                                        break;
                        } else {
                                assert(priv->dev->data->all_multicast);
                                if (flow->allmulti)
                                        break;
                        }
                }
                if (flow && flow->internal) {
                        assert(flow->rss);
                        if (flow->rss->queues != queues ||
                            memcmp(flow->rss->queue_id, rss_conf->queue,
                                   queues * sizeof(flow->rss->queue_id[0])))
                                flow = NULL;
                }
                if (!flow || !flow->internal) {
                        /* Not found, create a new flow rule. */
                        if (priv->dev->data->promiscuous) {
                                pattern[1].spec = NULL;
                                pattern[1].mask = NULL;
                        } else {
                                assert(priv->dev->data->all_multicast);
                                pattern[1].spec = &eth_allmulti;
                                pattern[1].mask = &eth_allmulti;
                        }
                        pattern[2] = pattern[3];
                        flow = mlx4_flow_create(priv->dev, &attr, pattern,
                                                actions, error);
                        if (!flow) {
                                err = -rte_errno;
                                goto error;
                        }
                }
                assert(flow->promisc || flow->allmulti);
                flow->select = 1;
        }
error:
        /* Clear selection and clean up stale internal flow rules. */
        flow = LIST_FIRST(&priv->flows);
        while (flow && flow->internal) {
                struct rte_flow *next = LIST_NEXT(flow, next);

                if (!flow->select)
                        claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
                else
                        flow->select = 0;
                flow = next;
        }
        return err;
}

/**
 * Synchronize flow rules.
 *
 * This function synchronizes flow rules with the state of the device by
 * taking into account isolated mode and whether target queues are
 * configured.
 *
 * @param priv
 *   Pointer to private structure.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_flow_sync(struct priv *priv, struct rte_flow_error *error)
{
        struct rte_flow *flow;
        int ret;

        /* Internal flow rules are guaranteed to come first in the list. */
        if (priv->isolated) {
                /*
                 * Get rid of them in isolated mode, stop at the first
                 * non-internal rule found.
                 */
                for (flow = LIST_FIRST(&priv->flows);
                     flow && flow->internal;
                     flow = LIST_FIRST(&priv->flows))
                        claim_zero(mlx4_flow_destroy(priv->dev, flow, error));
        } else {
                /* Refresh internal rules. */
                ret = mlx4_flow_internal(priv, error);
                if (ret)
                        return ret;
        }
        /* Toggle the remaining flow rules. */
        LIST_FOREACH(flow, &priv->flows, next) {
                ret = mlx4_flow_toggle(priv, flow, priv->started, error);
                if (ret)
                        return ret;
        }
        if (!priv->started)
                assert(!priv->drop);
        return 0;
}
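
/*
 * Usage sketch (illustrative, not part of the driver): mlx4_flow_sync() is
 * meant to be called whenever relevant device state changes, e.g. from a
 * start handler once Rx queues exist. The handler name below is an
 * assumption and error handling is abbreviated:
 *
 *      static int
 *      example_dev_start(struct rte_eth_dev *dev)
 *      {
 *              struct priv *priv = dev->data->dev_private;
 *              struct rte_flow_error error;
 *              int ret;
 *
 *              priv->started = 1;
 *              ret = mlx4_flow_sync(priv, &error);
 *              if (ret) {
 *                      ERROR("cannot attach flow rules (code %d, \"%s\")",
 *                            -ret,
 *                            error.message ? error.message : "(unspecified)");
 *                      priv->started = 0;
 *              }
 *              return ret;
 *      }
 */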

/**
 * Clean up all flow rules.
 *
 * Unlike mlx4_flow_flush(), this function takes care of all remaining flow
 * rules regardless of whether they are internal or user-configured.
 *
 * @param priv
 *   Pointer to private structure.
 */
void
mlx4_flow_clean(struct priv *priv)
{
        struct rte_flow *flow;

        while ((flow = LIST_FIRST(&priv->flows)))
                mlx4_flow_destroy(priv->dev, flow, NULL);
        assert(LIST_EMPTY(&priv->rss));
}
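
/*
 * Tear-down sketch (illustrative, not part of the driver): since it also
 * removes internal rules, mlx4_flow_clean() belongs in the final device
 * close/removal path, after traffic has been stopped. The handler name is
 * an assumption:
 *
 *      static void
 *      example_dev_close(struct rte_eth_dev *dev)
 *      {
 *              struct priv *priv = dev->data->dev_private;
 *
 *              mlx4_flow_clean(priv);
 *              ... release Rx/Tx queues and Verbs resources ...
 *      }
 */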

/** Flow operations exposed to applications through the rte_flow API. */
static const struct rte_flow_ops mlx4_flow_ops = {
        .validate = mlx4_flow_validate,
        .create = mlx4_flow_create,
        .destroy = mlx4_flow_destroy,
        .flush = mlx4_flow_flush,
        .isolate = mlx4_flow_isolate,
};

/**
 * Manage filter operations.
 *
 * @param dev
 *   Pointer to Ethernet device structure.
 * @param filter_type
 *   Filter type.
 * @param filter_op
 *   Operation to perform.
 * @param arg
 *   Pointer to operation-specific structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
int
mlx4_filter_ctrl(struct rte_eth_dev *dev,
                 enum rte_filter_type filter_type,
                 enum rte_filter_op filter_op,
                 void *arg)
{
        switch (filter_type) {
        case RTE_ETH_FILTER_GENERIC:
                if (filter_op != RTE_ETH_FILTER_GET)
                        break;
                *(const void **)arg = &mlx4_flow_ops;
                return 0;
        default:
                ERROR("%p: filter type (%d) not supported",
                      (void *)dev, filter_type);
                break;
        }
        rte_errno = ENOTSUP;
        return -rte_errno;
}
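
/*
 * Retrieval sketch (illustrative): the rte_flow API reaches mlx4_flow_ops
 * through this filter control hook rather than by direct calls.
 * Applications using a configured port_id go through rte_flow_validate()
 * and friends, which internally perform the equivalent of:
 *
 *      const struct rte_flow_ops *ops;
 *
 *      rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_GENERIC,
 *                              RTE_ETH_FILTER_GET, &ops);
 *      ops->validate(dev, attr, pattern, actions, error);
 */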