1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2018 Mellanox Technologies, Ltd
3  */
4
5 #include <netinet/in.h>
6 #include <sys/queue.h>
7 #include <stdalign.h>
8 #include <stdint.h>
9 #include <string.h>
10
11 /* Verbs header. */
12 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #ifdef PEDANTIC
14 #pragma GCC diagnostic ignored "-Wpedantic"
15 #endif
16 #include <infiniband/verbs.h>
17 #ifdef PEDANTIC
18 #pragma GCC diagnostic error "-Wpedantic"
19 #endif
20
21 #include <rte_common.h>
22 #include <rte_ether.h>
23 #include <rte_eth_ctrl.h>
24 #include <rte_ethdev_driver.h>
25 #include <rte_flow.h>
26 #include <rte_flow_driver.h>
27 #include <rte_malloc.h>
28 #include <rte_ip.h>
29
30 #include "mlx5.h"
31 #include "mlx5_defs.h"
32 #include "mlx5_prm.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
35
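/* Flag a Verbs spec as inner when the item is matched behind a tunnel layer. */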
36 #define VERBS_SPEC_INNER(item_flags) \
37         (!!((item_flags) & MLX5_FLOW_LAYER_TUNNEL) ? IBV_FLOW_SPEC_INNER : 0)
38
39 /**
40  * Create a flow counter with the Verbs library.
41  *
42  * @param[in] dev
43  *   Pointer to the Ethernet device structure.
44  * @param[in, out] counter
45  *   mlx5 flow counter object; it holds the counter id and, when
46  *   counters are supported, receives the handle of the created
47  *   Verbs counter in its cs field.
48  *
49  * @return
50  *   0 on success, otherwise a negative errno value is returned
51  *   and rte_errno is set.
52  */
53 static int
54 flow_verbs_counter_create(struct rte_eth_dev *dev,
55                           struct mlx5_flow_counter *counter)
56 {
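        /*
         * Two flavours of the Verbs counter API may be available: the
         * older counter set interface or the newer counters interface;
         * use whichever one the build detected.
         */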
57 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
58         struct priv *priv = dev->data->dev_private;
59         struct ibv_counter_set_init_attr init = {
60                          .counter_set_id = counter->id};
61
62         counter->cs = mlx5_glue->create_counter_set(priv->ctx, &init);
63         if (!counter->cs) {
64                 rte_errno = ENOTSUP;
65                 return -ENOTSUP;
66         }
67         return 0;
68 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
69         struct priv *priv = dev->data->dev_private;
70         struct ibv_counters_init_attr init = {0};
71         struct ibv_counter_attach_attr attach = {0};
72         int ret;
73
74         counter->cs = mlx5_glue->create_counters(priv->ctx, &init);
75         if (!counter->cs) {
76                 rte_errno = ENOTSUP;
77                 return -ENOTSUP;
78         }
79         attach.counter_desc = IBV_COUNTER_PACKETS;
80         attach.index = 0;
81         ret = mlx5_glue->attach_counters(counter->cs, &attach, NULL);
82         if (!ret) {
83                 attach.counter_desc = IBV_COUNTER_BYTES;
84                 attach.index = 1;
85                 ret = mlx5_glue->attach_counters
86                                         (counter->cs, &attach, NULL);
87         }
88         if (ret) {
89                 claim_zero(mlx5_glue->destroy_counters(counter->cs));
90                 counter->cs = NULL;
91                 rte_errno = ret;
92                 return -ret;
93         }
94         return 0;
95 #else
96         (void)dev;
97         (void)counter;
98         rte_errno = ENOTSUP;
99         return -ENOTSUP;
100 #endif
101 }
102
103 /**
104  * Get a flow counter, reusing an existing shared counter when possible.
105  *
106  * @param[in] dev
107  *   Pointer to the Ethernet device structure.
108  * @param[in] shared
109  *   Indicate if this counter is shared with other flows.
110  * @param[in] id
111  *   Counter identifier.
112  *
113  * @return
114  *   A pointer to the counter, NULL otherwise and rte_errno is set.
115  */
116 static struct mlx5_flow_counter *
117 flow_verbs_counter_new(struct rte_eth_dev *dev, uint32_t shared, uint32_t id)
118 {
119         struct priv *priv = dev->data->dev_private;
120         struct mlx5_flow_counter *cnt;
121         int ret;
122
123         LIST_FOREACH(cnt, &priv->flow_counters, next) {
124                 if (!cnt->shared || cnt->shared != shared)
125                         continue;
126                 if (cnt->id != id)
127                         continue;
128                 cnt->ref_cnt++;
129                 return cnt;
130         }
131         cnt = rte_calloc(__func__, 1, sizeof(*cnt), 0);
132         if (!cnt) {
133                 rte_errno = ENOMEM;
134                 return NULL;
135         }
136         cnt->id = id;
137         cnt->shared = shared;
138         cnt->ref_cnt = 1;
139         cnt->hits = 0;
140         cnt->bytes = 0;
141         /* Create counter with Verbs. */
142         ret = flow_verbs_counter_create(dev, cnt);
143         if (!ret) {
144                 LIST_INSERT_HEAD(&priv->flow_counters, cnt, next);
145                 return cnt;
146         }
147         /* Some error occurred in Verbs library. */
148         rte_free(cnt);
149         rte_errno = -ret;
150         return NULL;
151 }
152
153 /**
154  * Release a flow counter.
155  *
156  * @param[in] counter
157  *   Pointer to the counter handler.
158  */
159 static void
160 flow_verbs_counter_release(struct mlx5_flow_counter *counter)
161 {
162         if (--counter->ref_cnt == 0) {
163 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
164                 claim_zero(mlx5_glue->destroy_counter_set(counter->cs));
165 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
166                 claim_zero(mlx5_glue->destroy_counters(counter->cs));
167 #endif
168                 LIST_REMOVE(counter, next);
169                 rte_free(counter);
170         }
171 }
172
173 /**
174  * Query a flow counter via Verbs library call.
175  *
176  * @see rte_flow_query()
177  * @see rte_flow_ops
178  */
179 static int
180 flow_verbs_counter_query(struct rte_eth_dev *dev __rte_unused,
181                          struct rte_flow *flow, void *data,
182                          struct rte_flow_error *error)
183 {
184 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
185         defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
186         if (flow->actions & MLX5_FLOW_ACTION_COUNT) {
187                 struct rte_flow_query_count *qc = data;
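                /* counters[0] receives the packet (hits) count, counters[1] the byte count. */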
188                 uint64_t counters[2] = {0, 0};
189 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
190                 struct ibv_query_counter_set_attr query_cs_attr = {
191                         .cs = flow->counter->cs,
192                         .query_flags = IBV_COUNTER_SET_FORCE_UPDATE,
193                 };
194                 struct ibv_counter_set_data query_out = {
195                         .out = counters,
196                         .outlen = 2 * sizeof(uint64_t),
197                 };
198                 int err = mlx5_glue->query_counter_set(&query_cs_attr,
199                                                        &query_out);
200 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
201                 int err = mlx5_glue->query_counters
202                                (flow->counter->cs, counters,
203                                 RTE_DIM(counters),
204                                 IBV_READ_COUNTERS_ATTR_PREFER_CACHED);
205 #endif
206                 if (err)
207                         return rte_flow_error_set
208                                 (error, err,
209                                  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
210                                  NULL,
211                                  "cannot read counter");
212                 qc->hits_set = 1;
213                 qc->bytes_set = 1;
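                /* Report deltas against the values snapshotted at the last reset. */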
214                 qc->hits = counters[0] - flow->counter->hits;
215                 qc->bytes = counters[1] - flow->counter->bytes;
216                 if (qc->reset) {
217                         flow->counter->hits = counters[0];
218                         flow->counter->bytes = counters[1];
219                 }
220                 return 0;
221         }
222         return rte_flow_error_set(error, EINVAL,
223                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
224                                   NULL,
225                                   "flow does not have counter");
226 #else
227         (void)flow;
228         (void)data;
229         return rte_flow_error_set(error, ENOTSUP,
230                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
231                                   NULL,
232                                   "counters are not available");
233 #endif
234 }
235
236 /**
237  * Add a Verbs specification into @p verbs.
238  *
239  * @param[out] verbs
240  *   Pointer to the Verbs flow structure.
241  * @param[in] src
242  *   Pointer to the specification to copy.
243  * @param[in] size
244  *   Size in bytes of the specification to copy.
245  */
246 static void
247 flow_verbs_spec_add(struct mlx5_flow_verbs *verbs, void *src, unsigned int size)
248 {
249         void *dst;
250
251         if (!verbs)
252                 return;
253         assert(verbs->specs);
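        /* Copy the spec to the end of the buffer and account for it in the attribute. */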
254         dst = (void *)(verbs->specs + verbs->size);
255         memcpy(dst, src, size);
256         ++verbs->attr->num_of_specs;
257         verbs->size += size;
258 }
259
260 /**
261  * Convert the @p item into a Verbs specification. This function assumes that
262  * the input is valid and that there is space to insert the requested item
263  * into the flow.
264  *
265  * @param[in, out] dev_flow
266  *   Pointer to dev_flow structure.
267  * @param[in] item
268  *   Item specification.
269  * @param[in] item_flags
270  *   Parsed item flags.
271  */
272 static void
273 flow_verbs_translate_item_eth(struct mlx5_flow *dev_flow,
274                               const struct rte_flow_item *item,
275                               uint64_t item_flags)
276 {
277         const struct rte_flow_item_eth *spec = item->spec;
278         const struct rte_flow_item_eth *mask = item->mask;
279         const unsigned int size = sizeof(struct ibv_flow_spec_eth);
280         struct ibv_flow_spec_eth eth = {
281                 .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
282                 .size = size,
283         };
284
285         if (!mask)
286                 mask = &rte_flow_item_eth_mask;
287         if (spec) {
288                 unsigned int i;
289
290                 memcpy(&eth.val.dst_mac, spec->dst.addr_bytes, ETHER_ADDR_LEN);
291                 memcpy(&eth.val.src_mac, spec->src.addr_bytes, ETHER_ADDR_LEN);
292                 eth.val.ether_type = spec->type;
293                 memcpy(&eth.mask.dst_mac, mask->dst.addr_bytes, ETHER_ADDR_LEN);
294                 memcpy(&eth.mask.src_mac, mask->src.addr_bytes, ETHER_ADDR_LEN);
295                 eth.mask.ether_type = mask->type;
296                 /* Remove unwanted bits from values. */
297                 for (i = 0; i < ETHER_ADDR_LEN; ++i) {
298                         eth.val.dst_mac[i] &= eth.mask.dst_mac[i];
299                         eth.val.src_mac[i] &= eth.mask.src_mac[i];
300                 }
301                 eth.val.ether_type &= eth.mask.ether_type;
302         }
303         flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
304 }
305
306 /**
307  * Update the VLAN tag in the Verbs Ethernet specification.
309  * This function assumes that the input is valid and that the Ethernet
310  * specification to update is already stored in the attribute.
310  *
311  * @param[in, out] attr
312  *   Pointer to Verbs attributes structure.
313  * @param[in] eth
314  *   Verbs structure containing the VLAN information to copy.
315  */
316 static void
317 flow_verbs_item_vlan_update(struct ibv_flow_attr *attr,
318                             struct ibv_flow_spec_eth *eth)
319 {
320         unsigned int i;
321         const enum ibv_flow_spec_type search = eth->type;
322         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
323                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
324
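        /*
         * Scan the specs already stored in the attribute for the Ethernet
         * spec and merge the VLAN tag and EtherType into it.
         */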
325         for (i = 0; i != attr->num_of_specs; ++i) {
326                 if (hdr->type == search) {
327                         struct ibv_flow_spec_eth *e =
328                                 (struct ibv_flow_spec_eth *)hdr;
329
330                         e->val.vlan_tag = eth->val.vlan_tag;
331                         e->mask.vlan_tag = eth->mask.vlan_tag;
332                         e->val.ether_type = eth->val.ether_type;
333                         e->mask.ether_type = eth->mask.ether_type;
334                         break;
335                 }
336                 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
337         }
338 }
339
340 /**
341  * Convert the @p item into a Verbs specification. This function assumes that
342  * the input is valid and that there is space to insert the requested item
343  * into the flow.
344  *
345  * @param[in, out] dev_flow
346  *   Pointer to dev_flow structure.
347  * @param[in] item
348  *   Item specification.
349  * @param[in] item_flags
350  *   Parsed item flags.
351  */
352 static void
353 flow_verbs_translate_item_vlan(struct mlx5_flow *dev_flow,
354                                const struct rte_flow_item *item,
355                                uint64_t item_flags)
356 {
357         const struct rte_flow_item_vlan *spec = item->spec;
358         const struct rte_flow_item_vlan *mask = item->mask;
359         unsigned int size = sizeof(struct ibv_flow_spec_eth);
360         const int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
361         struct ibv_flow_spec_eth eth = {
362                 .type = IBV_FLOW_SPEC_ETH | VERBS_SPEC_INNER(item_flags),
363                 .size = size,
364         };
365         const uint32_t l2m = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
366                                       MLX5_FLOW_LAYER_OUTER_L2;
367
368         if (!mask)
369                 mask = &rte_flow_item_vlan_mask;
370         if (spec) {
371                 eth.val.vlan_tag = spec->tci;
372                 eth.mask.vlan_tag = mask->tci;
373                 eth.val.vlan_tag &= eth.mask.vlan_tag;
374                 eth.val.ether_type = spec->inner_type;
375                 eth.mask.ether_type = mask->inner_type;
376                 eth.val.ether_type &= eth.mask.ether_type;
377         }
378         if (!(item_flags & l2m))
379                 flow_verbs_spec_add(&dev_flow->verbs, &eth, size);
380         else
381                 flow_verbs_item_vlan_update(dev_flow->verbs.attr, &eth);
382 }
383
384 /**
385  * Convert the @p item into a Verbs specification. This function assumes that
386  * the input is valid and that there is space to insert the requested item
387  * into the flow.
388  *
389  * @param[in, out] dev_flow
390  *   Pointer to dev_flow structure.
391  * @param[in] item
392  *   Item specification.
393  * @param[in] item_flags
394  *   Parsed item flags.
395  */
396 static void
397 flow_verbs_translate_item_ipv4(struct mlx5_flow *dev_flow,
398                                const struct rte_flow_item *item,
399                                uint64_t item_flags)
400 {
401         const struct rte_flow_item_ipv4 *spec = item->spec;
402         const struct rte_flow_item_ipv4 *mask = item->mask;
403         unsigned int size = sizeof(struct ibv_flow_spec_ipv4_ext);
404         struct ibv_flow_spec_ipv4_ext ipv4 = {
405                 .type = IBV_FLOW_SPEC_IPV4_EXT | VERBS_SPEC_INNER(item_flags),
406                 .size = size,
407         };
408
409         if (!mask)
410                 mask = &rte_flow_item_ipv4_mask;
411         if (spec) {
412                 ipv4.val = (struct ibv_flow_ipv4_ext_filter){
413                         .src_ip = spec->hdr.src_addr,
414                         .dst_ip = spec->hdr.dst_addr,
415                         .proto = spec->hdr.next_proto_id,
416                         .tos = spec->hdr.type_of_service,
417                 };
418                 ipv4.mask = (struct ibv_flow_ipv4_ext_filter){
419                         .src_ip = mask->hdr.src_addr,
420                         .dst_ip = mask->hdr.dst_addr,
421                         .proto = mask->hdr.next_proto_id,
422                         .tos = mask->hdr.type_of_service,
423                 };
424                 /* Remove unwanted bits from values. */
425                 ipv4.val.src_ip &= ipv4.mask.src_ip;
426                 ipv4.val.dst_ip &= ipv4.mask.dst_ip;
427                 ipv4.val.proto &= ipv4.mask.proto;
428                 ipv4.val.tos &= ipv4.mask.tos;
429         }
430         flow_verbs_spec_add(&dev_flow->verbs, &ipv4, size);
431 }
432
433 /**
434  * Convert the @p item into a Verbs specification. This function assumes that
435  * the input is valid and that there is space to insert the requested item
436  * into the flow.
437  *
438  * @param[in, out] dev_flow
439  *   Pointer to dev_flow structure.
440  * @param[in] item
441  *   Item specification.
442  * @param[in] item_flags
443  *   Parsed item flags.
444  */
445 static void
446 flow_verbs_translate_item_ipv6(struct mlx5_flow *dev_flow,
447                                const struct rte_flow_item *item,
448                                uint64_t item_flags)
449 {
450         const struct rte_flow_item_ipv6 *spec = item->spec;
451         const struct rte_flow_item_ipv6 *mask = item->mask;
452         unsigned int size = sizeof(struct ibv_flow_spec_ipv6);
453         struct ibv_flow_spec_ipv6 ipv6 = {
454                 .type = IBV_FLOW_SPEC_IPV6 | VERBS_SPEC_INNER(item_flags),
455                 .size = size,
456         };
457
458         if (!mask)
459                 mask = &rte_flow_item_ipv6_mask;
460         if (spec) {
461                 unsigned int i;
462                 uint32_t vtc_flow_val;
463                 uint32_t vtc_flow_mask;
464
465                 memcpy(&ipv6.val.src_ip, spec->hdr.src_addr,
466                        RTE_DIM(ipv6.val.src_ip));
467                 memcpy(&ipv6.val.dst_ip, spec->hdr.dst_addr,
468                        RTE_DIM(ipv6.val.dst_ip));
469                 memcpy(&ipv6.mask.src_ip, mask->hdr.src_addr,
470                        RTE_DIM(ipv6.mask.src_ip));
471                 memcpy(&ipv6.mask.dst_ip, mask->hdr.dst_addr,
472                        RTE_DIM(ipv6.mask.dst_ip));
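                /*
                 * vtc_flow packs the IP version, traffic class and flow
                 * label into one 32-bit word; extract the last two fields.
                 */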
473                 vtc_flow_val = rte_be_to_cpu_32(spec->hdr.vtc_flow);
474                 vtc_flow_mask = rte_be_to_cpu_32(mask->hdr.vtc_flow);
475                 ipv6.val.flow_label =
476                         rte_cpu_to_be_32((vtc_flow_val & IPV6_HDR_FL_MASK) >>
477                                          IPV6_HDR_FL_SHIFT);
478                 ipv6.val.traffic_class = (vtc_flow_val & IPV6_HDR_TC_MASK) >>
479                                          IPV6_HDR_TC_SHIFT;
480                 ipv6.val.next_hdr = spec->hdr.proto;
481                 ipv6.val.hop_limit = spec->hdr.hop_limits;
482                 ipv6.mask.flow_label =
483                         rte_cpu_to_be_32((vtc_flow_mask & IPV6_HDR_FL_MASK) >>
484                                          IPV6_HDR_FL_SHIFT);
485                 ipv6.mask.traffic_class = (vtc_flow_mask & IPV6_HDR_TC_MASK) >>
486                                           IPV6_HDR_TC_SHIFT;
487                 ipv6.mask.next_hdr = mask->hdr.proto;
488                 ipv6.mask.hop_limit = mask->hdr.hop_limits;
489                 /* Remove unwanted bits from values. */
490                 for (i = 0; i < RTE_DIM(ipv6.val.src_ip); ++i) {
491                         ipv6.val.src_ip[i] &= ipv6.mask.src_ip[i];
492                         ipv6.val.dst_ip[i] &= ipv6.mask.dst_ip[i];
493                 }
494                 ipv6.val.flow_label &= ipv6.mask.flow_label;
495                 ipv6.val.traffic_class &= ipv6.mask.traffic_class;
496                 ipv6.val.next_hdr &= ipv6.mask.next_hdr;
497                 ipv6.val.hop_limit &= ipv6.mask.hop_limit;
498         }
499         flow_verbs_spec_add(&dev_flow->verbs, &ipv6, size);
500 }
501
502 /**
503  * Convert the @p item into a Verbs specification. This function assumes that
504  * the input is valid and that there is space to insert the requested item
505  * into the flow.
506  *
507  * @param[in, out] dev_flow
508  *   Pointer to dev_flow structure.
509  * @param[in] item
510  *   Item specification.
511  * @param[in] item_flags
512  *   Parsed item flags.
513  */
514 static void
515 flow_verbs_translate_item_tcp(struct mlx5_flow *dev_flow,
516                               const struct rte_flow_item *item,
517                               uint64_t item_flags)
518 {
519         const struct rte_flow_item_tcp *spec = item->spec;
520         const struct rte_flow_item_tcp *mask = item->mask;
521         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
522         struct ibv_flow_spec_tcp_udp tcp = {
523                 .type = IBV_FLOW_SPEC_TCP | VERBS_SPEC_INNER(item_flags),
524                 .size = size,
525         };
526
527         if (!mask)
528                 mask = &rte_flow_item_tcp_mask;
529         if (spec) {
530                 tcp.val.dst_port = spec->hdr.dst_port;
531                 tcp.val.src_port = spec->hdr.src_port;
532                 tcp.mask.dst_port = mask->hdr.dst_port;
533                 tcp.mask.src_port = mask->hdr.src_port;
534                 /* Remove unwanted bits from values. */
535                 tcp.val.src_port &= tcp.mask.src_port;
536                 tcp.val.dst_port &= tcp.mask.dst_port;
537         }
538         flow_verbs_spec_add(&dev_flow->verbs, &tcp, size);
539 }
540
541 /**
542  * Convert the @p item into a Verbs specification. This function assumes that
543  * the input is valid and that there is space to insert the requested item
544  * into the flow.
545  *
546  * @param[in, out] dev_flow
547  *   Pointer to dev_flow structure.
548  * @param[in] item
549  *   Item specification.
550  * @param[in] item_flags
551  *   Parsed item flags.
552  */
553 static void
554 flow_verbs_translate_item_udp(struct mlx5_flow *dev_flow,
555                               const struct rte_flow_item *item,
556                               uint64_t item_flags)
557 {
558         const struct rte_flow_item_udp *spec = item->spec;
559         const struct rte_flow_item_udp *mask = item->mask;
560         unsigned int size = sizeof(struct ibv_flow_spec_tcp_udp);
561         struct ibv_flow_spec_tcp_udp udp = {
562                 .type = IBV_FLOW_SPEC_UDP | VERBS_SPEC_INNER(item_flags),
563                 .size = size,
564         };
565
566         if (!mask)
567                 mask = &rte_flow_item_udp_mask;
568         if (spec) {
569                 udp.val.dst_port = spec->hdr.dst_port;
570                 udp.val.src_port = spec->hdr.src_port;
571                 udp.mask.dst_port = mask->hdr.dst_port;
572                 udp.mask.src_port = mask->hdr.src_port;
573                 /* Remove unwanted bits from values. */
574                 udp.val.src_port &= udp.mask.src_port;
575                 udp.val.dst_port &= udp.mask.dst_port;
576         }
577         flow_verbs_spec_add(&dev_flow->verbs, &udp, size);
578 }
579
580 /**
581  * Convert the @p item into a Verbs specification. This function assumes that
582  * the input is valid and that there is space to insert the requested item
583  * into the flow.
584  *
585  * @param[in, out] dev_flow
586  *   Pointer to dev_flow structure.
587  * @param[in] item
588  *   Item specification.
589  * @param[in] item_flags
590  *   Parsed item flags.
591  */
592 static void
593 flow_verbs_translate_item_vxlan(struct mlx5_flow *dev_flow,
594                                 const struct rte_flow_item *item,
595                                 uint64_t item_flags __rte_unused)
596 {
597         const struct rte_flow_item_vxlan *spec = item->spec;
598         const struct rte_flow_item_vxlan *mask = item->mask;
599         unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
600         struct ibv_flow_spec_tunnel vxlan = {
601                 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
602                 .size = size,
603         };
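        /*
         * The VNI is 24 bits wide; copy it into bytes 1..3 of the union so
         * that byte 0 of the 32-bit tunnel id remains zero.
         */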
604         union vni {
605                 uint32_t vlan_id;
606                 uint8_t vni[4];
607         } id = { .vlan_id = 0, };
608
609         if (!mask)
610                 mask = &rte_flow_item_vxlan_mask;
611         if (spec) {
612                 memcpy(&id.vni[1], spec->vni, 3);
613                 vxlan.val.tunnel_id = id.vlan_id;
614                 memcpy(&id.vni[1], mask->vni, 3);
615                 vxlan.mask.tunnel_id = id.vlan_id;
616                 /* Remove unwanted bits from values. */
617                 vxlan.val.tunnel_id &= vxlan.mask.tunnel_id;
618         }
619         flow_verbs_spec_add(&dev_flow->verbs, &vxlan, size);
620 }
621
622 /**
623  * Convert the @p item into a Verbs specification. This function assumes that
624  * the input is valid and that there is space to insert the requested item
625  * into the flow.
626  *
627  * @param[in, out] dev_flow
628  *   Pointer to dev_flow structure.
629  * @param[in] item
630  *   Item specification.
631  * @param[in] item_flags
632  *   Parsed item flags.
633  */
634 static void
635 flow_verbs_translate_item_vxlan_gpe(struct mlx5_flow *dev_flow,
636                                     const struct rte_flow_item *item,
637                                     uint64_t item_flags __rte_unused)
638 {
639         const struct rte_flow_item_vxlan_gpe *spec = item->spec;
640         const struct rte_flow_item_vxlan_gpe *mask = item->mask;
641         unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
642         struct ibv_flow_spec_tunnel vxlan_gpe = {
643                 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
644                 .size = size,
645         };
646         union vni {
647                 uint32_t vlan_id;
648                 uint8_t vni[4];
649         } id = { .vlan_id = 0, };
650
651         if (!mask)
652                 mask = &rte_flow_item_vxlan_gpe_mask;
653         if (spec) {
654                 memcpy(&id.vni[1], spec->vni, 3);
655                 vxlan_gpe.val.tunnel_id = id.vlan_id;
656                 memcpy(&id.vni[1], mask->vni, 3);
657                 vxlan_gpe.mask.tunnel_id = id.vlan_id;
658                 /* Remove unwanted bits from values. */
659                 vxlan_gpe.val.tunnel_id &= vxlan_gpe.mask.tunnel_id;
660         }
661         flow_verbs_spec_add(&dev_flow->verbs, &vxlan_gpe, size);
662 }
663
664 /**
665  * Update the protocol in Verbs IPv4/IPv6 spec.
666  *
667  * @param[in, out] attr
668  *   Pointer to Verbs attributes structure.
669  * @param[in] search
670  *   Specification type to search in order to update the IP protocol.
671  * @param[in] protocol
672  *   Protocol value to set if none is present in the specification.
673  */
674 static void
675 flow_verbs_item_gre_ip_protocol_update(struct ibv_flow_attr *attr,
676                                        enum ibv_flow_spec_type search,
677                                        uint8_t protocol)
678 {
679         unsigned int i;
680         struct ibv_spec_header *hdr = (struct ibv_spec_header *)
681                 ((uint8_t *)attr + sizeof(struct ibv_flow_attr));
682
683         if (!attr)
684                 return;
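        /*
         * Walk the stored specs; if the matching IP spec does not already
         * constrain the protocol, force it to the GRE protocol number.
         */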
685         for (i = 0; i != attr->num_of_specs; ++i) {
686                 if (hdr->type == search) {
687                         union {
688                                 struct ibv_flow_spec_ipv4_ext *ipv4;
689                                 struct ibv_flow_spec_ipv6 *ipv6;
690                         } ip;
691
692                         switch (search) {
693                         case IBV_FLOW_SPEC_IPV4_EXT:
694                                 ip.ipv4 = (struct ibv_flow_spec_ipv4_ext *)hdr;
695                                 if (!ip.ipv4->val.proto) {
696                                         ip.ipv4->val.proto = protocol;
697                                         ip.ipv4->mask.proto = 0xff;
698                                 }
699                                 break;
700                         case IBV_FLOW_SPEC_IPV6:
701                                 ip.ipv6 = (struct ibv_flow_spec_ipv6 *)hdr;
702                                 if (!ip.ipv6->val.next_hdr) {
703                                         ip.ipv6->val.next_hdr = protocol;
704                                         ip.ipv6->mask.next_hdr = 0xff;
705                                 }
706                                 break;
707                         default:
708                                 break;
709                         }
710                         break;
711                 }
712                 hdr = (struct ibv_spec_header *)((uint8_t *)hdr + hdr->size);
713         }
714 }
715
716 /**
717  * Convert the @p item into a Verbs specification. This function assumes that
718  * the input is valid and that there is space to insert the requested item
719  * into the flow.
720  *
721  * @param[in, out] dev_flow
722  *   Pointer to dev_flow structure.
723  * @param[in] item
724  *   Item specification.
725  * @param[in] item_flags
726  *   Parsed item flags.
727  */
728 static void
729 flow_verbs_translate_item_gre(struct mlx5_flow *dev_flow,
730                               const struct rte_flow_item *item __rte_unused,
731                               uint64_t item_flags)
732 {
733         struct mlx5_flow_verbs *verbs = &dev_flow->verbs;
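        /*
         * Without MPLS support in Verbs there is no dedicated GRE spec,
         * so fall back to the generic tunnel spec and only refine the IP
         * protocol of the preceding L3 spec.
         */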
734 #ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT
735         unsigned int size = sizeof(struct ibv_flow_spec_tunnel);
736         struct ibv_flow_spec_tunnel tunnel = {
737                 .type = IBV_FLOW_SPEC_VXLAN_TUNNEL,
738                 .size = size,
739         };
740 #else
741         const struct rte_flow_item_gre *spec = item->spec;
742         const struct rte_flow_item_gre *mask = item->mask;
743         unsigned int size = sizeof(struct ibv_flow_spec_gre);
744         struct ibv_flow_spec_gre tunnel = {
745                 .type = IBV_FLOW_SPEC_GRE,
746                 .size = size,
747         };
748
749         if (!mask)
750                 mask = &rte_flow_item_gre_mask;
751         if (spec) {
752                 tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver;
753                 tunnel.val.protocol = spec->protocol;
754                 tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver;
755                 tunnel.mask.protocol = mask->protocol;
756                 /* Remove unwanted bits from values. */
757                 tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver;
758                 tunnel.val.protocol &= tunnel.mask.protocol;
759                 tunnel.val.key &= tunnel.mask.key;
760         }
761 #endif
762         if (item_flags & MLX5_FLOW_LAYER_OUTER_L3_IPV4)
763                 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
764                                                        IBV_FLOW_SPEC_IPV4_EXT,
765                                                        IPPROTO_GRE);
766         else
767                 flow_verbs_item_gre_ip_protocol_update(verbs->attr,
768                                                        IBV_FLOW_SPEC_IPV6,
769                                                        IPPROTO_GRE);
770         flow_verbs_spec_add(verbs, &tunnel, size);
771 }
772
773 /**
774  * Convert the @p item into a Verbs specification. This function assumes that
775  * the input is valid and that there is space to insert the requested item
776  * into the flow.
777  *
778  * @param[in, out] dev_flow
779  *   Pointer to dev_flow structure.
780  * @param[in] item
781  *   Item specification.
782  * @param[in] item_flags
783  *   Parsed item flags.
784  */
785 static void
786 flow_verbs_translate_item_mpls(struct mlx5_flow *dev_flow __rte_unused,
787                                const struct rte_flow_item *item __rte_unused,
788                                uint64_t item_flags __rte_unused)
789 {
790 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
791         const struct rte_flow_item_mpls *spec = item->spec;
792         const struct rte_flow_item_mpls *mask = item->mask;
793         unsigned int size = sizeof(struct ibv_flow_spec_mpls);
794         struct ibv_flow_spec_mpls mpls = {
795                 .type = IBV_FLOW_SPEC_MPLS,
796                 .size = size,
797         };
798
799         if (!mask)
800                 mask = &rte_flow_item_mpls_mask;
801         if (spec) {
802                 memcpy(&mpls.val.label, spec, sizeof(mpls.val.label));
803                 memcpy(&mpls.mask.label, mask, sizeof(mpls.mask.label));
804                 /* Remove unwanted bits from values.  */
805                 mpls.val.label &= mpls.mask.label;
806         }
807         flow_verbs_spec_add(&dev_flow->verbs, &mpls, size);
808 #endif
809 }
810
811 /**
812  * Convert the @p action into a Verbs specification. This function assumes that
813  * the input is valid and that there is space to insert the requested action
814  * into the flow.
815  *
816  * @param[in] dev_flow
817  *   Pointer to mlx5_flow.
818  * @param[in] action
819  *   Action configuration.
820  */
821 static void
822 flow_verbs_translate_action_drop
823         (struct mlx5_flow *dev_flow,
824          const struct rte_flow_action *action __rte_unused)
825 {
826         unsigned int size = sizeof(struct ibv_flow_spec_action_drop);
827         struct ibv_flow_spec_action_drop drop = {
828                         .type = IBV_FLOW_SPEC_ACTION_DROP,
829                         .size = size,
830         };
831
832         flow_verbs_spec_add(&dev_flow->verbs, &drop, size);
833 }
834
835 /**
836  * Convert the @p action into a Verbs specification. This function assumes that
837  * the input is valid and that there is space to insert the requested action
838  * into the flow.
839  *
840  * @param[in] dev_flow
841  *   Pointer to mlx5_flow.
842  * @param[in] action
843  *   Action configuration.
844  */
845 static void
846 flow_verbs_translate_action_queue(struct mlx5_flow *dev_flow,
847                                   const struct rte_flow_action *action)
848 {
849         const struct rte_flow_action_queue *queue = action->conf;
850         struct rte_flow *flow = dev_flow->flow;
851
852         if (flow->queue)
853                 (*flow->queue)[0] = queue->index;
854         flow->rss.queue_num = 1;
855 }
856
857 /**
858  * Convert the @p action into a Verbs specification. This function assumes that
859  * the input is valid and that there is space to insert the requested action
860  * into the flow.
861  *
862  * @param[in] dev_flow
863  *   Pointer to mlx5_flow.
864  * @param[in] action
865  *   Action configuration.
868  */
869 static void
870 flow_verbs_translate_action_rss(struct mlx5_flow *dev_flow,
871                                 const struct rte_flow_action *action)
872 {
873         const struct rte_flow_action_rss *rss = action->conf;
874         const uint8_t *rss_key;
875         struct rte_flow *flow = dev_flow->flow;
876
877         if (flow->queue)
878                 memcpy((*flow->queue), rss->queue,
879                        rss->queue_num * sizeof(uint16_t));
880         flow->rss.queue_num = rss->queue_num;
881         /* NULL RSS key indicates default RSS key. */
882         rss_key = !rss->key ? rss_hash_default_key : rss->key;
883         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
884         /* RSS type 0 indicates default RSS type (ETH_RSS_IP). */
885         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
886         flow->rss.level = rss->level;
887 }
888
889 /**
890  * Convert the @p action into a Verbs specification. This function assumes that
891  * the input is valid and that there is space to insert the requested action
892  * into the flow.
893  *
894  * @param[in] dev_flow
895  *   Pointer to mlx5_flow.
896  * @param[in] action
897  *   Action configuration.
898  */
899 static void
900 flow_verbs_translate_action_flag
901         (struct mlx5_flow *dev_flow,
902          const struct rte_flow_action *action __rte_unused)
903 {
904         unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
905         struct ibv_flow_spec_action_tag tag = {
906                 .type = IBV_FLOW_SPEC_ACTION_TAG,
907                 .size = size,
908                 .tag_id = mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT),
909         };
910
911         flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
912 }
913
914 /**
915  * Convert the @p action into a Verbs specification. This function assumes that
916  * the input is valid and that there is space to insert the requested action
917  * into the flow.
918  *
919  * @param[in] dev_flow
920  *   Pointer to mlx5_flow.
921  * @param[in] action
922  *   Action configuration.
923  */
924 static void
925 flow_verbs_translate_action_mark(struct mlx5_flow *dev_flow,
926                                  const struct rte_flow_action *action)
927 {
928         const struct rte_flow_action_mark *mark = action->conf;
929         unsigned int size = sizeof(struct ibv_flow_spec_action_tag);
930         struct ibv_flow_spec_action_tag tag = {
931                 .type = IBV_FLOW_SPEC_ACTION_TAG,
932                 .size = size,
933                 .tag_id = mlx5_flow_mark_set(mark->id),
934         };
935
936         flow_verbs_spec_add(&dev_flow->verbs, &tag, size);
937 }
938
939 /**
940  * Convert the @p action into a Verbs specification. This function assumes that
941  * the input is valid and that there is space to insert the requested action
942  * into the flow.
943  *
944  * @param[in] dev_flow
945  *   Pointer to mlx5_flow.
946  * @param[in] action
947  *   Action configuration.
948  * @param[in] dev
949  *   Pointer to the Ethernet device structure.
950  * @param[out] error
951  *   Pointer to error structure.
952  *
953  * @return
954  *   0 on success, otherwise a negative errno value is returned and rte_errno is set.
955  */
956 static int
957 flow_verbs_translate_action_count(struct mlx5_flow *dev_flow,
958                                   const struct rte_flow_action *action,
959                                   struct rte_eth_dev *dev,
960                                   struct rte_flow_error *error)
961 {
962         const struct rte_flow_action_count *count = action->conf;
963         struct rte_flow *flow = dev_flow->flow;
964 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
965         defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
966         unsigned int size = sizeof(struct ibv_flow_spec_counter_action);
967         struct ibv_flow_spec_counter_action counter = {
968                 .type = IBV_FLOW_SPEC_ACTION_COUNT,
969                 .size = size,
970         };
971 #endif
972
973         if (!flow->counter) {
974                 flow->counter = flow_verbs_counter_new(dev, count->shared,
975                                                        count->id);
976                 if (!flow->counter)
977                         return rte_flow_error_set(error, rte_errno,
978                                                   RTE_FLOW_ERROR_TYPE_ACTION,
979                                                   action,
980                                                   "cannot get counter"
981                                                   " context.");
982         }
983 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
984         counter.counter_set_handle = flow->counter->cs->handle;
985         flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
986 #elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
987         counter.counters = flow->counter->cs;
988         flow_verbs_spec_add(&dev_flow->verbs, &counter, size);
989 #endif
990         return 0;
991 }
992
993 /**
994  * Internal validation function for validating both actions and items.
995  *
996  * @param[in] dev
997  *   Pointer to the Ethernet device structure.
998  * @param[in] attr
999  *   Pointer to the flow attributes.
1000  * @param[in] items
1001  *   Pointer to the list of items.
1002  * @param[in] actions
1003  *   Pointer to the list of actions.
1004  * @param[out] error
1005  *   Pointer to the error structure.
1006  *
1007  * @return
1008  *   0 on success, a negative errno value otherwise and rte_errno is set.
1009  */
1010 static int
1011 flow_verbs_validate(struct rte_eth_dev *dev,
1012                     const struct rte_flow_attr *attr,
1013                     const struct rte_flow_item items[],
1014                     const struct rte_flow_action actions[],
1015                     struct rte_flow_error *error)
1016 {
1017         int ret;
1018         uint64_t action_flags = 0;
1019         uint64_t item_flags = 0;
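        /* 0xff means the IP next-protocol field is not constrained yet. */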
1020         uint8_t next_protocol = 0xff;
1021
1022         if (items == NULL)
1023                 return -1;
1024         ret = mlx5_flow_validate_attributes(dev, attr, error);
1025         if (ret < 0)
1026                 return ret;
1027         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1028                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1029                 int ret = 0;
1030
1031                 switch (items->type) {
1032                 case RTE_FLOW_ITEM_TYPE_VOID:
1033                         break;
1034                 case RTE_FLOW_ITEM_TYPE_ETH:
1035                         ret = mlx5_flow_validate_item_eth(items, item_flags,
1036                                                           error);
1037                         if (ret < 0)
1038                                 return ret;
1039                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1040                                                MLX5_FLOW_LAYER_OUTER_L2;
1041                         break;
1042                 case RTE_FLOW_ITEM_TYPE_VLAN:
1043                         ret = mlx5_flow_validate_item_vlan(items, item_flags,
1044                                                            error);
1045                         if (ret < 0)
1046                                 return ret;
1047                         item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1048                                                 MLX5_FLOW_LAYER_INNER_VLAN) :
1049                                                (MLX5_FLOW_LAYER_OUTER_L2 |
1050                                                 MLX5_FLOW_LAYER_OUTER_VLAN);
1051                         break;
1052                 case RTE_FLOW_ITEM_TYPE_IPV4:
1053                         ret = mlx5_flow_validate_item_ipv4(items, item_flags,
1054                                                            error);
1055                         if (ret < 0)
1056                                 return ret;
1057                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1058                                                MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1059                         if (items->mask != NULL &&
1060                             ((const struct rte_flow_item_ipv4 *)
1061                              items->mask)->hdr.next_proto_id)
1062                                 next_protocol =
1063                                         ((const struct rte_flow_item_ipv4 *)
1064                                          (items->spec))->hdr.next_proto_id;
1065                         break;
1066                 case RTE_FLOW_ITEM_TYPE_IPV6:
1067                         ret = mlx5_flow_validate_item_ipv6(items, item_flags,
1068                                                            error);
1069                         if (ret < 0)
1070                                 return ret;
1071                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1072                                                MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1073                         if (items->mask != NULL &&
1074                             ((const struct rte_flow_item_ipv6 *)
1075                              items->mask)->hdr.proto)
1076                                 next_protocol =
1077                                         ((const struct rte_flow_item_ipv6 *)
1078                                          items->spec)->hdr.proto;
1079                         break;
1080                 case RTE_FLOW_ITEM_TYPE_UDP:
1081                         ret = mlx5_flow_validate_item_udp(items, item_flags,
1082                                                           next_protocol,
1083                                                           error);
1084                         if (ret < 0)
1085                                 return ret;
1086                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1087                                                MLX5_FLOW_LAYER_OUTER_L4_UDP;
1088                         break;
1089                 case RTE_FLOW_ITEM_TYPE_TCP:
1090                         ret = mlx5_flow_validate_item_tcp
1091                                                 (items, item_flags,
1092                                                  next_protocol,
1093                                                  &rte_flow_item_tcp_mask,
1094                                                  error);
1095                         if (ret < 0)
1096                                 return ret;
1097                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1098                                                MLX5_FLOW_LAYER_OUTER_L4_TCP;
1099                         break;
1100                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1101                         ret = mlx5_flow_validate_item_vxlan(items, item_flags,
1102                                                             error);
1103                         if (ret < 0)
1104                                 return ret;
1105                         item_flags |= MLX5_FLOW_LAYER_VXLAN;
1106                         break;
1107                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1108                         ret = mlx5_flow_validate_item_vxlan_gpe(items,
1109                                                                 item_flags,
1110                                                                 dev, error);
1111                         if (ret < 0)
1112                                 return ret;
1113                         item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1114                         break;
1115                 case RTE_FLOW_ITEM_TYPE_GRE:
1116                         ret = mlx5_flow_validate_item_gre(items, item_flags,
1117                                                           next_protocol, error);
1118                         if (ret < 0)
1119                                 return ret;
1120                         item_flags |= MLX5_FLOW_LAYER_GRE;
1121                         break;
1122                 case RTE_FLOW_ITEM_TYPE_MPLS:
1123                         ret = mlx5_flow_validate_item_mpls(items, item_flags,
1124                                                            next_protocol,
1125                                                            error);
1126                         if (ret < 0)
1127                                 return ret;
1128                         if (next_protocol != 0xff &&
1129                             next_protocol != IPPROTO_MPLS)
1130                                 return rte_flow_error_set
1131                                         (error, EINVAL,
1132                                          RTE_FLOW_ERROR_TYPE_ITEM, items,
1133                                          "protocol filtering not compatible"
1134                                          " with MPLS layer");
1135                         item_flags |= MLX5_FLOW_LAYER_MPLS;
1136                         break;
1137                 default:
1138                         return rte_flow_error_set(error, ENOTSUP,
1139                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1140                                                   NULL, "item not supported");
1141                 }
1142         }
1143         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1144                 switch (actions->type) {
1145                 case RTE_FLOW_ACTION_TYPE_VOID:
1146                         break;
1147                 case RTE_FLOW_ACTION_TYPE_FLAG:
1148                         ret = mlx5_flow_validate_action_flag(action_flags,
1149                                                              attr,
1150                                                              error);
1151                         if (ret < 0)
1152                                 return ret;
1153                         action_flags |= MLX5_FLOW_ACTION_FLAG;
1154                         break;
1155                 case RTE_FLOW_ACTION_TYPE_MARK:
1156                         ret = mlx5_flow_validate_action_mark(actions,
1157                                                              action_flags,
1158                                                              attr,
1159                                                              error);
1160                         if (ret < 0)
1161                                 return ret;
1162                         action_flags |= MLX5_FLOW_ACTION_MARK;
1163                         break;
1164                 case RTE_FLOW_ACTION_TYPE_DROP:
1165                         ret = mlx5_flow_validate_action_drop(action_flags,
1166                                                              attr,
1167                                                              error);
1168                         if (ret < 0)
1169                                 return ret;
1170                         action_flags |= MLX5_FLOW_ACTION_DROP;
1171                         break;
1172                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1173                         ret = mlx5_flow_validate_action_queue(actions,
1174                                                               action_flags, dev,
1175                                                               attr,
1176                                                               error);
1177                         if (ret < 0)
1178                                 return ret;
1179                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
1180                         break;
1181                 case RTE_FLOW_ACTION_TYPE_RSS:
1182                         ret = mlx5_flow_validate_action_rss(actions,
1183                                                             action_flags, dev,
1184                                                             attr,
1185                                                             error);
1186                         if (ret < 0)
1187                                 return ret;
1188                         action_flags |= MLX5_FLOW_ACTION_RSS;
1189                         break;
1190                 case RTE_FLOW_ACTION_TYPE_COUNT:
1191                         ret = mlx5_flow_validate_action_count(dev, attr, error);
1192                         if (ret < 0)
1193                                 return ret;
1194                         action_flags |= MLX5_FLOW_ACTION_COUNT;
1195                         break;
1196                 default:
1197                         return rte_flow_error_set(error, ENOTSUP,
1198                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1199                                                   actions,
1200                                                   "action not supported");
1201                 }
1202         }
1203         if (!(action_flags & MLX5_FLOW_FATE_ACTIONS))
1204                 return rte_flow_error_set(error, EINVAL,
1205                                           RTE_FLOW_ERROR_TYPE_ACTION, actions,
1206                                           "no fate action is found");
1207         return 0;
1208 }
1209
1210 /**
1211  * Calculate the size in bytes needed for the action part of the Verbs
1212  * flow.
1213  *
1214  * @param[in] actions
1215  *   Pointer to the list of actions.
1216  *
1217  * @return
1218  *   The size of the memory needed for all actions.
1219  */
1220 static int
1221 flow_verbs_get_actions_size(const struct rte_flow_action actions[])
1222 {
1223         int size = 0;
1224
1225         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1226                 switch (actions->type) {
1227                 case RTE_FLOW_ACTION_TYPE_VOID:
1228                         break;
1229                 case RTE_FLOW_ACTION_TYPE_FLAG:
1230                         size += sizeof(struct ibv_flow_spec_action_tag);
1231                         break;
1232                 case RTE_FLOW_ACTION_TYPE_MARK:
1233                         size += sizeof(struct ibv_flow_spec_action_tag);
1234                         break;
1235                 case RTE_FLOW_ACTION_TYPE_DROP:
1236                         size += sizeof(struct ibv_flow_spec_action_drop);
1237                         break;
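                /*
                 * Queue and RSS fates are implemented through hash Rx queue
                 * objects and consume no Verbs spec space.
                 */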
1238                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1239                         break;
1240                 case RTE_FLOW_ACTION_TYPE_RSS:
1241                         break;
1242                 case RTE_FLOW_ACTION_TYPE_COUNT:
1243 #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
1244         defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
1245                         size += sizeof(struct ibv_flow_spec_counter_action);
1246 #endif
1247                         break;
1248                 default:
1249                         break;
1250                 }
1251         }
1252         return size;
1253 }
1254
1255 /**
1256  * Calculate the size in bytes needed for the item part of the Verbs
1257  * flow.
1258  *
1259  * @param[in] items
1260  *   Pointer to the list of items.
1261  *
1262  * @return
1263  *   The size of the memory needed for all items.
1264  */
1265 static int
1266 flow_verbs_get_items_size(const struct rte_flow_item items[])
1267 {
1268         int size = 0;
1269
1270         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
1271                 switch (items->type) {
1272                 case RTE_FLOW_ITEM_TYPE_VOID:
1273                         break;
1274                 case RTE_FLOW_ITEM_TYPE_ETH:
1275                         size += sizeof(struct ibv_flow_spec_eth);
1276                         break;
1277                 case RTE_FLOW_ITEM_TYPE_VLAN:
1278                         size += sizeof(struct ibv_flow_spec_eth);
1279                         break;
1280                 case RTE_FLOW_ITEM_TYPE_IPV4:
1281                         size += sizeof(struct ibv_flow_spec_ipv4_ext);
1282                         break;
1283                 case RTE_FLOW_ITEM_TYPE_IPV6:
1284                         size += sizeof(struct ibv_flow_spec_ipv6);
1285                         break;
1286                 case RTE_FLOW_ITEM_TYPE_UDP:
1287                         size += sizeof(struct ibv_flow_spec_tcp_udp);
1288                         break;
1289                 case RTE_FLOW_ITEM_TYPE_TCP:
1290                         size += sizeof(struct ibv_flow_spec_tcp_udp);
1291                         break;
1292                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1293                         size += sizeof(struct ibv_flow_spec_tunnel);
1294                         break;
1295                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1296                         size += sizeof(struct ibv_flow_spec_tunnel);
1297                         break;
1298 #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
1299                 case RTE_FLOW_ITEM_TYPE_GRE:
1300                         size += sizeof(struct ibv_flow_spec_gre);
1301                         break;
1302                 case RTE_FLOW_ITEM_TYPE_MPLS:
1303                         size += sizeof(struct ibv_flow_spec_mpls);
1304                         break;
1305 #else
1306                 case RTE_FLOW_ITEM_TYPE_GRE:
1307                         size += sizeof(struct ibv_flow_spec_tunnel);
1308                         break;
1309 #endif
1310                 default:
1311                         break;
1312                 }
1313         }
1314         return size;
1315 }
1316
1317 /**
1318  * Internal preparation function. Allocate mlx5_flow with the required size.
1319  * The required size is calculated based on the given actions and items.
1321  *
1322  * @param[in] attr
1323  *   Pointer to the flow attributes.
1324  * @param[in] items
1325  *   Pointer to the list of items.
1326  * @param[in] actions
1327  *   Pointer to the list of actions.
1328  * @param[out] error
1329  *   Pointer to the error structure.
1330  *
1331  * @return
1332  *   Pointer to mlx5_flow object on success, otherwise NULL and rte_errno
1333  *   is set.
1334  */
1335 static struct mlx5_flow *
1336 flow_verbs_prepare(const struct rte_flow_attr *attr __rte_unused,
1337                    const struct rte_flow_item items[],
1338                    const struct rte_flow_action actions[],
1339                    struct rte_flow_error *error)
1340 {
1341         uint32_t size = sizeof(struct mlx5_flow) + sizeof(struct ibv_flow_attr);
1342         struct mlx5_flow *flow;
1343
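        /*
         * A single allocation holds the mlx5_flow handle, the Verbs flow
         * attribute and all the Verbs specifications laid out back to back.
         */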
1344         size += flow_verbs_get_actions_size(actions);
1345         size += flow_verbs_get_items_size(items);
1346         flow = rte_calloc(__func__, 1, size, 0);
1347         if (!flow) {
1348                 rte_flow_error_set(error, ENOMEM,
1349                                    RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1350                                    "not enough memory to create flow");
1351                 return NULL;
1352         }
1353         flow->verbs.attr = (void *)(flow + 1);
1354         flow->verbs.specs =
1355                 (uint8_t *)(flow + 1) + sizeof(struct ibv_flow_attr);
1356         return flow;
1357 }
1358
1359 /**
1360  * Fill the flow with Verbs specifications.
1361  *
1362  * @param[in] dev
1363  *   Pointer to Ethernet device.
1364  * @param[in, out] dev_flow
1365  *   Pointer to the mlx5 flow.
1366  * @param[in] attr
1367  *   Pointer to the flow attributes.
1368  * @param[in] items
1369  *   Pointer to the list of items.
1370  * @param[in] actions
1371  *   Pointer to the list of actions.
1372  * @param[out] error
1373  *   Pointer to the error structure.
1374  *
1375  * @return
1376  *   0 on success, a negative errno value otherwise and rte_errno is set.
1377  */
1378 static int
1379 flow_verbs_translate(struct rte_eth_dev *dev,
1380                      struct mlx5_flow *dev_flow,
1381                      const struct rte_flow_attr *attr,
1382                      const struct rte_flow_item items[],
1383                      const struct rte_flow_action actions[],
1384                      struct rte_flow_error *error)
1385 {
1386         struct rte_flow *flow = dev_flow->flow;
1387         uint64_t item_flags = 0;
1388         uint64_t action_flags = 0;
1389         uint64_t priority = attr->priority;
1390         uint32_t subpriority = 0;
1391         struct priv *priv = dev->data->dev_private;
1392
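        /* The reserved priority lets the PMD pick the lowest configured one. */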
1393         if (priority == MLX5_FLOW_PRIO_RSVD)
1394                 priority = priv->config.flow_prio - 1;
1395         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1396                 int ret;
1397
1398                 switch (actions->type) {
1399                 case RTE_FLOW_ACTION_TYPE_VOID:
1400                         break;
1401                 case RTE_FLOW_ACTION_TYPE_FLAG:
1402                         flow_verbs_translate_action_flag(dev_flow, actions);
1403                         action_flags |= MLX5_FLOW_ACTION_FLAG;
1404                         break;
1405                 case RTE_FLOW_ACTION_TYPE_MARK:
1406                         flow_verbs_translate_action_mark(dev_flow, actions);
1407                         action_flags |= MLX5_FLOW_ACTION_MARK;
1408                         break;
1409                 case RTE_FLOW_ACTION_TYPE_DROP:
1410                         flow_verbs_translate_action_drop(dev_flow, actions);
1411                         action_flags |= MLX5_FLOW_ACTION_DROP;
1412                         break;
1413                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1414                         flow_verbs_translate_action_queue(dev_flow, actions);
1415                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
1416                         break;
1417                 case RTE_FLOW_ACTION_TYPE_RSS:
1418                         flow_verbs_translate_action_rss(dev_flow, actions);
1419                         action_flags |= MLX5_FLOW_ACTION_RSS;
1420                         break;
1421                 case RTE_FLOW_ACTION_TYPE_COUNT:
1422                         ret = flow_verbs_translate_action_count(dev_flow,
1423                                                                 actions,
1424                                                                 dev, error);
1425                         if (ret < 0)
1426                                 return ret;
1427                         action_flags |= MLX5_FLOW_ACTION_COUNT;
1428                         break;
1429                 default:
1430                         return rte_flow_error_set(error, ENOTSUP,
1431                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1432                                                   actions,
1433                                                   "action not supported");
1434                 }
1435         }
1436         flow->actions = action_flags;
1437         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
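                /* Layers parsed after a tunnel item are flagged as inner. */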
1438                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
1439
1440                 switch (items->type) {
1441                 case RTE_FLOW_ITEM_TYPE_VOID:
1442                         break;
1443                 case RTE_FLOW_ITEM_TYPE_ETH:
1444                         flow_verbs_translate_item_eth(dev_flow, items,
1445                                                       item_flags);
1446                         subpriority = MLX5_PRIORITY_MAP_L2;
1447                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
1448                                                MLX5_FLOW_LAYER_OUTER_L2;
1449                         break;
1450                 case RTE_FLOW_ITEM_TYPE_VLAN:
1451                         flow_verbs_translate_item_vlan(dev_flow, items,
1452                                                        item_flags);
1453                         subpriority = MLX5_PRIORITY_MAP_L2;
1454                         item_flags |= tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
1455                                                 MLX5_FLOW_LAYER_INNER_VLAN) :
1456                                                (MLX5_FLOW_LAYER_OUTER_L2 |
1457                                                 MLX5_FLOW_LAYER_OUTER_VLAN);
1458                         break;
1459                 case RTE_FLOW_ITEM_TYPE_IPV4:
1460                         flow_verbs_translate_item_ipv4(dev_flow, items,
1461                                                        item_flags);
1462                         subpriority = MLX5_PRIORITY_MAP_L3;
1463                         dev_flow->verbs.hash_fields |=
1464                                 mlx5_flow_hashfields_adjust
1465                                         (dev_flow, tunnel,
1466                                          MLX5_IPV4_LAYER_TYPES,
1467                                          MLX5_IPV4_IBV_RX_HASH);
1468                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
1469                                                MLX5_FLOW_LAYER_OUTER_L3_IPV4;
1470                         break;
1471                 case RTE_FLOW_ITEM_TYPE_IPV6:
1472                         flow_verbs_translate_item_ipv6(dev_flow, items,
1473                                                        item_flags);
1474                         subpriority = MLX5_PRIORITY_MAP_L3;
1475                         dev_flow->verbs.hash_fields |=
1476                                 mlx5_flow_hashfields_adjust
1477                                         (dev_flow, tunnel,
1478                                          MLX5_IPV6_LAYER_TYPES,
1479                                          MLX5_IPV6_IBV_RX_HASH);
1480                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
1481                                                MLX5_FLOW_LAYER_OUTER_L3_IPV6;
1482                         break;
1483                 case RTE_FLOW_ITEM_TYPE_TCP:
1484                         flow_verbs_translate_item_tcp(dev_flow, items,
1485                                                       item_flags);
1486                         subpriority = MLX5_PRIORITY_MAP_L4;
1487                         dev_flow->verbs.hash_fields |=
1488                                 mlx5_flow_hashfields_adjust
1489                                         (dev_flow, tunnel, ETH_RSS_TCP,
1490                                          (IBV_RX_HASH_SRC_PORT_TCP |
1491                                           IBV_RX_HASH_DST_PORT_TCP));
1492                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
1493                                                MLX5_FLOW_LAYER_OUTER_L4_TCP;
1494                         break;
1495                 case RTE_FLOW_ITEM_TYPE_UDP:
1496                         flow_verbs_translate_item_udp(dev_flow, items,
1497                                                       item_flags);
1498                         subpriority = MLX5_PRIORITY_MAP_L4;
1499                         dev_flow->verbs.hash_fields |=
1500                                 mlx5_flow_hashfields_adjust
1501                                         (dev_flow, tunnel, ETH_RSS_UDP,
1502                                          (IBV_RX_HASH_SRC_PORT_UDP |
1503                                           IBV_RX_HASH_DST_PORT_UDP));
1504                         item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
1505                                                MLX5_FLOW_LAYER_OUTER_L4_UDP;
1506                         break;
1507                 case RTE_FLOW_ITEM_TYPE_VXLAN:
1508                         flow_verbs_translate_item_vxlan(dev_flow, items,
1509                                                         item_flags);
1510                         subpriority = MLX5_PRIORITY_MAP_L2;
1511                         item_flags |= MLX5_FLOW_LAYER_VXLAN;
1512                         break;
1513                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
1514                         flow_verbs_translate_item_vxlan_gpe(dev_flow, items,
1515                                                             item_flags);
1516                         subpriority = MLX5_PRIORITY_MAP_L2;
1517                         item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
1518                         break;
1519                 case RTE_FLOW_ITEM_TYPE_GRE:
1520                         flow_verbs_translate_item_gre(dev_flow, items,
1521                                                       item_flags);
1522                         subpriority = MLX5_PRIORITY_MAP_L2;
1523                         item_flags |= MLX5_FLOW_LAYER_GRE;
1524                         break;
1525                 case RTE_FLOW_ITEM_TYPE_MPLS:
1526                         flow_verbs_translate_item_mpls(dev_flow, items,
1527                                                        item_flags);
1528                         subpriority = MLX5_PRIORITY_MAP_L2;
1529                         item_flags |= MLX5_FLOW_LAYER_MPLS;
1530                         break;
1531                 default:
1532                         return rte_flow_error_set(error, ENOTSUP,
1533                                                   RTE_FLOW_ERROR_TYPE_ITEM,
1534                                                   NULL,
1535                                                   "item not supported");
1536                 }
1537         }
1538         dev_flow->layers = item_flags;
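        /* Derive the final Verbs priority from the deepest matched layer. */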
1539         dev_flow->verbs.attr->priority =
1540                 mlx5_flow_adjust_priority(dev, priority, subpriority);
1541         return 0;
1542 }
1543
1544 /**
1545  * Remove the flow from the NIC but keep it in memory.
1546  *
1547  * @param[in] dev
1548  *   Pointer to the Ethernet device structure.
1549  * @param[in, out] flow
1550  *   Pointer to flow structure.
1551  */
1552 static void
1553 flow_verbs_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
1554 {
1555         struct mlx5_flow_verbs *verbs;
1556         struct mlx5_flow *dev_flow;
1557
1558         if (!flow)
1559                 return;
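        /* Release the Verbs flows and hash Rx queues, keep dev_flow memory. */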
1560         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1561                 verbs = &dev_flow->verbs;
1562                 if (verbs->flow) {
1563                         claim_zero(mlx5_glue->destroy_flow(verbs->flow));
1564                         verbs->flow = NULL;
1565                 }
1566                 if (verbs->hrxq) {
1567                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
1568                                 mlx5_hrxq_drop_release(dev);
1569                         else
1570                                 mlx5_hrxq_release(dev, verbs->hrxq);
1571                         verbs->hrxq = NULL;
1572                 }
1573         }
1574 }
1575
1576 /**
1577  * Remove the flow from the NIC and the memory.
1578  *
1579  * @param[in] dev
1580  *   Pointer to the Ethernet device structure.
1581  * @param[in, out] flow
1582  *   Pointer to flow structure.
1583  */
1584 static void
1585 flow_verbs_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
1586 {
1587         struct mlx5_flow *dev_flow;
1588
1589         if (!flow)
1590                 return;
1591         flow_verbs_remove(dev, flow);
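        /* Then free every device sub-flow attached to this rte_flow. */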
1592         while (!LIST_EMPTY(&flow->dev_flows)) {
1593                 dev_flow = LIST_FIRST(&flow->dev_flows);
1594                 LIST_REMOVE(dev_flow, next);
1595                 rte_free(dev_flow);
1596         }
1597         if (flow->counter) {
1598                 flow_verbs_counter_release(flow->counter);
1599                 flow->counter = NULL;
1600         }
1601 }
1602
1603 /**
1604  * Apply the flow to the NIC.
1605  *
1606  * @param[in] dev
1607  *   Pointer to the Ethernet device structure.
1608  * @param[in, out] flow
1609  *   Pointer to flow structure.
1610  * @param[out] error
1611  *   Pointer to error structure.
1612  *
1613  * @return
1614  *   0 on success, a negative errno value otherwise and rte_errno is set.
1615  */
1616 static int
1617 flow_verbs_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
1618                  struct rte_flow_error *error)
1619 {
1620         struct mlx5_flow_verbs *verbs;
1621         struct mlx5_flow *dev_flow;
1622         int err;
1623
1624         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1625                 verbs = &dev_flow->verbs;
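                /* Drop flows use the drop queue, others an RSS hash Rx queue. */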
1626                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
1627                         verbs->hrxq = mlx5_hrxq_drop_new(dev);
1628                         if (!verbs->hrxq) {
1629                                 rte_flow_error_set
1630                                         (error, errno,
1631                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1632                                          "cannot get drop hash queue");
1633                                 goto error;
1634                         }
1635                 } else {
1636                         struct mlx5_hrxq *hrxq;
1637
1638                         hrxq = mlx5_hrxq_get(dev, flow->key,
1639                                              MLX5_RSS_HASH_KEY_LEN,
1640                                              verbs->hash_fields,
1641                                              (*flow->queue),
1642                                              flow->rss.queue_num);
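                        /* No matching hash Rx queue found, create a new one. */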
1643                         if (!hrxq)
1644                                 hrxq = mlx5_hrxq_new(dev, flow->key,
1645                                                      MLX5_RSS_HASH_KEY_LEN,
1646                                                      verbs->hash_fields,
1647                                                      (*flow->queue),
1648                                                      flow->rss.queue_num,
1649                                                      !!(dev_flow->layers &
1650                                                       MLX5_FLOW_LAYER_TUNNEL));
1651                         if (!hrxq) {
1652                                 rte_flow_error_set
1653                                         (error, rte_errno,
1654                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1655                                          "cannot get hash queue");
1656                                 goto error;
1657                         }
1658                         verbs->hrxq = hrxq;
1659                 }
1660                 verbs->flow = mlx5_glue->create_flow(verbs->hrxq->qp,
1661                                                      verbs->attr);
1662                 if (!verbs->flow) {
1663                         rte_flow_error_set(error, errno,
1664                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1665                                            NULL,
1666                                            "hardware refuses to create flow");
1667                         goto error;
1668                 }
1669         }
1670         return 0;
1671 error:
1672         err = rte_errno; /* Save rte_errno before cleanup. */
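        /* Release any hash Rx queue already acquired for this flow. */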
1673         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
1674                 verbs = &dev_flow->verbs;
1675                 if (verbs->hrxq) {
1676                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
1677                                 mlx5_hrxq_drop_release(dev);
1678                         else
1679                                 mlx5_hrxq_release(dev, verbs->hrxq);
1680                         verbs->hrxq = NULL;
1681                 }
1682         }
1683         rte_errno = err; /* Restore rte_errno. */
1684         return -rte_errno;
1685 }
1686
1687 /**
1688  * Query a flow.
1689  *
1690  * @see rte_flow_query()
1691  * @see rte_flow_ops
1692  */
1693 static int
1694 flow_verbs_query(struct rte_eth_dev *dev,
1695                  struct rte_flow *flow,
1696                  const struct rte_flow_action *actions,
1697                  void *data,
1698                  struct rte_flow_error *error)
1699 {
1700         int ret = -EINVAL;
1701
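        /* Only the COUNT action is queryable, other non-void actions are rejected. */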
1702         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1703                 switch (actions->type) {
1704                 case RTE_FLOW_ACTION_TYPE_VOID:
1705                         break;
1706                 case RTE_FLOW_ACTION_TYPE_COUNT:
1707                         ret = flow_verbs_counter_query(dev, flow, data, error);
1708                         break;
1709                 default:
1710                         return rte_flow_error_set(error, ENOTSUP,
1711                                                   RTE_FLOW_ERROR_TYPE_ACTION,
1712                                                   actions,
1713                                                   "action not supported");
1714                 }
1715         }
1716         return ret;
1717 }
1718
1719 const struct mlx5_flow_driver_ops mlx5_flow_verbs_drv_ops = {
1720         .validate = flow_verbs_validate,
1721         .prepare = flow_verbs_prepare,
1722         .translate = flow_verbs_translate,
1723         .apply = flow_verbs_apply,
1724         .remove = flow_verbs_remove,
1725         .destroy = flow_verbs_destroy,
1726         .query = flow_verbs_query,
1727 };