/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = RTE_BE32(UINT32_MAX)
        };
        int ret;
        uint64_t offloads = dev->data->dev_conf.txmode.offloads;

        if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
                return rte_flow_error_set(error, EPERM,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on metadata offload "
                                          "configuration is off for this port");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}
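
/*
 * Illustrative example, not part of the driver sources: an item that
 * passes the validation above matches metadata 0x1234 set on Tx, e.g.:
 *
 *      struct rte_flow_item_meta spec = { .data = RTE_BE32(0x1234) };
 *      struct rte_flow_item_meta mask = { .data = RTE_BE32(UINT32_MAX) };
 *      struct rte_flow_item item = {
 *              .type = RTE_FLOW_ITEM_TYPE_META,
 *              .spec = &spec,
 *              .mask = &mask,
 *      };
 *
 * DEV_TX_OFFLOAD_MATCH_METADATA must be set in txmode.offloads, the spec
 * must be non-NULL with non-zero data, and the flow must be egress.
 */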

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attributes,
                            struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;

        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL,
                                          "groups are not supported");
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          NULL,
                                          "priority out of range");
        if (attributes->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL,
                                          "transfer is not supported");
        if (!(attributes->egress ^ attributes->ingress))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                          "must specify exactly one of "
                                          "ingress or egress");
        return 0;
}
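
/*
 * Illustrative example, not part of the driver sources: attributes
 * accepted by the check above use group 0, no transfer, an in-range (or
 * reserved) priority, and exactly one direction:
 *
 *      struct rte_flow_attr attr = {
 *              .group = 0,
 *              .priority = 0,
 *              .ingress = 1,
 *              .egress = 0,
 *      };
 */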

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        uint64_t action_flags = 0;
        uint64_t item_flags = 0;
        int tunnel = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;

        if (items == NULL)
                return -1;
        ret = flow_dv_validate_attributes(dev, attr, error);
        if (ret < 0)
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                               MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                               MLX5_FLOW_LAYER_OUTER_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                               MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto)
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
                                                (items, item_flags,
                                                 next_protocol,
                                                 &rte_flow_item_tcp_mask,
                                                 error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                               MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        item_flags |= tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                               MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags, dev,
                                                                error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_META:
                        ret = flow_dv_validate_item_meta(dev, items, attr,
                                                         error);
                        if (ret < 0)
                                return ret;
                        item_flags |= MLX5_FLOW_ITEM_METADATA;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = mlx5_flow_validate_action_flag(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = mlx5_flow_validate_action_mark(actions,
                                                             action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = mlx5_flow_validate_action_count(dev, attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                          "no fate action is found");
        return 0;
}
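
/*
 * Illustrative example, not part of the driver sources: a pattern and
 * action list that passes the validation above could be
 * eth / ipv4 / udp with a queue fate action:
 *
 *      struct rte_flow_item items[] = {
 *              { .type = RTE_FLOW_ITEM_TYPE_ETH },
 *              { .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *              { .type = RTE_FLOW_ITEM_TYPE_UDP },
 *              { .type = RTE_FLOW_ITEM_TYPE_END },
 *      };
 *      struct rte_flow_action_queue queue = { .index = 0 };
 *      struct rte_flow_action actions[] = {
 *              { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *              { .type = RTE_FLOW_ACTION_TYPE_END },
 *      };
 *
 * An ingress flow must carry at least one fate action (drop, queue or
 * RSS) and at most MLX5_DV_MAX_NUMBER_OF_ACTIONS actions in total.
 */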

/**
 * Internal preparation function. Allocates the DV flow size;
 * this size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] item_flags
 *   Pointer to bit mask of all items detected.
 * @param[out] action_flags
 *   Pointer to bit mask of all actions detected.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
                const struct rte_flow_item items[] __rte_unused,
                const struct rte_flow_action actions[] __rte_unused,
                uint64_t *item_flags __rte_unused,
                uint64_t *action_flags __rte_unused,
                struct rte_flow_error *error)
{
        uint32_t size = sizeof(struct mlx5_flow);
        struct mlx5_flow *flow;

        flow = rte_calloc(__func__, 1, size, 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow");
                return NULL;
        }
        flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
        return flow;
}

/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
                           const struct rte_flow_item *item, int inner)
{
        const struct rte_flow_item_eth *eth_m = item->mask;
        const struct rte_flow_item_eth *eth_v = item->spec;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        char *l24_v;
        unsigned int i;

        if (!eth_v)
                return;
        if (!eth_m)
                eth_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
               &eth_m->dst, sizeof(eth_m->dst));
        /* The value must be in the range of the mask. */
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
        for (i = 0; i < sizeof(eth_m->dst); ++i)
                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
               &eth_m->src, sizeof(eth_m->src));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
        /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
                l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
                 rte_be_to_cpu_16(eth_m->type));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
        *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}
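
/*
 * Illustrative note, not part of the driver sources: the loops above
 * keep the value within the mask, e.g. with mask ff:ff:ff:ff:ff:00 and
 * spec 00:11:22:33:44:55 the programmed value is 00:11:22:33:44:00, so
 * a masked-out byte can never cause a mismatch in hardware.
 */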

/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_vlan *vlan_m = item->mask;
        const struct rte_flow_item_vlan *vlan_v = item->spec;
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(0x0fff),
                .inner_type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        uint16_t tci_m;
        uint16_t tci_v;

        if (!vlan_v)
                return;
        if (!vlan_m)
                vlan_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        tci_m = rte_be_to_cpu_16(vlan_m->tci);
        tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
}
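
/*
 * Illustrative note, not part of the driver sources: the shifts above
 * split the TCI into VID (bits 0-11), CFI/DEI (bit 12) and PCP
 * (bits 13-15); MLX5_SET() truncates each value to its field width.
 * For tci = 0xE00A that yields first_vid = 10, first_cfi = 0 and
 * first_prio = 7.
 */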

/**
 * Add IPV4 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv4(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
        const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
        const struct rte_flow_item_ipv4 nic_mask = {
                .hdr = {
                        .src_addr = RTE_BE32(0xffffffff),
                        .dst_addr = RTE_BE32(0xffffffff),
                        .type_of_service = 0xff,
                        .next_proto_id = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        char *l24_m;
        char *l24_v;
        uint8_t tos;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
        if (!ipv4_v)
                return;
        if (!ipv4_m)
                ipv4_m = &nic_mask;
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv4_layout.ipv4);
        *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
        *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
        tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
                 ipv4_m->hdr.type_of_service);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
                 ipv4_m->hdr.type_of_service >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv4_m->hdr.next_proto_id);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
}

/**
 * Add IPV6 item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_ipv6(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
        const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
        const struct rte_flow_item_ipv6 nic_mask = {
                .hdr = {
                        .src_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .dst_addr =
                                "\xff\xff\xff\xff\xff\xff\xff\xff"
                                "\xff\xff\xff\xff\xff\xff\xff\xff",
                        .vtc_flow = RTE_BE32(0xffffffff),
                        .proto = 0xff,
                        .hop_limits = 0xff,
                },
        };
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *l24_m;
        char *l24_v;
        uint32_t vtc_m;
        uint32_t vtc_v;
        int i;
        int size;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
        if (!ipv6_v)
                return;
        if (!ipv6_m)
                ipv6_m = &nic_mask;
        size = sizeof(ipv6_m->hdr.dst_addr);
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
        l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
                             src_ipv4_src_ipv6.ipv6_layout.ipv6);
        memcpy(l24_m, ipv6_m->hdr.src_addr, size);
        for (i = 0; i < size; ++i)
                l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
        /* TOS. */
        vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
        vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
        /* Label. */
        if (inner) {
                MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
                         vtc_v);
        } else {
                MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
                         vtc_m);
                MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
                         vtc_v);
        }
        /* Protocol. */
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
                 ipv6_m->hdr.proto);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
                 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
}
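
/*
 * Illustrative note, not part of the driver sources: vtc_flow packs
 * version (bits 28-31), traffic class (bits 20-27) and flow label
 * (bits 0-19). Hence ip_ecn takes the ECN bits via vtc >> 20, ip_dscp
 * takes the DSCP bits via vtc >> 22, and the flow label fields receive
 * the low 20 bits, each truncated to its field width by MLX5_SET().
 */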

/**
 * Add TCP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_tcp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_tcp *tcp_m = item->mask;
        const struct rte_flow_item_tcp *tcp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
        if (!tcp_v)
                return;
        if (!tcp_m)
                tcp_m = &rte_flow_item_tcp_mask;
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
                 rte_be_to_cpu_16(tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
                 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
                 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
                 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
}

/**
 * Add UDP item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_udp(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_udp *udp_m = item->mask;
        const struct rte_flow_item_udp *udp_v = item->spec;
        void *headers_m;
        void *headers_v;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
        if (!udp_v)
                return;
        if (!udp_m)
                udp_m = &rte_flow_item_udp_mask;
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
                 rte_be_to_cpu_16(udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
                 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
                 rte_be_to_cpu_16(udp_m->hdr.dst_port));
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
                 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
}

/**
 * Add GRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_gre(void *matcher, void *key,
                           const struct rte_flow_item *item,
                           int inner)
{
        const struct rte_flow_item_gre *gre_m = item->mask;
        const struct rte_flow_item_gre *gre_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
        MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
        if (!gre_v)
                return;
        if (!gre_m)
                gre_m = &rte_flow_item_gre_mask;
        MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
                 rte_be_to_cpu_16(gre_m->protocol));
        MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
                 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
}

/**
 * Add NVGRE item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_nvgre(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_nvgre *nvgre_m = item->mask;
        const struct rte_flow_item_nvgre *nvgre_v = item->spec;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        const char *tni_flow_id_m;
        const char *tni_flow_id_v;
        char *gre_key_m;
        char *gre_key_v;
        int size;
        int i;

        flow_dv_translate_item_gre(matcher, key, item, inner);
        if (!nvgre_v)
                return;
        if (!nvgre_m)
                nvgre_m = &rte_flow_item_nvgre_mask;
        /* Read TNI only after the NULL mask has been replaced. */
        tni_flow_id_m = (const char *)nvgre_m->tni;
        tni_flow_id_v = (const char *)nvgre_v->tni;
        size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
        gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
        gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
        memcpy(gre_key_m, tni_flow_id_m, size);
        for (i = 0; i < size; ++i)
                gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
}
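
/*
 * Illustrative note, not part of the driver sources: NVGRE reuses the
 * GRE key field, so the 24-bit TNI and 8-bit flow_id are copied
 * together as one 32-bit value starting at gre_key_h, with the value
 * ANDed against the mask byte by byte as for the other items.
 */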

/**
 * Add VXLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vxlan(void *matcher, void *key,
                             const struct rte_flow_item *item,
                             int inner)
{
        const struct rte_flow_item_vxlan *vxlan_m = item->mask;
        const struct rte_flow_item_vxlan *vxlan_v = item->spec;
        void *headers_m;
        void *headers_v;
        void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
        void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
        char *vni_m;
        char *vni_v;
        uint16_t dport;
        int size;
        int i;

        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
                MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
        if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
                MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
                MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
        }
        if (!vxlan_v)
                return;
        if (!vxlan_m)
                vxlan_m = &rte_flow_item_vxlan_mask;
        size = sizeof(vxlan_m->vni);
        vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
        vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
        memcpy(vni_m, vxlan_m->vni, size);
        for (i = 0; i < size; ++i)
                vni_v[i] = vni_m[i] & vxlan_v->vni[i];
}
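
/*
 * Illustrative note, not part of the driver sources: when the pattern
 * does not already constrain the UDP destination port, the block above
 * pins it to the default for the tunnel type (MLX5_UDP_PORT_VXLAN for
 * VXLAN, MLX5_UDP_PORT_VXLAN_GPE for VXLAN-GPE), so the VNI match
 * cannot apply to unrelated UDP traffic.
 */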

/**
 * Add META item to the matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 */
static void
flow_dv_translate_item_meta(void *matcher, void *key,
                            const struct rte_flow_item *item)
{
        const struct rte_flow_item_meta *meta_m;
        const struct rte_flow_item_meta *meta_v;
        void *misc2_m =
                MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
        void *misc2_v =
                MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);

        meta_m = (const void *)item->mask;
        if (!meta_m)
                meta_m = &rte_flow_item_meta_mask;
        meta_v = (const void *)item->spec;
        if (meta_v) {
                MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
                         rte_be_to_cpu_32(meta_m->data));
                MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
                         rte_be_to_cpu_32(meta_v->data & meta_m->data));
        }
}

/**
 * Update the matcher and the value based on the selected item.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_create_item(void *matcher, void *key,
                    const struct rte_flow_item *item,
                    struct mlx5_flow *dev_flow,
                    int inner)
{
        struct mlx5_flow_dv_matcher *tmatcher = matcher;

        switch (item->type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                flow_dv_translate_item_eth(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L2;
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                flow_dv_translate_item_vlan(tmatcher->mask.buf, key, item,
                                            inner);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                flow_dv_translate_item_ipv4(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV4_LAYER_TYPES,
                                                    MLX5_IPV4_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                flow_dv_translate_item_ipv6(tmatcher->mask.buf, key, item,
                                            inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L3;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    MLX5_IPV6_LAYER_TYPES,
                                                    MLX5_IPV6_IBV_RX_HASH);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                flow_dv_translate_item_tcp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_TCP,
                                                    (IBV_RX_HASH_SRC_PORT_TCP |
                                                     IBV_RX_HASH_DST_PORT_TCP));
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                flow_dv_translate_item_udp(tmatcher->mask.buf, key, item,
                                           inner);
                tmatcher->priority = MLX5_PRIORITY_MAP_L4;
                dev_flow->dv.hash_fields |=
                        mlx5_flow_hashfields_adjust(dev_flow, inner,
                                                    ETH_RSS_UDP,
                                                    (IBV_RX_HASH_SRC_PORT_UDP |
                                                     IBV_RX_HASH_DST_PORT_UDP));
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                flow_dv_translate_item_gre(tmatcher->mask.buf, key, item,
                                           inner);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                flow_dv_translate_item_nvgre(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                flow_dv_translate_item_vxlan(tmatcher->mask.buf, key, item,
                                             inner);
                break;
        case RTE_FLOW_ITEM_TYPE_META:
                flow_dv_translate_item_meta(tmatcher->mask.buf, key, item);
                break;
        default:
                break;
        }
}

/**
 * Store the requested actions in an array.
 *
 * @param[in] action
 *   Flow action to translate.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 */
static void
flow_dv_create_action(const struct rte_flow_action *action,
                      struct mlx5_flow *dev_flow)
{
        const struct rte_flow_action_queue *queue;
        const struct rte_flow_action_rss *rss;
        int actions_n = dev_flow->dv.actions_n;
        struct rte_flow *flow = dev_flow->flow;

        switch (action->type) {
        case RTE_FLOW_ACTION_TYPE_VOID:
                break;
        case RTE_FLOW_ACTION_TYPE_FLAG:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
                actions_n++;
                flow->actions |= MLX5_FLOW_ACTION_FLAG;
                break;
        case RTE_FLOW_ACTION_TYPE_MARK:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_TAG;
                dev_flow->dv.actions[actions_n].tag_value =
                        mlx5_flow_mark_set
                        (((const struct rte_flow_action_mark *)
                          (action->conf))->id);
                flow->actions |= MLX5_FLOW_ACTION_MARK;
                actions_n++;
                break;
        case RTE_FLOW_ACTION_TYPE_DROP:
                dev_flow->dv.actions[actions_n].type = MLX5DV_FLOW_ACTION_DROP;
                flow->actions |= MLX5_FLOW_ACTION_DROP;
                break;
        case RTE_FLOW_ACTION_TYPE_QUEUE:
                queue = action->conf;
                flow->rss.queue_num = 1;
                (*flow->queue)[0] = queue->index;
                flow->actions |= MLX5_FLOW_ACTION_QUEUE;
                break;
        case RTE_FLOW_ACTION_TYPE_RSS:
                rss = action->conf;
                if (flow->queue)
                        memcpy((*flow->queue), rss->queue,
                               rss->queue_num * sizeof(uint16_t));
                flow->rss.queue_num = rss->queue_num;
                memcpy(flow->key, rss->key, MLX5_RSS_HASH_KEY_LEN);
                flow->rss.types = rss->types;
                flow->rss.level = rss->level;
                /* Added to array only in apply since we need the QP. */
                flow->actions |= MLX5_FLOW_ACTION_RSS;
                break;
        default:
                break;
        }
        dev_flow->dv.actions_n = actions_n;
}

static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };

#define HEADER_IS_ZERO(match_criteria, headers)                              \
        !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
                 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))

/**
 * Calculate flow matcher enable bitmap.
 *
 * @param match_criteria
 *   Pointer to flow matcher criteria.
 *
 * @return
 *   Bitmap of enabled fields.
 */
static uint8_t
flow_dv_matcher_enable(uint32_t *match_criteria)
{
        uint8_t match_criteria_enable;

        match_criteria_enable =
                (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
                MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
        match_criteria_enable |=
                (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
                MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;

        return match_criteria_enable;
}
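
/*
 * Illustrative example, not part of the driver sources: a matcher whose
 * mask touches only the outer headers (say outer IPv4 addresses) yields
 * match_criteria_enable == 1 << MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT,
 * while additionally masking a VXLAN VNI (misc parameters) would also
 * set the MISC bit.
 */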

/**
 * Register the flow matcher.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] matcher
 *   Pointer to flow matcher.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_matcher_register(struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_matcher *matcher,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_matcher *cache_matcher;
        struct mlx5dv_flow_matcher_attr dv_attr = {
                .type = IBV_FLOW_ATTR_NORMAL,
                .match_mask = (void *)&matcher->mask,
        };

        /* Lookup from cache. */
        LIST_FOREACH(cache_matcher, &priv->matchers, next) {
                if (matcher->crc == cache_matcher->crc &&
                    matcher->priority == cache_matcher->priority &&
                    matcher->egress == cache_matcher->egress &&
                    !memcmp((const void *)matcher->mask.buf,
                            (const void *)cache_matcher->mask.buf,
                            cache_matcher->mask.size)) {
                        DRV_LOG(DEBUG,
                                "priority %hd use %s matcher %p: refcnt %d++",
                                cache_matcher->priority,
                                cache_matcher->egress ? "tx" : "rx",
                                (void *)cache_matcher,
                                rte_atomic32_read(&cache_matcher->refcnt));
                        rte_atomic32_inc(&cache_matcher->refcnt);
                        dev_flow->dv.matcher = cache_matcher;
                        return 0;
                }
        }
        /* Register new matcher. */
        cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
        if (!cache_matcher)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate matcher memory");
        *cache_matcher = *matcher;
        dv_attr.match_criteria_enable =
                flow_dv_matcher_enable(cache_matcher->mask.buf);
        dv_attr.priority = matcher->priority;
        if (matcher->egress)
                dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
        cache_matcher->matcher_object =
                mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
        if (!cache_matcher->matcher_object) {
                /* Release the cache entry on creation failure. */
                rte_free(cache_matcher);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create matcher");
        }
        rte_atomic32_inc(&cache_matcher->refcnt);
        LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
        dev_flow->dv.matcher = cache_matcher;
        DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
                cache_matcher->priority,
                cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
                rte_atomic32_read(&cache_matcher->refcnt));
        return 0;
}

/**
 * Fill the flow with DV spec.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the sub flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_translate(struct rte_eth_dev *dev,
		  struct mlx5_flow *dev_flow,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item items[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	struct priv *priv = dev->data->dev_private;
	uint64_t priority = attr->priority;
	struct mlx5_flow_dv_matcher matcher = {
		.mask = {
			.size = sizeof(matcher.mask.buf),
		},
	};
	void *match_value = dev_flow->dv.value.buf;
	int tunnel = 0;

	if (priority == MLX5_FLOW_PRIO_RSVD)
		priority = priv->config.flow_prio - 1;
	for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
		tunnel = !!(dev_flow->layers & MLX5_FLOW_LAYER_TUNNEL);
		flow_dv_create_item(&matcher, match_value, items, dev_flow,
				    tunnel);
	}
	matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
				    matcher.mask.size);
	matcher.priority = mlx5_flow_adjust_priority(dev, priority,
						     matcher.priority);
	matcher.egress = attr->egress;
	if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
		return -rte_errno;
	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++)
		flow_dv_create_action(actions, dev_flow);
	return 0;
}
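
/*
 * Illustrative sketch, not driver code: the item and action arrays
 * iterated above follow the standard rte_flow convention of being
 * terminated by an *_TYPE_END entry, e.g. a hypothetical caller could
 * build them as:
 *
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */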

/**
 * Apply the flow to the NIC.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
	      struct rte_flow_error *error)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;
	int n;
	int err;

	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		n = dv->actions_n;
		if (flow->actions & MLX5_FLOW_ACTION_DROP) {
			dv->hrxq = mlx5_hrxq_drop_new(dev);
			if (!dv->hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get drop hash queue");
				goto error;
			}
			dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
			dv->actions[n].qp = dv->hrxq->qp;
			n++;
		} else if (flow->actions &
			   (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
			struct mlx5_hrxq *hrxq;

			hrxq = mlx5_hrxq_get(dev, flow->key,
					     MLX5_RSS_HASH_KEY_LEN,
					     dv->hash_fields,
					     (*flow->queue),
					     flow->rss.queue_num);
			if (!hrxq)
				hrxq = mlx5_hrxq_new
					(dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
					 dv->hash_fields, (*flow->queue),
					 flow->rss.queue_num,
					 !!(dev_flow->layers &
					    MLX5_FLOW_LAYER_TUNNEL));
			if (!hrxq) {
				rte_flow_error_set
					(error, rte_errno,
					 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					 "cannot get hash queue");
				goto error;
			}
			dv->hrxq = hrxq;
			dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
			dv->actions[n].qp = hrxq->qp;
			n++;
		}
		dv->flow =
			mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
						  (void *)&dv->value, n,
						  dv->actions);
		if (!dv->flow) {
			rte_flow_error_set(error, errno,
					   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					   NULL,
					   "hardware refuses to create flow");
			goto error;
		}
	}
	return 0;
error:
	err = rte_errno; /* Save rte_errno before cleanup. */
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		struct mlx5_flow_dv *dv = &dev_flow->dv;

		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	rte_errno = err; /* Restore rte_errno. */
	return -rte_errno;
}
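
/*
 * Illustrative sketch, not driver code: the cleanup pattern above saves
 * rte_errno before releasing resources, because the release helpers may
 * themselves overwrite it, and restores it before returning:
 *
 *	err = rte_errno;	// save the original failure cause
 *	release_resources();	// hypothetical helper, may clobber rte_errno
 *	rte_errno = err;	// restore before returning
 *	return -rte_errno;
 */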

/**
 * Release the flow matcher.
 *
 * @param dev
 *   Pointer to Ethernet device.
 * @param flow
 *   Pointer to mlx5_flow.
 *
 * @return
 *   1 while a reference on it exists, 0 when freed.
 */
static int
flow_dv_matcher_release(struct rte_eth_dev *dev,
			struct mlx5_flow *flow)
{
	struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;

	assert(matcher->matcher_object);
	DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
		dev->data->port_id, (void *)matcher,
		rte_atomic32_read(&matcher->refcnt));
	if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
		claim_zero(mlx5_glue->dv_destroy_flow_matcher
			   (matcher->matcher_object));
		LIST_REMOVE(matcher, next);
		rte_free(matcher);
		DRV_LOG(DEBUG, "port %u matcher %p: removed",
			dev->data->port_id, (void *)matcher);
		return 0;
	}
	return 1;
}
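
/*
 * Illustrative sketch, not driver code: rte_atomic32_dec_and_test()
 * returns true only for the caller that drops the last reference, so
 * exactly one thread runs the destroy sequence:
 *
 *	if (rte_atomic32_dec_and_test(&obj->refcnt)) {
 *		destroy_hw_object(obj);	// hypothetical helper
 *		rte_free(obj);
 *	}
 */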

/**
 * Remove the flow from the NIC but keep it in memory.
 *
 * @param[in] dev
 *   Pointer to Ethernet device.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow_dv *dv;
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
		dv = &dev_flow->dv;
		if (dv->flow) {
			claim_zero(mlx5_glue->destroy_flow(dv->flow));
			dv->flow = NULL;
		}
		if (dv->hrxq) {
			if (flow->actions & MLX5_FLOW_ACTION_DROP)
				mlx5_hrxq_drop_release(dev);
			else
				mlx5_hrxq_release(dev, dv->hrxq);
			dv->hrxq = NULL;
		}
	}
	flow->counter = NULL;
}
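
/*
 * Illustrative sketch, an assumption about intended usage rather than a
 * quote of the generic layer: because flow_dv_remove() keeps the flow
 * and its dev_flows in memory, a flow can be detached and re-attached
 * (e.g. across a port restart) without being translated again:
 *
 *	flow_dv_remove(dev, flow);		// port stop: release HW objects
 *	...
 *	flow_dv_apply(dev, flow, &error);	// port start: re-create them
 */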

/**
 * Remove the flow from the NIC and the memory.
 *
 * @param[in] dev
 *   Pointer to the Ethernet device structure.
 * @param[in, out] flow
 *   Pointer to flow structure.
 */
static void
flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
{
	struct mlx5_flow *dev_flow;

	if (!flow)
		return;
	flow_dv_remove(dev, flow);
	while (!LIST_EMPTY(&flow->dev_flows)) {
		dev_flow = LIST_FIRST(&flow->dev_flows);
		LIST_REMOVE(dev_flow, next);
		if (dev_flow->dv.matcher)
			flow_dv_matcher_release(dev, dev_flow);
		rte_free(dev_flow);
	}
}

/**
 * Query a flow.
 *
 * @see rte_flow_query()
 * @see rte_flow_ops
 */
static int
flow_dv_query(struct rte_eth_dev *dev __rte_unused,
	      struct rte_flow *flow __rte_unused,
	      const struct rte_flow_action *actions __rte_unused,
	      void *data __rte_unused,
	      struct rte_flow_error *error __rte_unused)
{
	rte_errno = ENOTSUP;
	return -rte_errno;
}

const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
	.validate = flow_dv_validate,
	.prepare = flow_dv_prepare,
	.translate = flow_dv_translate,
	.apply = flow_dv_apply,
	.remove = flow_dv_remove,
	.destroy = flow_dv_destroy,
	.query = flow_dv_query,
};
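
/*
 * Illustrative sketch, an assumption about the generic flow layer (not
 * a quote of mlx5_flow.c): a driver-agnostic caller dispatches through
 * this ops table, typically in validate/prepare/translate/apply order:
 *
 *	const struct mlx5_flow_driver_ops *fops = &mlx5_flow_dv_drv_ops;
 *
 *	if (fops->validate(dev, attr, items, actions, error))
 *		return -rte_errno;
 */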

#endif /* HAVE_IBV_FLOW_DV_SUPPORT */