deb_dpdk.git: drivers/net/mlx5/mlx5_flow_dv.c (upstream version 18.11-rc4)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include <rte_common.h>
#include <rte_ether.h>
#include <rte_eth_ctrl.h>
#include <rte_ethdev_driver.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_malloc.h>
#include <rte_ip.h>
#include <rte_gre.h>

#include "mlx5.h"
#include "mlx5_defs.h"
#include "mlx5_prm.h"
#include "mlx5_glue.h"
#include "mlx5_flow.h"

#ifdef HAVE_IBV_FLOW_DV_SUPPORT

/**
 * Validate META item.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] item
 *   Item specification.
 * @param[in] attr
 *   Attributes of flow that includes this item.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_item_meta(struct rte_eth_dev *dev,
                           const struct rte_flow_item *item,
                           const struct rte_flow_attr *attr,
                           struct rte_flow_error *error)
{
        const struct rte_flow_item_meta *spec = item->spec;
        const struct rte_flow_item_meta *mask = item->mask;
        const struct rte_flow_item_meta nic_mask = {
                .data = RTE_BE32(UINT32_MAX)
        };
        int ret;
        uint64_t offloads = dev->data->dev_conf.txmode.offloads;

        if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
                return rte_flow_error_set(error, EPERM,
                                          RTE_FLOW_ERROR_TYPE_ITEM,
                                          NULL,
                                          "match on metadata offload "
                                          "configuration is off for this port");
        if (!spec)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          item->spec,
                                          "data cannot be empty");
        if (!spec->data)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
                                          NULL,
                                          "data cannot be zero");
        if (!mask)
                mask = &rte_flow_item_meta_mask;
        ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
                                        (const uint8_t *)&nic_mask,
                                        sizeof(struct rte_flow_item_meta),
                                        error);
        if (ret < 0)
                return ret;
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "pattern not supported for ingress");
        return 0;
}
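
/*
 * Usage sketch (illustrative only, not part of the driver): matching on
 * metadata is an egress-only pattern and requires the Tx offload to be
 * enabled before the port is started, e.g.:
 *
 *	struct rte_eth_conf conf = { 0 };
 *	conf.txmode.offloads |= DEV_TX_OFFLOAD_MATCH_METADATA;
 *	rte_eth_dev_configure(port_id, 1, 1, &conf);
 *
 *	struct rte_flow_attr attr = { .egress = 1 };
 *	struct rte_flow_item_meta meta = { .data = RTE_BE32(0x1234) };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_META, .spec = &meta },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */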

/**
 * Validate the L2 encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_encap(uint64_t action_flags,
                                 const struct rte_flow_action *action,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (attr->ingress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the L2 decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_l2_decap(uint64_t action_flags,
                                 const struct rte_flow_attr *attr,
                                 struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap or"
                                          " decap action in a flow");
        if (attr->egress)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                          NULL,
                                          "decap action not supported for "
                                          "egress");
        return 0;
}

/**
 * Validate the raw encap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the encap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_encap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (!(action->conf))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, action,
                                          "configuration cannot be null");
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and encap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single encap"
                                          " action in a flow");
        /* Encap without a preceding decap is not supported for ingress. */
        if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
                                          NULL,
                                          "encap action not supported for "
                                          "ingress");
        return 0;
}

/**
 * Validate the raw decap action.
 *
 * @param[in] action_flags
 *   Holds the actions detected until now.
 * @param[in] action
 *   Pointer to the decap action.
 * @param[in] attr
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_action_raw_decap(uint64_t action_flags,
                                  const struct rte_flow_action *action,
                                  const struct rte_flow_attr *attr,
                                  struct rte_flow_error *error)
{
        if (action_flags & MLX5_FLOW_ACTION_DROP)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't drop and decap in same flow");
        if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can't have encap action before"
                                          " decap action");
        if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, NULL,
                                          "can only have a single decap"
                                          " action in a flow");
        /* Decap action is valid on egress only if it is followed by encap. */
        if (attr->egress) {
                for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
                       action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
                       action++) {
                }
                if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
                        return rte_flow_error_set
                                        (error, ENOTSUP,
                                         RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
                                         NULL, "decap action not supported"
                                         " for egress");
        }
        return 0;
}
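
/*
 * Illustrative example (not part of the driver): on egress, a raw decap
 * must be followed by a raw encap later in the same action list, e.g. to
 * replace an L2 header with an L3 tunnel header:
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_DECAP, .conf = &decap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_RAW_ENCAP, .conf = &encap },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * where decap and encap are struct rte_flow_action_raw_decap and
 * struct rte_flow_action_raw_encap filled by the application.
 */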

/**
 * Find existing encap/decap resource or create and register a new one.
 *
 * @param[in, out] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] resource
 *   Pointer to encap/decap resource.
 * @param[in, out] dev_flow
 *   Pointer to the dev_flow.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, otherwise a negative errno value and rte_errno is set.
 */
static int
flow_dv_encap_decap_resource_register
                        (struct rte_eth_dev *dev,
                         struct mlx5_flow_dv_encap_decap_resource *resource,
                         struct mlx5_flow *dev_flow,
                         struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        struct mlx5_flow_dv_encap_decap_resource *cache_resource;

        /* Lookup a matching resource from cache. */
        LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
                if (resource->reformat_type == cache_resource->reformat_type &&
                    resource->ft_type == cache_resource->ft_type &&
                    resource->size == cache_resource->size &&
                    !memcmp((const void *)resource->buf,
                            (const void *)cache_resource->buf,
                            resource->size)) {
                        DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
                                (void *)cache_resource,
                                rte_atomic32_read(&cache_resource->refcnt));
                        rte_atomic32_inc(&cache_resource->refcnt);
                        dev_flow->dv.encap_decap = cache_resource;
                        return 0;
                }
        }
        /* Register new encap/decap resource. */
        cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
        if (!cache_resource)
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                          "cannot allocate resource memory");
        *cache_resource = *resource;
        cache_resource->verbs_action =
                mlx5_glue->dv_create_flow_action_packet_reformat
                        (priv->ctx, cache_resource->size,
                         (cache_resource->size ? cache_resource->buf : NULL),
                         cache_resource->reformat_type,
                         cache_resource->ft_type);
        if (!cache_resource->verbs_action) {
                rte_free(cache_resource);
                return rte_flow_error_set(error, ENOMEM,
                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
                                          NULL, "cannot create action");
        }
        rte_atomic32_init(&cache_resource->refcnt);
        rte_atomic32_inc(&cache_resource->refcnt);
        LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
        dev_flow->dv.encap_decap = cache_resource;
        DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
                (void *)cache_resource,
                rte_atomic32_read(&cache_resource->refcnt));
        return 0;
}

/**
 * Get the size of a specific rte_flow_item_type.
 *
 * @param[in] item_type
 *   Tested rte_flow_item_type.
 *
 * @return
 *   Size of the item type's structure, 0 if void or irrelevant.
 */
static size_t
flow_dv_get_item_len(const enum rte_flow_item_type item_type)
{
        size_t retval;

        switch (item_type) {
        case RTE_FLOW_ITEM_TYPE_ETH:
                retval = sizeof(struct rte_flow_item_eth);
                break;
        case RTE_FLOW_ITEM_TYPE_VLAN:
                retval = sizeof(struct rte_flow_item_vlan);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV4:
                retval = sizeof(struct rte_flow_item_ipv4);
                break;
        case RTE_FLOW_ITEM_TYPE_IPV6:
                retval = sizeof(struct rte_flow_item_ipv6);
                break;
        case RTE_FLOW_ITEM_TYPE_UDP:
                retval = sizeof(struct rte_flow_item_udp);
                break;
        case RTE_FLOW_ITEM_TYPE_TCP:
                retval = sizeof(struct rte_flow_item_tcp);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN:
                retval = sizeof(struct rte_flow_item_vxlan);
                break;
        case RTE_FLOW_ITEM_TYPE_GRE:
                retval = sizeof(struct rte_flow_item_gre);
                break;
        case RTE_FLOW_ITEM_TYPE_NVGRE:
                retval = sizeof(struct rte_flow_item_nvgre);
                break;
        case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                retval = sizeof(struct rte_flow_item_vxlan_gpe);
                break;
        case RTE_FLOW_ITEM_TYPE_MPLS:
                retval = sizeof(struct rte_flow_item_mpls);
                break;
        case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
        default:
                retval = 0;
                break;
        }
        return retval;
}

#define MLX5_ENCAP_IPV4_VERSION         0x40
#define MLX5_ENCAP_IPV4_IHL_MIN         0x05
#define MLX5_ENCAP_IPV4_TTL_DEF         0x40
#define MLX5_ENCAP_IPV6_VTC_FLOW        0x60000000
#define MLX5_ENCAP_IPV6_HOP_LIMIT       0xff
#define MLX5_ENCAP_VXLAN_FLAGS          0x08000000
#define MLX5_ENCAP_VXLAN_GPE_FLAGS      0x04
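
/*
 * Note: these defaults follow the respective header layouts, e.g.
 * MLX5_ENCAP_VXLAN_FLAGS puts the VXLAN "valid VNI" flag (0x08) in the
 * first byte of the 32-bit flags word, and MLX5_ENCAP_IPV4_VERSION holds
 * version 4 in the upper nibble of the version/IHL byte.
 */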

/**
 * Convert the encap action data from a list of rte_flow_item objects
 * to a raw buffer.
 *
 * @param[in] items
 *   Pointer to rte_flow_item objects list.
 * @param[out] buf
 *   Pointer to the output buffer.
 * @param[out] size
 *   Pointer to the output buffer size.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
                           size_t *size, struct rte_flow_error *error)
{
        struct ether_hdr *eth = NULL;
        struct vlan_hdr *vlan = NULL;
        struct ipv4_hdr *ipv4 = NULL;
        struct ipv6_hdr *ipv6 = NULL;
        struct udp_hdr *udp = NULL;
        struct vxlan_hdr *vxlan = NULL;
        struct vxlan_gpe_hdr *vxlan_gpe = NULL;
        struct gre_hdr *gre = NULL;
        size_t len;
        size_t temp_size = 0;

        if (!items)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "invalid empty data");
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                len = flow_dv_get_item_len(items->type);
                if (len + temp_size > MLX5_ENCAP_MAX_LEN)
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "items total size is too big"
                                                  " for encap action");
                rte_memcpy((void *)&buf[temp_size], items->spec, len);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_ETH:
                        eth = (struct ether_hdr *)&buf[temp_size];
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        vlan = (struct vlan_hdr *)&buf[temp_size];
                        if (!eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "eth header not found");
                        if (!eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ipv4 = (struct ipv4_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
                        if (!ipv4->version_ihl)
                                ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
                                                    MLX5_ENCAP_IPV4_IHL_MIN;
                        if (!ipv4->time_to_live)
                                ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ipv6 = (struct ipv6_hdr *)&buf[temp_size];
                        if (!vlan && !eth)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "neither eth nor vlan"
                                                " header found");
                        if (vlan && !vlan->eth_proto)
                                vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
                        else if (eth && !eth->ether_type)
                                eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
                        if (!ipv6->vtc_flow)
                                ipv6->vtc_flow =
                                        RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
                        if (!ipv6->hop_limits)
                                ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        udp = (struct udp_hdr *)&buf[temp_size];
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_UDP;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        vxlan = (struct vxlan_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!udp->dst_port)
                                udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
                        if (!vxlan->vx_flags)
                                vxlan->vx_flags =
                                        RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
                        if (!udp)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "udp header not found");
                        if (!vxlan_gpe->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!udp->dst_port)
                                udp->dst_port =
                                        RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
                        if (!vxlan_gpe->vx_flags)
                                vxlan_gpe->vx_flags =
                                                MLX5_ENCAP_VXLAN_GPE_FLAGS;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        gre = (struct gre_hdr *)&buf[temp_size];
                        if (!gre->proto)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "next protocol not found");
                        if (!ipv4 && !ipv6)
                                return rte_flow_error_set(error, EINVAL,
                                                RTE_FLOW_ERROR_TYPE_ACTION,
                                                (void *)items->type,
                                                "ip header not found");
                        if (ipv4 && !ipv4->next_proto_id)
                                ipv4->next_proto_id = IPPROTO_GRE;
                        else if (ipv6 && !ipv6->proto)
                                ipv6->proto = IPPROTO_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                default:
                        return rte_flow_error_set(error, EINVAL,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  (void *)items->type,
                                                  "unsupported item type");
                        break;
                }
                temp_size += len;
        }
        *size = temp_size;
        return 0;
}
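
/*
 * Illustrative input (not part of the driver): a VXLAN encapsulation
 * header built from an item list; unset protocol/length fields such as
 * eth.type, ipv4.next_proto_id and udp.dst_port are filled in with the
 * defaults above by flow_dv_convert_encap_data():
 *
 *	struct rte_flow_item items[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_UDP, .spec = &udp },
 *		{ .type = RTE_FLOW_ITEM_TYPE_VXLAN, .spec = &vxlan },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 */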

/**
 * Convert L2 encap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
                               const struct rte_flow_action *action,
                               struct mlx5_flow *dev_flow,
                               struct rte_flow_error *error)
{
        const struct rte_flow_item *encap_data;
        const struct rte_flow_action_raw_encap *raw_encap_data;
        struct mlx5_flow_dv_encap_decap_resource res = {
                .reformat_type =
                        MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
                .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
        };

        if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
                raw_encap_data =
                        (const struct rte_flow_action_raw_encap *)action->conf;
                res.size = raw_encap_data->size;
                memcpy(res.buf, raw_encap_data->data, res.size);
        } else {
                if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
                        encap_data =
                                ((const struct rte_flow_action_vxlan_encap *)
                                                action->conf)->definition;
                else
                        encap_data =
                                ((const struct rte_flow_action_nvgre_encap *)
                                                action->conf)->definition;
                if (flow_dv_convert_encap_data(encap_data, res.buf,
                                               &res.size, error))
                        return -rte_errno;
        }
        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create L2 encap action");
        return 0;
}

/**
 * Convert L2 decap action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
                               struct mlx5_flow *dev_flow,
                               struct rte_flow_error *error)
{
        struct mlx5_flow_dv_encap_decap_resource res = {
                .size = 0,
                .reformat_type =
                        MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
                .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
        };

        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create L2 decap action");
        return 0;
}

/**
 * Convert raw decap/encap (L3 tunnel) action to DV specification.
 *
 * @param[in] dev
 *   Pointer to rte_eth_dev structure.
 * @param[in] action
 *   Pointer to action structure.
 * @param[in, out] dev_flow
 *   Pointer to the mlx5_flow.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
                                const struct rte_flow_action *action,
                                struct mlx5_flow *dev_flow,
                                const struct rte_flow_attr *attr,
                                struct rte_flow_error *error)
{
        const struct rte_flow_action_raw_encap *encap_data;
        struct mlx5_flow_dv_encap_decap_resource res;

        encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
        res.size = encap_data->size;
        memcpy(res.buf, encap_data->data, res.size);
        res.reformat_type = attr->egress ?
                MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
                MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
        res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
                                     MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
        if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION,
                                          NULL, "can't create encap action");
        return 0;
}

/**
 * Verify the @p attributes will be correctly understood by the NIC and store
 * them in the @p flow if everything is correct.
 *
 * @param[in] dev
 *   Pointer to dev struct.
 * @param[in] attributes
 *   Pointer to flow attributes.
 * @param[out] error
 *   Pointer to error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate_attributes(struct rte_eth_dev *dev,
                            const struct rte_flow_attr *attributes,
                            struct rte_flow_error *error)
{
        struct priv *priv = dev->data->dev_private;
        uint32_t priority_max = priv->config.flow_prio - 1;

        if (attributes->group)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
                                          NULL,
                                          "groups are not supported");
        if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
            attributes->priority >= priority_max)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
                                          NULL,
                                          "priority out of range");
        if (attributes->transfer)
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
                                          NULL,
                                          "transfer is not supported");
        if (!(attributes->egress ^ attributes->ingress))
                return rte_flow_error_set(error, ENOTSUP,
                                          RTE_FLOW_ERROR_TYPE_ATTR, NULL,
                                          "must specify exactly one of "
                                          "ingress or egress");
        return 0;
}
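
/*
 * For instance (illustrative), attr = { .ingress = 1 } passes the checks
 * above, while attr = { .ingress = 1, .egress = 1 } or a non-zero group
 * is rejected.
 */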

/**
 * Internal validation function. For validating both actions and items.
 *
 * @param[in] dev
 *   Pointer to the rte_eth_dev structure.
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   0 on success, a negative errno value otherwise and rte_errno is set.
 */
static int
flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
                 const struct rte_flow_item items[],
                 const struct rte_flow_action actions[],
                 struct rte_flow_error *error)
{
        int ret;
        uint64_t action_flags = 0;
        uint64_t item_flags = 0;
        uint64_t last_item = 0;
        int tunnel = 0;
        uint8_t next_protocol = 0xff;
        int actions_n = 0;

        if (items == NULL)
                return -1;
        ret = flow_dv_validate_attributes(dev, attr, error);
        if (ret < 0)
                return ret;
        for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
                tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
                switch (items->type) {
                case RTE_FLOW_ITEM_TYPE_VOID:
                        break;
                case RTE_FLOW_ITEM_TYPE_ETH:
                        ret = mlx5_flow_validate_item_eth(items, item_flags,
                                                          error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
                                             MLX5_FLOW_LAYER_OUTER_L2;
                        break;
                case RTE_FLOW_ITEM_TYPE_VLAN:
                        ret = mlx5_flow_validate_item_vlan(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
                                             MLX5_FLOW_LAYER_OUTER_VLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV4:
                        ret = mlx5_flow_validate_item_ipv4(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV4;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv4 *)
                             items->mask)->hdr.next_proto_id) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->spec))->hdr.next_proto_id;
                                next_protocol &=
                                        ((const struct rte_flow_item_ipv4 *)
                                         (items->mask))->hdr.next_proto_id;
                        } else {
                                /* Reset for inner layer. */
                                next_protocol = 0xff;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_IPV6:
                        ret = mlx5_flow_validate_item_ipv6(items, item_flags,
                                                           error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
                                             MLX5_FLOW_LAYER_OUTER_L3_IPV6;
                        if (items->mask != NULL &&
                            ((const struct rte_flow_item_ipv6 *)
                             items->mask)->hdr.proto) {
                                next_protocol =
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->spec)->hdr.proto;
                                next_protocol &=
                                        ((const struct rte_flow_item_ipv6 *)
                                         items->mask)->hdr.proto;
                        } else {
                                /* Reset for inner layer. */
                                next_protocol = 0xff;
                        }
                        break;
                case RTE_FLOW_ITEM_TYPE_TCP:
                        ret = mlx5_flow_validate_item_tcp
                                                (items, item_flags,
                                                 next_protocol,
                                                 &rte_flow_item_tcp_mask,
                                                 error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
                                             MLX5_FLOW_LAYER_OUTER_L4_TCP;
                        break;
                case RTE_FLOW_ITEM_TYPE_UDP:
                        ret = mlx5_flow_validate_item_udp(items, item_flags,
                                                          next_protocol,
                                                          error);
                        if (ret < 0)
                                return ret;
                        last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
                                             MLX5_FLOW_LAYER_OUTER_L4_UDP;
                        break;
                case RTE_FLOW_ITEM_TYPE_GRE:
                case RTE_FLOW_ITEM_TYPE_NVGRE:
                        ret = mlx5_flow_validate_item_gre(items, item_flags,
                                                          next_protocol, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_GRE;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN:
                        ret = mlx5_flow_validate_item_vxlan(items, item_flags,
                                                            error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_VXLAN;
                        break;
                case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
                        ret = mlx5_flow_validate_item_vxlan_gpe(items,
                                                                item_flags, dev,
                                                                error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
                        break;
                case RTE_FLOW_ITEM_TYPE_MPLS:
                        ret = mlx5_flow_validate_item_mpls(dev, items,
                                                           item_flags,
                                                           last_item, error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_LAYER_MPLS;
                        break;
                case RTE_FLOW_ITEM_TYPE_META:
                        ret = flow_dv_validate_item_meta(dev, items, attr,
                                                         error);
                        if (ret < 0)
                                return ret;
                        last_item = MLX5_FLOW_ITEM_METADATA;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ITEM,
                                                  NULL, "item not supported");
                }
                item_flags |= last_item;
        }
        for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
                if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions, "too many actions");
                switch (actions->type) {
                case RTE_FLOW_ACTION_TYPE_VOID:
                        break;
                case RTE_FLOW_ACTION_TYPE_FLAG:
                        ret = mlx5_flow_validate_action_flag(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_FLAG;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_MARK:
                        ret = mlx5_flow_validate_action_mark(actions,
                                                             action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_MARK;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_DROP:
                        ret = mlx5_flow_validate_action_drop(action_flags,
                                                             attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_DROP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_QUEUE:
                        ret = mlx5_flow_validate_action_queue(actions,
                                                              action_flags, dev,
                                                              attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_QUEUE;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RSS:
                        ret = mlx5_flow_validate_action_rss(actions,
                                                            action_flags, dev,
                                                            attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RSS;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_COUNT:
                        ret = mlx5_flow_validate_action_count(dev, attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_COUNT;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
                        ret = flow_dv_validate_action_l2_encap(action_flags,
                                                               actions, attr,
                                                               error);
                        if (ret < 0)
                                return ret;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
                                        MLX5_FLOW_ACTION_VXLAN_ENCAP :
                                        MLX5_FLOW_ACTION_NVGRE_ENCAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
                case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
                        ret = flow_dv_validate_action_l2_decap(action_flags,
                                                               attr, error);
                        if (ret < 0)
                                return ret;
                        action_flags |= actions->type ==
                                        RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
                                        MLX5_FLOW_ACTION_VXLAN_DECAP :
                                        MLX5_FLOW_ACTION_NVGRE_DECAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
                        ret = flow_dv_validate_action_raw_encap(action_flags,
                                                                actions, attr,
                                                                error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
                        ++actions_n;
                        break;
                case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
                        ret = flow_dv_validate_action_raw_decap(action_flags,
                                                                actions, attr,
                                                                error);
                        if (ret < 0)
                                return ret;
                        action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
                        ++actions_n;
                        break;
                default:
                        return rte_flow_error_set(error, ENOTSUP,
                                                  RTE_FLOW_ERROR_TYPE_ACTION,
                                                  actions,
                                                  "action not supported");
                }
        }
        if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
                return rte_flow_error_set(error, EINVAL,
                                          RTE_FLOW_ERROR_TYPE_ACTION, actions,
                                          "no fate action is found");
        return 0;
}
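
/*
 * Illustrative example (not part of the driver): a minimal flow that
 * passes the validation above, with exactly one direction attribute and
 * at least one fate action on ingress:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */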

/**
 * Internal preparation function. Allocates the DV flow structure;
 * its size is constant.
 *
 * @param[in] attr
 *   Pointer to the flow attributes.
 * @param[in] items
 *   Pointer to the list of items.
 * @param[in] actions
 *   Pointer to the list of actions.
 * @param[out] error
 *   Pointer to the error structure.
 *
 * @return
 *   Pointer to mlx5_flow object on success,
 *   otherwise NULL and rte_errno is set.
 */
static struct mlx5_flow *
flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
                const struct rte_flow_item items[] __rte_unused,
                const struct rte_flow_action actions[] __rte_unused,
                struct rte_flow_error *error)
{
        uint32_t size = sizeof(struct mlx5_flow);
        struct mlx5_flow *flow;

        flow = rte_calloc(__func__, 1, size, 0);
        if (!flow) {
                rte_flow_error_set(error, ENOMEM,
                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
                                   "not enough memory to create flow");
                return NULL;
        }
        flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
        return flow;
}

#ifndef NDEBUG
/**
 * Sanity check for match mask and value. Similar to check_valid_spec() in
 * the kernel driver. If an unmasked bit is present in the value, it
 * returns failure.
 *
 * @param match_mask
 *   Pointer to match mask buffer.
 * @param match_value
 *   Pointer to match value buffer.
 *
 * @return
 *   0 if valid, -EINVAL otherwise.
 */
static int
flow_dv_check_valid_spec(void *match_mask, void *match_value)
{
        uint8_t *m = match_mask;
        uint8_t *v = match_value;
        unsigned int i;

        for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
                if (v[i] & ~m[i]) {
                        DRV_LOG(ERR,
                                "match_value differs from match_criteria"
                                " %p[%u] != %p[%u]",
                                match_value, i, match_mask, i);
                        return -EINVAL;
                }
        }
        return 0;
}
#endif
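
/*
 * For example (illustrative), a mask byte of 0xf0 with a value byte of
 * 0x1f fails the check above: bits 0x0f of the value are set while the
 * corresponding mask bits are clear.
 */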

/**
 * Add Ethernet item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_eth(void *matcher, void *key,
                           const struct rte_flow_item *item, int inner)
{
        const struct rte_flow_item_eth *eth_m = item->mask;
        const struct rte_flow_item_eth *eth_v = item->spec;
        const struct rte_flow_item_eth nic_mask = {
                .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
                .type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        char *l24_v;
        unsigned int i;

        if (!eth_v)
                return;
        if (!eth_m)
                eth_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
               &eth_m->dst, sizeof(eth_m->dst));
        /* The value must be in the range of the mask. */
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
        for (i = 0; i < sizeof(eth_m->dst); ++i)
                l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
        memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
               &eth_m->src, sizeof(eth_m->src));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
        /* The value must be in the range of the mask. */
        for (i = 0; i < sizeof(eth_m->src); ++i)
                l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
        MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
                 rte_be_to_cpu_16(eth_m->type));
        l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
        *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
}

/**
 * Add VLAN item to matcher and to the value.
 *
 * @param[in, out] matcher
 *   Flow matcher.
 * @param[in, out] key
 *   Flow matcher value.
 * @param[in] item
 *   Flow pattern to translate.
 * @param[in] inner
 *   Item is inner pattern.
 */
static void
flow_dv_translate_item_vlan(void *matcher, void *key,
                            const struct rte_flow_item *item,
                            int inner)
{
        const struct rte_flow_item_vlan *vlan_m = item->mask;
        const struct rte_flow_item_vlan *vlan_v = item->spec;
        const struct rte_flow_item_vlan nic_mask = {
                .tci = RTE_BE16(0x0fff),
                .inner_type = RTE_BE16(0xffff),
        };
        void *headers_m;
        void *headers_v;
        uint16_t tci_m;
        uint16_t tci_v;

        if (!vlan_v)
                return;
        if (!vlan_m)
                vlan_m = &nic_mask;
        if (inner) {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         inner_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
        } else {
                headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
                                         outer_headers);
                headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
        }
1201         tci_m = rte_be_to_cpu_16(vlan_m->tci);
1202         tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
1203         MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
1204         MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
1205         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
1206         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
1207         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
1208         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
1209         MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
1210         MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
1211 }
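
/*
 * Example (illustrative only): the 802.1Q TCI packs PCP(15:13),
 * CFI/DEI(12) and VID(11:0), which the shifts above unpack. MLX5_SET()
 * truncates to each destination field's width, so no explicit AND is
 * needed:
 *
 *	tci = 0xa7b2 -> first_vid  = 0x7b2 (12-bit field)
 *	                first_cfi  = 0xa7b2 >> 12 = 0xa, kept bit: 0
 *	                first_prio = 0xa7b2 >> 13 = 0x5
 */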
1212
1213 /**
1214  * Add IPV4 item to matcher and to the value.
1215  *
1216  * @param[in, out] matcher
1217  *   Flow matcher.
1218  * @param[in, out] key
1219  *   Flow matcher value.
1220  * @param[in] item
1221  *   Flow pattern to translate.
1222  * @param[in] inner
1223  *   Item is inner pattern.
1224  */
1225 static void
1226 flow_dv_translate_item_ipv4(void *matcher, void *key,
1227                             const struct rte_flow_item *item,
1228                             int inner)
1229 {
1230         const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
1231         const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
1232         const struct rte_flow_item_ipv4 nic_mask = {
1233                 .hdr = {
1234                         .src_addr = RTE_BE32(0xffffffff),
1235                         .dst_addr = RTE_BE32(0xffffffff),
1236                         .type_of_service = 0xff,
1237                         .next_proto_id = 0xff,
1238                 },
1239         };
1240         void *headers_m;
1241         void *headers_v;
1242         char *l24_m;
1243         char *l24_v;
1244         uint8_t tos;
1245
1246         if (inner) {
1247                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1248                                          inner_headers);
1249                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1250         } else {
1251                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1252                                          outer_headers);
1253                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1254         }
1255         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
1256         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
1257         if (!ipv4_v)
1258                 return;
1259         if (!ipv4_m)
1260                 ipv4_m = &nic_mask;
1261         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1262                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1263         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1264                              dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1265         *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
1266         *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
1267         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1268                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
1269         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1270                              src_ipv4_src_ipv6.ipv4_layout.ipv4);
1271         *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
1272         *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
1273         tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
1274         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
1275                  ipv4_m->hdr.type_of_service);
1276         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
1277         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
1278                  ipv4_m->hdr.type_of_service >> 2);
1279         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
1280         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
1281                  ipv4_m->hdr.next_proto_id);
1282         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1283                  ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
1284 }
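
/*
 * Example (illustrative only): the TOS byte is split across two device
 * fields, ECN in bits 1:0 and DSCP in bits 7:2, hence the >> 2 above
 * (MLX5_SET() truncates ip_ecn to its 2-bit width):
 *
 *	type_of_service = 0xb8 -> ip_dscp = 0xb8 >> 2 = 46 (EF)
 *	                          ip_ecn  = 0xb8 & 0x3 = 0
 */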
1285
1286 /**
1287  * Add IPV6 item to matcher and to the value.
1288  *
1289  * @param[in, out] matcher
1290  *   Flow matcher.
1291  * @param[in, out] key
1292  *   Flow matcher value.
1293  * @param[in] item
1294  *   Flow pattern to translate.
1295  * @param[in] inner
1296  *   Item is inner pattern.
1297  */
1298 static void
1299 flow_dv_translate_item_ipv6(void *matcher, void *key,
1300                             const struct rte_flow_item *item,
1301                             int inner)
1302 {
1303         const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
1304         const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
1305         const struct rte_flow_item_ipv6 nic_mask = {
1306                 .hdr = {
1307                         .src_addr =
1308                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
1309                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
1310                         .dst_addr =
1311                                 "\xff\xff\xff\xff\xff\xff\xff\xff"
1312                                 "\xff\xff\xff\xff\xff\xff\xff\xff",
1313                         .vtc_flow = RTE_BE32(0xffffffff),
1314                         .proto = 0xff,
1315                         .hop_limits = 0xff,
1316                 },
1317         };
1318         void *headers_m;
1319         void *headers_v;
1320         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1321         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1322         char *l24_m;
1323         char *l24_v;
1324         uint32_t vtc_m;
1325         uint32_t vtc_v;
1326         int i;
1327         int size;
1328
1329         if (inner) {
1330                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1331                                          inner_headers);
1332                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1333         } else {
1334                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1335                                          outer_headers);
1336                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1337         }
1338         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
1339         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
1340         if (!ipv6_v)
1341                 return;
1342         if (!ipv6_m)
1343                 ipv6_m = &nic_mask;
1344         size = sizeof(ipv6_m->hdr.dst_addr);
1345         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1346                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
1347         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1348                              dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
1349         memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
1350         for (i = 0; i < size; ++i)
1351                 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
1352         l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1353                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
1354         l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1355                              src_ipv4_src_ipv6.ipv6_layout.ipv6);
1356         memcpy(l24_m, ipv6_m->hdr.src_addr, size);
1357         for (i = 0; i < size; ++i)
1358                 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
1359         /* TOS. */
1360         vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
1361         vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
1362         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
1363         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
1364         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
1365         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
1366         /* Label. */
1367         if (inner) {
1368                 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
1369                          vtc_m);
1370                 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
1371                          vtc_v);
1372         } else {
1373                 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
1374                          vtc_m);
1375                 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
1376                          vtc_v);
1377         }
1378         /* Protocol. */
1379         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
1380                  ipv6_m->hdr.proto);
1381         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1382                  ipv6_v->hdr.proto & ipv6_m->hdr.proto);
1383 }
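
/*
 * Example (illustrative only): vtc_flow packs version(31:28), traffic
 * class(27:20) and flow label(19:0), which the shifts above unpack,
 * again relying on MLX5_SET() truncation to the field width:
 *
 *	vtc_flow = 0x6b812345 -> ip_ecn  = (vtc >> 20) & 0x3  = 0
 *	                         ip_dscp = (vtc >> 22) & 0x3f = 46
 *	                         flow label (20-bit field)    = 0x12345
 */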
1384
1385 /**
1386  * Add TCP item to matcher and to the value.
1387  *
1388  * @param[in, out] matcher
1389  *   Flow matcher.
1390  * @param[in, out] key
1391  *   Flow matcher value.
1392  * @param[in] item
1393  *   Flow pattern to translate.
1394  * @param[in] inner
1395  *   Item is inner pattern.
1396  */
1397 static void
1398 flow_dv_translate_item_tcp(void *matcher, void *key,
1399                            const struct rte_flow_item *item,
1400                            int inner)
1401 {
1402         const struct rte_flow_item_tcp *tcp_m = item->mask;
1403         const struct rte_flow_item_tcp *tcp_v = item->spec;
1404         void *headers_m;
1405         void *headers_v;
1406
1407         if (inner) {
1408                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1409                                          inner_headers);
1410                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1411         } else {
1412                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1413                                          outer_headers);
1414                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1415         }
1416         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1417         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
1418         if (!tcp_v)
1419                 return;
1420         if (!tcp_m)
1421                 tcp_m = &rte_flow_item_tcp_mask;
1422         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
1423                  rte_be_to_cpu_16(tcp_m->hdr.src_port));
1424         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
1425                  rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
1426         MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
1427                  rte_be_to_cpu_16(tcp_m->hdr.dst_port));
1428         MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
1429                  rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
1430 }
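
/*
 * Note the ordering above: ip_protocol is pinned to IPPROTO_TCP before
 * the spec check, so a bare TCP item with neither spec nor mask still
 * narrows the match to "IP protocol == 6" without touching the ports.
 */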
1431
1432 /**
1433  * Add UDP item to matcher and to the value.
1434  *
1435  * @param[in, out] matcher
1436  *   Flow matcher.
1437  * @param[in, out] key
1438  *   Flow matcher value.
1439  * @param[in] item
1440  *   Flow pattern to translate.
1441  * @param[in] inner
1442  *   Item is inner pattern.
1443  */
1444 static void
1445 flow_dv_translate_item_udp(void *matcher, void *key,
1446                            const struct rte_flow_item *item,
1447                            int inner)
1448 {
1449         const struct rte_flow_item_udp *udp_m = item->mask;
1450         const struct rte_flow_item_udp *udp_v = item->spec;
1451         void *headers_m;
1452         void *headers_v;
1453
1454         if (inner) {
1455                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1456                                          inner_headers);
1457                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1458         } else {
1459                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1460                                          outer_headers);
1461                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1462         }
1463         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1464         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
1465         if (!udp_v)
1466                 return;
1467         if (!udp_m)
1468                 udp_m = &rte_flow_item_udp_mask;
1469         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
1470                  rte_be_to_cpu_16(udp_m->hdr.src_port));
1471         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
1472                  rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
1473         MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
1474                  rte_be_to_cpu_16(udp_m->hdr.dst_port));
1475         MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
1476                  rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
1477 }
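
/*
 * Usage sketch (illustrative only, application side): a NULL mask
 * falls back to the rte_flow default used above, which covers both
 * ports, e.g. for VXLAN transport:
 *
 *	struct rte_flow_item_udp udp_spec = {
 *		.hdr = { .dst_port = RTE_BE16(4789) },
 *	};
 *	struct rte_flow_item item = {
 *		.type = RTE_FLOW_ITEM_TYPE_UDP,
 *		.spec = &udp_spec,
 *		.mask = NULL,	/* rte_flow_item_udp_mask */
 *	};
 *
 * Beware that the default mask also covers src_port, so the zero
 * src_port in this spec is matched literally; pass an explicit mask to
 * match the destination port alone.
 */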
1478
1479 /**
1480  * Add GRE item to matcher and to the value.
1481  *
1482  * @param[in, out] matcher
1483  *   Flow matcher.
1484  * @param[in, out] key
1485  *   Flow matcher value.
1486  * @param[in] item
1487  *   Flow pattern to translate.
1488  * @param[in] inner
1489  *   Item is inner pattern.
1490  */
1491 static void
1492 flow_dv_translate_item_gre(void *matcher, void *key,
1493                            const struct rte_flow_item *item,
1494                            int inner)
1495 {
1496         const struct rte_flow_item_gre *gre_m = item->mask;
1497         const struct rte_flow_item_gre *gre_v = item->spec;
1498         void *headers_m;
1499         void *headers_v;
1500         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1501         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1502
1503         if (inner) {
1504                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1505                                          inner_headers);
1506                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1507         } else {
1508                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1509                                          outer_headers);
1510                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1511         }
1512         MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1513         MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
1514         if (!gre_v)
1515                 return;
1516         if (!gre_m)
1517                 gre_m = &rte_flow_item_gre_mask;
1518         MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
1519                  rte_be_to_cpu_16(gre_m->protocol));
1520         MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
1521                  rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
1522 }
1523
1524 /**
1525  * Add NVGRE item to matcher and to the value.
1526  *
1527  * @param[in, out] matcher
1528  *   Flow matcher.
1529  * @param[in, out] key
1530  *   Flow matcher value.
1531  * @param[in] item
1532  *   Flow pattern to translate.
1533  * @param[in] inner
1534  *   Item is inner pattern.
1535  */
1536 static void
1537 flow_dv_translate_item_nvgre(void *matcher, void *key,
1538                              const struct rte_flow_item *item,
1539                              int inner)
1540 {
1541         const struct rte_flow_item_nvgre *nvgre_m = item->mask;
1542         const struct rte_flow_item_nvgre *nvgre_v = item->spec;
1543         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1544         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1545         const char *tni_flow_id_m;
1546         const char *tni_flow_id_v;
1547         char *gre_key_m;
1548         char *gre_key_v;
1549         int size;
1550         int i;
1551
1552         flow_dv_translate_item_gre(matcher, key, item, inner);
1553         if (!nvgre_v)
1554                 return;
1555         if (!nvgre_m)
1556                 nvgre_m = &rte_flow_item_nvgre_mask;
             /* Read the TNI pointers only after the default mask is set. */
             tni_flow_id_m = (const char *)nvgre_m->tni;
             tni_flow_id_v = (const char *)nvgre_v->tni;
1557         size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
1558         gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
1559         gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
1560         memcpy(gre_key_m, tni_flow_id_m, size);
1561         for (i = 0; i < size; ++i)
1562                 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
1563 }
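
/*
 * Example (illustrative only): NVGRE reuses the 32-bit GRE key, TNI in
 * the upper 24 bits and flow_id in the low 8, so both fields are
 * copied as one 4-byte run starting at gre_key_h:
 *
 *	tni = {0x00, 0x12, 0x34}, flow_id = 0x56 -> key bytes 00 12 34 56
 */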
1564
1565 /**
1566  * Add VXLAN item to matcher and to the value.
1567  *
1568  * @param[in, out] matcher
1569  *   Flow matcher.
1570  * @param[in, out] key
1571  *   Flow matcher value.
1572  * @param[in] item
1573  *   Flow pattern to translate.
1574  * @param[in] inner
1575  *   Item is inner pattern.
1576  */
1577 static void
1578 flow_dv_translate_item_vxlan(void *matcher, void *key,
1579                              const struct rte_flow_item *item,
1580                              int inner)
1581 {
1582         const struct rte_flow_item_vxlan *vxlan_m = item->mask;
1583         const struct rte_flow_item_vxlan *vxlan_v = item->spec;
1584         void *headers_m;
1585         void *headers_v;
1586         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1587         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1588         char *vni_m;
1589         char *vni_v;
1590         uint16_t dport;
1591         int size;
1592         int i;
1593
1594         if (inner) {
1595                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1596                                          inner_headers);
1597                 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1598         } else {
1599                 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1600                                          outer_headers);
1601                 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1602         }
1603         dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
1604                 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
1605         if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
1606                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
1607                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
1608         }
1609         if (!vxlan_v)
1610                 return;
1611         if (!vxlan_m)
1612                 vxlan_m = &rte_flow_item_vxlan_mask;
1613         size = sizeof(vxlan_m->vni);
1614         vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
1615         vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
1616         memcpy(vni_m, vxlan_m->vni, size);
1617         for (i = 0; i < size; ++i)
1618                 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
1619 }
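
/*
 * Example (illustrative only): when the preceding UDP item left
 * udp_dport unset, the code above pins it to the IANA-assigned tunnel
 * port (4789 for VXLAN, 4790 for VXLAN-GPE), so eth/ipv4/udp/vxlan
 * with an empty UDP spec still yields udp_dport == 4789 in the value.
 */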
1620
1621 /**
1622  * Add MPLS item to matcher and to the value.
1623  *
1624  * @param[in, out] matcher
1625  *   Flow matcher.
1626  * @param[in, out] key
1627  *   Flow matcher value.
1628  * @param[in] item
1629  *   Flow pattern to translate.
1630  * @param[in] prev_layer
1631  *   The protocol layer indicated in previous item.
1632  * @param[in] inner
1633  *   Item is inner pattern.
1634  */
1635 static void
1636 flow_dv_translate_item_mpls(void *matcher, void *key,
1637                             const struct rte_flow_item *item,
1638                             uint64_t prev_layer,
1639                             int inner)
1640 {
1641         const uint32_t *in_mpls_m = item->mask;
1642         const uint32_t *in_mpls_v = item->spec;
1643         uint32_t *out_mpls_m = NULL;
1644         uint32_t *out_mpls_v = NULL;
1645         void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1646         void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1647         void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
1648                                      misc_parameters_2);
1649         void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
1650         void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
1651         void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
1652
1653         switch (prev_layer) {
1654         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
1655                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
1656                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
1657                          MLX5_UDP_PORT_MPLS);
1658                 break;
1659         case MLX5_FLOW_LAYER_GRE:
1660                 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
1661                 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
1662                          ETHER_TYPE_MPLS);
1663                 break;
1664         default:
1665                 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1666                 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1667                          IPPROTO_MPLS);
1668                 break;
1669         }
1670         if (!in_mpls_v)
1671                 return;
1672         if (!in_mpls_m)
1673                 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
1674         switch (prev_layer) {
1675         case MLX5_FLOW_LAYER_OUTER_L4_UDP:
1676                 out_mpls_m =
1677                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
1678                                                  outer_first_mpls_over_udp);
1679                 out_mpls_v =
1680                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
1681                                                  outer_first_mpls_over_udp);
1682                 break;
1683         case MLX5_FLOW_LAYER_GRE:
1684                 out_mpls_m =
1685                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
1686                                                  outer_first_mpls_over_gre);
1687                 out_mpls_v =
1688                         (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
1689                                                  outer_first_mpls_over_gre);
1690                 break;
1691         default:
1692                 /* Inner MPLS not over GRE is not supported. */
1693                 if (!inner) {
1694                         out_mpls_m =
1695                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
1696                                                          misc2_m,
1697                                                          outer_first_mpls);
1698                         out_mpls_v =
1699                                 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
1700                                                          misc2_v,
1701                                                          outer_first_mpls);
1702                 }
1703                 break;
1704         }
1705         if (out_mpls_m && out_mpls_v) {
1706                 *out_mpls_m = *in_mpls_m;
1707                 *out_mpls_v = *in_mpls_v & *in_mpls_m;
1708         }
1709 }
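
/*
 * Example (illustrative only): prev_layer is the layer bit recorded
 * for the item preceding MPLS, so eth/ipv4/udp/mpls arrives here with
 * MLX5_FLOW_LAYER_OUTER_L4_UDP and the label is matched in
 * outer_first_mpls_over_udp, while gre/mpls lands in
 * outer_first_mpls_over_gre.
 */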
1710
1711 /**
1712  * Add META item to matcher and to the value.
1713  *
1714  * @param[in, out] matcher
1715  *   Flow matcher.
1716  * @param[in, out] key
1717  *   Flow matcher value.
1718  * @param[in] item
1719  *   Flow pattern to translate.
1722  */
1723 static void
1724 flow_dv_translate_item_meta(void *matcher, void *key,
1725                             const struct rte_flow_item *item)
1726 {
1727         const struct rte_flow_item_meta *meta_m;
1728         const struct rte_flow_item_meta *meta_v;
1729         void *misc2_m =
1730                 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
1731         void *misc2_v =
1732                 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
1733
1734         meta_m = (const void *)item->mask;
1735         if (!meta_m)
1736                 meta_m = &rte_flow_item_meta_mask;
1737         meta_v = (const void *)item->spec;
1738         if (meta_v) {
1739                 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
1740                          rte_be_to_cpu_32(meta_m->data));
1741                 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
1742                          rte_be_to_cpu_32(meta_v->data & meta_m->data));
1743         }
1744 }
1745
1746 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
1747
1748 #define HEADER_IS_ZERO(match_criteria, headers)                              \
1749         !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers),     \
1750                  matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers)))
1751
1752 /**
1753  * Calculate flow matcher enable bitmap.
1754  *
1755  * @param match_criteria
1756  *   Pointer to flow matcher criteria.
1757  *
1758  * @return
1759  *   Bitmap of enabled fields.
1760  */
1761 static uint8_t
1762 flow_dv_matcher_enable(uint32_t *match_criteria)
1763 {
1764         uint8_t match_criteria_enable;
1765
1766         match_criteria_enable =
1767                 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
1768                 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
1769         match_criteria_enable |=
1770                 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
1771                 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
1772         match_criteria_enable |=
1773                 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
1774                 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
1775         match_criteria_enable |=
1776                 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
1777                 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
1778
1779         return match_criteria_enable;
1780 }
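
/*
 * Example (illustrative only): a plain outer eth/ipv4/tcp match
 * populates only outer_headers, so the returned bitmap has just
 * MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT set; a VXLAN item adds the misc
 * bit (the VNI lives in misc_parameters), and any items after it raise
 * the inner headers bit as well.
 */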
1781
1782 /**
1783  * Register the flow matcher.
1784  *
1785  * @param[in, out] dev
1786  *   Pointer to rte_eth_dev structure.
1787  * @param[in, out] matcher
1788  *   Pointer to flow matcher.
1789  * @param[in, out] dev_flow
1790  *   Pointer to the dev_flow.
1791  * @param[out] error
1792  *   Pointer to error structure.
1793  *
1794  * @return
1795  *   0 on success, a negative errno value otherwise and rte_errno is set.
1796  */
1797 static int
1798 flow_dv_matcher_register(struct rte_eth_dev *dev,
1799                          struct mlx5_flow_dv_matcher *matcher,
1800                          struct mlx5_flow *dev_flow,
1801                          struct rte_flow_error *error)
1802 {
1803         struct priv *priv = dev->data->dev_private;
1804         struct mlx5_flow_dv_matcher *cache_matcher;
1805         struct mlx5dv_flow_matcher_attr dv_attr = {
1806                 .type = IBV_FLOW_ATTR_NORMAL,
1807                 .match_mask = (void *)&matcher->mask,
1808         };
1809
1810         /* Lookup from cache. */
1811         LIST_FOREACH(cache_matcher, &priv->matchers, next) {
1812                 if (matcher->crc == cache_matcher->crc &&
1813                     matcher->priority == cache_matcher->priority &&
1814                     matcher->egress == cache_matcher->egress &&
1815                     !memcmp((const void *)matcher->mask.buf,
1816                             (const void *)cache_matcher->mask.buf,
1817                             cache_matcher->mask.size)) {
1818                         DRV_LOG(DEBUG,
1819                                 "priority %hd use %s matcher %p: refcnt %d++",
1820                                 cache_matcher->priority,
1821                                 cache_matcher->egress ? "tx" : "rx",
1822                                 (void *)cache_matcher,
1823                                 rte_atomic32_read(&cache_matcher->refcnt));
1824                         rte_atomic32_inc(&cache_matcher->refcnt);
1825                         dev_flow->dv.matcher = cache_matcher;
1826                         return 0;
1827                 }
1828         }
1829         /* Register new matcher. */
1830         cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
1831         if (!cache_matcher)
1832                 return rte_flow_error_set(error, ENOMEM,
1833                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1834                                           "cannot allocate matcher memory");
1835         *cache_matcher = *matcher;
1836         dv_attr.match_criteria_enable =
1837                 flow_dv_matcher_enable(cache_matcher->mask.buf);
1838         dv_attr.priority = matcher->priority;
1839         if (matcher->egress)
1840                 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
1841         cache_matcher->matcher_object =
1842                 mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
1843         if (!cache_matcher->matcher_object) {
1844                 rte_free(cache_matcher);
1845                 return rte_flow_error_set(error, ENOMEM,
1846                                           RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1847                                           NULL, "cannot create matcher");
1848         }
1849         rte_atomic32_inc(&cache_matcher->refcnt);
1850         LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
1851         dev_flow->dv.matcher = cache_matcher;
1852         DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
1853                 cache_matcher->priority,
1854                 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
1855                 rte_atomic32_read(&cache_matcher->refcnt));
1856         return 0;
1857 }
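
/*
 * Two flows that differ only in their match values therefore share one
 * mlx5dv matcher object: the cache lookup above keys on crc, priority,
 * direction and the mask buffer, never on the value.
 */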
1858
1859 /**
1860  * Fill the flow with DV spec.
1861  *
1862  * @param[in] dev
1863  *   Pointer to rte_eth_dev structure.
1864  * @param[in, out] dev_flow
1865  *   Pointer to the sub flow.
1866  * @param[in] attr
1867  *   Pointer to the flow attributes.
1868  * @param[in] items
1869  *   Pointer to the list of items.
1870  * @param[in] actions
1871  *   Pointer to the list of actions.
1872  * @param[out] error
1873  *   Pointer to the error structure.
1874  *
1875  * @return
1876  *   0 on success, a negative errno value otherwise and rte_errno is set.
1877  */
1878 static int
1879 flow_dv_translate(struct rte_eth_dev *dev,
1880                   struct mlx5_flow *dev_flow,
1881                   const struct rte_flow_attr *attr,
1882                   const struct rte_flow_item items[],
1883                   const struct rte_flow_action actions[],
1884                   struct rte_flow_error *error)
1885 {
1886         struct priv *priv = dev->data->dev_private;
1887         struct rte_flow *flow = dev_flow->flow;
1888         uint64_t item_flags = 0;
1889         uint64_t last_item = 0;
1890         uint64_t action_flags = 0;
1891         uint64_t priority = attr->priority;
1892         struct mlx5_flow_dv_matcher matcher = {
1893                 .mask = {
1894                         .size = sizeof(matcher.mask.buf),
1895                 },
1896         };
1897         int actions_n = 0;
1898
1899         if (priority == MLX5_FLOW_PRIO_RSVD)
1900                 priority = priv->config.flow_prio - 1;
1901         for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1902                 const struct rte_flow_action_queue *queue;
1903                 const struct rte_flow_action_rss *rss;
1904                 const struct rte_flow_action *action = actions;
1905                 const uint8_t *rss_key;
1906
1907                 switch (actions->type) {
1908                 case RTE_FLOW_ACTION_TYPE_VOID:
1909                         break;
1910                 case RTE_FLOW_ACTION_TYPE_FLAG:
1911                         dev_flow->dv.actions[actions_n].type =
1912                                 MLX5DV_FLOW_ACTION_TAG;
1913                         dev_flow->dv.actions[actions_n].tag_value =
1914                                 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
1915                         actions_n++;
1916                         action_flags |= MLX5_FLOW_ACTION_FLAG;
1917                         break;
1918                 case RTE_FLOW_ACTION_TYPE_MARK:
1919                         dev_flow->dv.actions[actions_n].type =
1920                                 MLX5DV_FLOW_ACTION_TAG;
1921                         dev_flow->dv.actions[actions_n].tag_value =
1922                                 mlx5_flow_mark_set
1923                                 (((const struct rte_flow_action_mark *)
1924                                   (actions->conf))->id);
1925                         actions_n++;
1926                         action_flags |= MLX5_FLOW_ACTION_MARK;
1927                         break;
1928                 case RTE_FLOW_ACTION_TYPE_DROP:
1929                         dev_flow->dv.actions[actions_n].type =
1930                                 MLX5DV_FLOW_ACTION_DROP;
                             /*
                              * actions_n is deliberately not incremented:
                              * flow_dv_apply() rewrites this slot with the
                              * drop hash Rx queue destination.
                              */
1931                         action_flags |= MLX5_FLOW_ACTION_DROP;
1932                         break;
1933                 case RTE_FLOW_ACTION_TYPE_QUEUE:
1934                         queue = actions->conf;
1935                         flow->rss.queue_num = 1;
1936                         (*flow->queue)[0] = queue->index;
1937                         action_flags |= MLX5_FLOW_ACTION_QUEUE;
1938                         break;
1939                 case RTE_FLOW_ACTION_TYPE_RSS:
1940                         rss = actions->conf;
1941                         if (flow->queue)
1942                                 memcpy((*flow->queue), rss->queue,
1943                                        rss->queue_num * sizeof(uint16_t));
1944                         flow->rss.queue_num = rss->queue_num;
1945                         /* NULL RSS key indicates default RSS key. */
1946                         rss_key = !rss->key ? rss_hash_default_key : rss->key;
1947                         memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
1948                         /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
1949                         flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
1950                         flow->rss.level = rss->level;
1951                         action_flags |= MLX5_FLOW_ACTION_RSS;
1952                         break;
1953                 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1954                 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
1955                         if (flow_dv_create_action_l2_encap(dev, actions,
1956                                                            dev_flow, error))
1957                                 return -rte_errno;
1958                         dev_flow->dv.actions[actions_n].type =
1959                                 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1960                         dev_flow->dv.actions[actions_n].action =
1961                                 dev_flow->dv.encap_decap->verbs_action;
1962                         actions_n++;
1963                         action_flags |= actions->type ==
1964                                         RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
1965                                         MLX5_FLOW_ACTION_VXLAN_ENCAP :
1966                                         MLX5_FLOW_ACTION_NVGRE_ENCAP;
1967                         break;
1968                 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1969                 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
1970                         if (flow_dv_create_action_l2_decap(dev, dev_flow,
1971                                                            error))
1972                                 return -rte_errno;
1973                         dev_flow->dv.actions[actions_n].type =
1974                                 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1975                         dev_flow->dv.actions[actions_n].action =
1976                                 dev_flow->dv.encap_decap->verbs_action;
1977                         actions_n++;
1978                         action_flags |= actions->type ==
1979                                         RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
1980                                         MLX5_FLOW_ACTION_VXLAN_DECAP :
1981                                         MLX5_FLOW_ACTION_NVGRE_DECAP;
1982                         break;
1983                 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
1984                         /* Handle encap with preceding decap. */
1985                         if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
1986                                 if (flow_dv_create_action_raw_encap
1987                                         (dev, actions, dev_flow, attr, error))
1988                                         return -rte_errno;
1989                                 dev_flow->dv.actions[actions_n].type =
1990                                         MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1991                                 dev_flow->dv.actions[actions_n].action =
1992                                         dev_flow->dv.encap_decap->verbs_action;
1993                         } else {
1994                                 /* Handle encap without preceding decap. */
1995                                 if (flow_dv_create_action_l2_encap(dev, actions,
1996                                                                    dev_flow,
1997                                                                    error))
1998                                         return -rte_errno;
1999                                 dev_flow->dv.actions[actions_n].type =
2000                                         MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2001                                 dev_flow->dv.actions[actions_n].action =
2002                                         dev_flow->dv.encap_decap->verbs_action;
2003                         }
2004                         actions_n++;
2005                         action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2006                         break;
2007                 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2008                         /* Check if this decap is followed by encap. */
2009                         for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
2010                                action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
2011                                action++) {
2012                         }
2013                         /* Handle decap only if it isn't followed by encap. */
2014                         if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2015                                 if (flow_dv_create_action_l2_decap(dev,
2016                                                                    dev_flow,
2017                                                                    error))
2018                                         return -rte_errno;
2019                                 dev_flow->dv.actions[actions_n].type =
2020                                         MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2021                                 dev_flow->dv.actions[actions_n].action =
2022                                         dev_flow->dv.encap_decap->verbs_action;
2023                                 actions_n++;
2024                         }
2025                         /* If decap is followed by encap, handle it at encap. */
2026                         action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2027                         break;
2028                 default:
2029                         break;
2030                 }
2031         }
2032         dev_flow->dv.actions_n = actions_n;
2033         flow->actions = action_flags;
2034         for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2035                 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2036                 void *match_mask = matcher.mask.buf;
2037                 void *match_value = dev_flow->dv.value.buf;
2038
2039                 switch (items->type) {
2040                 case RTE_FLOW_ITEM_TYPE_ETH:
2041                         flow_dv_translate_item_eth(match_mask, match_value,
2042                                                    items, tunnel);
2043                         matcher.priority = MLX5_PRIORITY_MAP_L2;
2044                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2045                                              MLX5_FLOW_LAYER_OUTER_L2;
2046                         break;
2047                 case RTE_FLOW_ITEM_TYPE_VLAN:
2048                         flow_dv_translate_item_vlan(match_mask, match_value,
2049                                                     items, tunnel);
2050                         matcher.priority = MLX5_PRIORITY_MAP_L2;
2051                         last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
2052                                               MLX5_FLOW_LAYER_INNER_VLAN) :
2053                                              (MLX5_FLOW_LAYER_OUTER_L2 |
2054                                               MLX5_FLOW_LAYER_OUTER_VLAN);
2055                         break;
2056                 case RTE_FLOW_ITEM_TYPE_IPV4:
2057                         flow_dv_translate_item_ipv4(match_mask, match_value,
2058                                                     items, tunnel);
2059                         matcher.priority = MLX5_PRIORITY_MAP_L3;
2060                         dev_flow->dv.hash_fields |=
2061                                 mlx5_flow_hashfields_adjust
2062                                         (dev_flow, tunnel,
2063                                          MLX5_IPV4_LAYER_TYPES,
2064                                          MLX5_IPV4_IBV_RX_HASH);
2065                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2066                                              MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2067                         break;
2068                 case RTE_FLOW_ITEM_TYPE_IPV6:
2069                         flow_dv_translate_item_ipv6(match_mask, match_value,
2070                                                     items, tunnel);
2071                         matcher.priority = MLX5_PRIORITY_MAP_L3;
2072                         dev_flow->dv.hash_fields |=
2073                                 mlx5_flow_hashfields_adjust
2074                                         (dev_flow, tunnel,
2075                                          MLX5_IPV6_LAYER_TYPES,
2076                                          MLX5_IPV6_IBV_RX_HASH);
2077                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2078                                              MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2079                         break;
2080                 case RTE_FLOW_ITEM_TYPE_TCP:
2081                         flow_dv_translate_item_tcp(match_mask, match_value,
2082                                                    items, tunnel);
2083                         matcher.priority = MLX5_PRIORITY_MAP_L4;
2084                         dev_flow->dv.hash_fields |=
2085                                 mlx5_flow_hashfields_adjust
2086                                         (dev_flow, tunnel, ETH_RSS_TCP,
2087                                          IBV_RX_HASH_SRC_PORT_TCP |
2088                                          IBV_RX_HASH_DST_PORT_TCP);
2089                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2090                                              MLX5_FLOW_LAYER_OUTER_L4_TCP;
2091                         break;
2092                 case RTE_FLOW_ITEM_TYPE_UDP:
2093                         flow_dv_translate_item_udp(match_mask, match_value,
2094                                                    items, tunnel);
2095                         matcher.priority = MLX5_PRIORITY_MAP_L4;
2096                         dev_flow->dv.hash_fields |=
2097                                 mlx5_flow_hashfields_adjust
2098                                         (dev_flow, tunnel, ETH_RSS_UDP,
2099                                          IBV_RX_HASH_SRC_PORT_UDP |
2100                                          IBV_RX_HASH_DST_PORT_UDP);
2101                         last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2102                                              MLX5_FLOW_LAYER_OUTER_L4_UDP;
2103                         break;
2104                 case RTE_FLOW_ITEM_TYPE_GRE:
2105                         flow_dv_translate_item_gre(match_mask, match_value,
2106                                                    items, tunnel);
2107                         last_item = MLX5_FLOW_LAYER_GRE;
2108                         break;
2109                 case RTE_FLOW_ITEM_TYPE_NVGRE:
2110                         flow_dv_translate_item_nvgre(match_mask, match_value,
2111                                                      items, tunnel);
2112                         last_item = MLX5_FLOW_LAYER_GRE;
2113                         break;
2114                 case RTE_FLOW_ITEM_TYPE_VXLAN:
2115                         flow_dv_translate_item_vxlan(match_mask, match_value,
2116                                                      items, tunnel);
2117                         last_item = MLX5_FLOW_LAYER_VXLAN;
2118                         break;
2119                 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2120                         flow_dv_translate_item_vxlan(match_mask, match_value,
2121                                                      items, tunnel);
2122                         last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2123                         break;
2124                 case RTE_FLOW_ITEM_TYPE_MPLS:
2125                         flow_dv_translate_item_mpls(match_mask, match_value,
2126                                                     items, last_item, tunnel);
2127                         last_item = MLX5_FLOW_LAYER_MPLS;
2128                         break;
2129                 case RTE_FLOW_ITEM_TYPE_META:
2130                         flow_dv_translate_item_meta(match_mask, match_value,
2131                                                     items);
2132                         last_item = MLX5_FLOW_ITEM_METADATA;
2133                         break;
2134                 default:
2135                         break;
2136                 }
2137                 item_flags |= last_item;
2138         }
2139         assert(!flow_dv_check_valid_spec(matcher.mask.buf,
2140                                          dev_flow->dv.value.buf));
2141         dev_flow->layers = item_flags;
2142         /* Register matcher. */
2143         matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
2144                                     matcher.mask.size);
2145         matcher.priority = mlx5_flow_adjust_priority(dev, priority,
2146                                                      matcher.priority);
2147         matcher.egress = attr->egress;
2148         if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
2149                 return -rte_errno;
2150         return 0;
2151 }
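
#ifdef MLX5_FLOW_DV_USAGE_SKETCH /* illustrative only, never defined */
/*
 * Application-side sketch of the kind of rule translated above: match
 * outer eth/ipv4 with destination 192.168.1.1 and steer it to queue 0.
 * The port is assumed to be a started mlx5 port; the helper name and
 * the macro guard are hypothetical.
 */
static struct rte_flow *
flow_dv_usage_sketch(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item_ipv4 ip_spec = {
		.hdr = { .dst_addr = RTE_BE32(0xc0a80101) },
	};
	/* Explicit mask: match the destination address only. */
	struct rte_flow_item_ipv4 ip_mask = {
		.hdr = { .dst_addr = RTE_BE32(UINT32_MAX) },
	};
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4,
		  .spec = &ip_spec, .mask = &ip_mask },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = 0 };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}
#endif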
2152
2153 /**
2154  * Apply the flow to the NIC.
2155  *
2156  * @param[in] dev
2157  *   Pointer to the Ethernet device structure.
2158  * @param[in, out] flow
2159  *   Pointer to flow structure.
2160  * @param[out] error
2161  *   Pointer to error structure.
2162  *
2163  * @return
2164  *   0 on success, a negative errno value otherwise and rte_errno is set.
2165  */
2166 static int
2167 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2168               struct rte_flow_error *error)
2169 {
2170         struct mlx5_flow_dv *dv;
2171         struct mlx5_flow *dev_flow;
2172         int n;
2173         int err;
2174
2175         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
2176                 dv = &dev_flow->dv;
2177                 n = dv->actions_n;
2178                 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
2179                         dv->hrxq = mlx5_hrxq_drop_new(dev);
2180                         if (!dv->hrxq) {
2181                                 rte_flow_error_set
2182                                         (error, errno,
2183                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2184                                          "cannot get drop hash queue");
2185                                 goto error;
2186                         }
2187                         dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
2188                         dv->actions[n].qp = dv->hrxq->qp;
2189                         n++;
2190                 } else if (flow->actions &
2191                            (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
2192                         struct mlx5_hrxq *hrxq;
2193
2194                         hrxq = mlx5_hrxq_get(dev, flow->key,
2195                                              MLX5_RSS_HASH_KEY_LEN,
2196                                              dv->hash_fields,
2197                                              (*flow->queue),
2198                                              flow->rss.queue_num);
2199                         if (!hrxq)
2200                                 hrxq = mlx5_hrxq_new
2201                                         (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
2202                                          dv->hash_fields, (*flow->queue),
2203                                          flow->rss.queue_num,
2204                                          !!(dev_flow->layers &
2205                                             MLX5_FLOW_LAYER_TUNNEL));
2206                         if (!hrxq) {
2207                                 rte_flow_error_set
2208                                         (error, rte_errno,
2209                                          RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2210                                          "cannot get hash queue");
2211                                 goto error;
2212                         }
2213                         dv->hrxq = hrxq;
2214                         dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
2215                         dv->actions[n].qp = hrxq->qp;
2216                         n++;
2217                 }
2218                 dv->flow =
2219                         mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
2220                                                   (void *)&dv->value, n,
2221                                                   dv->actions);
2222                 if (!dv->flow) {
2223                         rte_flow_error_set(error, errno,
2224                                            RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2225                                            NULL,
2226                                            "hardware refuses to create flow");
2227                         goto error;
2228                 }
2229         }
2230         return 0;
2231 error:
2232         err = rte_errno; /* Save rte_errno before cleanup. */
2233         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
2234                 struct mlx5_flow_dv *dv = &dev_flow->dv;
2235                 if (dv->hrxq) {
2236                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
2237                                 mlx5_hrxq_drop_release(dev);
2238                         else
2239                                 mlx5_hrxq_release(dev, dv->hrxq);
2240                         dv->hrxq = NULL;
2241                 }
2242         }
2243         rte_errno = err; /* Restore rte_errno. */
2244         return -rte_errno;
2245 }
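
/*
 * The error path above snapshots rte_errno before the hrxq cleanup
 * calls, since mlx5_hrxq_release() and mlx5_hrxq_drop_release() may
 * themselves update rte_errno.
 */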
2246
2247 /**
2248  * Release the flow matcher.
2249  *
2250  * @param dev
2251  *   Pointer to Ethernet device.
2252  * @param flow
2253  *   Pointer to mlx5_flow.
2254  *
2255  * @return
2256  *   1 while a reference on it exists, 0 when freed.
2257  */
2258 static int
2259 flow_dv_matcher_release(struct rte_eth_dev *dev,
2260                         struct mlx5_flow *flow)
2261 {
2262         struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
2263
2264         assert(matcher->matcher_object);
2265         DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
2266                 dev->data->port_id, (void *)matcher,
2267                 rte_atomic32_read(&matcher->refcnt));
2268         if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
2269                 claim_zero(mlx5_glue->dv_destroy_flow_matcher
2270                            (matcher->matcher_object));
2271                 LIST_REMOVE(matcher, next);
2272                 rte_free(matcher);
2273                 DRV_LOG(DEBUG, "port %u matcher %p: removed",
2274                         dev->data->port_id, (void *)matcher);
2275                 return 0;
2276         }
2277         return 1;
2278 }
2279
2280 /**
2281  * Release an encap/decap resource.
2282  *
2283  * @param flow
2284  *   Pointer to mlx5_flow.
2285  *
2286  * @return
2287  *   1 while a reference on it exists, 0 when freed.
2288  */
2289 static int
2290 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
2291 {
2292         struct mlx5_flow_dv_encap_decap_resource *cache_resource =
2293                                                 flow->dv.encap_decap;
2294
2295         assert(cache_resource->verbs_action);
2296         DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
2297                 (void *)cache_resource,
2298                 rte_atomic32_read(&cache_resource->refcnt));
2299         if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
2300                 claim_zero(mlx5_glue->destroy_flow_action
2301                                 (cache_resource->verbs_action));
2302                 LIST_REMOVE(cache_resource, next);
2303                 rte_free(cache_resource);
2304                 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
2305                         (void *)cache_resource);
2306                 return 0;
2307         }
2308         return 1;
2309 }
2310
2311 /**
2312  * Remove the flow from the NIC but keep it in memory.
2313  *
2314  * @param[in] dev
2315  *   Pointer to Ethernet device.
2316  * @param[in, out] flow
2317  *   Pointer to flow structure.
2318  */
2319 static void
2320 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2321 {
2322         struct mlx5_flow_dv *dv;
2323         struct mlx5_flow *dev_flow;
2324
2325         if (!flow)
2326                 return;
2327         LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
2328                 dv = &dev_flow->dv;
2329                 if (dv->flow) {
2330                         claim_zero(mlx5_glue->destroy_flow(dv->flow));
2331                         dv->flow = NULL;
2332                 }
2333                 if (dv->hrxq) {
2334                         if (flow->actions & MLX5_FLOW_ACTION_DROP)
2335                                 mlx5_hrxq_drop_release(dev);
2336                         else
2337                                 mlx5_hrxq_release(dev, dv->hrxq);
2338                         dv->hrxq = NULL;
2339                 }
2340         }
2341         flow->counter = NULL;
2343 }
2344
2345 /**
2346  * Remove the flow from the NIC and the memory.
2347  *
2348  * @param[in] dev
2349  *   Pointer to the Ethernet device structure.
2350  * @param[in, out] flow
2351  *   Pointer to flow structure.
2352  */
2353 static void
2354 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
2355 {
2356         struct mlx5_flow *dev_flow;
2357
2358         if (!flow)
2359                 return;
2360         flow_dv_remove(dev, flow);
2361         while (!LIST_EMPTY(&flow->dev_flows)) {
2362                 dev_flow = LIST_FIRST(&flow->dev_flows);
2363                 LIST_REMOVE(dev_flow, next);
2364                 if (dev_flow->dv.matcher)
2365                         flow_dv_matcher_release(dev, dev_flow);
2366                 if (dev_flow->dv.encap_decap)
2367                         flow_dv_encap_decap_resource_release(dev_flow);
2368                 rte_free(dev_flow);
2369         }
2370 }
2371
2372 /**
2373  * Query a flow.
2374  *
2375  * @see rte_flow_query()
2376  * @see rte_flow_ops
2377  */
2378 static int
2379 flow_dv_query(struct rte_eth_dev *dev __rte_unused,
2380               struct rte_flow *flow __rte_unused,
2381               const struct rte_flow_action *actions __rte_unused,
2382               void *data __rte_unused,
2383               struct rte_flow_error *error __rte_unused)
2384 {
2385         return rte_flow_error_set(error, ENOTSUP,
2386                                   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2387                                   NULL,
2388                                   "flow query with DV is not supported");
2389 }
2390
2392 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
2393         .validate = flow_dv_validate,
2394         .prepare = flow_dv_prepare,
2395         .translate = flow_dv_translate,
2396         .apply = flow_dv_apply,
2397         .remove = flow_dv_remove,
2398         .destroy = flow_dv_destroy,
2399         .query = flow_dv_query,
2400 };
2401
2402 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */