1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright 2018 Mellanox Technologies, Ltd
11 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
13 #pragma GCC diagnostic ignored "-Wpedantic"
15 #include <infiniband/verbs.h>
17 #pragma GCC diagnostic error "-Wpedantic"
20 #include <rte_common.h>
21 #include <rte_ether.h>
22 #include <rte_eth_ctrl.h>
23 #include <rte_ethdev_driver.h>
25 #include <rte_flow_driver.h>
26 #include <rte_malloc.h>
31 #include "mlx5_defs.h"
33 #include "mlx5_glue.h"
34 #include "mlx5_flow.h"
36 #ifdef HAVE_IBV_FLOW_DV_SUPPORT
/*
 * Validate the META item: matching on flow metadata requires the Tx
 * DEV_TX_OFFLOAD_MATCH_METADATA offload to be enabled on the port, and
 * (per the trailing error path) the item is rejected on ingress.
 * NOTE(review): this is a sampled listing — guard conditions and braces
 * between the numbered lines are elided; confirm against the full source.
 */
42 * Pointer to the rte_eth_dev structure.
46 * Attributes of flow that includes this item.
48 * Pointer to error structure.
51 * 0 on success, a negative errno value otherwise and rte_errno is set.
54 flow_dv_validate_item_meta(struct rte_eth_dev *dev,
55 const struct rte_flow_item *item,
56 const struct rte_flow_attr *attr,
57 struct rte_flow_error *error)
59 const struct rte_flow_item_meta *spec = item->spec;
60 const struct rte_flow_item_meta *mask = item->mask;
/* Full 32-bit metadata match is supported by the NIC. */
61 const struct rte_flow_item_meta nic_mask = {
62 .data = RTE_BE32(UINT32_MAX)
65 uint64_t offloads = dev->data->dev_conf.txmode.offloads;
67 if (!(offloads & DEV_TX_OFFLOAD_MATCH_METADATA))
68 return rte_flow_error_set(error, EPERM,
69 RTE_FLOW_ERROR_TYPE_ITEM,
71 "match on metadata offload "
72 "configuration is off for this port");
/* NOTE(review): the guards for the next two error returns (presumably
 * "spec is NULL" and "spec->data == 0") are elided in this excerpt. */
74 return rte_flow_error_set(error, EINVAL,
75 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
77 "data cannot be empty");
79 return rte_flow_error_set(error, EINVAL,
80 RTE_FLOW_ERROR_TYPE_ITEM_SPEC,
82 "data cannot be zero");
/* Fall back to the default META mask when none was supplied. */
84 mask = &rte_flow_item_meta_mask;
85 ret = mlx5_flow_item_acceptable(item, (const uint8_t *)mask,
86 (const uint8_t *)&nic_mask,
87 sizeof(struct rte_flow_item_meta),
/* Egress-only item: reject when the flow attribute is ingress. */
92 return rte_flow_error_set(error, ENOTSUP,
93 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
95 "pattern not supported for ingress");
/*
 * Validate an L2 encap action (VXLAN/NVGRE encap or an equivalent raw
 * encap): requires a non-NULL configuration, forbids combining with DROP,
 * allows at most one encap/decap per flow, and is egress-only.
 * NOTE(review): sampled listing — some guard conditions are elided.
 */
100 * Validate the L2 encap action.
102 * @param[in] action_flags
103 * Holds the actions detected until now.
105 * Pointer to the encap action.
107 * Pointer to flow attributes
109 * Pointer to error structure.
112 * 0 on success, a negative errno value otherwise and rte_errno is set.
115 flow_dv_validate_action_l2_encap(uint64_t action_flags,
116 const struct rte_flow_action *action,
117 const struct rte_flow_attr *attr,
118 struct rte_flow_error *error)
/* NOTE(review): guard (presumably "if (!action->conf)") elided here. */
121 return rte_flow_error_set(error, EINVAL,
122 RTE_FLOW_ERROR_TYPE_ACTION, action,
123 "configuration cannot be null");
124 if (action_flags & MLX5_FLOW_ACTION_DROP)
125 return rte_flow_error_set(error, EINVAL,
126 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
127 "can't drop and encap in same flow");
128 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
129 return rte_flow_error_set(error, EINVAL,
130 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
131 "can only have a single encap or"
132 " decap action in a flow");
/* Egress-only action: ingress-attribute guard elided in this excerpt. */
134 return rte_flow_error_set(error, ENOTSUP,
135 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
137 "encap action not supported for "
/*
 * Validate an L2 decap action (VXLAN/NVGRE decap): forbids combining with
 * DROP, allows at most one encap/decap per flow, and (per the
 * ATTR_EGRESS error path) is rejected on egress.
 * NOTE(review): sampled listing — some guard conditions are elided.
 */
143 * Validate the L2 decap action.
145 * @param[in] action_flags
146 * Holds the actions detected until now.
148 * Pointer to flow attributes
150 * Pointer to error structure.
153 * 0 on success, a negative errno value otherwise and rte_errno is set.
156 flow_dv_validate_action_l2_decap(uint64_t action_flags,
157 const struct rte_flow_attr *attr,
158 struct rte_flow_error *error)
160 if (action_flags & MLX5_FLOW_ACTION_DROP)
161 return rte_flow_error_set(error, EINVAL,
162 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
163 "can't drop and decap in same flow");
164 if (action_flags & (MLX5_FLOW_ENCAP_ACTIONS | MLX5_FLOW_DECAP_ACTIONS))
165 return rte_flow_error_set(error, EINVAL,
166 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
167 "can only have a single encap or"
168 " decap action in a flow");
/* Ingress-only action: egress-attribute guard elided in this excerpt. */
170 return rte_flow_error_set(error, ENOTSUP,
171 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
173 "decap action not supported for "
/*
 * Validate a RAW_ENCAP action: requires non-NULL configuration, forbids
 * DROP in the same flow, allows only one encap action, and on ingress the
 * encap must be preceded by a RAW_DECAP (L3 tunnel re-encapsulation).
 * NOTE(review): sampled listing — some guard conditions are elided.
 */
179 * Validate the raw encap action.
181 * @param[in] action_flags
182 * Holds the actions detected until now.
184 * Pointer to the encap action.
186 * Pointer to flow attributes
188 * Pointer to error structure.
191 * 0 on success, a negative errno value otherwise and rte_errno is set.
194 flow_dv_validate_action_raw_encap(uint64_t action_flags,
195 const struct rte_flow_action *action,
196 const struct rte_flow_attr *attr,
197 struct rte_flow_error *error)
/* NOTE(review): guard (presumably "if (!action->conf)") elided here. */
200 return rte_flow_error_set(error, EINVAL,
201 RTE_FLOW_ERROR_TYPE_ACTION, action,
202 "configuration cannot be null");
203 if (action_flags & MLX5_FLOW_ACTION_DROP)
204 return rte_flow_error_set(error, EINVAL,
205 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
206 "can't drop and encap in same flow");
207 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
208 return rte_flow_error_set(error, EINVAL,
209 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
210 "can only have a single encap"
211 " action in a flow");
212 /* encap without preceding decap is not supported for ingress */
213 if (attr->ingress && !(action_flags & MLX5_FLOW_ACTION_RAW_DECAP))
214 return rte_flow_error_set(error, ENOTSUP,
215 RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
217 "encap action not supported for "
/*
 * Validate a RAW_DECAP action: forbids DROP, forbids a preceding encap,
 * allows only one decap per flow, and on egress the decap is valid only
 * when a RAW_ENCAP follows it in the action list (scanned below).
 * NOTE(review): sampled listing — some guard conditions are elided.
 */
223 * Validate the raw decap action.
225 * @param[in] action_flags
226 * Holds the actions detected until now.
228 * Pointer to the encap action.
230 * Pointer to flow attributes
232 * Pointer to error structure.
235 * 0 on success, a negative errno value otherwise and rte_errno is set.
238 flow_dv_validate_action_raw_decap(uint64_t action_flags,
239 const struct rte_flow_action *action,
240 const struct rte_flow_attr *attr,
241 struct rte_flow_error *error)
243 if (action_flags & MLX5_FLOW_ACTION_DROP)
244 return rte_flow_error_set(error, EINVAL,
245 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
246 "can't drop and decap in same flow");
247 if (action_flags & MLX5_FLOW_ENCAP_ACTIONS)
248 return rte_flow_error_set(error, EINVAL,
249 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
250 "can't have encap action before"
252 if (action_flags & MLX5_FLOW_DECAP_ACTIONS)
253 return rte_flow_error_set(error, EINVAL,
254 RTE_FLOW_ERROR_TYPE_ACTION, NULL,
255 "can only have a single decap"
256 " action in a flow");
257 /* decap action is valid on egress only if it is followed by encap */
/* Walk the remaining actions looking for a RAW_ENCAP terminator. */
259 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
260 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
263 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP)
264 return rte_flow_error_set
266 RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
267 NULL, "decap action not supported"
/*
 * Look up an encap/decap reformat resource in the per-port cache by
 * (reformat_type, ft_type, size, buf); on a hit bump its refcount, on a
 * miss allocate one, create the verbs reformat action via mlx5_glue, and
 * insert it at the head of priv->encaps_decaps.  The chosen resource is
 * stored in dev_flow->dv.encap_decap.
 * NOTE(review): sampled listing — braces/returns between numbered lines
 * are elided.
 */
275 * Find existing encap/decap resource or create and register a new one.
277 * @param dev[in, out]
278 * Pointer to rte_eth_dev structure.
279 * @param[in, out] resource
280 * Pointer to encap/decap resource.
281 * @parm[in, out] dev_flow
282 * Pointer to the dev_flow.
284 * pointer to error structure.
287 * 0 on success otherwise -errno and errno is set.
290 flow_dv_encap_decap_resource_register
291 (struct rte_eth_dev *dev,
292 struct mlx5_flow_dv_encap_decap_resource *resource,
293 struct mlx5_flow *dev_flow,
294 struct rte_flow_error *error)
296 struct mlx5_priv *priv = dev->data->dev_private;
297 struct mlx5_flow_dv_encap_decap_resource *cache_resource;
299 /* Lookup a matching resource from cache. */
300 LIST_FOREACH(cache_resource, &priv->encaps_decaps, next) {
301 if (resource->reformat_type == cache_resource->reformat_type &&
302 resource->ft_type == cache_resource->ft_type &&
303 resource->size == cache_resource->size &&
304 !memcmp((const void *)resource->buf,
305 (const void *)cache_resource->buf,
307 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d++",
308 (void *)cache_resource,
309 rte_atomic32_read(&cache_resource->refcnt));
/* Cache hit: share the existing verbs action via refcounting. */
310 rte_atomic32_inc(&cache_resource->refcnt);
311 dev_flow->dv.encap_decap = cache_resource;
315 /* Register new encap/decap resource. */
316 cache_resource = rte_calloc(__func__, 1, sizeof(*cache_resource), 0);
/* NOTE(review): NULL-check guard for the allocation is elided here. */
318 return rte_flow_error_set(error, ENOMEM,
319 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
320 "cannot allocate resource memory");
321 *cache_resource = *resource;
322 cache_resource->verbs_action =
323 mlx5_glue->dv_create_flow_action_packet_reformat
324 (priv->ctx, cache_resource->size,
325 (cache_resource->size ? cache_resource->buf : NULL),
326 cache_resource->reformat_type,
327 cache_resource->ft_type);
328 if (!cache_resource->verbs_action) {
329 rte_free(cache_resource);
330 return rte_flow_error_set(error, ENOMEM,
331 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
332 NULL, "cannot create action");
334 rte_atomic32_init(&cache_resource->refcnt);
335 rte_atomic32_inc(&cache_resource->refcnt);
336 LIST_INSERT_HEAD(&priv->encaps_decaps, cache_resource, next);
337 dev_flow->dv.encap_decap = cache_resource;
338 DRV_LOG(DEBUG, "new encap/decap resource %p: refcnt %d++",
339 (void *)cache_resource,
340 rte_atomic32_read(&cache_resource->refcnt));
/*
 * Map an rte_flow item type to the sizeof() of its item structure; used
 * by flow_dv_convert_encap_data() to size header copies.  Returns 0 for
 * VOID or unrecognized types (per the fall-through comment below).
 * NOTE(review): sampled listing — "break;" lines and the default/return
 * path are elided between the case labels.
 */
345 * Get the size of specific rte_flow_item_type
347 * @param[in] item_type
348 * Tested rte_flow_item_type.
351 * sizeof struct item_type, 0 if void or irrelevant.
354 flow_dv_get_item_len(const enum rte_flow_item_type item_type)
359 case RTE_FLOW_ITEM_TYPE_ETH:
360 retval = sizeof(struct rte_flow_item_eth);
362 case RTE_FLOW_ITEM_TYPE_VLAN:
363 retval = sizeof(struct rte_flow_item_vlan);
365 case RTE_FLOW_ITEM_TYPE_IPV4:
366 retval = sizeof(struct rte_flow_item_ipv4);
368 case RTE_FLOW_ITEM_TYPE_IPV6:
369 retval = sizeof(struct rte_flow_item_ipv6);
371 case RTE_FLOW_ITEM_TYPE_UDP:
372 retval = sizeof(struct rte_flow_item_udp);
374 case RTE_FLOW_ITEM_TYPE_TCP:
375 retval = sizeof(struct rte_flow_item_tcp);
377 case RTE_FLOW_ITEM_TYPE_VXLAN:
378 retval = sizeof(struct rte_flow_item_vxlan);
380 case RTE_FLOW_ITEM_TYPE_GRE:
381 retval = sizeof(struct rte_flow_item_gre);
383 case RTE_FLOW_ITEM_TYPE_NVGRE:
384 retval = sizeof(struct rte_flow_item_nvgre);
386 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
387 retval = sizeof(struct rte_flow_item_vxlan_gpe);
389 case RTE_FLOW_ITEM_TYPE_MPLS:
390 retval = sizeof(struct rte_flow_item_mpls);
392 case RTE_FLOW_ITEM_TYPE_VOID: /* Fall through. */
/*
 * Default header-field values used by flow_dv_convert_encap_data() to
 * fill in fields the application left zero in its encap pattern.
 */
/* IPv4 version nibble (4) in the version/IHL byte. */
400 #define MLX5_ENCAP_IPV4_VERSION 0x40
/* Minimum IPv4 header length: 5 x 32-bit words (no options). */
401 #define MLX5_ENCAP_IPV4_IHL_MIN 0x05
/* Default TTL (64) for synthesized IPv4 encap headers. */
402 #define MLX5_ENCAP_IPV4_TTL_DEF 0x40
/* IPv6 version field (6) in vtc_flow; traffic class/flow label zero. */
403 #define MLX5_ENCAP_IPV6_VTC_FLOW 0x60000000
/* Default IPv6 hop limit (255). */
404 #define MLX5_ENCAP_IPV6_HOP_LIMIT 0xff
/* VXLAN flags word with the VNI-valid (I) bit set. */
405 #define MLX5_ENCAP_VXLAN_FLAGS 0x08000000
/* VXLAN-GPE flags byte with the next-protocol (P) bit set. */
406 #define MLX5_ENCAP_VXLAN_GPE_FLAGS 0x04
/*
 * Flatten an rte_flow_item pattern (ETH [VLAN] IPV4/IPV6 UDP
 * VXLAN/VXLAN-GPE, or GRE/NVGRE) into a raw header byte buffer for the
 * packet-reformat action, filling in any zero next-protocol/ethertype,
 * version, TTL/hop-limit, flags and well-known UDP destination port
 * fields from the MLX5_ENCAP_* defaults as each layer is appended.
 * NOTE(review): sampled listing — per-case "break;"s, some guard
 * conditions and the temp_size accumulation lines are elided.
 */
409 * Convert the encap action data from list of rte_flow_item to raw buffer
412 * Pointer to rte_flow_item objects list.
414 * Pointer to the output buffer.
416 * Pointer to the output buffer size.
418 * Pointer to the error structure.
421 * 0 on success, a negative errno value otherwise and rte_errno is set.
424 flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
425 size_t *size, struct rte_flow_error *error)
/* Pointers into 'buf' for each layer already written, so later layers
 * can patch earlier ones (e.g. set the ethertype once IP is known). */
427 struct ether_hdr *eth = NULL;
428 struct vlan_hdr *vlan = NULL;
429 struct ipv4_hdr *ipv4 = NULL;
430 struct ipv6_hdr *ipv6 = NULL;
431 struct udp_hdr *udp = NULL;
432 struct vxlan_hdr *vxlan = NULL;
433 struct vxlan_gpe_hdr *vxlan_gpe = NULL;
434 struct gre_hdr *gre = NULL;
436 size_t temp_size = 0;
/* NOTE(review): guard (presumably "if (!items)") elided here. */
439 return rte_flow_error_set(error, EINVAL,
440 RTE_FLOW_ERROR_TYPE_ACTION,
441 NULL, "invalid empty data");
442 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
443 len = flow_dv_get_item_len(items->type);
444 if (len + temp_size > MLX5_ENCAP_MAX_LEN)
445 return rte_flow_error_set(error, EINVAL,
446 RTE_FLOW_ERROR_TYPE_ACTION,
448 "items total size is too big"
449 " for encap action");
/* Copy the item's spec verbatim; defaults are patched in below. */
450 rte_memcpy((void *)&buf[temp_size], items->spec, len);
451 switch (items->type) {
452 case RTE_FLOW_ITEM_TYPE_ETH:
453 eth = (struct ether_hdr *)&buf[temp_size];
455 case RTE_FLOW_ITEM_TYPE_VLAN:
456 vlan = (struct vlan_hdr *)&buf[temp_size];
/* NOTE(review): guard (presumably "if (!eth)") elided here. */
458 return rte_flow_error_set(error, EINVAL,
459 RTE_FLOW_ERROR_TYPE_ACTION,
461 "eth header not found");
462 if (!eth->ether_type)
463 eth->ether_type = RTE_BE16(ETHER_TYPE_VLAN);
465 case RTE_FLOW_ITEM_TYPE_IPV4:
466 ipv4 = (struct ipv4_hdr *)&buf[temp_size];
468 return rte_flow_error_set(error, EINVAL,
469 RTE_FLOW_ERROR_TYPE_ACTION,
471 "neither eth nor vlan"
/* Patch the preceding L2 header's ethertype if still unset. */
473 if (vlan && !vlan->eth_proto)
474 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv4);
475 else if (eth && !eth->ether_type)
476 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv4);
477 if (!ipv4->version_ihl)
478 ipv4->version_ihl = MLX5_ENCAP_IPV4_VERSION |
479 MLX5_ENCAP_IPV4_IHL_MIN;
480 if (!ipv4->time_to_live)
481 ipv4->time_to_live = MLX5_ENCAP_IPV4_TTL_DEF;
483 case RTE_FLOW_ITEM_TYPE_IPV6:
484 ipv6 = (struct ipv6_hdr *)&buf[temp_size];
486 return rte_flow_error_set(error, EINVAL,
487 RTE_FLOW_ERROR_TYPE_ACTION,
489 "neither eth nor vlan"
491 if (vlan && !vlan->eth_proto)
492 vlan->eth_proto = RTE_BE16(ETHER_TYPE_IPv6);
493 else if (eth && !eth->ether_type)
494 eth->ether_type = RTE_BE16(ETHER_TYPE_IPv6);
/* Default the version field of vtc_flow when left zero. */
497 RTE_BE32(MLX5_ENCAP_IPV6_VTC_FLOW);
498 if (!ipv6->hop_limits)
499 ipv6->hop_limits = MLX5_ENCAP_IPV6_HOP_LIMIT;
501 case RTE_FLOW_ITEM_TYPE_UDP:
502 udp = (struct udp_hdr *)&buf[temp_size];
504 return rte_flow_error_set(error, EINVAL,
505 RTE_FLOW_ERROR_TYPE_ACTION,
507 "ip header not found");
508 if (ipv4 && !ipv4->next_proto_id)
509 ipv4->next_proto_id = IPPROTO_UDP;
510 else if (ipv6 && !ipv6->proto)
511 ipv6->proto = IPPROTO_UDP;
513 case RTE_FLOW_ITEM_TYPE_VXLAN:
514 vxlan = (struct vxlan_hdr *)&buf[temp_size];
516 return rte_flow_error_set(error, EINVAL,
517 RTE_FLOW_ERROR_TYPE_ACTION,
519 "udp header not found");
/* Default the UDP destination port to IANA's VXLAN port. */
521 udp->dst_port = RTE_BE16(MLX5_UDP_PORT_VXLAN);
522 if (!vxlan->vx_flags)
524 RTE_BE32(MLX5_ENCAP_VXLAN_FLAGS);
526 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
527 vxlan_gpe = (struct vxlan_gpe_hdr *)&buf[temp_size];
529 return rte_flow_error_set(error, EINVAL,
530 RTE_FLOW_ERROR_TYPE_ACTION,
532 "udp header not found");
/* GPE carries the inner protocol explicitly; it is mandatory. */
533 if (!vxlan_gpe->proto)
534 return rte_flow_error_set(error, EINVAL,
535 RTE_FLOW_ERROR_TYPE_ACTION,
537 "next protocol not found");
540 RTE_BE16(MLX5_UDP_PORT_VXLAN_GPE);
541 if (!vxlan_gpe->vx_flags)
542 vxlan_gpe->vx_flags =
543 MLX5_ENCAP_VXLAN_GPE_FLAGS;
545 case RTE_FLOW_ITEM_TYPE_GRE:
546 case RTE_FLOW_ITEM_TYPE_NVGRE:
547 gre = (struct gre_hdr *)&buf[temp_size];
/* NOTE(review): guard (presumably "if (!gre->proto)") elided. */
549 return rte_flow_error_set(error, EINVAL,
550 RTE_FLOW_ERROR_TYPE_ACTION,
552 "next protocol not found");
554 return rte_flow_error_set(error, EINVAL,
555 RTE_FLOW_ERROR_TYPE_ACTION,
557 "ip header not found");
558 if (ipv4 && !ipv4->next_proto_id)
559 ipv4->next_proto_id = IPPROTO_GRE;
560 else if (ipv6 && !ipv6->proto)
561 ipv6->proto = IPPROTO_GRE;
563 case RTE_FLOW_ITEM_TYPE_VOID:
/* Unknown item type: refuse the whole conversion. */
566 return rte_flow_error_set(error, EINVAL,
567 RTE_FLOW_ERROR_TYPE_ACTION,
569 "unsupported item type");
/*
 * Build an L2-to-L2-tunnel packet-reformat (encap) resource on the NIC Tx
 * table from either a RAW_ENCAP byte buffer or a VXLAN/NVGRE item list
 * (converted via flow_dv_convert_encap_data), then register/share it via
 * flow_dv_encap_decap_resource_register().
 * NOTE(review): sampled listing — braces and some returns are elided.
 */
579 * Convert L2 encap action to DV specification.
582 * Pointer to rte_eth_dev structure.
584 * Pointer to action structure.
585 * @param[in, out] dev_flow
586 * Pointer to the mlx5_flow.
588 * Pointer to the error structure.
591 * 0 on success, a negative errno value otherwise and rte_errno is set.
594 flow_dv_create_action_l2_encap(struct rte_eth_dev *dev,
595 const struct rte_flow_action *action,
596 struct mlx5_flow *dev_flow,
597 struct rte_flow_error *error)
599 const struct rte_flow_item *encap_data;
600 const struct rte_flow_action_raw_encap *raw_encap_data;
601 struct mlx5_flow_dv_encap_decap_resource res = {
603 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L2_TUNNEL,
604 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_TX,
/* Raw encap supplies the header bytes directly... */
607 if (action->type == RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
609 (const struct rte_flow_action_raw_encap *)action->conf;
610 res.size = raw_encap_data->size;
611 memcpy(res.buf, raw_encap_data->data, res.size);
/* ...while VXLAN/NVGRE encap supply an item list to convert. */
613 if (action->type == RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP)
615 ((const struct rte_flow_action_vxlan_encap *)
616 action->conf)->definition;
619 ((const struct rte_flow_action_nvgre_encap *)
620 action->conf)->definition;
621 if (flow_dv_convert_encap_data(encap_data, res.buf,
625 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
626 return rte_flow_error_set(error, EINVAL,
627 RTE_FLOW_ERROR_TYPE_ACTION,
628 NULL, "can't create L2 encap action");
/*
 * Build an L2-tunnel-to-L2 packet-reformat (decap) resource on the NIC Rx
 * table (no header data needed) and register/share it via
 * flow_dv_encap_decap_resource_register().
 * NOTE(review): sampled listing — braces/returns are elided.
 */
633 * Convert L2 decap action to DV specification.
636 * Pointer to rte_eth_dev structure.
637 * @param[in, out] dev_flow
638 * Pointer to the mlx5_flow.
640 * Pointer to the error structure.
643 * 0 on success, a negative errno value otherwise and rte_errno is set.
646 flow_dv_create_action_l2_decap(struct rte_eth_dev *dev,
647 struct mlx5_flow *dev_flow,
648 struct rte_flow_error *error)
650 struct mlx5_flow_dv_encap_decap_resource res = {
653 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TUNNEL_TO_L2,
654 .ft_type = MLX5DV_FLOW_TABLE_TYPE_NIC_RX,
657 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
658 return rte_flow_error_set(error, EINVAL,
659 RTE_FLOW_ERROR_TYPE_ACTION,
660 NULL, "can't create L2 decap action");
/*
 * Build an L3-tunnel packet-reformat resource for RAW_ENCAP/RAW_DECAP:
 * on egress L2-to-L3-tunnel on the NIC Tx table, on ingress
 * L3-tunnel-to-L2 on the NIC Rx table; then register/share it.
 * NOTE(review): sampled listing — braces/returns are elided; 'res' is
 * only partially initialized here, visible fields set below.
 */
665 * Convert raw decap/encap (L3 tunnel) action to DV specification.
668 * Pointer to rte_eth_dev structure.
670 * Pointer to action structure.
671 * @param[in, out] dev_flow
672 * Pointer to the mlx5_flow.
674 * Pointer to the flow attributes.
676 * Pointer to the error structure.
679 * 0 on success, a negative errno value otherwise and rte_errno is set.
682 flow_dv_create_action_raw_encap(struct rte_eth_dev *dev,
683 const struct rte_flow_action *action,
684 struct mlx5_flow *dev_flow,
685 const struct rte_flow_attr *attr,
686 struct rte_flow_error *error)
688 const struct rte_flow_action_raw_encap *encap_data;
689 struct mlx5_flow_dv_encap_decap_resource res;
691 encap_data = (const struct rte_flow_action_raw_encap *)action->conf;
692 res.size = encap_data->size;
693 memcpy(res.buf, encap_data->data, res.size);
/* Direction decides reformat type and which flow table it lives on. */
694 res.reformat_type = attr->egress ?
695 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L2_TO_L3_TUNNEL :
696 MLX5DV_FLOW_ACTION_PACKET_REFORMAT_TYPE_L3_TUNNEL_TO_L2;
697 res.ft_type = attr->egress ? MLX5DV_FLOW_TABLE_TYPE_NIC_TX :
698 MLX5DV_FLOW_TABLE_TYPE_NIC_RX;
699 if (flow_dv_encap_decap_resource_register(dev, &res, dev_flow, error))
700 return rte_flow_error_set(error, EINVAL,
701 RTE_FLOW_ERROR_TYPE_ACTION,
702 NULL, "can't create encap action");
/*
 * Validate flow attributes for the DV engine: no groups, priority within
 * the configured range (or the reserved value), no transfer, and exactly
 * one of ingress/egress.
 */
707 * Verify the @p attributes will be correctly understood by the NIC and store
708 * them in the @p flow if everything is correct.
711 * Pointer to dev struct.
712 * @param[in] attributes
713 * Pointer to flow attributes
715 * Pointer to error structure.
718 * 0 on success, a negative errno value otherwise and rte_errno is set.
721 flow_dv_validate_attributes(struct rte_eth_dev *dev,
722 const struct rte_flow_attr *attributes,
723 struct rte_flow_error *error)
725 struct mlx5_priv *priv = dev->data->dev_private;
726 uint32_t priority_max = priv->config.flow_prio - 1;
728 if (attributes->group)
729 return rte_flow_error_set(error, ENOTSUP,
730 RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
732 "groups is not supported");
/* MLX5_FLOW_PRIO_RSVD is a sentinel meaning "driver picks". */
733 if (attributes->priority != MLX5_FLOW_PRIO_RSVD &&
734 attributes->priority >= priority_max)
735 return rte_flow_error_set(error, ENOTSUP,
736 RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
738 "priority out of range");
739 if (attributes->transfer)
740 return rte_flow_error_set(error, ENOTSUP,
741 RTE_FLOW_ERROR_TYPE_ATTR_TRANSFER,
743 "transfer is not supported");
/* XOR: exactly one direction bit must be set. */
744 if (!(attributes->egress ^ attributes->ingress))
745 return rte_flow_error_set(error, ENOTSUP,
746 RTE_FLOW_ERROR_TYPE_ATTR, NULL,
747 "must specify exactly one of "
748 "ingress or egress");
/*
 * Top-level DV validation: check attributes, then walk the item list
 * accumulating item_flags (tracking tunnel/inner vs outer layers and the
 * IP next-protocol for GRE validation), then walk the action list
 * accumulating action_flags and enforcing per-action rules; finally
 * require a fate action on ingress flows.
 * NOTE(review): sampled listing — "break;"s, error-propagation "if (ret)"
 * checks, some call arguments and closing braces are elided.
 */
753 * Internal validation function. For validating both actions and items.
756 * Pointer to the rte_eth_dev structure.
758 * Pointer to the flow attributes.
760 * Pointer to the list of items.
762 * Pointer to the list of actions.
764 * Pointer to the error structure.
767 * 0 on success, a negative errno value otherwise and rte_errno is set.
770 flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
771 const struct rte_flow_item items[],
772 const struct rte_flow_action actions[],
773 struct rte_flow_error *error)
776 uint64_t action_flags = 0;
777 uint64_t item_flags = 0;
778 uint64_t last_item = 0;
/* 0xff = "any" next protocol until an IP item constrains it. */
779 uint8_t next_protocol = 0xff;
784 ret = flow_dv_validate_attributes(dev, attr, error);
787 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
788 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
789 switch (items->type) {
790 case RTE_FLOW_ITEM_TYPE_VOID:
792 case RTE_FLOW_ITEM_TYPE_ETH:
793 ret = mlx5_flow_validate_item_eth(items, item_flags,
797 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
798 MLX5_FLOW_LAYER_OUTER_L2;
800 case RTE_FLOW_ITEM_TYPE_VLAN:
801 ret = mlx5_flow_validate_item_vlan(items, item_flags,
805 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_VLAN :
806 MLX5_FLOW_LAYER_OUTER_VLAN;
808 case RTE_FLOW_ITEM_TYPE_IPV4:
809 ret = mlx5_flow_validate_item_ipv4(items, item_flags,
813 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
814 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
/* Record next_proto_id when it is actually masked, so a later
 * GRE item can be validated against it. */
815 if (items->mask != NULL &&
816 ((const struct rte_flow_item_ipv4 *)
817 items->mask)->hdr.next_proto_id) {
819 ((const struct rte_flow_item_ipv4 *)
820 (items->spec))->hdr.next_proto_id;
822 ((const struct rte_flow_item_ipv4 *)
823 (items->mask))->hdr.next_proto_id;
825 /* Reset for inner layer. */
826 next_protocol = 0xff;
829 case RTE_FLOW_ITEM_TYPE_IPV6:
830 ret = mlx5_flow_validate_item_ipv6(items, item_flags,
834 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
835 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
836 if (items->mask != NULL &&
837 ((const struct rte_flow_item_ipv6 *)
838 items->mask)->hdr.proto) {
840 ((const struct rte_flow_item_ipv6 *)
841 items->spec)->hdr.proto;
843 ((const struct rte_flow_item_ipv6 *)
844 items->mask)->hdr.proto;
846 /* Reset for inner layer. */
847 next_protocol = 0xff;
850 case RTE_FLOW_ITEM_TYPE_TCP:
851 ret = mlx5_flow_validate_item_tcp
854 &rte_flow_item_tcp_mask,
858 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
859 MLX5_FLOW_LAYER_OUTER_L4_TCP;
861 case RTE_FLOW_ITEM_TYPE_UDP:
862 ret = mlx5_flow_validate_item_udp(items, item_flags,
867 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
868 MLX5_FLOW_LAYER_OUTER_L4_UDP;
870 case RTE_FLOW_ITEM_TYPE_GRE:
871 case RTE_FLOW_ITEM_TYPE_NVGRE:
872 ret = mlx5_flow_validate_item_gre(items, item_flags,
873 next_protocol, error);
876 last_item = MLX5_FLOW_LAYER_GRE;
878 case RTE_FLOW_ITEM_TYPE_VXLAN:
879 ret = mlx5_flow_validate_item_vxlan(items, item_flags,
883 last_item = MLX5_FLOW_LAYER_VXLAN;
885 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
886 ret = mlx5_flow_validate_item_vxlan_gpe(items,
891 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
893 case RTE_FLOW_ITEM_TYPE_MPLS:
894 ret = mlx5_flow_validate_item_mpls(dev, items,
899 last_item = MLX5_FLOW_LAYER_MPLS;
901 case RTE_FLOW_ITEM_TYPE_META:
902 ret = flow_dv_validate_item_meta(dev, items, attr,
906 last_item = MLX5_FLOW_ITEM_METADATA;
909 return rte_flow_error_set(error, ENOTSUP,
910 RTE_FLOW_ERROR_TYPE_ITEM,
911 NULL, "item not supported");
/* Commit the layer bit only after the item validated OK. */
913 item_flags |= last_item;
915 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
916 if (actions_n == MLX5_DV_MAX_NUMBER_OF_ACTIONS)
917 return rte_flow_error_set(error, ENOTSUP,
918 RTE_FLOW_ERROR_TYPE_ACTION,
919 actions, "too many actions");
920 switch (actions->type) {
921 case RTE_FLOW_ACTION_TYPE_VOID:
923 case RTE_FLOW_ACTION_TYPE_FLAG:
924 ret = mlx5_flow_validate_action_flag(action_flags,
928 action_flags |= MLX5_FLOW_ACTION_FLAG;
931 case RTE_FLOW_ACTION_TYPE_MARK:
932 ret = mlx5_flow_validate_action_mark(actions,
937 action_flags |= MLX5_FLOW_ACTION_MARK;
940 case RTE_FLOW_ACTION_TYPE_DROP:
941 ret = mlx5_flow_validate_action_drop(action_flags,
945 action_flags |= MLX5_FLOW_ACTION_DROP;
948 case RTE_FLOW_ACTION_TYPE_QUEUE:
949 ret = mlx5_flow_validate_action_queue(actions,
954 action_flags |= MLX5_FLOW_ACTION_QUEUE;
957 case RTE_FLOW_ACTION_TYPE_RSS:
958 ret = mlx5_flow_validate_action_rss(actions,
964 action_flags |= MLX5_FLOW_ACTION_RSS;
967 case RTE_FLOW_ACTION_TYPE_COUNT:
968 ret = mlx5_flow_validate_action_count(dev, attr, error);
971 action_flags |= MLX5_FLOW_ACTION_COUNT;
974 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
975 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
976 ret = flow_dv_validate_action_l2_encap(action_flags,
981 action_flags |= actions->type ==
982 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
983 MLX5_FLOW_ACTION_VXLAN_ENCAP :
984 MLX5_FLOW_ACTION_NVGRE_ENCAP;
987 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
988 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
989 ret = flow_dv_validate_action_l2_decap(action_flags,
993 action_flags |= actions->type ==
994 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
995 MLX5_FLOW_ACTION_VXLAN_DECAP :
996 MLX5_FLOW_ACTION_NVGRE_DECAP;
999 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
1000 ret = flow_dv_validate_action_raw_encap(action_flags,
1005 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
1008 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
1009 ret = flow_dv_validate_action_raw_decap(action_flags,
1014 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
1018 return rte_flow_error_set(error, ENOTSUP,
1019 RTE_FLOW_ERROR_TYPE_ACTION,
1021 "action not supported");
/* An ingress flow must decide the packet's fate (drop/queue/rss). */
1024 if (!(action_flags & MLX5_FLOW_FATE_ACTIONS) && attr->ingress)
1025 return rte_flow_error_set(error, EINVAL,
1026 RTE_FLOW_ERROR_TYPE_ACTION, actions,
1027 "no fate action is found")
/*
 * Allocate a zeroed mlx5_flow for the DV engine; the size is constant
 * (no per-item sizing needed, unlike the Verbs path).  Sets the match
 * value size to the full fte_match_param layout.
 * NOTE(review): sampled listing — NULL-check branch and the return of
 * 'flow' are elided.
 */
1032 * Internal preparation function. Allocates the DV flow size,
1033 * this size is constant.
1036 * Pointer to the flow attributes.
1038 * Pointer to the list of items.
1039 * @param[in] actions
1040 * Pointer to the list of actions.
1042 * Pointer to the error structure.
1045 * Pointer to mlx5_flow object on success,
1046 * otherwise NULL and rte_errno is set.
1048 static struct mlx5_flow *
1049 flow_dv_prepare(const struct rte_flow_attr *attr __rte_unused,
1050 const struct rte_flow_item items[] __rte_unused,
1051 const struct rte_flow_action actions[] __rte_unused,
1052 struct rte_flow_error *error)
1054 uint32_t size = sizeof(struct mlx5_flow);
1055 struct mlx5_flow *flow;
1057 flow = rte_calloc(__func__, 1, size, 0);
/* NOTE(review): guard (presumably "if (!flow)") elided here. */
1059 rte_flow_error_set(error, ENOMEM,
1060 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1061 "not enough memory to create flow");
1064 flow->dv.value.size = MLX5_ST_SZ_DB(fte_match_param);
/*
 * Debug sanity check: byte-scan mask and value buffers over the whole
 * fte_match_param and report (via DRV_LOG, per the visible format) any
 * value bit set outside its mask — mirrors the kernel's check_valid_spec.
 * NOTE(review): sampled listing — the per-byte comparison condition and
 * return statements are elided.
 */
1070 * Sanity check for match mask and value. Similar to check_valid_spec() in
1071 * kernel driver. If unmasked bit is present in value, it returns failure.
1074 * pointer to match mask buffer.
1075 * @param match_value
1076 * pointer to match value buffer.
1079 * 0 if valid, -EINVAL otherwise.
1082 flow_dv_check_valid_spec(void *match_mask, void *match_value)
1084 uint8_t *m = match_mask;
1085 uint8_t *v = match_value;
1088 for (i = 0; i < MLX5_ST_SZ_DB(fte_match_param); ++i) {
1091 "match_value differs from match_criteria"
1092 " %p[%u] != %p[%u]",
1093 match_value, i, match_mask, i);
/*
 * Translate an ETH item into the DV matcher (mask) and key (value)
 * buffers: dmac/smac bytes and ethertype, selecting inner_headers vs
 * outer_headers by the 'inner' flag.  Values are ANDed with the mask so
 * they stay within it, as the hardware requires.
 * NOTE(review): sampled listing; "ð_m" on the dmac/smac memcpy lines
 * appears to be mis-encoded "&eth_m" — confirm against the full source.
 */
1102 * Add Ethernet item to matcher and to the value.
1104 * @param[in, out] matcher
1106 * @param[in, out] key
1107 * Flow matcher value.
1109 * Flow pattern to translate.
1111 * Item is inner pattern.
1114 flow_dv_translate_item_eth(void *matcher, void *key,
1115 const struct rte_flow_item *item, int inner)
1117 const struct rte_flow_item_eth *eth_m = item->mask;
1118 const struct rte_flow_item_eth *eth_v = item->spec;
1119 const struct rte_flow_item_eth nic_mask = {
1120 .dst.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1121 .src.addr_bytes = "\xff\xff\xff\xff\xff\xff",
1122 .type = RTE_BE16(0xffff),
1134 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1136 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers)
1138 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1140 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers)
1142 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, dmac_47_16),
1143 ð_m->dst, sizeof(eth_m->dst));
1144 /* The value must be in the range of the mask. */
1145 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, dmac_47_16);
1146 for (i = 0; i < sizeof(eth_m->dst); ++i)
1147 l24_v[i] = eth_m->dst.addr_bytes[i] & eth_v->dst.addr_bytes[i];
1148 memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m, smac_47_16),
1149 ð_m->src, sizeof(eth_m->src));
1150 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, smac_47_16);
1151 /* The value must be in the range of the mask. */
/* NOTE(review): loop bound uses sizeof(eth_m->dst) while copying src
 * bytes — same 6-byte size, so presumably intentional; confirm. */
1152 for (i = 0; i < sizeof(eth_m->dst); ++i)
1153 l24_v[i] = eth_m->src.addr_bytes[i] & eth_v->src.addr_bytes[i];
1154 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ethertype,
1155 rte_be_to_cpu_16(eth_m->type));
1156 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v, ethertype);
1157 *(uint16_t *)(l24_v) = eth_m->type & eth_v->type;
/*
 * Translate a VLAN item into matcher/key: splits the masked TCI into
 * VID (bits 0-11), CFI (bit 12) and PCP (bits 13-15) fields and always
 * matches cvlan_tag=1 to require a VLAN header present.
 * NOTE(review): sampled listing — braces and some declarations elided.
 */
1161 * Add VLAN item to matcher and to the value.
1163 * @param[in, out] matcher
1165 * @param[in, out] key
1166 * Flow matcher value.
1168 * Flow pattern to translate.
1170 * Item is inner pattern.
1173 flow_dv_translate_item_vlan(void *matcher, void *key,
1174 const struct rte_flow_item *item,
1177 const struct rte_flow_item_vlan *vlan_m = item->mask;
1178 const struct rte_flow_item_vlan *vlan_v = item->spec;
1179 const struct rte_flow_item_vlan nic_mask = {
1180 .tci = RTE_BE16(0x0fff),
1181 .inner_type = RTE_BE16(0xffff),
1193 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1195 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers)
1197 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1199 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers)
/* Value TCI is pre-ANDed with the mask to stay within it. */
1201 tci_m = rte_be_to_cpu_16(vlan_m->tci);
1202 tci_v = rte_be_to_cpu_16(vlan_m->tci & vlan_v->tci);
1203 MLX5_SET(fte_match_set_lyr_2_4, headers_m, cvlan_tag, 1);
1204 MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
1205 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_vid, tci_m);
1206 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, tci_v);
1207 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_cfi, tci_m >> 12);
1208 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_cfi, tci_v >> 12);
1209 MLX5_SET(fte_match_set_lyr_2_4, headers_m, first_prio, tci_m >> 13);
1210 MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_prio, tci_v >> 13);
/*
 * Translate an IPV4 item into matcher/key: pins ip_version to 4, then
 * sets dst/src addresses, ToS (split into ECN low 2 bits and DSCP high
 * 6 bits) and the IP protocol, with values masked to stay within the
 * item mask.
 * NOTE(review): sampled listing — braces and the empty-spec/mask default
 * handling between the numbered lines are elided.
 */
1214 * Add IPV4 item to matcher and to the value.
1216 * @param[in, out] matcher
1218 * @param[in, out] key
1219 * Flow matcher value.
1221 * Flow pattern to translate.
1223 * Item is inner pattern.
1226 flow_dv_translate_item_ipv4(void *matcher, void *key,
1227 const struct rte_flow_item *item,
1230 const struct rte_flow_item_ipv4 *ipv4_m = item->mask;
1231 const struct rte_flow_item_ipv4 *ipv4_v = item->spec;
1232 const struct rte_flow_item_ipv4 nic_mask = {
1234 .src_addr = RTE_BE32(0xffffffff),
1235 .dst_addr = RTE_BE32(0xffffffff),
1236 .type_of_service = 0xff,
1237 .next_proto_id = 0xff,
1247 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1249 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers)
1251 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1253 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers)
/* Always require the packet to be IPv4. */
1255 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
1256 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 4);
1261 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1262 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1263 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1264 dst_ipv4_dst_ipv6.ipv4_layout.ipv4);
1265 *(uint32_t *)l24_m = ipv4_m->hdr.dst_addr;
1266 *(uint32_t *)l24_v = ipv4_m->hdr.dst_addr & ipv4_v->hdr.dst_addr;
1267 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1268 src_ipv4_src_ipv6.ipv4_layout.ipv4);
1269 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1270 src_ipv4_src_ipv6.ipv4_layout.ipv4);
1271 *(uint32_t *)l24_m = ipv4_m->hdr.src_addr;
1272 *(uint32_t *)l24_v = ipv4_m->hdr.src_addr & ipv4_v->hdr.src_addr;
/* ToS byte is split: ip_ecn gets the full byte field, ip_dscp the
 * upper six bits (>> 2). */
1273 tos = ipv4_m->hdr.type_of_service & ipv4_v->hdr.type_of_service;
1274 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn,
1275 ipv4_m->hdr.type_of_service);
1276 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, tos);
1277 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp,
1278 ipv4_m->hdr.type_of_service >> 2);
1279 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, tos >> 2);
1280 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
1281 ipv4_m->hdr.next_proto_id);
1282 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1283 ipv4_v->hdr.next_proto_id & ipv4_m->hdr.next_proto_id);
1287 * Add IPV6 item to matcher and to the value.
1289 * @param[in, out] matcher
1291 * @param[in, out] key
1292 * Flow matcher value.
1294 * Flow pattern to translate.
1296 * Item is inner pattern.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_IPV6 pattern item into the device
 * matcher mask ("matcher") and value ("key") buffers.
 * NOTE(review): braces, local declarations and the default-mask
 * handling are elided in this excerpt.
 */
1299 flow_dv_translate_item_ipv6(void *matcher, void *key,
1300 const struct rte_flow_item *item,
1303 const struct rte_flow_item_ipv6 *ipv6_m = item->mask;
1304 const struct rte_flow_item_ipv6 *ipv6_v = item->spec;
/* Widest IPv6 fields the device can match: both addresses and vtc_flow. */
1305 const struct rte_flow_item_ipv6 nic_mask = {
1308 "\xff\xff\xff\xff\xff\xff\xff\xff"
1309 "\xff\xff\xff\xff\xff\xff\xff\xff",
1311 "\xff\xff\xff\xff\xff\xff\xff\xff"
1312 "\xff\xff\xff\xff\xff\xff\xff\xff",
1313 .vtc_flow = RTE_BE32(0xffffffff),
/* The flow label lives in misc parameters, not the L2-L4 header block. */
1320 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1321 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs. outer header block of the match parameter. */
1330 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1332 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1334 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1336 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Always pin the IP version field to 6. */
1338 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_version, 0xf);
1339 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_version, 6);
/* Destination address: copy the mask, AND the mask into the value. */
1344 size = sizeof(ipv6_m->hdr.dst_addr);
1345 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1346 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
1347 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1348 dst_ipv4_dst_ipv6.ipv6_layout.ipv6);
1349 memcpy(l24_m, ipv6_m->hdr.dst_addr, size);
1350 for (i = 0; i < size; ++i)
1351 l24_v[i] = l24_m[i] & ipv6_v->hdr.dst_addr[i];
/* Source address, same scheme. */
1352 l24_m = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_m,
1353 src_ipv4_src_ipv6.ipv6_layout.ipv6);
1354 l24_v = MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
1355 src_ipv4_src_ipv6.ipv6_layout.ipv6);
1356 memcpy(l24_m, ipv6_m->hdr.src_addr, size);
1357 for (i = 0; i < size; ++i)
1358 l24_v[i] = l24_m[i] & ipv6_v->hdr.src_addr[i];
/*
 * vtc_flow packs version/traffic-class/flow-label; the shifts extract
 * the traffic-class bits: >> 20 for ip_ecn, >> 22 for ip_dscp.
 */
1360 vtc_m = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow);
1361 vtc_v = rte_be_to_cpu_32(ipv6_m->hdr.vtc_flow & ipv6_v->hdr.vtc_flow);
1362 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_ecn, vtc_m >> 20);
1363 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_ecn, vtc_v >> 20);
1364 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_dscp, vtc_m >> 22);
1365 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_dscp, vtc_v >> 22);
/* Flow label goes to the inner or outer misc field respectively. */
1368 MLX5_SET(fte_match_set_misc, misc_m, inner_ipv6_flow_label,
1370 MLX5_SET(fte_match_set_misc, misc_v, inner_ipv6_flow_label,
1373 MLX5_SET(fte_match_set_misc, misc_m, outer_ipv6_flow_label,
1375 MLX5_SET(fte_match_set_misc, misc_v, outer_ipv6_flow_label,
/* Next-header (L4 protocol) field, value pre-masked. */
1379 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol,
1381 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
1382 ipv6_v->hdr.proto & ipv6_m->hdr.proto);
1386 * Add TCP item to matcher and to the value.
1388 * @param[in, out] matcher
1390 * @param[in, out] key
1391 * Flow matcher value.
1393 * Flow pattern to translate.
1395 * Item is inner pattern.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_TCP pattern item into the matcher
 * mask and value buffers.
 * NOTE(review): local declarations, inner/outer branch structure and
 * the NULL spec check are elided in this excerpt.
 */
1398 flow_dv_translate_item_tcp(void *matcher, void *key,
1399 const struct rte_flow_item *item,
1402 const struct rte_flow_item_tcp *tcp_m = item->mask;
1403 const struct rte_flow_item_tcp *tcp_v = item->spec;
/* Select inner vs. outer header block of the match parameter. */
1408 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1410 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1412 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1414 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* A TCP item implies matching the IP protocol field on TCP. */
1416 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1417 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_TCP);
/* No explicit mask: fall back to the rte_flow default TCP mask. */
1421 tcp_m = &rte_flow_item_tcp_mask;
/* Ports are big-endian in the item; convert before MLX5_SET. */
1422 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_sport,
1423 rte_be_to_cpu_16(tcp_m->hdr.src_port));
1424 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_sport,
1425 rte_be_to_cpu_16(tcp_v->hdr.src_port & tcp_m->hdr.src_port));
1426 MLX5_SET(fte_match_set_lyr_2_4, headers_m, tcp_dport,
1427 rte_be_to_cpu_16(tcp_m->hdr.dst_port));
1428 MLX5_SET(fte_match_set_lyr_2_4, headers_v, tcp_dport,
1429 rte_be_to_cpu_16(tcp_v->hdr.dst_port & tcp_m->hdr.dst_port));
1433 * Add UDP item to matcher and to the value.
1435 * @param[in, out] matcher
1437 * @param[in, out] key
1438 * Flow matcher value.
1440 * Flow pattern to translate.
1442 * Item is inner pattern.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_UDP pattern item into the matcher
 * mask and value buffers.  Mirrors the TCP translation above.
 * NOTE(review): local declarations, inner/outer branch structure and
 * the NULL spec check are elided in this excerpt.
 */
1445 flow_dv_translate_item_udp(void *matcher, void *key,
1446 const struct rte_flow_item *item,
1449 const struct rte_flow_item_udp *udp_m = item->mask;
1450 const struct rte_flow_item_udp *udp_v = item->spec;
/* Select inner vs. outer header block of the match parameter. */
1455 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1457 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1459 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1461 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* A UDP item implies matching the IP protocol field on UDP. */
1463 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1464 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP);
/* No explicit mask: fall back to the rte_flow default UDP mask. */
1468 udp_m = &rte_flow_item_udp_mask;
/* Ports are big-endian in the item; convert before MLX5_SET. */
1469 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_sport,
1470 rte_be_to_cpu_16(udp_m->hdr.src_port));
1471 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport,
1472 rte_be_to_cpu_16(udp_v->hdr.src_port & udp_m->hdr.src_port));
1473 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport,
1474 rte_be_to_cpu_16(udp_m->hdr.dst_port));
1475 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
1476 rte_be_to_cpu_16(udp_v->hdr.dst_port & udp_m->hdr.dst_port));
1480 * Add GRE item to matcher and to the value.
1482 * @param[in, out] matcher
1484 * @param[in, out] key
1485 * Flow matcher value.
1487 * Flow pattern to translate.
1489 * Item is inner pattern.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_GRE pattern item into the matcher
 * mask and value buffers.  The GRE protocol field lives in misc
 * parameters rather than the L2-L4 header block.
 * NOTE(review): local declarations, inner/outer branch structure and
 * the NULL spec check are elided in this excerpt.
 */
1492 flow_dv_translate_item_gre(void *matcher, void *key,
1493 const struct rte_flow_item *item,
1496 const struct rte_flow_item_gre *gre_m = item->mask;
1497 const struct rte_flow_item_gre *gre_v = item->spec;
1500 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1501 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs. outer header block of the match parameter. */
1504 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1506 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1508 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1510 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* A GRE item implies matching the IP protocol field on GRE. */
1512 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1513 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_GRE);
/* No explicit mask: fall back to the rte_flow default GRE mask. */
1517 gre_m = &rte_flow_item_gre_mask;
/* GRE payload protocol, big-endian in the item. */
1518 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol,
1519 rte_be_to_cpu_16(gre_m->protocol));
1520 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
1521 rte_be_to_cpu_16(gre_v->protocol & gre_m->protocol));
1525 * Add NVGRE item to matcher and to the value.
1527 * @param[in, out] matcher
1529 * @param[in, out] key
1530 * Flow matcher value.
1532 * Flow pattern to translate.
1534 * Item is inner pattern.
1537 flow_dv_translate_item_nvgre(void *matcher, void *key,
1538 const struct rte_flow_item *item,
1541 const struct rte_flow_item_nvgre *nvgre_m = item->mask;
1542 const struct rte_flow_item_nvgre *nvgre_v = item->spec;
1543 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1544 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1545 const char *tni_flow_id_m = (const char *)nvgre_m->tni;
1546 const char *tni_flow_id_v = (const char *)nvgre_v->tni;
1552 flow_dv_translate_item_gre(matcher, key, item, inner);
1556 nvgre_m = &rte_flow_item_nvgre_mask;
1557 size = sizeof(nvgre_m->tni) + sizeof(nvgre_m->flow_id);
1558 gre_key_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, gre_key_h);
1559 gre_key_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, gre_key_h);
1560 memcpy(gre_key_m, tni_flow_id_m, size);
1561 for (i = 0; i < size; ++i)
1562 gre_key_v[i] = gre_key_m[i] & tni_flow_id_v[i];
1566 * Add VXLAN item to matcher and to the value.
1568 * @param[in, out] matcher
1570 * @param[in, out] key
1571 * Flow matcher value.
1573 * Flow pattern to translate.
1575 * Item is inner pattern.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_VXLAN or VXLAN_GPE pattern item into
 * the matcher mask and value buffers.  The VNI lives in the misc
 * parameters; the UDP destination port is defaulted when the preceding
 * UDP item did not pin it.
 * NOTE(review): local declarations, branch structure and the NULL spec
 * check are elided in this excerpt.
 */
1578 flow_dv_translate_item_vxlan(void *matcher, void *key,
1579 const struct rte_flow_item *item,
1582 const struct rte_flow_item_vxlan *vxlan_m = item->mask;
1583 const struct rte_flow_item_vxlan *vxlan_v = item->spec;
1586 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1587 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
/* Select inner vs. outer header block of the match parameter. */
1595 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1597 headers_v = MLX5_ADDR_OF(fte_match_param, key, inner_headers);
1599 headers_m = MLX5_ADDR_OF(fte_match_param, matcher,
1601 headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* Pick the well-known UDP port for the tunnel flavor. */
1603 dport = item->type == RTE_FLOW_ITEM_TYPE_VXLAN ?
1604 MLX5_UDP_PORT_VXLAN : MLX5_UDP_PORT_VXLAN_GPE;
/* Only default the dport if no earlier item set one. */
1605 if (!MLX5_GET16(fte_match_set_lyr_2_4, headers_v, udp_dport)) {
1606 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xFFFF);
1607 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, dport);
/* No explicit mask: fall back to the rte_flow default VXLAN mask. */
1612 vxlan_m = &rte_flow_item_vxlan_mask;
/* VNI bytes: copy the mask, AND the mask into the value. */
1613 size = sizeof(vxlan_m->vni);
1614 vni_m = MLX5_ADDR_OF(fte_match_set_misc, misc_m, vxlan_vni);
1615 vni_v = MLX5_ADDR_OF(fte_match_set_misc, misc_v, vxlan_vni);
1616 memcpy(vni_m, vxlan_m->vni, size);
1617 for (i = 0; i < size; ++i)
1618 vni_v[i] = vni_m[i] & vxlan_v->vni[i];
1622 * Add MPLS item to matcher and to the value.
1624 * @param[in, out] matcher
1626 * @param[in, out] key
1627 * Flow matcher value.
1629 * Flow pattern to translate.
1630 * @param[in] prev_layer
1631 * The protocol layer indicated in previous item.
1633 * Item is inner pattern.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_MPLS pattern item.  MPLS matching
 * depends on the carrier protocol indicated by @prev_layer: over UDP,
 * over GRE, or (default branch) directly over IP.
 * NOTE(review): break statements, default labels and several interior
 * lines are elided in this excerpt.
 */
1636 flow_dv_translate_item_mpls(void *matcher, void *key,
1637 const struct rte_flow_item *item,
1638 uint64_t prev_layer,
/* The MPLS item is a raw 32-bit label stack entry. */
1641 const uint32_t *in_mpls_m = item->mask;
1642 const uint32_t *in_mpls_v = item->spec;
1643 uint32_t *out_mpls_m = 0;
1644 uint32_t *out_mpls_v = 0;
1645 void *misc_m = MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters);
1646 void *misc_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters);
1647 void *misc2_m = MLX5_ADDR_OF(fte_match_param, matcher,
1649 void *misc2_v = MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
1650 void *headers_m = MLX5_ADDR_OF(fte_match_param, matcher, outer_headers);
1651 void *headers_v = MLX5_ADDR_OF(fte_match_param, key, outer_headers);
/* First switch: pin the carrier-protocol fields for MPLS. */
1653 switch (prev_layer) {
1654 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
1655 MLX5_SET(fte_match_set_lyr_2_4, headers_m, udp_dport, 0xffff);
1656 MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport,
1657 MLX5_UDP_PORT_MPLS);
1659 case MLX5_FLOW_LAYER_GRE:
1660 MLX5_SET(fte_match_set_misc, misc_m, gre_protocol, 0xffff);
1661 MLX5_SET(fte_match_set_misc, misc_v, gre_protocol,
/* Default branch: MPLS directly over IP. */
1665 MLX5_SET(fte_match_set_lyr_2_4, headers_m, ip_protocol, 0xff);
1666 MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol,
/* No explicit mask: fall back to the rte_flow default MPLS mask. */
1673 in_mpls_m = (const uint32_t *)&rte_flow_item_mpls_mask;
/* Second switch: choose which misc2 MPLS field receives the label. */
1674 switch (prev_layer) {
1675 case MLX5_FLOW_LAYER_OUTER_L4_UDP:
1677 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
1678 outer_first_mpls_over_udp);
1680 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
1681 outer_first_mpls_over_udp);
1683 case MLX5_FLOW_LAYER_GRE:
1685 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_m,
1686 outer_first_mpls_over_gre);
1688 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2, misc2_v,
1689 outer_first_mpls_over_gre);
1692 /* Inner MPLS not over GRE is not supported. */
1695 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
1699 (uint32_t *)MLX5_ADDR_OF(fte_match_set_misc2,
/* Write the 32-bit label entry: mask as-is, value pre-masked. */
1705 if (out_mpls_m && out_mpls_v) {
1706 *out_mpls_m = *in_mpls_m;
1707 *out_mpls_v = *in_mpls_v & *in_mpls_m;
1712 * Add META item to matcher
1714 * @param[in, out] matcher
1716 * @param[in, out] key
1717 * Flow matcher value.
1719 * Flow pattern to translate.
1721 * Item is inner pattern.
/*
 * Translate an RTE_FLOW_ITEM_TYPE_META pattern item into the matcher
 * mask and value buffers; the metadata matches against metadata_reg_a
 * in misc parameters 2.
 * NOTE(review): the NULL spec/mask checks are partially elided in this
 * excerpt.
 */
1724 flow_dv_translate_item_meta(void *matcher, void *key,
1725 const struct rte_flow_item *item)
1727 const struct rte_flow_item_meta *meta_m;
1728 const struct rte_flow_item_meta *meta_v;
1730 MLX5_ADDR_OF(fte_match_param, matcher, misc_parameters_2);
1732 MLX5_ADDR_OF(fte_match_param, key, misc_parameters_2);
1734 meta_m = (const void *)item->mask;
/* No explicit mask: fall back to the rte_flow default META mask. */
1736 meta_m = &rte_flow_item_meta_mask;
1737 meta_v = (const void *)item->spec;
/* META data is big-endian in the item; convert before MLX5_SET. */
1739 MLX5_SET(fte_match_set_misc2, misc2_m, metadata_reg_a,
1740 rte_be_to_cpu_32(meta_m->data));
1741 MLX5_SET(fte_match_set_misc2, misc2_v, metadata_reg_a,
1742 rte_be_to_cpu_32(meta_v->data & meta_m->data));
/* All-zero reference buffer used to test whether a header section of a
 * matcher criteria is entirely unset.
 */
1746 static uint32_t matcher_zero[MLX5_ST_SZ_DW(fte_match_param)] = { 0 };
/* Evaluates to non-zero when the given header section of
 * @match_criteria is all zeroes (i.e. nothing is matched there).
 */
1748 #define HEADER_IS_ZERO(match_criteria, headers) \
1749 !(memcmp(MLX5_ADDR_OF(fte_match_param, match_criteria, headers), \
1750 matcher_zero, MLX5_FLD_SZ_BYTES(fte_match_param, headers))) \
1753 * Calculate flow matcher enable bitmap.
1755 * @param match_criteria
1756 * Pointer to flow matcher criteria.
1759 * Bitmap of enabled fields.
/*
 * Build the match_criteria_enable bitmap for a matcher: one bit per
 * non-empty section (outer headers, misc, inner headers, misc2) of the
 * criteria buffer.
 */
1762 flow_dv_matcher_enable(uint32_t *match_criteria)
1764 uint8_t match_criteria_enable;
1766 match_criteria_enable =
1767 (!HEADER_IS_ZERO(match_criteria, outer_headers)) <<
1768 MLX5_MATCH_CRITERIA_ENABLE_OUTER_BIT;
1769 match_criteria_enable |=
1770 (!HEADER_IS_ZERO(match_criteria, misc_parameters)) <<
1771 MLX5_MATCH_CRITERIA_ENABLE_MISC_BIT;
1772 match_criteria_enable |=
1773 (!HEADER_IS_ZERO(match_criteria, inner_headers)) <<
1774 MLX5_MATCH_CRITERIA_ENABLE_INNER_BIT;
1775 match_criteria_enable |=
1776 (!HEADER_IS_ZERO(match_criteria, misc_parameters_2)) <<
1777 MLX5_MATCH_CRITERIA_ENABLE_MISC2_BIT;
1779 return match_criteria_enable;
1783 * Register the flow matcher.
1785 * @param dev[in, out]
1786 * Pointer to rte_eth_dev structure.
1787 * @param[in, out] matcher
1788 * Pointer to flow matcher.
1789 * @parm[in, out] dev_flow
1790 * Pointer to the dev_flow.
1792 * pointer to error structure.
1795 * 0 on success otherwise -errno and errno is set.
/*
 * Find or create the DV flow matcher for @matcher.  Matchers are
 * cached per-port in priv->matchers and reference-counted; a cache hit
 * just bumps the refcount.
 * NOTE(review): some return statements and braces are elided in this
 * excerpt.
 */
1798 flow_dv_matcher_register(struct rte_eth_dev *dev,
1799 struct mlx5_flow_dv_matcher *matcher,
1800 struct mlx5_flow *dev_flow,
1801 struct rte_flow_error *error)
1803 struct mlx5_priv *priv = dev->data->dev_private;
1804 struct mlx5_flow_dv_matcher *cache_matcher;
1805 struct mlx5dv_flow_matcher_attr dv_attr = {
1806 .type = IBV_FLOW_ATTR_NORMAL,
1807 .match_mask = (void *)&matcher->mask,
1810 /* Lookup from cache. */
1811 LIST_FOREACH(cache_matcher, &priv->matchers, next) {
/* Cheap crc/priority/direction checks first, then the full mask. */
1812 if (matcher->crc == cache_matcher->crc &&
1813 matcher->priority == cache_matcher->priority &&
1814 matcher->egress == cache_matcher->egress &&
1815 !memcmp((const void *)matcher->mask.buf,
1816 (const void *)cache_matcher->mask.buf,
1817 cache_matcher->mask.size)) {
1819 "priority %hd use %s matcher %p: refcnt %d++",
1820 cache_matcher->priority,
1821 cache_matcher->egress ? "tx" : "rx",
1822 (void *)cache_matcher,
1823 rte_atomic32_read(&cache_matcher->refcnt));
/* Cache hit: share the existing matcher object. */
1824 rte_atomic32_inc(&cache_matcher->refcnt);
1825 dev_flow->dv.matcher = cache_matcher;
1829 /* Register new matcher. */
1830 cache_matcher = rte_calloc(__func__, 1, sizeof(*cache_matcher), 0);
1832 return rte_flow_error_set(error, ENOMEM,
1833 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1834 "cannot allocate matcher memory");
/* Copy the caller's template into the cached entry. */
1835 *cache_matcher = *matcher;
1836 dv_attr.match_criteria_enable =
1837 flow_dv_matcher_enable(cache_matcher->mask.buf);
1838 dv_attr.priority = matcher->priority;
1839 if (matcher->egress)
1840 dv_attr.flags |= IBV_FLOW_ATTR_FLAGS_EGRESS;
1841 cache_matcher->matcher_object =
1842 mlx5_glue->dv_create_flow_matcher(priv->ctx, &dv_attr);
1843 if (!cache_matcher->matcher_object) {
/* Creation failed: release the cache entry before reporting. */
1844 rte_free(cache_matcher);
1845 return rte_flow_error_set(error, ENOMEM,
1846 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1847 NULL, "cannot create matcher");
1849 rte_atomic32_inc(&cache_matcher->refcnt);
1850 LIST_INSERT_HEAD(&priv->matchers, cache_matcher, next);
1851 dev_flow->dv.matcher = cache_matcher;
1852 DRV_LOG(DEBUG, "priority %hd new %s matcher %p: refcnt %d",
1853 cache_matcher->priority,
1854 cache_matcher->egress ? "tx" : "rx", (void *)cache_matcher,
1855 rte_atomic32_read(&cache_matcher->refcnt));
1860 * Fill the flow with DV spec.
1863 * Pointer to rte_eth_dev structure.
1864 * @param[in, out] dev_flow
1865 * Pointer to the sub flow.
1867 * Pointer to the flow attributes.
1869 * Pointer to the list of items.
1870 * @param[in] actions
1871 * Pointer to the list of actions.
1873 * Pointer to the error structure.
1876 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Fill the device flow (dev_flow) with the DV spec translated from the
 * rte_flow actions and pattern items, then register the matcher.
 * Two passes: first the action array into dev_flow->dv.actions[], then
 * the item array into the matcher mask / flow value buffers.
 * NOTE(review): break statements, error returns and several interior
 * lines are elided in this excerpt.
 */
1879 flow_dv_translate(struct rte_eth_dev *dev,
1880 struct mlx5_flow *dev_flow,
1881 const struct rte_flow_attr *attr,
1882 const struct rte_flow_item items[],
1883 const struct rte_flow_action actions[],
1884 struct rte_flow_error *error)
1886 struct mlx5_priv *priv = dev->data->dev_private;
1887 struct rte_flow *flow = dev_flow->flow;
1888 uint64_t item_flags = 0;
1889 uint64_t last_item = 0;
1890 uint64_t action_flags = 0;
1891 uint64_t priority = attr->priority;
1892 struct mlx5_flow_dv_matcher matcher = {
1894 .size = sizeof(matcher.mask.buf),
/* Reserved priority means "lowest configured flow priority". */
1899 if (priority == MLX5_FLOW_PRIO_RSVD)
1900 priority = priv->config.flow_prio - 1;
/* Pass 1: translate the action list. */
1901 for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
1902 const struct rte_flow_action_queue *queue;
1903 const struct rte_flow_action_rss *rss;
1904 const struct rte_flow_action *action = actions;
1905 const uint8_t *rss_key;
1907 switch (actions->type) {
1908 case RTE_FLOW_ACTION_TYPE_VOID:
1910 case RTE_FLOW_ACTION_TYPE_FLAG:
/* FLAG is a MARK with the default mark value. */
1911 dev_flow->dv.actions[actions_n].type =
1912 MLX5DV_FLOW_ACTION_TAG;
1913 dev_flow->dv.actions[actions_n].tag_value =
1914 mlx5_flow_mark_set(MLX5_FLOW_MARK_DEFAULT);
1916 action_flags |= MLX5_FLOW_ACTION_FLAG;
1918 case RTE_FLOW_ACTION_TYPE_MARK:
1919 dev_flow->dv.actions[actions_n].type =
1920 MLX5DV_FLOW_ACTION_TAG;
1921 dev_flow->dv.actions[actions_n].tag_value =
1923 (((const struct rte_flow_action_mark *)
1924 (actions->conf))->id);
1926 action_flags |= MLX5_FLOW_ACTION_MARK;
1928 case RTE_FLOW_ACTION_TYPE_DROP:
1929 dev_flow->dv.actions[actions_n].type =
1930 MLX5DV_FLOW_ACTION_DROP;
1931 action_flags |= MLX5_FLOW_ACTION_DROP;
1933 case RTE_FLOW_ACTION_TYPE_QUEUE:
/* Single destination queue stored in the flow's queue array. */
1934 queue = actions->conf;
1935 flow->rss.queue_num = 1;
1936 (*flow->queue)[0] = queue->index;
1937 action_flags |= MLX5_FLOW_ACTION_QUEUE;
1939 case RTE_FLOW_ACTION_TYPE_RSS:
1940 rss = actions->conf;
1942 memcpy((*flow->queue), rss->queue,
1943 rss->queue_num * sizeof(uint16_t));
1944 flow->rss.queue_num = rss->queue_num;
1945 /* NULL RSS key indicates default RSS key. */
1946 rss_key = !rss->key ? rss_hash_default_key : rss->key;
1947 memcpy(flow->key, rss_key, MLX5_RSS_HASH_KEY_LEN);
1948 /* RSS type 0 indicates default RSS type ETH_RSS_IP. */
1949 flow->rss.types = !rss->types ? ETH_RSS_IP : rss->types;
1950 flow->rss.level = rss->level;
1951 action_flags |= MLX5_FLOW_ACTION_RSS;
1953 case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
1954 case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
/* Build (or reuse) the L2 encap verbs action resource. */
1955 if (flow_dv_create_action_l2_encap(dev, actions,
1958 dev_flow->dv.actions[actions_n].type =
1959 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1960 dev_flow->dv.actions[actions_n].action =
1961 dev_flow->dv.encap_decap->verbs_action;
1963 action_flags |= actions->type ==
1964 RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP ?
1965 MLX5_FLOW_ACTION_VXLAN_ENCAP :
1966 MLX5_FLOW_ACTION_NVGRE_ENCAP;
1968 case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
1969 case RTE_FLOW_ACTION_TYPE_NVGRE_DECAP:
1970 if (flow_dv_create_action_l2_decap(dev, dev_flow,
1973 dev_flow->dv.actions[actions_n].type =
1974 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1975 dev_flow->dv.actions[actions_n].action =
1976 dev_flow->dv.encap_decap->verbs_action;
1978 action_flags |= actions->type ==
1979 RTE_FLOW_ACTION_TYPE_VXLAN_DECAP ?
1980 MLX5_FLOW_ACTION_VXLAN_DECAP :
1981 MLX5_FLOW_ACTION_NVGRE_DECAP;
1983 case RTE_FLOW_ACTION_TYPE_RAW_ENCAP:
1984 /* Handle encap with preceding decap. */
1985 if (action_flags & MLX5_FLOW_ACTION_RAW_DECAP) {
1986 if (flow_dv_create_action_raw_encap
1987 (dev, actions, dev_flow, attr, error))
1989 dev_flow->dv.actions[actions_n].type =
1990 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
1991 dev_flow->dv.actions[actions_n].action =
1992 dev_flow->dv.encap_decap->verbs_action;
1994 /* Handle encap without preceding decap. */
1995 if (flow_dv_create_action_l2_encap(dev, actions,
1999 dev_flow->dv.actions[actions_n].type =
2000 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2001 dev_flow->dv.actions[actions_n].action =
2002 dev_flow->dv.encap_decap->verbs_action;
2005 action_flags |= MLX5_FLOW_ACTION_RAW_ENCAP;
2007 case RTE_FLOW_ACTION_TYPE_RAW_DECAP:
2008 /* Check if this decap is followed by encap. */
2009 for (; action->type != RTE_FLOW_ACTION_TYPE_END &&
2010 action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP;
2013 /* Handle decap only if it isn't followed by encap. */
2014 if (action->type != RTE_FLOW_ACTION_TYPE_RAW_ENCAP) {
2015 if (flow_dv_create_action_l2_decap(dev,
2019 dev_flow->dv.actions[actions_n].type =
2020 MLX5DV_FLOW_ACTION_IBV_FLOW_ACTION;
2021 dev_flow->dv.actions[actions_n].action =
2022 dev_flow->dv.encap_decap->verbs_action;
2025 /* If decap is followed by encap, handle it at encap. */
2026 action_flags |= MLX5_FLOW_ACTION_RAW_DECAP;
2032 dev_flow->dv.actions_n = actions_n;
2033 flow->actions = action_flags;
/* Pass 2: translate the pattern items into the matcher buffers. */
2034 for (; items->type != RTE_FLOW_ITEM_TYPE_END; items++) {
2035 int tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2036 void *match_mask = matcher.mask.buf;
2037 void *match_value = dev_flow->dv.value.buf;
2039 switch (items->type) {
2040 case RTE_FLOW_ITEM_TYPE_ETH:
2041 flow_dv_translate_item_eth(match_mask, match_value,
2043 matcher.priority = MLX5_PRIORITY_MAP_L2;
2044 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2045 MLX5_FLOW_LAYER_OUTER_L2;
2047 case RTE_FLOW_ITEM_TYPE_VLAN:
2048 flow_dv_translate_item_vlan(match_mask, match_value,
2050 matcher.priority = MLX5_PRIORITY_MAP_L2;
2051 last_item = tunnel ? (MLX5_FLOW_LAYER_INNER_L2 |
2052 MLX5_FLOW_LAYER_INNER_VLAN) :
2053 (MLX5_FLOW_LAYER_OUTER_L2 |
2054 MLX5_FLOW_LAYER_OUTER_VLAN);
2056 case RTE_FLOW_ITEM_TYPE_IPV4:
2057 flow_dv_translate_item_ipv4(match_mask, match_value,
2059 matcher.priority = MLX5_PRIORITY_MAP_L3;
/* Accumulate RX-hash fields for a possible RSS action. */
2060 dev_flow->dv.hash_fields |=
2061 mlx5_flow_hashfields_adjust
2063 MLX5_IPV4_LAYER_TYPES,
2064 MLX5_IPV4_IBV_RX_HASH);
2065 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2066 MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2068 case RTE_FLOW_ITEM_TYPE_IPV6:
2069 flow_dv_translate_item_ipv6(match_mask, match_value,
2071 matcher.priority = MLX5_PRIORITY_MAP_L3;
2072 dev_flow->dv.hash_fields |=
2073 mlx5_flow_hashfields_adjust
2075 MLX5_IPV6_LAYER_TYPES,
2076 MLX5_IPV6_IBV_RX_HASH);
2077 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2078 MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2080 case RTE_FLOW_ITEM_TYPE_TCP:
2081 flow_dv_translate_item_tcp(match_mask, match_value,
2083 matcher.priority = MLX5_PRIORITY_MAP_L4;
2084 dev_flow->dv.hash_fields |=
2085 mlx5_flow_hashfields_adjust
2086 (dev_flow, tunnel, ETH_RSS_TCP,
2087 IBV_RX_HASH_SRC_PORT_TCP |
2088 IBV_RX_HASH_DST_PORT_TCP);
2089 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2090 MLX5_FLOW_LAYER_OUTER_L4_TCP;
2092 case RTE_FLOW_ITEM_TYPE_UDP:
2093 flow_dv_translate_item_udp(match_mask, match_value,
2095 matcher.priority = MLX5_PRIORITY_MAP_L4;
2096 dev_flow->dv.hash_fields |=
2097 mlx5_flow_hashfields_adjust
2098 (dev_flow, tunnel, ETH_RSS_UDP,
2099 IBV_RX_HASH_SRC_PORT_UDP |
2100 IBV_RX_HASH_DST_PORT_UDP);
2101 last_item = tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2102 MLX5_FLOW_LAYER_OUTER_L4_UDP;
2104 case RTE_FLOW_ITEM_TYPE_GRE:
2105 flow_dv_translate_item_gre(match_mask, match_value,
2107 last_item = MLX5_FLOW_LAYER_GRE;
2109 case RTE_FLOW_ITEM_TYPE_NVGRE:
2110 flow_dv_translate_item_nvgre(match_mask, match_value,
/* NVGRE is tracked with the GRE layer flag. */
2112 last_item = MLX5_FLOW_LAYER_GRE;
2114 case RTE_FLOW_ITEM_TYPE_VXLAN:
2115 flow_dv_translate_item_vxlan(match_mask, match_value,
2117 last_item = MLX5_FLOW_LAYER_VXLAN;
2119 case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
/* VXLAN-GPE shares the VXLAN translation. */
2120 flow_dv_translate_item_vxlan(match_mask, match_value,
2122 last_item = MLX5_FLOW_LAYER_VXLAN_GPE;
2124 case RTE_FLOW_ITEM_TYPE_MPLS:
2125 flow_dv_translate_item_mpls(match_mask, match_value,
2126 items, last_item, tunnel);
2127 last_item = MLX5_FLOW_LAYER_MPLS;
2129 case RTE_FLOW_ITEM_TYPE_META:
2130 flow_dv_translate_item_meta(match_mask, match_value,
2132 last_item = MLX5_FLOW_ITEM_METADATA;
2137 item_flags |= last_item;
/* The value buffer must not match anything the mask does not cover. */
2139 assert(!flow_dv_check_valid_spec(matcher.mask.buf,
2140 dev_flow->dv.value.buf));
2141 dev_flow->layers = item_flags;
2142 /* Register matcher. */
2143 matcher.crc = rte_raw_cksum((const void *)matcher.mask.buf,
2145 matcher.priority = mlx5_flow_adjust_priority(dev, priority,
2147 matcher.egress = attr->egress;
2148 if (flow_dv_matcher_register(dev, &matcher, dev_flow, error))
2154 * Apply the flow to the NIC.
2157 * Pointer to the Ethernet device structure.
2158 * @param[in, out] flow
2159 * Pointer to flow structure.
2161 * Pointer to error structure.
2164 * 0 on success, a negative errno value otherwise and rte_errno is set.
/*
 * Apply the translated DV flow rules to the NIC: resolve the
 * destination (drop or hash RX queue) for every device sub-flow and
 * create the DV flow object.  On any failure all queues acquired so
 * far are released and rte_errno is preserved.
 * NOTE(review): braces, goto-error jumps and several interior lines
 * are elided in this excerpt.
 */
2167 flow_dv_apply(struct rte_eth_dev *dev, struct rte_flow *flow,
2168 struct rte_flow_error *error)
2170 struct mlx5_flow_dv *dv;
2171 struct mlx5_flow *dev_flow;
2175 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
2178 if (flow->actions & MLX5_FLOW_ACTION_DROP) {
/* DROP flows target the dedicated drop queue. */
2179 dv->hrxq = mlx5_hrxq_drop_new(dev);
2183 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2184 "cannot get drop hash queue");
2187 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
2188 dv->actions[n].qp = dv->hrxq->qp;
2190 } else if (flow->actions &
2191 (MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)) {
2192 struct mlx5_hrxq *hrxq;
/* Reuse a cached hash RX queue when one matches. */
2194 hrxq = mlx5_hrxq_get(dev, flow->key,
2195 MLX5_RSS_HASH_KEY_LEN,
2198 flow->rss.queue_num);
/* Otherwise create a new one for this key/queue set. */
2200 hrxq = mlx5_hrxq_new
2201 (dev, flow->key, MLX5_RSS_HASH_KEY_LEN,
2202 dv->hash_fields, (*flow->queue),
2203 flow->rss.queue_num,
2204 !!(dev_flow->layers &
2205 MLX5_FLOW_LAYER_TUNNEL));
2209 RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2210 "cannot get hash queue");
2214 dv->actions[n].type = MLX5DV_FLOW_ACTION_DEST_IBV_QP;
2215 dv->actions[n].qp = hrxq->qp;
/* Create the actual DV flow from matcher + value + actions. */
2219 mlx5_glue->dv_create_flow(dv->matcher->matcher_object,
2220 (void *)&dv->value, n,
2223 rte_flow_error_set(error, errno,
2224 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2226 "hardware refuses to create flow");
/* Error path: release every queue acquired so far. */
2232 err = rte_errno; /* Save rte_errno before cleanup. */
2233 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
2234 struct mlx5_flow_dv *dv = &dev_flow->dv;
2236 if (flow->actions & MLX5_FLOW_ACTION_DROP)
2237 mlx5_hrxq_drop_release(dev);
2239 mlx5_hrxq_release(dev, dv->hrxq);
2243 rte_errno = err; /* Restore rte_errno. */
2248 * Release the flow matcher.
2251 * Pointer to Ethernet device.
2253 * Pointer to mlx5_flow.
2256 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached matcher; destroy and unlink
 * it from priv->matchers when the refcount hits zero.
 * NOTE(review): braces and the return statements are elided in this
 * excerpt.
 */
2259 flow_dv_matcher_release(struct rte_eth_dev *dev,
2260 struct mlx5_flow *flow)
2262 struct mlx5_flow_dv_matcher *matcher = flow->dv.matcher;
2264 assert(matcher->matcher_object);
2265 DRV_LOG(DEBUG, "port %u matcher %p: refcnt %d--",
2266 dev->data->port_id, (void *)matcher,
2267 rte_atomic32_read(&matcher->refcnt));
2268 if (rte_atomic32_dec_and_test(&matcher->refcnt)) {
/* Last user: destroy the DV object and remove the cache entry. */
2269 claim_zero(mlx5_glue->dv_destroy_flow_matcher
2270 (matcher->matcher_object));
2271 LIST_REMOVE(matcher, next);
2273 DRV_LOG(DEBUG, "port %u matcher %p: removed",
2274 dev->data->port_id, (void *)matcher);
2281 * Release an encap/decap resource.
2284 * Pointer to mlx5_flow.
2287 * 1 while a reference on it exists, 0 when freed.
/*
 * Drop one reference on the flow's cached encap/decap verbs action;
 * destroy, unlink and free the resource when the refcount hits zero.
 * NOTE(review): braces and the return statements are elided in this
 * excerpt.
 */
2290 flow_dv_encap_decap_resource_release(struct mlx5_flow *flow)
2292 struct mlx5_flow_dv_encap_decap_resource *cache_resource =
2293 flow->dv.encap_decap;
2295 assert(cache_resource->verbs_action);
2296 DRV_LOG(DEBUG, "encap/decap resource %p: refcnt %d--",
2297 (void *)cache_resource,
2298 rte_atomic32_read(&cache_resource->refcnt));
2299 if (rte_atomic32_dec_and_test(&cache_resource->refcnt)) {
/* Last user: destroy the verbs action and free the cache entry. */
2300 claim_zero(mlx5_glue->destroy_flow_action
2301 (cache_resource->verbs_action));
2302 LIST_REMOVE(cache_resource, next);
2303 rte_free(cache_resource);
2304 DRV_LOG(DEBUG, "encap/decap resource %p: removed",
2305 (void *)cache_resource);
2312 * Remove the flow from the NIC but keeps it in memory.
2315 * Pointer to Ethernet device.
2316 * @param[in, out] flow
2317 * Pointer to flow structure.
/*
 * Remove the flow's DV objects and queue references from the NIC while
 * keeping the software flow structures in memory (counterpart of
 * flow_dv_apply).
 * NOTE(review): braces and NULL-reset statements are elided in this
 * excerpt.
 */
2321 flow_dv_remove(struct rte_eth_dev *dev, struct rte_flow *flow)
2322 struct mlx5_flow_dv *dv;
2323 struct mlx5_flow *dev_flow;
2327 LIST_FOREACH(dev_flow, &flow->dev_flows, next) {
/* Destroy the hardware flow object for this sub-flow. */
2330 claim_zero(mlx5_glue->destroy_flow(dv->flow));
/* Release the destination queue (drop queue or hash RX queue). */
2334 if (flow->actions & MLX5_FLOW_ACTION_DROP)
2335 mlx5_hrxq_drop_release(dev);
2337 mlx5_hrxq_release(dev, dv->hrxq);
2342 flow->counter = NULL;
2346 * Remove the flow from the NIC and the memory.
2349 * Pointer to the Ethernet device structure.
2350 * @param[in, out] flow
2351 * Pointer to flow structure.
/*
 * Fully tear down a flow: remove it from the NIC, then free every
 * device sub-flow, releasing its matcher and encap/decap references.
 * NOTE(review): braces and the rte_free of each sub-flow are elided in
 * this excerpt.
 */
2354 flow_dv_destroy(struct rte_eth_dev *dev, struct rte_flow *flow)
2356 struct mlx5_flow *dev_flow;
/* Detach from hardware first. */
2360 flow_dv_remove(dev, flow);
2361 while (!LIST_EMPTY(&flow->dev_flows)) {
2362 dev_flow = LIST_FIRST(&flow->dev_flows);
2363 LIST_REMOVE(dev_flow, next);
/* Matcher and encap/decap resources are refcounted caches. */
2364 if (dev_flow->dv.matcher)
2365 flow_dv_matcher_release(dev, dev_flow);
2366 if (dev_flow->dv.encap_decap)
2367 flow_dv_encap_decap_resource_release(dev_flow);
2375 * @see rte_flow_query()
/*
 * Flow query entry point for the DV driver: querying is not
 * implemented, so always report ENOTSUP through the rte_flow error.
 */
2379 flow_dv_query(struct rte_eth_dev *dev __rte_unused,
2380 struct rte_flow *flow __rte_unused,
2381 const struct rte_flow_action *actions __rte_unused,
2382 void *data __rte_unused,
2383 struct rte_flow_error *error __rte_unused)
2385 return rte_flow_error_set(error, ENOTSUP,
2386 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2388 "flow query with DV is not supported");
/* Driver-ops vtable exposing the DV flow engine to the generic mlx5
 * flow layer.
 */
2392 const struct mlx5_flow_driver_ops mlx5_flow_dv_drv_ops = {
2393 .validate = flow_dv_validate,
2394 .prepare = flow_dv_prepare,
2395 .translate = flow_dv_translate,
2396 .apply = flow_dv_apply,
2397 .remove = flow_dv_remove,
2398 .destroy = flow_dv_destroy,
2399 .query = flow_dv_query,
2402 #endif /* HAVE_IBV_FLOW_DV_SUPPORT */