/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2018 Broadcom
 * All rights reserved.
 */

#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"

/*
 * Filter Functions
 */
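
/*
 * Take the first filter off the per-port free list and initialize it as
 * the default L2 MAC filter: match the port's primary MAC address with a
 * full (all ones) address mask on the RX path.
 */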
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to L2 MAC Addr filter */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}
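
/*
 * VF filters are allocated from the heap rather than from the PF's
 * preallocated filter array, and are tracked on the per-VF filter list.
 */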
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for VF %hu filters\n",
			    vf);
		return NULL;
	}

	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}
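
/*
 * Mark every preallocated filter as owning no firmware filter (all-ones
 * IDs) and rebuild the free list from the filter array.
 */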
void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	int i, max_filters;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = UINT64_MAX;
		filter->fw_em_filter_id = UINT64_MAX;
		filter->fw_ntuple_filter_id = UINT64_MAX;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}
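
/*
 * Detach every filter from its VNIC and return it to the free list, then
 * ask firmware to clear the L2 filters programmed on behalf of each VF.
 */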
void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				/* Grab the successor before the node moves
				 * to another list.
				 */
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next)
			bnxt_hwrm_clear_l2_filter(bp, filter);
	}
}
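
/*
 * Release the filter array at teardown. A filter that still holds a
 * firmware L2 filter ID here was leaked somewhere; log it and give
 * firmware one more chance to free it before the memory goes away.
 */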
void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter, *temp_filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
			PMD_DRV_LOG(ERR, "HWRM filter is not freed??\n");
			/* Call HWRM to try to free filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				PMD_DRV_LOG(ERR,
					    "HWRM filter cannot be freed rc = %d\n",
					    rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;

	for (i = 0; i < bp->pf.max_vfs; i++) {
		filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter);
		while (filter) {
			/* Unlink before freeing; iterating a list whose
			 * nodes are freed in place is a use-after-free.
			 */
			temp_filter = STAILQ_NEXT(filter, next);
			STAILQ_REMOVE(&bp->pf.vf_info[i].filter, filter,
				      bnxt_filter_info, next);
			rte_free(filter);
			filter = temp_filter;
		}
	}
}
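
/*
 * Allocate one zeroed bnxt_filter_info entry per L2 context the device
 * supports; bnxt_init_filters() later strings them onto the free list.
 */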
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for VNIC pool and filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		PMD_DRV_LOG(ERR, "Failed to alloc memory for %d filters",
			    max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}
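
/*
 * Pop a filter off the free list without initializing it; the caller is
 * expected to fill in all match criteria.
 */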
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	return filter;
}
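
/* Return a filter to the free list. No firmware state is touched here. */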
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
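
/*
 * Sanity-check the rte_flow arguments before parsing: attributes,
 * pattern, and actions must all be non-NULL.
 */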
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
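
/*
 * Skip leading VOID items/actions; rte_flow permits VOID entries anywhere
 * in a list and they carry no matching semantics.
 */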
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (1) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
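
/*
 * Return 1 if the buffer is all zeroes, 0 otherwise. Used to recognize
 * "don't care" IPv6 address masks.
 */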
int bnxt_check_zero_bytes(const uint8_t *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (bytes[i] != 0x00)
			return 0;
	return 1;
}
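
/*
 * Decide which firmware filter type a pattern requires: returns nonzero
 * when an ntuple filter is needed, 0 for an exact-match (EM) filter, and
 * a negative errno on error. A VLAN item forces exact match, so VLAN
 * combined with L3/L4 items is rejected.
 */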
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			if (!use_ntuple) {
				PMD_DRV_LOG(ERR,
					    "VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple |= 1;
			break;
		default:
			PMD_DRV_LOG(ERR, "Unknown Flow type\n");
			use_ntuple |= 1;
		}
		item++;
	}
	return use_ntuple;
}
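
/*
 * Walk the pattern once and translate each item into bnxt_filter_info
 * match fields plus the corresponding "enables" bits of the HWRM request.
 * Whether the ntuple or the exact-match enable flags are used follows
 * from the filter type chosen by bnxt_filter_type_check().
 */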
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_attr *attr,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t vf = 0;
	uint32_t en = 0;
	uint32_t en_ethertype;
	int use_ntuple;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	PMD_DRV_LOG(DEBUG, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	en_ethertype = use_ntuple ?
		NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
		EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = item->spec;
			eth_mask = item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if (eth_mask->type &&
			    eth_mask->type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			} /*
			   * else {
			   *	RTE_LOG(ERR, PMD, "Handle this condition\n");
			   * }
			   */
			if (eth_mask->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= en_ethertype;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec = item->spec;
			vlan_mask = item->mask;
			if (en & en_ethertype) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN TPID matching is not"
						   " supported");
				return -rte_errno;
			}
			if (vlan_mask->tci &&
			    vlan_mask->tci == RTE_BE16(0x0fff)) {
				/* Only the VLAN ID can be matched. */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci &
							 vlan_mask->tci);
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else if (vlan_mask->tci) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type &&
			    vlan_mask->inner_type != RTE_BE16(0xffff)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "inner ethertype mask not"
						   " valid");
				return -rte_errno;
			}
			if (vlan_mask->inner_type) {
				filter->ethertype =
					rte_be_to_cpu_16(vlan_spec->inner_type);
				en |= en_ethertype;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec = item->spec;
			ipv4_mask = item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec = item->spec;
			ipv6_mask = item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.src_addr,
						   16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!bnxt_check_zero_bytes(ipv6_mask->hdr.dst_addr,
						   16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = item->spec;
			tcp_mask = item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = item->spec;
			udp_mask = item->mask;

			/* Only DST & SRC ports are maskable. */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec = item->spec;
			vxlan_mask = item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec = item->spec;
			nvgre_mask = item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Both header fields are big-endian in the item, so
			 * compare against big-endian constants (K bit set,
			 * Transparent Ethernet Bridging protocol).
			 */
			if (nvgre_spec->c_k_s_rsvd0_ver != RTE_BE16(0x2000) ||
			    nvgre_spec->protocol != RTE_BE16(0x6558)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			if (!attr->transfer) {
				rte_flow_error_set(error, ENOTSUP,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Matching VF traffic without"
						   " affecting it (transfer attribute)"
						   " is unsupported");
				return -rte_errno;
			}

			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unable to get default VNIC for VF");
				return -rte_errno;
			}
			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}

/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}
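
/*
 * Find or create the L2 filter that an EM/ntuple flow hangs off of. If
 * the flow's destination MAC matches the port's default L2 filter, that
 * filter is reused; otherwise a new L2 filter is programmed for the
 * requested MAC on the given VNIC.
 */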
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has the same DST MAC as the port/L2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs a DST MAC that differs from the port/L2 filter. */
	PMD_DRV_LOG(DEBUG, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;
	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	return filter1;
}
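
/*
 * Parse the pattern and attributes, then translate the single supported
 * action (QUEUE, DROP, COUNT, or VF) into the filter's destination and
 * flags. Every flow is anchored to an L2 filter obtained through
 * bnxt_get_l2_filter().
 */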
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		PMD_DRV_LOG(ERR, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(bp, attr, pattern, error,
					       filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Only the ingress attribute is supported for now. */
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		PMD_DRV_LOG(DEBUG, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		PMD_DRV_LOG(DEBUG, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;
		if (!BNXT_PF(bp)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Configuring on a VF!");
			rc = -rte_errno;
			goto ret;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}
		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	/* Only a single action per flow is supported. */
	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}
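
/*
 * .validate() callback: run the full parse against a scratch filter and
 * return it to the free list afterwards; nothing is programmed in
 * hardware.
 */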
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}
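
/*
 * Scan the existing flows for one with identical match criteria. Returns
 * -EEXIST for a true duplicate (same destination) or -EXDEV when only the
 * destination differs, in which case the old hardware filter is cleared
 * and the flow is repointed at the new filter.
 */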
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;
			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Same flow, different queue:
				 * clear the old ntuple filter.
				 */
				if (nf->filter_type == HWRM_CFA_EM_FILTER)
					bnxt_hwrm_clear_em_filter(bp, mf);
				if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
					bnxt_hwrm_clear_ntuple_filter(bp, mf);
				/* Free the old filter and update the flow
				 * with the new one.
				 */
				bnxt_free_filter(bp, mf);
				flow->filter = nf;
				return -EXDEV;
			}
		}
	}
	return 0;
}
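
/*
 * .create() callback: validate and parse the flow, program the EM or
 * ntuple filter in firmware, and attach the new rte_flow to the
 * destination VNIC's flow list.
 */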
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		PMD_DRV_LOG(ERR, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		PMD_DRV_LOG(ERR, "Not enough resources for a new flow.\n");
		ret = -ENOMEM;
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		PMD_DRV_LOG(DEBUG, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above.
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		PMD_DRV_LOG(DEBUG, "Flow with same pattern exists");
		PMD_DRV_LOG(DEBUG, "Updating with different destination\n");
		update_flow = true;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		PMD_DRV_LOG(DEBUG, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	return NULL;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		PMD_DRV_LOG(ERR, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	else
		ret = bnxt_hwrm_clear_l2_filter(bp, filter);

	if (ret == 0) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow, *temp_flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		flow = STAILQ_FIRST(&vnic->flow_list);
		while (flow) {
			struct bnxt_filter_info *filter = flow->filter;

			/* Grab the successor before the node is freed. */
			temp_flow = STAILQ_NEXT(flow, next);

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE(&vnic->flow_list, flow,
				      rte_flow, next);
			rte_free(flow);
			flow = temp_flow;
		}
	}

	return ret;
}
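
/*
 * rte_flow entry points for this PMD. As an illustrative example (not
 * taken from this file), a flow these callbacks would accept can be
 * created from testpmd with something like:
 *
 *   flow create 0 ingress pattern eth dst is 00:11:22:33:44:55 /
 *       ipv4 src is 10.0.0.1 dst is 10.0.0.2 / end
 *       actions queue index 2 / end
 *
 * which reaches bnxt_flow_create() as an ntuple filter with a QUEUE
 * action.
 */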
const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};