/*-
 *   BSD LICENSE
 *
 *   Copyright(c) Broadcom Limited.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Broadcom Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/queue.h>

#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_flow.h>
#include <rte_flow_driver.h>
#include <rte_tailq.h>

#include "bnxt.h"
#include "bnxt_filter.h"
#include "bnxt_hwrm.h"
#include "bnxt_vnic.h"
#include "hsi_struct_def_dpdk.h"
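
/*
 * Filter Functions
 */

/* Pop the first free entry from bp->free_filter_list and initialize it as
 * an RX L2 MAC filter for the port's primary MAC address. Returns NULL if
 * the pre-allocated filter pool is exhausted.
 */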
struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		RTE_LOG(ERR, PMD, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	/* Default to L2 MAC Addr filter */
	filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK;
	memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes,
	       ETHER_ADDR_LEN);
	memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	return filter;
}
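
/* VF filters are allocated on demand rather than drawn from the PF's
 * static filter pool; each is linked onto the owning VF's filter list.
 */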
struct bnxt_filter_info *bnxt_alloc_vf_filter(struct bnxt *bp, uint16_t vf)
{
	struct bnxt_filter_info *filter;

	filter = rte_zmalloc("bnxt_vf_filter_info", sizeof(*filter), 0);
	if (!filter) {
		RTE_LOG(ERR, PMD, "Failed to alloc memory for VF %hu filters\n",
			vf);
		return NULL;
	}
	filter->fw_l2_filter_id = UINT64_MAX;
	STAILQ_INSERT_TAIL(&bp->pf.vf_info[vf].filter, filter, next);
	return filter;
}
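
/* Called at init time: place every entry of the bp->filter_info array on
 * the free list and mark all firmware filter IDs as invalid.
 */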
void bnxt_init_filters(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	int i, max_filters;

	max_filters = bp->max_l2_ctx;
	STAILQ_INIT(&bp->free_filter_list);
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		filter->fw_l2_filter_id = -1;
		filter->fw_em_filter_id = -1;
		filter->fw_ntuple_filter_id = -1;
		STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
	}
}
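
/* Return every filter attached to any VNIC to the free list and clear the
 * per-VF L2 filters in firmware. The backing memory is released separately
 * by bnxt_free_filter_mem().
 */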
void bnxt_free_all_filters(struct bnxt *bp)
{
	struct bnxt_vnic_info *vnic;
	struct bnxt_filter_info *filter, *temp_filter;
	int i;

	for (i = 0; i < MAX_FF_POOLS; i++) {
		STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) {
			filter = STAILQ_FIRST(&vnic->filter);
			while (filter) {
				temp_filter = STAILQ_NEXT(filter, next);
				STAILQ_REMOVE(&vnic->filter, filter,
					      bnxt_filter_info, next);
				STAILQ_INSERT_TAIL(&bp->free_filter_list,
						   filter, next);
				filter = temp_filter;
			}
			STAILQ_INIT(&vnic->filter);
		}
	}

	for (i = 0; i < bp->pf.max_vfs; i++) {
		STAILQ_FOREACH(filter, &bp->pf.vf_info[i].filter, next)
			bnxt_hwrm_clear_l2_filter(bp, filter);
	}
}
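
/* Release the filter pool. Any filter still holding a firmware L2 filter
 * ID at this point is a bookkeeping error, so a best-effort HWRM clear is
 * retried before the memory is freed.
 */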
void bnxt_free_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;
	uint16_t max_filters, i;
	int rc = 0;

	if (bp->filter_info == NULL)
		return;

	/* Ensure that all filters are freed */
	max_filters = bp->max_l2_ctx;
	for (i = 0; i < max_filters; i++) {
		filter = &bp->filter_info[i];
		if (filter->fw_l2_filter_id != ((uint64_t)-1)) {
			RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n");
			/* Call HWRM to try to free filter again */
			rc = bnxt_hwrm_clear_l2_filter(bp, filter);
			if (rc)
				RTE_LOG(ERR, PMD,
					"HWRM filter cannot be freed rc = %d\n",
					rc);
		}
		filter->fw_l2_filter_id = UINT64_MAX;
	}
	STAILQ_INIT(&bp->free_filter_list);

	rte_free(bp->filter_info);
	bp->filter_info = NULL;

	for (i = 0; i < bp->pf.max_vfs; i++) {
		/* Unlink each VF filter from the list before freeing it;
		 * iterating with STAILQ_FOREACH while freeing entries would
		 * touch freed memory.
		 */
		while ((filter = STAILQ_FIRST(&bp->pf.vf_info[i].filter))) {
			STAILQ_REMOVE_HEAD(&bp->pf.vf_info[i].filter, next);
			rte_free(filter);
		}
	}
}
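
/* Allocate the filter pool, sized by the L2 context count reported by
 * firmware (bp->max_l2_ctx).
 */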
int bnxt_alloc_filter_mem(struct bnxt *bp)
{
	struct bnxt_filter_info *filter_mem;
	uint16_t max_filters;

	max_filters = bp->max_l2_ctx;
	/* Allocate memory for the filter pool */
	filter_mem = rte_zmalloc("bnxt_filter_info",
				 max_filters * sizeof(struct bnxt_filter_info),
				 0);
	if (filter_mem == NULL) {
		RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters",
			max_filters);
		return -ENOMEM;
	}
	bp->filter_info = filter_mem;
	return 0;
}
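
/* Like bnxt_alloc_filter() but leaves the entry uninitialized; callers
 * on the rte_flow path fill in the filter type, flags, and enables bits.
 */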
struct bnxt_filter_info *bnxt_get_unused_filter(struct bnxt *bp)
{
	struct bnxt_filter_info *filter;

	/* Find the 1st unused filter from the free_filter_list pool */
	filter = STAILQ_FIRST(&bp->free_filter_list);
	if (!filter) {
		RTE_LOG(ERR, PMD, "No more free filter resources\n");
		return NULL;
	}
	STAILQ_REMOVE_HEAD(&bp->free_filter_list, next);

	return filter;
}
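
/* Return a filter to the free pool. This does not clear any associated
 * firmware filter; callers must do that first via the HWRM helpers.
 */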
void bnxt_free_filter(struct bnxt *bp, struct bnxt_filter_info *filter)
{
	STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next);
}
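
/*
 * rte_flow support
 */

/* Reject NULL attr/pattern/actions before any parsing is attempted. */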
static int
bnxt_flow_args_validate(const struct rte_flow_attr *attr,
			const struct rte_flow_item pattern[],
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	if (!pattern) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ITEM_NUM,
				   NULL, "NULL pattern.");
		return -rte_errno;
	}

	if (!actions) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION_NUM,
				   NULL, "NULL action.");
		return -rte_errno;
	}

	if (!attr) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, "NULL attribute.");
		return -rte_errno;
	}

	return 0;
}
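
/* rte_flow items/actions may be interleaved with VOID entries; these
 * helpers skip ahead to the next meaningful item or action.
 */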
static const struct rte_flow_item *
nxt_non_void_pattern(const struct rte_flow_item *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ITEM_TYPE_VOID)
			return cur;
		cur++;
	}
}

static const struct rte_flow_action *
nxt_non_void_action(const struct rte_flow_action *cur)
{
	while (true) {
		if (cur->type != RTE_FLOW_ACTION_TYPE_VOID)
			return cur;
		cur++;
	}
}
static inline int check_zero_bytes(const uint8_t *bytes, int len)
{
	int i;

	for (i = 0; i < len; i++)
		if (bytes[i] != 0x00)
			return 0;
	return 1;
}
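
/* Choose between an exact-match (EM) and an ntuple filter based on the
 * pattern: L3/L4 items require ntuple matching, while a VLAN item forces
 * exact match, so the two cannot be combined in one flow.
 */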
static int
bnxt_filter_type_check(const struct rte_flow_item pattern[],
		       struct rte_flow_error *error)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	int use_ntuple = 1;

	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			use_ntuple = 1;
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			use_ntuple = 0;
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
		case RTE_FLOW_ITEM_TYPE_IPV6:
		case RTE_FLOW_ITEM_TYPE_TCP:
		case RTE_FLOW_ITEM_TYPE_UDP:
			/* need ntuple match, reset exact match */
			if (!use_ntuple) {
				RTE_LOG(ERR, PMD,
					"VLAN flow cannot use NTUPLE filter\n");
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Cannot use VLAN with NTUPLE");
				return -rte_errno;
			}
			use_ntuple |= 1;
			break;
		default:
			RTE_LOG(ERR, PMD, "Unknown Flow type\n");
			use_ntuple |= 1;
		}
		item++;
	}
	return use_ntuple;
}
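
/* Walk the pattern items and translate them into a bnxt_filter_info,
 * accumulating the HWRM "enables" field bits in 'en' as fields are set.
 */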
static int
bnxt_validate_and_parse_flow_type(struct bnxt *bp,
				  const struct rte_flow_item pattern[],
				  struct rte_flow_error *error,
				  struct bnxt_filter_info *filter)
{
	const struct rte_flow_item *item = nxt_non_void_pattern(pattern);
	const struct rte_flow_item_vlan *vlan_spec, *vlan_mask;
	const struct rte_flow_item_ipv4 *ipv4_spec, *ipv4_mask;
	const struct rte_flow_item_ipv6 *ipv6_spec, *ipv6_mask;
	const struct rte_flow_item_tcp *tcp_spec, *tcp_mask;
	const struct rte_flow_item_udp *udp_spec, *udp_mask;
	const struct rte_flow_item_eth *eth_spec, *eth_mask;
	const struct rte_flow_item_nvgre *nvgre_spec;
	const struct rte_flow_item_nvgre *nvgre_mask;
	const struct rte_flow_item_vxlan *vxlan_spec;
	const struct rte_flow_item_vxlan *vxlan_mask;
	uint8_t vni_mask[] = {0xFF, 0xFF, 0xFF};
	uint8_t tni_mask[] = {0xFF, 0xFF, 0xFF};
	const struct rte_flow_item_vf *vf_spec;
	uint32_t tenant_id_be = 0;
	bool vni_masked = 0;
	bool tni_masked = 0;
	uint32_t vf = 0;
	int use_ntuple;
	uint32_t en = 0;
	int dflt_vnic;

	use_ntuple = bnxt_filter_type_check(pattern, error);
	RTE_LOG(DEBUG, PMD, "Use NTUPLE %d\n", use_ntuple);
	if (use_ntuple < 0)
		return use_ntuple;

	filter->filter_type = use_ntuple ?
		HWRM_CFA_NTUPLE_FILTER : HWRM_CFA_EM_FILTER;
	while (item->type != RTE_FLOW_ITEM_TYPE_END) {
		if (item->last) {
			/* last or range is NOT supported as match criteria */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "No support for range");
			return -rte_errno;
		}
		if (!item->spec || !item->mask) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ITEM,
					   item,
					   "spec/mask is NULL");
			return -rte_errno;
		}
		switch (item->type) {
		case RTE_FLOW_ITEM_TYPE_ETH:
			eth_spec = (const struct rte_flow_item_eth *)item->spec;
			eth_mask = (const struct rte_flow_item_eth *)item->mask;

			/* Source MAC address mask cannot be partially set.
			 * Should be all 0's or all 1's.
			 * Destination MAC address mask must not be partially
			 * set. Should be all 1's or all 0's.
			 */
			if ((!is_zero_ether_addr(&eth_mask->src) &&
			     !is_broadcast_ether_addr(&eth_mask->src)) ||
			    (!is_zero_ether_addr(&eth_mask->dst) &&
			     !is_broadcast_ether_addr(&eth_mask->dst))) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "MAC_addr mask not valid");
				return -rte_errno;
			}

			/* Mask is not allowed. Only exact matches are */
			if ((eth_mask->type & UINT16_MAX) != UINT16_MAX) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "ethertype mask not valid");
				return -rte_errno;
			}

			if (is_broadcast_ether_addr(&eth_mask->dst)) {
				rte_memcpy(filter->dst_macaddr,
					   &eth_spec->dst, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_DST_MACADDR;
			}
			if (is_broadcast_ether_addr(&eth_mask->src)) {
				rte_memcpy(filter->src_macaddr,
					   &eth_spec->src, ETHER_ADDR_LEN);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_MACADDR :
					EM_FLOW_ALLOC_INPUT_EN_SRC_MACADDR;
			}

			if (eth_spec->type) {
				filter->ethertype =
					rte_be_to_cpu_16(eth_spec->type);
				en |= use_ntuple ?
					NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE :
					EM_FLOW_ALLOC_INPUT_EN_ETHERTYPE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VLAN:
			vlan_spec =
				(const struct rte_flow_item_vlan *)item->spec;
			vlan_mask =
				(const struct rte_flow_item_vlan *)item->mask;
			if (vlan_mask->tci && !vlan_mask->tpid) {
				/* Only the VLAN ID can be matched; mask off
				 * the PCP/DEI bits after the byte swap.
				 */
				filter->l2_ovlan =
					rte_be_to_cpu_16(vlan_spec->tci) &
					0x0FFF;
				en |= EM_FLOW_ALLOC_INPUT_EN_OVLAN_VID;
			} else {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "VLAN mask is invalid");
				return -rte_errno;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV4:
			/* If mask is not involved, we could use EM filters. */
			ipv4_spec =
				(const struct rte_flow_item_ipv4 *)item->spec;
			ipv4_mask =
				(const struct rte_flow_item_ipv4 *)item->mask;
			/* Only IP DST and SRC fields are maskable. */
			if (ipv4_mask->hdr.version_ihl ||
			    ipv4_mask->hdr.type_of_service ||
			    ipv4_mask->hdr.total_length ||
			    ipv4_mask->hdr.packet_id ||
			    ipv4_mask->hdr.fragment_offset ||
			    ipv4_mask->hdr.time_to_live ||
			    ipv4_mask->hdr.next_proto_id ||
			    ipv4_mask->hdr.hdr_checksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv4 mask.");
				return -rte_errno;
			}
			filter->dst_ipaddr[0] = ipv4_spec->hdr.dst_addr;
			filter->src_ipaddr[0] = ipv4_spec->hdr.src_addr;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			if (ipv4_mask->hdr.src_addr) {
				filter->src_ipaddr_mask[0] =
					ipv4_mask->hdr.src_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (ipv4_mask->hdr.dst_addr) {
				filter->dst_ipaddr_mask[0] =
					ipv4_mask->hdr.dst_addr;
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
			 HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_IP_ADDR_TYPE_IPV4 :
			 HWRM_CFA_EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV4;
			if (ipv4_spec->hdr.next_proto_id) {
				filter->ip_protocol =
					ipv4_spec->hdr.next_proto_id;
				if (use_ntuple)
					en |= NTUPLE_FLTR_ALLOC_IN_EN_IP_PROTO;
				else
					en |= EM_FLOW_ALLOC_INPUT_EN_IP_PROTO;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_IPV6:
			ipv6_spec =
				(const struct rte_flow_item_ipv6 *)item->spec;
			ipv6_mask =
				(const struct rte_flow_item_ipv6 *)item->mask;

			/* Only IP DST and SRC fields are maskable. */
			if (ipv6_mask->hdr.vtc_flow ||
			    ipv6_mask->hdr.payload_len ||
			    ipv6_mask->hdr.proto ||
			    ipv6_mask->hdr.hop_limits) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid IPv6 mask.");
				return -rte_errno;
			}

			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_IPADDR |
					EM_FLOW_ALLOC_INPUT_EN_DST_IPADDR;
			rte_memcpy(filter->src_ipaddr,
				   ipv6_spec->hdr.src_addr, 16);
			rte_memcpy(filter->dst_ipaddr,
				   ipv6_spec->hdr.dst_addr, 16);
			if (!check_zero_bytes(ipv6_mask->hdr.src_addr, 16)) {
				rte_memcpy(filter->src_ipaddr_mask,
					   ipv6_mask->hdr.src_addr, 16);
				en |= !use_ntuple ? 0 :
				    NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_IPADDR_MASK;
			}
			if (!check_zero_bytes(ipv6_mask->hdr.dst_addr, 16)) {
				rte_memcpy(filter->dst_ipaddr_mask,
					   ipv6_mask->hdr.dst_addr, 16);
				en |= !use_ntuple ? 0 :
				     NTUPLE_FLTR_ALLOC_INPUT_EN_DST_IPADDR_MASK;
			}
			filter->ip_addr_type = use_ntuple ?
				NTUPLE_FLTR_ALLOC_INPUT_IP_ADDR_TYPE_IPV6 :
				EM_FLOW_ALLOC_INPUT_IP_ADDR_TYPE_IPV6;
			break;
		case RTE_FLOW_ITEM_TYPE_TCP:
			tcp_spec = (const struct rte_flow_item_tcp *)item->spec;
			tcp_mask = (const struct rte_flow_item_tcp *)item->mask;

			/* Check TCP mask. Only DST & SRC ports are maskable */
			if (tcp_mask->hdr.sent_seq ||
			    tcp_mask->hdr.recv_ack ||
			    tcp_mask->hdr.data_off ||
			    tcp_mask->hdr.tcp_flags ||
			    tcp_mask->hdr.rx_win ||
			    tcp_mask->hdr.cksum ||
			    tcp_mask->hdr.tcp_urp) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TCP mask");
				return -rte_errno;
			}
			filter->src_port = tcp_spec->hdr.src_port;
			filter->dst_port = tcp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;
			if (tcp_mask->hdr.dst_port) {
				filter->dst_port_mask = tcp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (tcp_mask->hdr.src_port) {
				filter->src_port_mask = tcp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_UDP:
			udp_spec = (const struct rte_flow_item_udp *)item->spec;
			udp_mask = (const struct rte_flow_item_udp *)item->mask;

			/* Only DST & SRC ports are maskable */
			if (udp_mask->hdr.dgram_len ||
			    udp_mask->hdr.dgram_cksum) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid UDP mask");
				return -rte_errno;
			}

			filter->src_port = udp_spec->hdr.src_port;
			filter->dst_port = udp_spec->hdr.dst_port;
			if (use_ntuple)
				en |= NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT |
					NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT;
			else
				en |= EM_FLOW_ALLOC_INPUT_EN_SRC_PORT |
					EM_FLOW_ALLOC_INPUT_EN_DST_PORT;

			if (udp_mask->hdr.dst_port) {
				filter->dst_port_mask = udp_mask->hdr.dst_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_DST_PORT_MASK;
			}
			if (udp_mask->hdr.src_port) {
				filter->src_port_mask = udp_mask->hdr.src_port;
				en |= !use_ntuple ? 0 :
				      NTUPLE_FLTR_ALLOC_INPUT_EN_SRC_PORT_MASK;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VXLAN:
			vxlan_spec =
				(const struct rte_flow_item_vxlan *)item->spec;
			vxlan_mask =
				(const struct rte_flow_item_vxlan *)item->mask;
			/* Check if VXLAN item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!vxlan_spec && vxlan_mask) ||
			    (vxlan_spec && !vxlan_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			if (vxlan_spec->rsvd1 || vxlan_spec->rsvd0[0] ||
			    vxlan_spec->rsvd0[1] || vxlan_spec->rsvd0[2] ||
			    vxlan_spec->flags != 0x8) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VXLAN item");
				return -rte_errno;
			}

			/* Check if VNI is masked. */
			if (vxlan_spec && vxlan_mask) {
				vni_masked =
					!!memcmp(vxlan_mask->vni, vni_mask,
						 RTE_DIM(vni_mask));
				if (vni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid VNI mask");
					return -rte_errno;
				}

				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   vxlan_spec->vni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_NVGRE:
			nvgre_spec =
				(const struct rte_flow_item_nvgre *)item->spec;
			nvgre_mask =
				(const struct rte_flow_item_nvgre *)item->mask;
			/* Check if NVGRE item is used to describe protocol.
			 * If yes, both spec and mask should be NULL.
			 * If no, both spec and mask shouldn't be NULL.
			 */
			if ((!nvgre_spec && nvgre_mask) ||
			    (nvgre_spec && !nvgre_mask)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			/* Header fields are big-endian in the item. */
			if (nvgre_spec->c_k_s_rsvd0_ver !=
				rte_cpu_to_be_16(0x2000) ||
			    nvgre_spec->protocol !=
				rte_cpu_to_be_16(0x6558)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid NVGRE item");
				return -rte_errno;
			}

			if (nvgre_spec && nvgre_mask) {
				tni_masked =
					!!memcmp(nvgre_mask->tni, tni_mask,
						 RTE_DIM(tni_mask));
				if (tni_masked) {
					rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Invalid TNI mask");
					return -rte_errno;
				}
				rte_memcpy(((uint8_t *)&tenant_id_be + 1),
					   nvgre_spec->tni, 3);
				filter->vni =
					rte_be_to_cpu_32(tenant_id_be);
				filter->tunnel_type =
				 CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE;
			}
			break;
		case RTE_FLOW_ITEM_TYPE_VF:
			vf_spec = (const struct rte_flow_item_vf *)item->spec;
			vf = vf_spec->id;
			if (!BNXT_PF(bp)) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Configuring on a VF!");
				return -rte_errno;
			}

			if (vf >= bp->pdev->max_vfs) {
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Incorrect VF id!");
				return -rte_errno;
			}

			dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
			if (dflt_vnic < 0) {
				/* This simply indicates there's no driver
				 * loaded. This is not an error.
				 */
				rte_flow_error_set(error, EINVAL,
						   RTE_FLOW_ERROR_TYPE_ITEM,
						   item,
						   "Unable to get default VNIC for VF");
				return -rte_errno;
			}
			filter->mirror_vnic_id = dflt_vnic;
			en |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;
			break;
		default:
			break;
		}
		item++;
	}
	filter->enables = en;

	return 0;
}
/* Parse attributes */
static int
bnxt_flow_parse_attr(const struct rte_flow_attr *attr,
		     struct rte_flow_error *error)
{
	/* Must be input direction */
	if (!attr->ingress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				   attr, "Only support ingress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->egress) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
				   attr, "No support for egress.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->priority) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
				   attr, "No support for priority.");
		return -rte_errno;
	}

	/* Not supported */
	if (attr->group) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				   attr, "No support for group.");
		return -rte_errno;
	}

	return 0;
}
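
/* Every EM/ntuple flow must reference an L2 filter. Reuse the port's
 * default L2 filter when the flow's DST MAC matches it; otherwise program
 * a dedicated L2 filter for the requested DST MAC on the given VNIC.
 */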
struct bnxt_filter_info *
bnxt_get_l2_filter(struct bnxt *bp, struct bnxt_filter_info *nf,
		   struct bnxt_vnic_info *vnic)
{
	struct bnxt_filter_info *filter1, *f0;
	struct bnxt_vnic_info *vnic0;
	int rc;

	vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
	f0 = STAILQ_FIRST(&vnic0->filter);

	/* This flow has same DST MAC as the port/l2 filter. */
	if (memcmp(f0->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN) == 0)
		return f0;

	/* This flow needs DST MAC which is not same as port/l2 */
	RTE_LOG(DEBUG, PMD, "Create L2 filter for DST MAC\n");
	filter1 = bnxt_get_unused_filter(bp);
	if (filter1 == NULL)
		return NULL;
	filter1->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
	filter1->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
			L2_FILTER_ALLOC_INPUT_EN_L2_ADDR_MASK;
	memcpy(filter1->l2_addr, nf->dst_macaddr, ETHER_ADDR_LEN);
	memset(filter1->l2_addr_mask, 0xff, ETHER_ADDR_LEN);
	rc = bnxt_hwrm_set_l2_filter(bp, vnic->fw_vnic_id,
				     filter1);
	if (rc) {
		bnxt_free_filter(bp, filter1);
		return NULL;
	}
	STAILQ_INSERT_TAIL(&vnic->filter, filter1, next);
	return filter1;
}
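
/* Parse the attributes and pattern, then translate the single supported
 * action (QUEUE, DROP, COUNT, or VF) into the filter's destination and
 * flags. The next non-void action after it must be END.
 */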
static int
bnxt_validate_and_parse_flow(struct rte_eth_dev *dev,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     const struct rte_flow_attr *attr,
			     struct rte_flow_error *error,
			     struct bnxt_filter_info *filter)
{
	const struct rte_flow_action *act = nxt_non_void_action(actions);
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	const struct rte_flow_action_queue *act_q;
	const struct rte_flow_action_vf *act_vf;
	struct bnxt_vnic_info *vnic, *vnic0;
	struct bnxt_filter_info *filter1;
	uint32_t vf = 0;
	int dflt_vnic;
	int rc;

	if (bp->eth_dev->data->dev_conf.rxmode.mq_mode & ETH_MQ_RX_RSS) {
		RTE_LOG(ERR, PMD, "Cannot create flow on RSS queues\n");
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "Cannot create flow on RSS queues");
		rc = -rte_errno;
		goto ret;
	}

	rc = bnxt_validate_and_parse_flow_type(bp, pattern, error, filter);
	if (rc != 0)
		goto ret;

	rc = bnxt_flow_parse_attr(attr, error);
	if (rc != 0)
		goto ret;

	/* Since we support ingress attribute only - right now. */
	filter->flags = HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_PATH_RX;

	switch (act->type) {
	case RTE_FLOW_ACTION_TYPE_QUEUE:
		/* Allow this flow. Redirect to a VNIC. */
		act_q = (const struct rte_flow_action_queue *)act->conf;
		if (act_q->index >= bp->rx_nr_rings) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "Invalid queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		RTE_LOG(DEBUG, PMD, "Queue index %d\n", act_q->index);

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		vnic = STAILQ_FIRST(&bp->ff_pool[act_q->index]);
		if (vnic == NULL) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION, act,
					   "No matching VNIC for queue ID.");
			rc = -rte_errno;
			goto ret;
		}
		filter->dst_id = vnic->fw_vnic_id;
		filter1 = bnxt_get_l2_filter(bp, filter, vnic);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		RTE_LOG(DEBUG, PMD, "VNIC found\n");
		break;
	case RTE_FLOW_ACTION_TYPE_DROP:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		if (filter->filter_type == HWRM_CFA_EM_FILTER)
			filter->flags =
				HWRM_CFA_EM_FLOW_ALLOC_INPUT_FLAGS_DROP;
		else
			filter->flags =
				HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_DROP;
		break;
	case RTE_FLOW_ACTION_TYPE_COUNT:
		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		filter->flags = HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_FLAGS_METER;
		break;
	case RTE_FLOW_ACTION_TYPE_VF:
		act_vf = (const struct rte_flow_action_vf *)act->conf;
		vf = act_vf->id;
		if (!BNXT_PF(bp)) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Configuring on a VF!");
			rc = -rte_errno;
			goto ret;
		}

		if (vf >= bp->pdev->max_vfs) {
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Incorrect VF id!");
			rc = -rte_errno;
			goto ret;
		}

		dflt_vnic = bnxt_hwrm_func_qcfg_vf_dflt_vnic_id(bp, vf);
		if (dflt_vnic < 0) {
			/* This simply indicates there's no driver loaded.
			 * This is not an error.
			 */
			rte_flow_error_set(error, EINVAL,
					   RTE_FLOW_ERROR_TYPE_ACTION,
					   act,
					   "Unable to get default VNIC for VF");
			rc = -rte_errno;
			goto ret;
		}
		filter->mirror_vnic_id = dflt_vnic;
		filter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_MIRROR_VNIC_ID;

		vnic0 = STAILQ_FIRST(&bp->ff_pool[0]);
		filter1 = bnxt_get_l2_filter(bp, filter, vnic0);
		if (filter1 == NULL) {
			rc = -ENOSPC;
			goto ret;
		}
		filter->fw_l2_filter_id = filter1->fw_l2_filter_id;
		break;
	default:
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION, act,
				   "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}

	act = nxt_non_void_action(++act);
	if (act->type != RTE_FLOW_ACTION_TYPE_END) {
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ACTION,
				   act, "Invalid action.");
		rc = -rte_errno;
		goto ret;
	}
ret:
	return rc;
}
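
/* rte_flow validate callback: parse into a scratch filter and return it
 * to the free pool; nothing is programmed into hardware.
 */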
static int
bnxt_flow_validate(struct rte_eth_dev *dev,
		   const struct rte_flow_attr *attr,
		   const struct rte_flow_item pattern[],
		   const struct rte_flow_action actions[],
		   struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	int ret = 0;

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0)
		return ret;

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
		return -ENOMEM;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	/* No need to hold on to this filter if we are just validating flow */
	filter->fw_l2_filter_id = UINT64_MAX;
	bnxt_free_filter(bp, filter);

	return ret;
}
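
/* Scan all flows on all VNICs for a filter identical to 'nf'. Returns
 * -EEXIST for an exact duplicate (same destination), -EXDEV when only the
 * destination differs (the flow is updated in place), and 0 otherwise.
 */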
static int
bnxt_match_filter(struct bnxt *bp, struct bnxt_filter_info *nf)
{
	struct bnxt_filter_info *mf;
	struct rte_flow *flow;
	int i;

	for (i = bp->nr_vnics - 1; i >= 0; i--) {
		struct bnxt_vnic_info *vnic = &bp->vnic_info[i];

		STAILQ_FOREACH(flow, &vnic->flow_list, next) {
			mf = flow->filter;

			if (mf->filter_type == nf->filter_type &&
			    mf->flags == nf->flags &&
			    mf->src_port == nf->src_port &&
			    mf->src_port_mask == nf->src_port_mask &&
			    mf->dst_port == nf->dst_port &&
			    mf->dst_port_mask == nf->dst_port_mask &&
			    mf->ip_protocol == nf->ip_protocol &&
			    mf->ip_addr_type == nf->ip_addr_type &&
			    mf->ethertype == nf->ethertype &&
			    mf->vni == nf->vni &&
			    mf->tunnel_type == nf->tunnel_type &&
			    mf->l2_ovlan == nf->l2_ovlan &&
			    mf->l2_ovlan_mask == nf->l2_ovlan_mask &&
			    mf->l2_ivlan == nf->l2_ivlan &&
			    mf->l2_ivlan_mask == nf->l2_ivlan_mask &&
			    !memcmp(mf->l2_addr, nf->l2_addr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->l2_addr_mask, nf->l2_addr_mask,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_macaddr, nf->src_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->dst_macaddr, nf->dst_macaddr,
				    ETHER_ADDR_LEN) &&
			    !memcmp(mf->src_ipaddr, nf->src_ipaddr,
				    sizeof(nf->src_ipaddr)) &&
			    !memcmp(mf->src_ipaddr_mask, nf->src_ipaddr_mask,
				    sizeof(nf->src_ipaddr_mask)) &&
			    !memcmp(mf->dst_ipaddr, nf->dst_ipaddr,
				    sizeof(nf->dst_ipaddr)) &&
			    !memcmp(mf->dst_ipaddr_mask, nf->dst_ipaddr_mask,
				    sizeof(nf->dst_ipaddr_mask))) {
				if (mf->dst_id == nf->dst_id)
					return -EEXIST;
				/* Same flow, different queue:
				 * clear the old ntuple filter and reuse
				 * the matching L2 filter ID for the new
				 * filter.
				 */
				nf->fw_l2_filter_id = mf->fw_l2_filter_id;
				if (nf->filter_type == HWRM_CFA_EM_FILTER)
					bnxt_hwrm_clear_em_filter(bp, mf);
				if (nf->filter_type == HWRM_CFA_NTUPLE_FILTER)
					bnxt_hwrm_clear_ntuple_filter(bp, mf);
				/* Free the old filter; the flow will be
				 * updated to point at the new filter.
				 */
				bnxt_free_filter(bp, mf);
				return -EXDEV;
			}
		}
	}
	return 0;
}
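
/* rte_flow create callback: allocate the flow and filter, program the EM
 * or ntuple filter via HWRM, and link the flow onto the destination
 * VNIC's flow_list.
 */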
static struct rte_flow *
bnxt_flow_create(struct rte_eth_dev *dev,
		 const struct rte_flow_attr *attr,
		 const struct rte_flow_item pattern[],
		 const struct rte_flow_action actions[],
		 struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter;
	struct bnxt_vnic_info *vnic = NULL;
	bool update_flow = false;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	flow = rte_zmalloc("bnxt_flow", sizeof(struct rte_flow), 0);
	if (!flow) {
		rte_flow_error_set(error, ENOMEM,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to allocate memory");
		return flow;
	}

	ret = bnxt_flow_args_validate(attr, pattern, actions, error);
	if (ret != 0) {
		RTE_LOG(ERR, PMD, "Not a valid flow.\n");
		goto free_flow;
	}

	filter = bnxt_get_unused_filter(bp);
	if (filter == NULL) {
		RTE_LOG(ERR, PMD, "Not enough resources for a new flow.\n");
		ret = -ENOMEM;
		goto free_flow;
	}

	ret = bnxt_validate_and_parse_flow(dev, pattern, actions, attr,
					   error, filter);
	if (ret != 0)
		goto free_filter;

	ret = bnxt_match_filter(bp, filter);
	if (ret == -EEXIST) {
		RTE_LOG(DEBUG, PMD, "Flow already exists.\n");
		/* Clear the filter that was created as part of
		 * validate_and_parse_flow() above
		 */
		bnxt_hwrm_clear_l2_filter(bp, filter);
		goto free_filter;
	} else if (ret == -EXDEV) {
		RTE_LOG(DEBUG, PMD, "Flow with same pattern exists\n");
		RTE_LOG(DEBUG, PMD, "Updating with different destination\n");
		update_flow = true;
	}

	if (filter->filter_type == HWRM_CFA_EM_FILTER) {
		filter->enables |=
			HWRM_CFA_EM_FLOW_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_em_filter(bp, filter->dst_id, filter);
	}
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER) {
		filter->enables |=
			HWRM_CFA_NTUPLE_FILTER_ALLOC_INPUT_ENABLES_L2_FILTER_ID;
		ret = bnxt_hwrm_set_ntuple_filter(bp, filter->dst_id, filter);
	}

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		if (filter->dst_id == vnic->fw_vnic_id)
			break;
	}

	if (!ret) {
		flow->filter = filter;
		flow->vnic = vnic;
		if (update_flow) {
			ret = -EXDEV;
			goto free_flow;
		}
		RTE_LOG(DEBUG, PMD, "Successfully created flow.\n");
		STAILQ_INSERT_TAIL(&vnic->flow_list, flow, next);
		return flow;
	}
free_filter:
	bnxt_free_filter(bp, filter);
free_flow:
	if (ret == -EEXIST)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Matching Flow exists.");
	else if (ret == -EXDEV)
		rte_flow_error_set(error, ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Flow with pattern exists, updating destination queue");
	else
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to create flow.");
	rte_free(flow);
	flow = NULL;
	return flow;
}
static int
bnxt_flow_destroy(struct rte_eth_dev *dev,
		  struct rte_flow *flow,
		  struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_filter_info *filter = flow->filter;
	struct bnxt_vnic_info *vnic = flow->vnic;
	int ret = 0;

	ret = bnxt_match_filter(bp, filter);
	if (ret == 0)
		RTE_LOG(ERR, PMD, "Could not find matching flow\n");
	if (filter->filter_type == HWRM_CFA_EM_FILTER)
		ret = bnxt_hwrm_clear_em_filter(bp, filter);
	if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
		ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);
	bnxt_hwrm_clear_l2_filter(bp, filter);
	if (!ret) {
		STAILQ_REMOVE(&vnic->flow_list, flow, rte_flow, next);
		rte_free(flow);
	} else {
		rte_flow_error_set(error, -ret,
				   RTE_FLOW_ERROR_TYPE_HANDLE, NULL,
				   "Failed to destroy flow.");
	}

	return ret;
}
static int
bnxt_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
{
	struct bnxt *bp = (struct bnxt *)dev->data->dev_private;
	struct bnxt_vnic_info *vnic;
	struct rte_flow *flow;
	unsigned int i;
	int ret = 0;

	for (i = 0; i < bp->nr_vnics; i++) {
		vnic = &bp->vnic_info[i];
		/* Unlink flows with STAILQ_FIRST rather than STAILQ_FOREACH
		 * since each iteration removes and frees the current entry.
		 */
		while ((flow = STAILQ_FIRST(&vnic->flow_list)) != NULL) {
			struct bnxt_filter_info *filter = flow->filter;

			if (filter->filter_type == HWRM_CFA_EM_FILTER)
				ret = bnxt_hwrm_clear_em_filter(bp, filter);
			if (filter->filter_type == HWRM_CFA_NTUPLE_FILTER)
				ret = bnxt_hwrm_clear_ntuple_filter(bp, filter);

			if (ret) {
				rte_flow_error_set(error, -ret,
						   RTE_FLOW_ERROR_TYPE_HANDLE,
						   NULL,
						   "Failed to flush flow in HW.");
				return -rte_errno;
			}

			STAILQ_REMOVE_HEAD(&vnic->flow_list, next);
			rte_free(flow);
		}
	}

	return ret;
}

const struct rte_flow_ops bnxt_flow_ops = {
	.validate = bnxt_flow_validate,
	.create = bnxt_flow_create,
	.destroy = bnxt_flow_destroy,
	.flush = bnxt_flow_flush,
};