X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fixgbe%2Fixgbe_fdir.c;h=02adfa43799ab4f87fd06a4bc44a27b7ade66051;hb=6e7cbd63706f3435b9d9a2057a37db1da01db9a7;hp=7f6c7b58fa2389a4cfe9b4c9e8aaa8ae2331e9fc;hpb=7595afa4d30097c1177b69257118d8ad89a539be;p=deb_dpdk.git

diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c
index 7f6c7b58..02adfa43 100644
--- a/drivers/net/ixgbe/ixgbe_fdir.c
+++ b/drivers/net/ixgbe/ixgbe_fdir.c
@@ -302,7 +302,7 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 	 * mask VM pool and DIPv6 since there are currently not supported
 	 * mask FLEX byte, it will be set in flex_conf
 	 */
-	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6 | IXGBE_FDIRM_FLEX;
+	uint32_t fdirm = IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
 	uint32_t fdirtcpm;  /* TCP source and destination port masks. */
 	uint32_t fdiripv6m; /* IPv6 source and destination masks. */
 	volatile uint32_t *reg;
@@ -333,6 +333,10 @@ fdir_set_input_mask_82599(struct rte_eth_dev *dev)
 		return -EINVAL;
 	}
 
+	/* flex byte mask */
+	if (info->mask.flex_bytes_mask == 0)
+		fdirm |= IXGBE_FDIRM_FLEX;
+
 	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
 
 	/* store the TCP/UDP port masks, bit reversed from port layout */
@@ -419,9 +423,12 @@ fdir_set_input_mask_x550(struct rte_eth_dev *dev)
 			IXGBE_FDIRIP6M_TNI_VNI;
 
 	if (mode == RTE_FDIR_MODE_PERFECT_TUNNEL) {
-		mac_mask = info->mask.mac_addr_byte_mask;
-		fdiripv6m |= (mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT)
-				& IXGBE_FDIRIP6M_INNER_MAC;
+		fdiripv6m |= IXGBE_FDIRIP6M_INNER_MAC;
+		mac_mask = info->mask.mac_addr_byte_mask &
+			(IXGBE_FDIRIP6M_INNER_MAC >>
+			IXGBE_FDIRIP6M_INNER_MAC_SHIFT);
+		fdiripv6m &= ~((mac_mask << IXGBE_FDIRIP6M_INNER_MAC_SHIFT) &
+				IXGBE_FDIRIP6M_INNER_MAC);
 
 		switch (info->mask.tunnel_type_mask) {
 		case 0:
@@ -533,6 +540,31 @@ ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev)
 	return -ENOTSUP;
 }
 
+int
+ixgbe_fdir_set_flexbytes_offset(struct rte_eth_dev *dev,
+				uint16_t offset)
+{
+	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+	uint32_t fdirctrl;
+	int i;
+
+	fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
+
+	fdirctrl &= ~IXGBE_FDIRCTRL_FLEX_MASK;
+	fdirctrl |= ((offset >> 1) /* convert to word offset */
+		<< IXGBE_FDIRCTRL_FLEX_SHIFT);
+
+	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
+	IXGBE_WRITE_FLUSH(hw);
+	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
+		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
+			IXGBE_FDIRCTRL_INIT_DONE)
+			break;
+		msec_delay(1);
+	}
+	return 0;
+}
+
 static int
 fdir_set_input_mask(struct rte_eth_dev *dev,
 		    const struct rte_eth_fdir_masks *input_mask)
@@ -654,7 +686,7 @@ ixgbe_fdir_configure(struct rte_eth_dev *dev)
 
 	/*
 	 * The defaults in the HW for RX PB 1-7 are not zero and so should be
-	 * intialized to zero for non DCB mode otherwise actual total RX PB
+	 * initialized to zero for non DCB mode otherwise actual total RX PB
 	 * would be bigger than programmed and filter space would run into
 	 * the PB 0 region.
 	 */
@@ -771,10 +803,19 @@ ixgbe_fdir_filter_to_atr_input(const struct rte_eth_fdir_filter *fdir_filter,
 			input->formatted.inner_mac,
 			fdir_filter->input.flow.tunnel_flow.mac_addr.addr_bytes,
 			sizeof(input->formatted.inner_mac));
-		input->formatted.tunnel_type =
-			fdir_filter->input.flow.tunnel_flow.tunnel_type;
+		if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+				RTE_FDIR_TUNNEL_TYPE_VXLAN)
+			input->formatted.tunnel_type =
+				IXGBE_FDIR_VXLAN_TUNNEL_TYPE;
+		else if (fdir_filter->input.flow.tunnel_flow.tunnel_type ==
+				RTE_FDIR_TUNNEL_TYPE_NVGRE)
+			input->formatted.tunnel_type =
+				IXGBE_FDIR_NVGRE_TUNNEL_TYPE;
+		else
+			PMD_DRV_LOG(ERR, " invalid tunnel type arguments.");
+
 		input->formatted.tni_vni =
-			fdir_filter->input.flow.tunnel_flow.tunnel_id;
+			fdir_filter->input.flow.tunnel_flow.tunnel_id >> 8;
 	}
 
 	return 0;
@@ -1001,8 +1042,7 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), 0);
 		} else {
 			/* tunnel mode */
-			if (input->formatted.tunnel_type !=
-				RTE_FDIR_TUNNEL_TYPE_NVGRE)
+			if (input->formatted.tunnel_type)
 				tunnel_type = 0x80000000;
 			tunnel_type |= addr_high;
 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), addr_low);
@@ -1010,6 +1050,9 @@ fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
 			IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2),
 					input->formatted.tni_vni);
 		}
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, 0);
+		IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, 0);
 	}
 
 	/* record vlan (little-endian) and flex_bytes(big-endian) */
@@ -1243,9 +1286,13 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
 	     hw->mac.type == ixgbe_mac_X550EM_x ||
 	     hw->mac.type == ixgbe_mac_X550EM_a) &&
 	    (rule->ixgbe_fdir.formatted.flow_type ==
-	     IXGBE_ATR_FLOW_TYPE_IPV4) &&
+	     IXGBE_ATR_FLOW_TYPE_IPV4 ||
+	     rule->ixgbe_fdir.formatted.flow_type ==
+	     IXGBE_ATR_FLOW_TYPE_IPV6) &&
 	    (info->mask.src_port_mask != 0 ||
-	     info->mask.dst_port_mask != 0)) {
+	     info->mask.dst_port_mask != 0) &&
+	    (rule->mode != RTE_FDIR_MODE_PERFECT_MAC_VLAN &&
+	     rule->mode != RTE_FDIR_MODE_PERFECT_TUNNEL)) {
 		PMD_DRV_LOG(ERR, "By this device,"
 			    " IPv4 is not supported without"
 			    " L4 protocol and ports masked!");
@@ -1316,7 +1363,7 @@ ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
 				  0);
 		if (!node)
 			return -ENOMEM;
-		(void)rte_memcpy(&node->ixgbe_fdir,
+		rte_memcpy(&node->ixgbe_fdir,
 			&rule->ixgbe_fdir,
 			sizeof(union ixgbe_atr_input));
 		node->fdirflags = fdircmd_flags;
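
Note on the new ixgbe_fdir_set_flexbytes_offset() hunk above: the FDIRCTRL FLEX field addresses the flexible payload in 2-byte words, which is why the byte offset passed in is halved (offset >> 1) before being shifted into the field. The following is a minimal standalone sketch of that read-modify-write, using illustrative stand-in constants (FLEX_SHIFT, FLEX_MASK) rather than the real IXGBE_FDIRCTRL_* macros from the driver headers.

/* sketch.c -- illustrative only; the field position and width below are
 * assumptions for demonstration, not the authoritative ixgbe layout. */
#include <stdint.h>
#include <stdio.h>

#define FLEX_SHIFT 16                     /* assumed bit position of field */
#define FLEX_MASK  (0x1fu << FLEX_SHIFT)  /* assumed 5-bit field width */

/* Pack a byte offset into the (assumed) FLEX field of a control word,
 * mirroring the fdirctrl update performed in the hunk above. */
static uint32_t
set_flex_offset(uint32_t ctrl, uint16_t byte_offset)
{
	ctrl &= ~FLEX_MASK;                       /* clear the old field */
	ctrl |= ((uint32_t)(byte_offset >> 1)     /* bytes -> 2-byte words */
			<< FLEX_SHIFT) & FLEX_MASK;
	return ctrl;
}

int
main(void)
{
	/* a byte offset of 12 lands as word offset 6 in the field */
	printf("0x%08x\n", set_flex_offset(0x0, 12)); /* prints 0x00060000 */
	return 0;
}

As in the hunk itself, the real driver follows the FDIRCTRL write with a flush and then polls for IXGBE_FDIRCTRL_INIT_DONE before returning, since reprogramming the register re-initializes the flow director block.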