for (i = 0; i < txq->tx_count; i++) {
if (txq->txbufs[i].mbuf) {
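+ /* Each txbufs[] entry tracks a single mbuf segment, so free only that segment */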
- rte_pktmbuf_free(txq->txbufs[i].mbuf);
+ rte_pktmbuf_free_seg(txq->txbufs[i].mbuf);
txq->txbufs[i].mbuf = NULL;
}
}
uint32_t tmp;
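+ /* The MAC address is read from the config BAR as two words: bytes 0-3, then 4-5 */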
tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR));
- memcpy(&hw->mac_addr[0], &tmp, sizeof(struct ether_addr));
+ memcpy(&hw->mac_addr[0], &tmp, 4);
tmp = rte_be_to_cpu_32(nn_cfg_readl(hw, NFP_NET_CFG_MACADDR + 4));
memcpy(&hw->mac_addr[4], &tmp, 2);
return;
/* If IPv4 and IP checksum error, fail */
- if ((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK))
+ if (unlikely((rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM) &&
+ !(rxd->rxd.flags & PCIE_DESC_RX_IP4_CSUM_OK)))
mb->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ else
+ mb->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
/* If neither UDP nor TCP, return */
if (!(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
!(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM))
return;
- if ((rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_TCP_CSUM_OK))
- mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-
- if ((rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM) &&
- !(rxd->rxd.flags & PCIE_DESC_RX_UDP_CSUM_OK))
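+ /* PCIE_DESC_RX_L4_CSUM_OK covers both the TCP and UDP checksum-ok bits */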
+ if (likely(rxd->rxd.flags & PCIE_DESC_RX_L4_CSUM_OK))
+ mb->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ else
mb->ol_flags |= PKT_RX_L4_CKSUM_BAD;
}
case NFP_NET_RSS_IPV6_EX:
mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6_EXT;
break;
+ case NFP_NET_RSS_IPV4_TCP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
+ break;
+ case NFP_NET_RSS_IPV6_TCP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
+ break;
+ case NFP_NET_RSS_IPV4_UDP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV4;
+ break;
+ case NFP_NET_RSS_IPV6_UDP:
+ mbuf->packet_type |= RTE_PTYPE_INNER_L3_IPV6;
+ break;
default:
mbuf->packet_type |= RTE_PTYPE_INNER_L4_MASK;
}
break;
}
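+ /* Check the DD bit before the barrier so the rmb() cost is only paid when a descriptor is ready */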
+ rxds = &rxq->rxds[idx];
+ if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
+ break;
+
/*
* Memory barrier to ensure that we won't do other
* reads before the DD bit.
*/
rte_rmb();
- rxds = &rxq->rxds[idx];
- if ((rxds->rxd.meta_len_dd & PCIE_DESC_RX_DD) == 0)
- break;
-
/*
* We got a packet. Let's alloc a new mbuf for refilling the
* free descriptor ring as soon as possible
mb->nb_segs = 1;
mb->next = NULL;
+ mb->port = rxq->port_id;
+
/* Checking the RSS flag */
nfp_net_set_hash(rxq, rxds, mb);
txq->tail = 0;
pkt_size -= dma_size;
- if (!pkt_size) {
- /* End of packet */
- txds->offset_eop |= PCIE_DESC_TX_EOP;
- } else {
- txds->offset_eop &= PCIE_DESC_TX_OFFSET_MASK;
+
+ /*
+ * Set EOP for the likely case of a single-segment packet;
+ * otherwise clear it and move on to the next segment.
+ */
+ if (likely(!pkt_size))
+ txds->offset_eop = PCIE_DESC_TX_EOP;
+ else {
+ txds->offset_eop = 0;
pkt = pkt->next;
}
/* Referencing next free TX descriptor */
}
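+ /* Set the NFP RSS control bit for each hash type requested in rss_hf */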
if (rss_hf & ETH_RSS_IPV4)
- cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4 |
- NFP_NET_CFG_RSS_IPV4_TCP |
- NFP_NET_CFG_RSS_IPV4_UDP;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_TCP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_TCP;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV4_UDP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV4_UDP;
if (rss_hf & ETH_RSS_IPV6)
- cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6 |
- NFP_NET_CFG_RSS_IPV6_TCP |
- NFP_NET_CFG_RSS_IPV6_UDP;
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_TCP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_TCP;
+
+ if (rss_hf & ETH_RSS_NONFRAG_IPV6_UDP)
+ cfg_rss_ctrl |= NFP_NET_CFG_RSS_IPV6_UDP;
cfg_rss_ctrl |= NFP_NET_CFG_RSS_MASK;
cfg_rss_ctrl |= NFP_NET_CFG_RSS_TOEPLITZ;
ether_addr_copy((struct ether_addr *)hw->mac_addr,
&eth_dev->data->mac_addrs[0]);
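+ /* Without the LIVE_ADDR capability the firmware cannot change the MAC while the port is running, so flag the device accordingly */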
+ if (!(hw->cap & NFP_NET_CFG_CTRL_LIVE_ADDR))
+ eth_dev->data->dev_flags |= RTE_ETH_DEV_NOLIVE_MAC_ADDR;
+
PMD_INIT_LOG(INFO, "port %d VendorID=0x%x DeviceID=0x%x "
"mac=%02x:%02x:%02x:%02x:%02x:%02x",
eth_dev->data->port_id, pci_dev->id.vendor_id,