ETH_RSS_NONFRAG_IPV6_UDP)
static int bnxt_vlan_offload_set_op(struct rte_eth_dev *dev, int mask);
+static void bnxt_print_link_info(struct rte_eth_dev *eth_dev);
/***********************/
unsigned int i, rss_idx, fw_idx;
struct rte_eth_link new;
struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(bp->eth_dev);
+ struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf;
struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
+ uint64_t rx_offloads = dev_conf->rxmode.offloads;
uint32_t intr_vector = 0;
uint32_t queue_id, base = BNXT_MISC_VEC_ID;
uint32_t vec = BNXT_MISC_VEC_ID;
/* VNIC configuration */
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint32_t size = sizeof(*vnic->fw_grp_ids) * bp->max_ring_grps;
+
+ vnic->fw_grp_ids = rte_zmalloc("vnic_fw_grp_ids", size, 0);
+ if (!vnic->fw_grp_ids) {
+ RTE_LOG(ERR, PMD,
+ "Failed to alloc %d bytes for group ids\n",
+ size);
+ rc = -ENOMEM;
+ goto err_out;
+ }
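+ /* Fill with all-ones so every 16-bit fw group id starts out as 0xFFFF (invalid). */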
+ memset(vnic->fw_grp_ids, -1, size);
rc = bnxt_hwrm_vnic_alloc(bp, vnic);
if (rc) {
goto err_out;
}
+ /*
+ * Firmware sets pf pair in default vnic cfg. If the VLAN strip
+ * setting is not available at this time, it will not be
+ * configured correctly in the CFA.
+ */
+ if (rx_offloads & DEV_RX_OFFLOAD_VLAN_STRIP)
+ vnic->vlan_strip = true;
+ else
+ vnic->vlan_strip = false;
+
rc = bnxt_hwrm_vnic_cfg(bp, vnic);
if (rc) {
RTE_LOG(ERR, PMD, "HWRM vnic %d cfg failure rc: %x\n",
goto err_out;
}
}
+ bnxt_print_link_info(bp->eth_dev);
return 0;
{
int rc;
- bnxt_init_ring_grps(bp);
- bnxt_init_vnics(bp);
- bnxt_init_filters(bp);
-
- rc = bnxt_init_chip(bp);
+ rc = bnxt_init_ring_grps(bp);
if (rc)
return rc;
+ bnxt_init_vnics(bp);
+ bnxt_init_filters(bp);
+
return 0;
}
/* For the sake of symmetry, max_rx_queues = max_tx_queues */
dev_info->max_rx_queues = max_rx_rings;
dev_info->max_tx_queues = max_rx_rings;
- dev_info->reta_size = bp->max_rsscos_ctx;
+ dev_info->reta_size = HW_HASH_INDEX_SIZE;
dev_info->hash_key_size = 40;
max_vnics = bp->max_vnics;
.wthresh = 0,
},
.rx_free_thresh = 32,
- .rx_drop_en = 0,
+ /* If no descriptors are available, packets are dropped by default */
+ .rx_drop_en = 1,
};
dev_info->default_txconf = (struct rte_eth_txconf) {
return 0;
}
-static inline int
-rte_bnxt_atomic_write_link_status(struct rte_eth_dev *eth_dev,
- struct rte_eth_link *link)
-{
- struct rte_eth_link *dst = &eth_dev->data->dev_link;
- struct rte_eth_link *src = link;
-
- if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst,
- *(uint64_t *)src) == 0)
- return 1;
-
- return 0;
-}
-
static void bnxt_print_link_info(struct rte_eth_dev *eth_dev)
{
struct rte_eth_link *link = &eth_dev->data->dev_link;
}
bp->dev_stopped = 0;
- rc = bnxt_init_nic(bp);
+ rc = bnxt_init_chip(bp);
if (rc)
goto error;
- bnxt_link_update_op(eth_dev, 0);
+ bnxt_link_update_op(eth_dev, 1);
if (eth_dev->data->dev_conf.rxmode.hw_vlan_filter)
vlan_mask |= ETH_VLAN_FILTER_MASK;
static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev)
{
struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private;
+ int rc = 0;
+
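+ /* Reprogram the PHY via HWRM only when the link is currently down. */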
+ if (!bp->link_info.link_up)
+ rc = bnxt_set_hwrm_link_config(bp, true);
+ if (!rc)
+ eth_dev->data->dev_link.link_status = 1;
- eth_dev->data->dev_link.link_status = 1;
- bnxt_set_hwrm_link_config(bp, true);
+ bnxt_print_link_info(eth_dev);
return 0;
}
eth_dev->data->dev_link.link_status = 0;
bnxt_set_hwrm_link_config(bp, false);
+ bp->link_info.link_up = 0;
+
return 0;
}
}
bnxt_set_hwrm_link_config(bp, false);
bnxt_hwrm_port_clr_stats(bp);
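+ /* Release any mbufs still held by the Tx and Rx rings before shutting down. */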
+ bnxt_free_tx_mbufs(bp);
+ bnxt_free_rx_mbufs(bp);
bnxt_shutdown_nic(bp);
bp->dev_stopped = 1;
}
if (bp->dev_stopped == 0)
bnxt_dev_stop_op(eth_dev);
- bnxt_free_tx_mbufs(bp);
- bnxt_free_rx_mbufs(bp);
bnxt_free_mem(bp);
if (eth_dev->data->mac_addrs != NULL) {
rte_free(eth_dev->data->mac_addrs);
if (filter->mac_index == index) {
RTE_LOG(ERR, PMD,
"MAC addr already existed for pool %d\n", pool);
- return -EINVAL;
+ return 0;
}
}
filter = bnxt_alloc_filter(bp);
/* Timed out or success */
if (new.link_status != eth_dev->data->dev_link.link_status ||
new.link_speed != eth_dev->data->dev_link.link_speed) {
- rte_bnxt_atomic_write_link_status(eth_dev, &new);
+ memcpy(&eth_dev->data->dev_link, &new,
+ sizeof(struct rte_eth_link));
bnxt_print_link_info(eth_dev);
}
struct bnxt_vnic_info *vnic;
unsigned int i;
int rc = 0;
- uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN |
- HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK;
- uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN;
+ uint32_t en = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN |
+ HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK;
+ uint32_t chk = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN;
/* Cycle through all VNICs */
for (i = 0; i < bp->nr_vnics; i++) {
memcpy(new_filter->l2_addr, filter->l2_addr,
ETHER_ADDR_LEN);
/* MAC + VLAN ID filter */
- new_filter->l2_ovlan = vlan_id;
- new_filter->l2_ovlan_mask = 0xF000;
+ new_filter->l2_ivlan = vlan_id;
+ new_filter->l2_ivlan_mask = 0xF000;
new_filter->enables |= en;
rc = bnxt_hwrm_set_l2_filter(bp,
vnic->fw_vnic_id,
for (i = 0; i < bp->nr_vnics; i++) {
struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ uint16_t size = 0;
vnic->mru = bp->eth_dev->data->mtu + ETHER_HDR_LEN +
ETHER_CRC_LEN + VLAN_TAG_SIZE * 2;
if (rc)
break;
- rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
- if (rc)
- return rc;
+ size = rte_pktmbuf_data_room_size(bp->rx_queues[0]->mb_pool);
+ size -= RTE_PKTMBUF_HEADROOM;
+
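+ /*
+  * Assumption: buffer placement only needs reprogramming when a single
+  * mbuf data area cannot hold the new MTU.
+  */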
+ if (size < new_mtu) {
+ rc = bnxt_hwrm_vnic_plcmode_cfg(bp, vnic);
+ if (rc)
+ return rc;
+ }
}
return rc;
int match = 0;
*ret = 0;
- if (efilter->ether_type != ETHER_TYPE_IPv4 &&
- efilter->ether_type != ETHER_TYPE_IPv6) {
- RTE_LOG(ERR, PMD, "unsupported ether_type(0x%04x) in"
+ if (efilter->ether_type == ETHER_TYPE_IPv4 ||
+ efilter->ether_type == ETHER_TYPE_IPv6) {
+ RTE_LOG(ERR, PMD, "invalid ether_type(0x%04x) in"
" ethertype filter.", efilter->ether_type);
*ret = -EINVAL;
goto exit;
}
static struct bnxt_filter_info*
-bnxt_match_ntuple_filter(struct bnxt_vnic_info *vnic,
- struct bnxt_filter_info *bfilter)
+bnxt_match_ntuple_filter(struct bnxt *bp,
+ struct bnxt_filter_info *bfilter,
+ struct bnxt_vnic_info **mvnic)
{
struct bnxt_filter_info *mfilter = NULL;
+ int i;
- STAILQ_FOREACH(mfilter, &vnic->filter, next) {
- if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
- bfilter->src_ipaddr_mask[0] ==
- mfilter->src_ipaddr_mask[0] &&
- bfilter->src_port == mfilter->src_port &&
- bfilter->src_port_mask == mfilter->src_port_mask &&
- bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
- bfilter->dst_ipaddr_mask[0] ==
- mfilter->dst_ipaddr_mask[0] &&
- bfilter->dst_port == mfilter->dst_port &&
- bfilter->dst_port_mask == mfilter->dst_port_mask &&
- bfilter->flags == mfilter->flags &&
- bfilter->enables == mfilter->enables)
- return mfilter;
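+ /* Walk every VNIC's filter list looking for an identical ntuple match. */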
+ for (i = bp->nr_vnics - 1; i >= 0; i--) {
+ struct bnxt_vnic_info *vnic = &bp->vnic_info[i];
+ STAILQ_FOREACH(mfilter, &vnic->filter, next) {
+ if (bfilter->src_ipaddr[0] == mfilter->src_ipaddr[0] &&
+ bfilter->src_ipaddr_mask[0] ==
+ mfilter->src_ipaddr_mask[0] &&
+ bfilter->src_port == mfilter->src_port &&
+ bfilter->src_port_mask == mfilter->src_port_mask &&
+ bfilter->dst_ipaddr[0] == mfilter->dst_ipaddr[0] &&
+ bfilter->dst_ipaddr_mask[0] ==
+ mfilter->dst_ipaddr_mask[0] &&
+ bfilter->dst_port == mfilter->dst_port &&
+ bfilter->dst_port_mask == mfilter->dst_port_mask &&
+ bfilter->flags == mfilter->flags &&
+ bfilter->enables == mfilter->enables) {
+ if (mvnic)
+ *mvnic = vnic;
+ return mfilter;
+ }
+ }
}
return NULL;
}
enum rte_filter_op filter_op)
{
struct bnxt_filter_info *bfilter, *mfilter, *filter1;
- struct bnxt_vnic_info *vnic, *vnic0;
+ struct bnxt_vnic_info *vnic, *vnic0, *mvnic;
int ret;
if (nfilter->flags != RTE_5TUPLE_FLAGS) {
bfilter->ethertype = 0x800;
bfilter->enables |= NTUPLE_FLTR_ALLOC_INPUT_EN_ETHERTYPE;
- mfilter = bnxt_match_ntuple_filter(vnic, bfilter);
+ mfilter = bnxt_match_ntuple_filter(bp, bfilter, &mvnic);
- if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD) {
- RTE_LOG(ERR, PMD, "filter exists.");
+ if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
+ bfilter->dst_id == mfilter->dst_id) {
+ RTE_LOG(ERR, PMD, "filter exists.\n");
ret = -EEXIST;
goto free_filter;
+ } else if (mfilter != NULL && filter_op == RTE_ETH_FILTER_ADD &&
+ bfilter->dst_id != mfilter->dst_id) {
+ mfilter->dst_id = vnic->fw_vnic_id;
+ ret = bnxt_hwrm_set_ntuple_filter(bp, mfilter->dst_id, mfilter);
+ STAILQ_REMOVE(&mvnic->filter, mfilter, bnxt_filter_info, next);
+ STAILQ_INSERT_TAIL(&vnic->filter, mfilter, next);
+ RTE_LOG(ERR, PMD, "filter with matching pattern exists.\n");
+ RTE_LOG(ERR, PMD, " Updated it to the new destination queue\n");
+ goto free_filter;
}
if (mfilter == NULL && filter_op == RTE_ETH_FILTER_DELETE) {
RTE_LOG(ERR, PMD, "filter doesn't exist.");
}
ret = bnxt_hwrm_clear_ntuple_filter(bp, mfilter);
- STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info,
- next);
+ STAILQ_REMOVE(&vnic->filter, mfilter, bnxt_filter_info, next);
bnxt_free_filter(bp, mfilter);
- bfilter->fw_l2_filter_id = -1;
+ mfilter->fw_l2_filter_id = -1;
bnxt_free_filter(bp, bfilter);
+ bfilter->fw_l2_filter_id = -1;
}
return 0;
/* Copy the permanent MAC from the qcap response address now. */
memcpy(bp->mac_addr, bp->dflt_mac_addr, sizeof(bp->mac_addr));
memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+
+ if (bp->max_ring_grps < bp->rx_cp_nr_rings) {
+ /* 1 ring is for default completion ring */
+ RTE_LOG(ERR, PMD, "Insufficient resource: Ring Group\n");
+ rc = -ENOSPC;
+ goto error_free;
+ }
+
bp->grp_info = rte_zmalloc("bnxt_grp_info",
sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
if (!bp->grp_info) {
RTE_LOG(ERR, PMD,
- "Failed to alloc %zu bytes needed to store group info table\n",
+ "Failed to alloc %zu bytes to store group info table\n",
sizeof(*bp->grp_info) * bp->max_ring_grps);
rc = -ENOMEM;
goto error_free;
goto error_free_int;
bnxt_enable_int(bp);
+ bnxt_init_nic(bp);
return 0;