diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index d106dd0f..5275ef9d 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -203,9 +203,10 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 	DP_INFO(edev, "*********************************\n");
 	DP_INFO(edev, " DPDK version:%s\n", rte_version());
-	DP_INFO(edev, " Chip details : %s%d\n",
+	DP_INFO(edev, " Chip details : %s %c%d\n",
 		ECORE_IS_BB(edev) ? "BB" : "AH",
-		CHIP_REV_IS_A0(edev) ? 0 : 1);
+		'A' + edev->chip_rev,
+		(int)edev->chip_metal);
 	snprintf(ver_str, QEDE_PMD_DRV_VER_STR_SIZE, "%d.%d.%d.%d",
 		 info->fw_major, info->fw_minor, info->fw_rev, info->fw_eng);
 	snprintf(drv_ver, QEDE_PMD_DRV_VER_STR_SIZE, "%s_%s",
@@ -223,47 +224,242 @@ static void qede_print_adapter_info(struct qede_dev *qdev)
 	DP_INFO(edev, "*********************************\n");
 }
 
+static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
+{
+	memset(ucast, 0, sizeof(struct ecore_filter_ucast));
+	ucast->is_rx_filter = true;
+	ucast->is_tx_filter = true;
+	/* ucast->assert_on_error = true; - For debug */
+}
+
 static int
-qede_set_ucast_rx_mac(struct qede_dev *qdev,
-		      enum qed_filter_xcast_params_type opcode,
-		      uint8_t mac[ETHER_ADDR_LEN])
+qede_ucast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+		  bool add)
 {
-	struct ecore_dev *edev = &qdev->edev;
-	struct qed_filter_params filter_cmd;
-
-	memset(&filter_cmd, 0, sizeof(filter_cmd));
-	filter_cmd.type = QED_FILTER_TYPE_UCAST;
-	filter_cmd.filter.ucast.type = opcode;
-	filter_cmd.filter.ucast.mac_valid = 1;
-	rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN);
-	return qdev->ops->filter_config(edev, &filter_cmd);
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct qede_ucast_entry *tmp = NULL;
+	struct qede_ucast_entry *u;
+	struct ether_addr *mac_addr;
+
+	mac_addr = (struct ether_addr *)ucast->mac;
+	if (add) {
+		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+			if ((memcmp(mac_addr, &tmp->mac,
+				    ETHER_ADDR_LEN) == 0) &&
+			    ucast->vlan == tmp->vlan) {
+				DP_ERR(edev, "Unicast MAC is already added"
+				       " with vlan = %u, vni = %u\n",
+				       ucast->vlan, ucast->vni);
+				return -EEXIST;
+			}
+		}
+		u = rte_malloc(NULL, sizeof(struct qede_ucast_entry),
+			       RTE_CACHE_LINE_SIZE);
+		if (!u) {
+			DP_ERR(edev, "Did not allocate memory for ucast\n");
+			return -ENOMEM;
+		}
+		ether_addr_copy(mac_addr, &u->mac);
+		u->vlan = ucast->vlan;
+		SLIST_INSERT_HEAD(&qdev->uc_list_head, u, list);
+		qdev->num_uc_addr++;
+	} else {
+		SLIST_FOREACH(tmp, &qdev->uc_list_head, list) {
+			if ((memcmp(mac_addr, &tmp->mac,
+				    ETHER_ADDR_LEN) == 0) &&
+			    ucast->vlan == tmp->vlan)
+				break;
+		}
+		if (tmp == NULL) {
+			DP_INFO(edev, "Unicast MAC is not found\n");
+			return -EINVAL;
+		}
+		SLIST_REMOVE(&qdev->uc_list_head, tmp, qede_ucast_entry, list);
+		qdev->num_uc_addr--;
+	}
+
+	return 0;
 }
 
-static void
-qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
-		  uint32_t index, __rte_unused uint32_t pool)
+static void qede_reset_queue_stats(struct qede_dev *qdev, bool xstats)
 {
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
-	int rc;
+#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+#endif
+	unsigned int i = 0, j = 0, qid;
+	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
+	struct qede_tx_queue *txq;
 
-	PMD_INIT_FUNC_TRACE(edev);
+	DP_VERBOSE(edev, ECORE_MSG_DEBUG, "Clearing queue stats\n");
 
-	if (index >= qdev->dev_info.num_mac_addrs) {
-		DP_ERR(edev, "Index %u is above MAC filter limit %u\n",
-		       index, qdev->dev_info.num_mac_addrs);
-		return;
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
+
+	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) {
+			OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rcv_pkts), 0,
+			     sizeof(uint64_t));
+			OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rx_hw_errors), 0,
+			     sizeof(uint64_t));
+			OSAL_MEMSET(((char *)(qdev->fp_array[qid].rxq)) +
+			     offsetof(struct qede_rx_queue, rx_alloc_errors), 0,
+			     sizeof(uint64_t));
+
+			if (xstats)
+				for (j = 0;
+				     j < RTE_DIM(qede_rxq_xstats_strings); j++)
+					OSAL_MEMSET((((char *)
+					    (qdev->fp_array[qid].rxq)) +
+					    qede_rxq_xstats_strings[j].offset),
+					    0,
+					    sizeof(uint64_t));
+
+			i++;
+			if (i == rxq_stat_cntrs)
+				break;
+		}
 	}
 
-	/* Adding macaddr even though promiscuous mode is set */
-	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-		DP_INFO(edev, "Port is in promisc mode, yet adding it\n");
+	i = 0;
+
+	for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) {
+		if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) {
+			txq = qdev->fp_array[(qid)].txqs[0];
+
+			OSAL_MEMSET((uint64_t *)(uintptr_t)
+				(((uint64_t)(uintptr_t)(txq)) +
+				 offsetof(struct qede_tx_queue, xmit_pkts)), 0,
+				    sizeof(uint64_t));
+
+			i++;
+			if (i == txq_stat_cntrs)
+				break;
+		}
+	}
+}
+
+static int
+qede_mcast_filter(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *mcast,
+		  bool add)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	struct ether_addr *mac_addr;
+	struct qede_mcast_entry *tmp = NULL;
+	struct qede_mcast_entry *m;
+
+	mac_addr = (struct ether_addr *)mcast->mac;
+	if (add) {
+		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0) {
+				DP_ERR(edev,
+				       "Multicast MAC is already added\n");
+				return -EEXIST;
+			}
+		}
+		m = rte_malloc(NULL, sizeof(struct qede_mcast_entry),
+			       RTE_CACHE_LINE_SIZE);
+		if (!m) {
+			DP_ERR(edev,
+			       "Did not allocate memory for mcast\n");
+			return -ENOMEM;
+		}
+		ether_addr_copy(mac_addr, &m->mac);
+		SLIST_INSERT_HEAD(&qdev->mc_list_head, m, list);
+		qdev->num_mc_addr++;
+	} else {
+		SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+			if (memcmp(mac_addr, &tmp->mac, ETHER_ADDR_LEN) == 0)
+				break;
+		}
+		if (tmp == NULL) {
+			DP_INFO(edev, "Multicast mac is not found\n");
+			return -EINVAL;
+		}
+		SLIST_REMOVE(&qdev->mc_list_head, tmp,
+			     qede_mcast_entry, list);
+		qdev->num_mc_addr--;
+	}
+
+	return 0;
+}
+
+static enum _ecore_status_t
+qede_mac_int_ops(struct rte_eth_dev *eth_dev, struct ecore_filter_ucast *ucast,
+		 bool add)
+{
+	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
+	enum _ecore_status_t rc;
+	struct ecore_filter_mcast mcast;
+	struct qede_mcast_entry *tmp;
+	uint16_t j = 0;
+
+	/* Multicast */
+	if (is_multicast_ether_addr((struct ether_addr *)ucast->mac)) {
+		if (add) {
+			if (qdev->num_mc_addr >= ECORE_MAX_MC_ADDRS) {
+				DP_ERR(edev,
+				       "Mcast filter table limit exceeded, "
+				       "Please enable mcast promisc mode\n");
+				return -ECORE_INVAL;
+			}
+		}
+		rc = qede_mcast_filter(eth_dev, ucast, add);
+		if (rc == 0) {
+			DP_INFO(edev, "num_mc_addrs = %u\n", qdev->num_mc_addr);
+			memset(&mcast, 0, sizeof(mcast));
+			mcast.num_mc_addrs = qdev->num_mc_addr;
+			mcast.opcode = ECORE_FILTER_ADD;
+			SLIST_FOREACH(tmp, &qdev->mc_list_head, list) {
+				ether_addr_copy(&tmp->mac,
+					(struct ether_addr *)&mcast.mac[j]);
+				j++;
+			}
+			rc = ecore_filter_mcast_cmd(edev, &mcast,
+						    ECORE_SPQ_MODE_CB, NULL);
+		}
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "Failed to add multicast filter"
+			       " rc = %d, op = %d\n", rc, add);
+		}
+	} else { /* Unicast */
+		if (add) {
+			if (qdev->num_uc_addr >= qdev->dev_info.num_mac_addrs) {
+				DP_ERR(edev,
+				       "Ucast filter table limit exceeded,"
+				       " Please enable promisc mode\n");
+				return -ECORE_INVAL;
+			}
+		}
+		rc = qede_ucast_filter(eth_dev, ucast, add);
+		if (rc == 0)
+			rc = ecore_filter_ucast_cmd(edev, ucast,
+						    ECORE_SPQ_MODE_CB, NULL);
+		if (rc != ECORE_SUCCESS) {
+			DP_ERR(edev, "MAC filter failed, rc = %d, op = %d\n",
+			       rc, add);
+		}
+	}
+
+	return rc;
+}
+
+static void
+qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr,
+		  uint32_t index, __rte_unused uint32_t pool)
+{
+	struct ecore_filter_ucast ucast;
 
-	/* Add MAC filters according to the unicast secondary macs */
-	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
-				   mac_addr->addr_bytes);
-	if (rc)
-		DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc);
+	qede_set_ucast_cmn_params(&ucast);
+	ucast.type = ECORE_FILTER_MAC;
+	ether_addr_copy(mac_addr, (struct ether_addr *)&ucast.mac);
+	(void)qede_mac_int_ops(eth_dev, &ucast, 1);
 }
 
 static void
@@ -272,6 +468,7 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
 	struct qede_dev *qdev = eth_dev->data->dev_private;
 	struct ecore_dev *edev = &qdev->edev;
 	struct ether_addr mac_addr;
+	struct ecore_filter_ucast ucast;
 	int rc;
 
 	PMD_INIT_FUNC_TRACE(edev);
@@ -282,12 +479,15 @@ qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index)
 		return;
 	}
 
+	qede_set_ucast_cmn_params(&ucast);
+	ucast.opcode = ECORE_FILTER_REMOVE;
+	ucast.type = ECORE_FILTER_MAC;
+
 	/* Use the index maintained by rte */
-	ether_addr_copy(&eth_dev->data->mac_addrs[index], &mac_addr);
-	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
-				   mac_addr.addr_bytes);
-	if (rc)
-		DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc);
+	ether_addr_copy(&eth_dev->data->mac_addrs[index],
+			(struct ether_addr *)&ucast.mac);
+
+	ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL);
 }
 
 static void
@@ -295,7 +495,6 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
 {
 	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
 	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	int rc;
 
 	if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev),
 					       mac_addr->addr_bytes)) {
@@ -305,31 +504,9 @@ qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr)
 		return;
 	}
 
-	/* First remove the primary mac */
-	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL,
-				   qdev->primary_mac.addr_bytes);
-
-	if (rc) {
-		DP_ERR(edev, "Unable to remove current macaddr"
-		       " Reverting to previous default mac\n");
-		ether_addr_copy(&qdev->primary_mac,
-				&eth_dev->data->mac_addrs[0]);
-		return;
-	}
-
-	/* Add new MAC */
-	rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD,
-				   mac_addr->addr_bytes);
-
-	if (rc)
-		DP_ERR(edev, "Unable to add new default mac\n");
-	else
-		ether_addr_copy(mac_addr, &qdev->primary_mac);
+	qede_mac_addr_add(eth_dev, mac_addr, 0, 0);
 }
 
-
-
-
 static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action)
 {
 	struct ecore_dev *edev = &qdev->edev;
@@ -415,22 +592,6 @@ static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask)
 	       mask, rxmode->hw_vlan_strip, rxmode->hw_vlan_filter);
 }
 
-static int qede_set_ucast_rx_vlan(struct qede_dev *qdev,
-				  enum qed_filter_xcast_params_type opcode,
-				  uint16_t vid)
-{
-	struct qed_filter_params filter_cmd;
-	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-
-	memset(&filter_cmd, 0, sizeof(filter_cmd));
-	filter_cmd.type = QED_FILTER_TYPE_UCAST;
-	filter_cmd.filter.ucast.type = opcode;
-	filter_cmd.filter.ucast.vlan_valid = 1;
-	filter_cmd.filter.ucast.vlan = vid;
-
-	return qdev->ops->filter_config(edev, &filter_cmd);
-}
-
 static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 				uint16_t vlan_id, int on)
 {
@@ -439,6 +600,7 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 	struct qed_dev_eth_info *dev_info = &qdev->dev_info;
 	struct qede_vlan_entry *tmp = NULL;
 	struct qede_vlan_entry *vlan;
+	struct ecore_filter_ucast ucast;
 	int rc;
 
 	if (on) {
@@ -465,9 +627,13 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 			return -ENOMEM;
 		}
 
-		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD,
-					    vlan_id);
-		if (rc) {
+		qede_set_ucast_cmn_params(&ucast);
+		ucast.opcode = ECORE_FILTER_ADD;
+		ucast.type = ECORE_FILTER_VLAN;
+		ucast.vlan = vlan_id;
+		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+					    NULL);
+		if (rc != 0) {
 			DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id,
 			       rc);
 			rte_free(vlan);
@@ -497,9 +663,13 @@ static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev,
 		SLIST_REMOVE(&qdev->vlan_list_head, tmp, qede_vlan_entry,
 			     list);
 
-		rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL,
-					    vlan_id);
-		if (rc) {
+		qede_set_ucast_cmn_params(&ucast);
+		ucast.opcode = ECORE_FILTER_REMOVE;
+		ucast.type = ECORE_FILTER_VLAN;
+		ucast.vlan = vlan_id;
+		rc = ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB,
+					    NULL);
+		if (rc != 0) {
 			DP_ERR(edev, "Failed to delete VLAN %u rc %d\n",
 			       vlan_id, rc);
 		} else {
@@ -520,7 +690,7 @@ static int qede_init_vport(struct qede_dev *qdev)
 	start.remove_inner_vlan = 1;
 	start.gro_enable = 0;
-	start.mtu = ETHER_MTU + QEDE_ETH_OVERHEAD;
+	start.mtu = qdev->mtu;
 	start.vport_id = 0;
 	start.drop_ttl0 = false;
 	start.clear_stats = 1;
@@ -565,6 +735,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 		}
 	}
 
+	/* We need to have min 1 RX queue. There is no min check in
+	 * rte_eth_dev_configure(), so we are checking it here.
+	 */
+	if (eth_dev->data->nb_rx_queues == 0) {
+		DP_ERR(edev, "Minimum one RX queue is required\n");
+		return -EINVAL;
+	}
+
 	/* Sanity checks and throw warnings */
 	if (rxmode->enable_scatter == 1)
 		eth_dev->data->scattered_rx = 1;
@@ -600,6 +778,14 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 	if (rc != 0)
 		return rc;
 
+	/* If jumbo enabled adjust MTU */
+	if (eth_dev->data->dev_conf.rxmode.jumbo_frame)
+		eth_dev->data->mtu =
+			eth_dev->data->dev_conf.rxmode.max_rx_pkt_len -
+			ETHER_HDR_LEN - ETHER_CRC_LEN;
+
+	qdev->mtu = eth_dev->data->mtu;
+
 	/* Issue VPORT-START with default config values to allow
 	 * other port configurations early on.
 	 */
@@ -609,10 +795,6 @@ static int qede_dev_configure(struct rte_eth_dev *eth_dev)
 
 	SLIST_INIT(&qdev->vlan_list_head);
 
-	/* Add primary mac for PF */
-	if (IS_PF(edev))
-		qede_mac_addr_set(eth_dev, &qdev->primary_mac);
-
 	/* Enable VLAN offloads by default */
 	qede_vlan_offload_set(eth_dev, ETH_VLAN_STRIP_MASK |
 				       ETH_VLAN_FILTER_MASK |
@@ -651,8 +833,7 @@ qede_dev_info_get(struct rte_eth_dev *eth_dev,
 
 	PMD_INIT_FUNC_TRACE(edev);
 
-	dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU +
-					      QEDE_ETH_OVERHEAD);
+	dev_info->min_rx_bufsize = (uint32_t)QEDE_MIN_RX_BUFF_SIZE;
 	dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN;
 	dev_info->rx_desc_lim = qede_rx_desc_lim;
 	dev_info->tx_desc_lim = qede_tx_desc_lim;
@@ -742,22 +923,6 @@ qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete)
 	return ((curr->link_status == link.link_up) ? -1 : 0);
 }
 
-static void
-qede_rx_mode_setting(struct rte_eth_dev *eth_dev,
-		     enum qed_filter_rx_mode_type accept_flags)
-{
-	struct qede_dev *qdev = eth_dev->data->dev_private;
-	struct ecore_dev *edev = &qdev->edev;
-	struct qed_filter_params rx_mode;
-
-	DP_INFO(edev, "%s mode %u\n", __func__, accept_flags);
-
-	memset(&rx_mode, 0, sizeof(struct qed_filter_params));
-	rx_mode.type = QED_FILTER_TYPE_RX_MODE;
-	rx_mode.filter.accept_flags = accept_flags;
-	qdev->ops->filter_config(edev, &rx_mode);
-}
-
 static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
 {
 	struct qede_dev *qdev = eth_dev->data->dev_private;
@@ -770,7 +935,7 @@ static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev)
 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
 		type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC;
 
-	qede_rx_mode_setting(eth_dev, type);
+	qed_configure_filter_rx_mode(eth_dev, type);
 }
 
 static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
@@ -781,10 +946,11 @@ static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev)
 	PMD_INIT_FUNC_TRACE(edev);
 
 	if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1)
-		qede_rx_mode_setting(eth_dev,
-				     QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
+		qed_configure_filter_rx_mode(eth_dev,
+				QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC);
 	else
-		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+		qed_configure_filter_rx_mode(eth_dev,
+				QED_FILTER_RX_MODE_TYPE_REGULAR);
 }
 
 static void qede_poll_sp_sb_cb(void *param)
@@ -853,6 +1019,7 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	struct ecore_dev *edev = &qdev->edev;
 	struct ecore_eth_stats stats;
 	unsigned int i = 0, j = 0, qid;
+	unsigned int rxq_stat_cntrs, txq_stat_cntrs;
 	struct qede_tx_queue *txq;
 
 	qdev->ops->get_vport_stats(edev, &stats);
@@ -886,6 +1053,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats)
 	eth_stats->oerrors = stats.tx_err_drop_pkts;
 
 	/* Queue stats */
+	rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev),
+				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	txq_stat_cntrs = RTE_MIN(QEDE_TSS_COUNT(qdev),
+				 RTE_ETHDEV_QUEUE_STAT_CNTRS);
+	if ((rxq_stat_cntrs != QEDE_RSS_COUNT(qdev)) ||
+	    (txq_stat_cntrs != QEDE_TSS_COUNT(qdev)))
+		DP_VERBOSE(edev, ECORE_MSG_DEBUG,
+			   "Not all the queue stats will be displayed. Set"
Set" + " RTE_ETHDEV_QUEUE_STAT_CNTRS config param" + " appropriately and retry.\n"); + for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { eth_stats->q_ipackets[i] = @@ -904,7 +1082,11 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) rx_alloc_errors)); i++; } + if (i == rxq_stat_cntrs) + break; + } + for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { if (qdev->fp_array[qid].type & QEDE_FASTPATH_TX) { txq = qdev->fp_array[(qid)].txqs[0]; eth_stats->q_opackets[j] = @@ -914,13 +1096,17 @@ qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) xmit_pkts))); j++; } + if (j == txq_stat_cntrs) + break; } } static unsigned qede_get_xstats_count(struct qede_dev *qdev) { return RTE_DIM(qede_xstats_strings) + - (RTE_DIM(qede_rxq_xstats_strings) * QEDE_RSS_COUNT(qdev)); + (RTE_DIM(qede_rxq_xstats_strings) * + RTE_MIN(QEDE_RSS_COUNT(qdev), + RTE_ETHDEV_QUEUE_STAT_CNTRS)); } static int @@ -930,6 +1116,7 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev, struct qede_dev *qdev = dev->data->dev_private; const unsigned int stat_cnt = qede_get_xstats_count(qdev); unsigned int i, qid, stat_idx = 0; + unsigned int rxq_stat_cntrs; if (xstats_names != NULL) { for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { @@ -940,7 +1127,9 @@ qede_get_xstats_names(__rte_unused struct rte_eth_dev *dev, stat_idx++; } - for (qid = 0; qid < QEDE_RSS_COUNT(qdev); qid++) { + rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (qid = 0; qid < rxq_stat_cntrs; qid++) { for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { snprintf(xstats_names[stat_idx].name, sizeof(xstats_names[stat_idx].name), @@ -964,6 +1153,7 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, struct ecore_eth_stats stats; const unsigned int num = qede_get_xstats_count(qdev); unsigned int i, qid, stat_idx = 0; + unsigned int rxq_stat_cntrs; if (n < num) return num; @@ -973,15 +1163,19 @@ qede_get_xstats(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, for (i = 0; i < RTE_DIM(qede_xstats_strings); i++) { xstats[stat_idx].value = *(uint64_t *)(((char *)&stats) + qede_xstats_strings[i].offset); + xstats[stat_idx].id = stat_idx; stat_idx++; } - for (qid = 0; qid < QEDE_QUEUE_CNT(qdev); qid++) { + rxq_stat_cntrs = RTE_MIN(QEDE_RSS_COUNT(qdev), + RTE_ETHDEV_QUEUE_STAT_CNTRS); + for (qid = 0; qid < rxq_stat_cntrs; qid++) { if (qdev->fp_array[qid].type & QEDE_FASTPATH_RX) { for (i = 0; i < RTE_DIM(qede_rxq_xstats_strings); i++) { xstats[stat_idx].value = *(uint64_t *)( ((char *)(qdev->fp_array[(qid)].rxq)) + qede_rxq_xstats_strings[i].offset); + xstats[stat_idx].id = stat_idx; stat_idx++; } } @@ -997,6 +1191,7 @@ qede_reset_xstats(struct rte_eth_dev *dev) struct ecore_dev *edev = &qdev->edev; ecore_reset_vport_stats(edev); + qede_reset_queue_stats(qdev, true); } int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) @@ -1032,6 +1227,7 @@ static void qede_reset_stats(struct rte_eth_dev *eth_dev) struct ecore_dev *edev = &qdev->edev; ecore_reset_vport_stats(edev); + qede_reset_queue_stats(qdev, false); } static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev) @@ -1042,15 +1238,17 @@ static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev) if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) type |= QED_FILTER_RX_MODE_TYPE_PROMISC; - qede_rx_mode_setting(eth_dev, type); + qed_configure_filter_rx_mode(eth_dev, type); } static void 
 {
 	if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1)
-		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC);
+		qed_configure_filter_rx_mode(eth_dev,
+				QED_FILTER_RX_MODE_TYPE_PROMISC);
 	else
-		qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR);
+		qed_configure_filter_rx_mode(eth_dev,
+				QED_FILTER_RX_MODE_TYPE_REGULAR);
 }
 
 static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev,
@@ -1139,6 +1337,8 @@ void qede_init_rss_caps(uint8_t *rss_caps, uint64_t hf)
 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0;
 	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0;
 	*rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0;
+	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_UDP) ? ECORE_RSS_IPV4_UDP : 0;
+	*rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_UDP) ? ECORE_RSS_IPV6_UDP : 0;
 }
 
 static int qede_rss_hash_update(struct rte_eth_dev *eth_dev,
@@ -1273,32 +1473,76 @@ int qede_rss_reta_query(struct rte_eth_dev *eth_dev,
 
 int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu)
 {
-	uint32_t frame_size;
-	struct qede_dev *qdev = dev->data->dev_private;
+	struct qede_dev *qdev = QEDE_INIT_QDEV(dev);
+	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
 	struct rte_eth_dev_info dev_info = {0};
+	struct qede_fastpath *fp;
+	uint32_t max_rx_pkt_len;
+	uint32_t frame_size;
+	uint16_t rx_buf_size;
+	uint16_t bufsz;
+	bool restart = false;
+	int i;
 
+	PMD_INIT_FUNC_TRACE(edev);
+	if (IS_VF(edev))
+		return -ENOTSUP;
 	qede_dev_info_get(dev, &dev_info);
-
-	/* VLAN_TAG = 4 */
-	frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4;
-
-	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen))
+	max_rx_pkt_len = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN;
+	frame_size = max_rx_pkt_len + QEDE_ETH_OVERHEAD;
+	if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) {
+		DP_ERR(edev, "MTU %u out of range, %u is maximum allowable\n",
+		       mtu, dev_info.max_rx_pktlen - ETHER_HDR_LEN -
+		       ETHER_CRC_LEN - QEDE_ETH_OVERHEAD);
 		return -EINVAL;
-
+	}
 	if (!dev->data->scattered_rx &&
-	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)
+	    frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) {
+		DP_INFO(edev, "MTU greater than minimum RX buffer size of %u\n",
+			dev->data->min_rx_buf_size);
 		return -EINVAL;
-
-	if (frame_size > ETHER_MAX_LEN)
+	}
+	/* Temporarily replace I/O functions with dummy ones. It cannot
+	 * be set to NULL because rte_eth_rx_burst() doesn't check for NULL.
+ */ + dev->rx_pkt_burst = qede_rxtx_pkts_dummy; + dev->tx_pkt_burst = qede_rxtx_pkts_dummy; + if (dev->data->dev_started) { + dev->data->dev_started = 0; + qede_dev_stop(dev); + restart = true; + } + rte_delay_ms(1000); + qdev->mtu = mtu; + /* Fix up RX buf size for all queues of the port */ + for_each_queue(i) { + fp = &qdev->fp_array[i]; + if ((fp->type & QEDE_FASTPATH_RX) && (fp->rxq != NULL)) { + bufsz = (uint16_t)rte_pktmbuf_data_room_size( + fp->rxq->mb_pool) - RTE_PKTMBUF_HEADROOM; + if (dev->data->scattered_rx) + rx_buf_size = bufsz + ETHER_HDR_LEN + + ETHER_CRC_LEN + QEDE_ETH_OVERHEAD; + else + rx_buf_size = frame_size; + rx_buf_size = QEDE_CEIL_TO_CACHE_LINE_SIZE(rx_buf_size); + fp->rxq->rx_buf_size = rx_buf_size; + DP_INFO(edev, "buf_size adjusted to %u\n", rx_buf_size); + } + } + if (max_rx_pkt_len > ETHER_MAX_LEN) dev->data->dev_conf.rxmode.jumbo_frame = 1; else dev->data->dev_conf.rxmode.jumbo_frame = 0; - + if (!dev->data->dev_started && restart) { + qede_dev_start(dev); + dev->data->dev_started = 1; + } /* update max frame size */ - dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; - qdev->mtu = mtu; - qede_dev_stop(dev); - qede_dev_start(dev); + dev->data->dev_conf.rxmode.max_rx_pkt_len = max_rx_pkt_len; + /* Reassign back */ + dev->rx_pkt_burst = qede_recv_pkts; + dev->tx_pkt_burst = qede_xmit_pkts; return 0; } @@ -1424,6 +1668,10 @@ static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) rte_eth_copy_pci_info(eth_dev, pci_dev); + /* @DPDK */ + edev->vendor_id = pci_dev->id.vendor_id; + edev->device_id = pci_dev->id.device_id; + qed_ops = qed_get_eth_ops(); if (!qed_ops) { DP_ERR(edev, "Failed to get qed_eth_ops_pass\n");