X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fenic%2Fenic_main.c;h=1694aed12cffdd72ea80a693e8c7f140f4f29f84;hb=c3f15def2ebe9cc255cf0e5cf32aa171f5b4326d;hp=d0262418d96579541c1a626e1cb43a84a67ec59e;hpb=7595afa4d30097c1177b69257118d8ad89a539be;p=deb_dpdk.git

diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d0262418..1694aed1 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -40,6 +40,7 @@
 #include
 
 #include
+#include
 #include
 #include
 #include
@@ -156,16 +157,17 @@ void enic_dev_stats_clear(struct enic *enic)
 	enic_clear_soft_stats(enic);
 }
 
-void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
+int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
 {
 	struct vnic_stats *stats;
 	struct enic_soft_stats *soft_stats = &enic->soft_stats;
 	int64_t rx_truncated;
 	uint64_t rx_packet_errors;
+	int ret = vnic_dev_stats_dump(enic->vdev, &stats);
 
-	if (vnic_dev_stats_dump(enic->vdev, &stats)) {
+	if (ret) {
 		dev_err(enic, "Error in getting stats\n");
-		return;
+		return ret;
 	}
 
 	/* The number of truncated packets can only be calculated by
@@ -191,6 +193,7 @@ void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
 	r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
 
 	r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
+	return 0;
 }
 
 void enic_del_mac_address(struct enic *enic, int mac_index)
@@ -224,7 +227,7 @@ enic_free_rq_buf(struct rte_mbuf **mbuf)
 		return;
 
 	rte_pktmbuf_free(*mbuf);
-	mbuf = NULL;
+	*mbuf = NULL;
 }
 
 void enic_init_vnic_resources(struct enic *enic)
@@ -280,7 +283,7 @@ void enic_init_vnic_resources(struct enic *enic)
 			0 /* cq_entry_enable */,
 			1 /* cq_message_enable */,
 			0 /* interrupt offset */,
-			(u64)enic->wq[index].cqmsg_rz->phys_addr);
+			(u64)enic->wq[index].cqmsg_rz->iova);
 	}
 
 	vnic_intr_init(&enic->intr,
@@ -313,7 +316,7 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq)
 		}
 
 		mb->data_off = RTE_PKTMBUF_HEADROOM;
-		dma_addr = (dma_addr_t)(mb->buf_physaddr
+		dma_addr = (dma_addr_t)(mb->buf_iova
 			   + RTE_PKTMBUF_HEADROOM);
 		rq_enet_desc_enc(rqd, dma_addr,
 				(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
@@ -359,7 +362,7 @@ enic_alloc_consistent(void *priv, size_t size,
 	}
 
 	vaddr = rz->addr;
-	*dma_handle = (dma_addr_t)rz->phys_addr;
+	*dma_handle = (dma_addr_t)rz->iova;
 
 	mze = rte_malloc("enic memzone entry",
 			 sizeof(struct enic_memzone_entry), 0);
@@ -368,6 +371,7 @@ enic_alloc_consistent(void *priv, size_t size,
 		pr_err("%s : Failed to allocate memory for memzone list\n",
 		       __func__);
 		rte_memzone_free(rz);
+		return NULL;
 	}
 
 	mze->rz = rz;
@@ -391,7 +395,7 @@ enic_free_consistent(void *priv,
 	rte_spinlock_lock(&enic->memzone_list_lock);
 	LIST_FOREACH(mze, &enic->memzone_list, entries) {
 		if (mze->rz->addr == vaddr &&
-		    mze->rz->phys_addr == dma_handle)
+		    mze->rz->iova == dma_handle)
 			break;
 	}
 	if (mze == NULL) {
@@ -430,7 +434,7 @@ enic_intr_handler(void *arg)
 	vnic_intr_return_all_credits(&enic->intr);
 
 	enic_link_update(enic);
-	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+	_rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
 	enic_log_q_error(enic);
 }
 
@@ -1071,6 +1075,9 @@ static void enic_dev_deinit(struct enic *enic)
 	vnic_dev_notify_unset(enic->vdev);
 
 	rte_free(eth_dev->data->mac_addrs);
+	rte_free(enic->cq);
+	rte_free(enic->rq);
+	rte_free(enic->wq);
 }
 
 
@@ -1078,27 +1085,28 @@ int enic_set_vnic_res(struct enic *enic)
 {
 	struct rte_eth_dev *eth_dev = enic->rte_dev;
 	int rc = 0;
+	unsigned int required_rq, required_wq, required_cq;
 
-	/* With Rx scatter support, two RQs are now used per RQ used by
-	 * the application.
-	 */
-	if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+	/* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+	required_rq = eth_dev->data->nb_rx_queues * 2;
+	required_wq = eth_dev->data->nb_tx_queues;
+	required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+
+	if (enic->conf_rq_count < required_rq) {
 		dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
 			eth_dev->data->nb_rx_queues,
-			eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+			required_rq, enic->conf_rq_count);
 		rc = -EINVAL;
 	}
-	if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+	if (enic->conf_wq_count < required_wq) {
 		dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
 			eth_dev->data->nb_tx_queues, enic->conf_wq_count);
 		rc = -EINVAL;
 	}
 
-	if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
-				   eth_dev->data->nb_tx_queues)) {
+	if (enic->conf_cq_count < required_cq) {
 		dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
-			(eth_dev->data->nb_rx_queues +
-			 eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+			required_cq, enic->conf_cq_count);
 		rc = -EINVAL;
 	}
 
@@ -1116,11 +1124,12 @@ static int enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
 {
 	struct vnic_rq *sop_rq, *data_rq;
-	unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+	unsigned int cq_idx;
 	int rc = 0;
 
 	sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
 	data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
+	cq_idx = rq_idx;
 
 	vnic_cq_clean(&enic->cq[cq_idx]);
 	vnic_cq_init(&enic->cq[cq_idx],
@@ -1180,6 +1189,9 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 	old_mtu = eth_dev->data->mtu;
 	config_mtu = enic->config.mtu;
 
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return -E_RTE_SECONDARY;
+
 	if (new_mtu > enic->max_mtu) {
 		dev_err(enic,
 			"MTU not updated: requested (%u) greater than max (%u)\n",
@@ -1225,7 +1237,7 @@ int enic_set_mtu(struct enic *enic, uint16_t new_mtu)
 		}
 	}
 
-	/* replace Rx funciton with a no-op to avoid getting stale pkts */
+	/* replace Rx function with a no-op to avoid getting stale pkts */
 	eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
 	rte_mb();
 
@@ -1299,6 +1311,25 @@ static int enic_dev_init(struct enic *enic)
 		dev_err(enic, "See the ENIC PMD guide for more information.\n");
 		return -EINVAL;
 	}
+	/* Queue counts may be zeros. rte_zmalloc returns NULL in that case. */
+	enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+			       enic->conf_cq_count, 8);
+	enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+			       enic->conf_rq_count, 8);
+	enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+			       enic->conf_wq_count, 8);
+	if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+		dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+		return -1;
+	}
+	if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+		dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+		return -1;
+	}
+	if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+		dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+		return -1;
+	}
 
 	/* Get the supported filters */
 	enic_fdir_info(enic);
@@ -1314,6 +1345,9 @@ static int enic_dev_init(struct enic *enic)
 
 	vnic_dev_set_reset_flag(enic->vdev, 0);
 
+	LIST_INIT(&enic->flows);
+	rte_spinlock_init(&enic->flows_lock);
+
 	/* set up link status checking */
 	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
 
@@ -1328,6 +1362,10 @@ int enic_probe(struct enic *enic)
 
 	dev_debug(enic, " Initializing ENIC PMD\n");
 
+	/* if this is a secondary process the hardware is already initialized */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
 	enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
 	enic->bar0.len = pdev->mem_resource[0].len;