#include <libgen.h>
#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_memzone.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
enic_clear_soft_stats(enic);
}
-void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
+int enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats)
{
struct vnic_stats *stats;
struct enic_soft_stats *soft_stats = &enic->soft_stats;
int64_t rx_truncated;
uint64_t rx_packet_errors;
- if (vnic_dev_stats_dump(enic->vdev, &stats)) {
+ int ret = vnic_dev_stats_dump(enic->vdev, &stats);
+ if (ret) {
dev_err(enic, "Error in getting stats\n");
- return;
+ return ret;
}
/* The number of truncated packets can only be calculated by
r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated;
r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf);
+ return 0;
}
void enic_del_mac_address(struct enic *enic, int mac_index)
return;
rte_pktmbuf_free(*mbuf);
- mbuf = NULL;
+ *mbuf = NULL;
}
void enic_init_vnic_resources(struct enic *enic)
0 /* cq_entry_enable */,
1 /* cq_message_enable */,
0 /* interrupt offset */,
- (u64)enic->wq[index].cqmsg_rz->phys_addr);
+ (u64)enic->wq[index].cqmsg_rz->iova);
}
vnic_intr_init(&enic->intr,
}
mb->data_off = RTE_PKTMBUF_HEADROOM;
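+ /* buf_iova is the mbuf's IO address for DMA (formerly buf_physaddr). */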
- dma_addr = (dma_addr_t)(mb->buf_physaddr
+ dma_addr = (dma_addr_t)(mb->buf_iova
+ RTE_PKTMBUF_HEADROOM);
rq_enet_desc_enc(rqd, dma_addr,
(rq->is_sop ? RQ_ENET_TYPE_ONLY_SOP
}
vaddr = rz->addr;
- *dma_handle = (dma_addr_t)rz->phys_addr;
+ *dma_handle = (dma_addr_t)rz->iova;
mze = rte_malloc("enic memzone entry",
sizeof(struct enic_memzone_entry), 0);
pr_err("%s : Failed to allocate memory for memzone list\n",
__func__);
rte_memzone_free(rz);
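+ /* Without a list entry the zone could never be looked up and freed later. */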
+ return NULL;
}
mze->rz = rz;
rte_spinlock_lock(&enic->memzone_list_lock);
LIST_FOREACH(mze, &enic->memzone_list, entries) {
if (mze->rz->addr == vaddr &&
- mze->rz->phys_addr == dma_handle)
+ mze->rz->iova == dma_handle)
break;
}
if (mze == NULL) {
vnic_intr_return_all_credits(&enic->intr);
enic_link_update(enic);
- _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL);
+ _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_LSC, NULL, NULL);
enic_log_q_error(enic);
}
vnic_dev_notify_unset(enic->vdev);
rte_free(eth_dev->data->mac_addrs);
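+ /* Free the queue arrays allocated during device init. */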
+ rte_free(enic->cq);
+ rte_free(enic->rq);
+ rte_free(enic->wq);
}
{
struct rte_eth_dev *eth_dev = enic->rte_dev;
int rc = 0;
+ unsigned int required_rq, required_wq, required_cq;
- /* With Rx scatter support, two RQs are now used per RQ used by
- * the application.
- */
- if (enic->conf_rq_count < eth_dev->data->nb_rx_queues) {
+ /* Always use two vNIC RQs per eth_dev RQ, regardless of Rx scatter. */
+ required_rq = eth_dev->data->nb_rx_queues * 2;
+ required_wq = eth_dev->data->nb_tx_queues;
+ required_cq = eth_dev->data->nb_rx_queues + eth_dev->data->nb_tx_queues;
+
+ if (enic->conf_rq_count < required_rq) {
dev_err(dev, "Not enough Receive queues. Requested:%u which uses %d RQs on VIC, Configured:%u\n",
eth_dev->data->nb_rx_queues,
- eth_dev->data->nb_rx_queues * 2, enic->conf_rq_count);
+ required_rq, enic->conf_rq_count);
rc = -EINVAL;
}
- if (enic->conf_wq_count < eth_dev->data->nb_tx_queues) {
+ if (enic->conf_wq_count < required_wq) {
dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n",
eth_dev->data->nb_tx_queues, enic->conf_wq_count);
rc = -EINVAL;
}
- if (enic->conf_cq_count < (eth_dev->data->nb_rx_queues +
- eth_dev->data->nb_tx_queues)) {
+ if (enic->conf_cq_count < required_cq) {
dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n",
- (eth_dev->data->nb_rx_queues +
- eth_dev->data->nb_tx_queues), enic->conf_cq_count);
+ required_cq, enic->conf_cq_count);
rc = -EINVAL;
}
enic_reinit_rq(struct enic *enic, unsigned int rq_idx)
{
struct vnic_rq *sop_rq, *data_rq;
- unsigned int cq_idx = enic_cq_rq(enic, rq_idx);
+ unsigned int cq_idx;
int rc = 0;
sop_rq = &enic->rq[enic_rte_rq_idx_to_sop_idx(rq_idx)];
data_rq = &enic->rq[enic_rte_rq_idx_to_data_idx(rq_idx)];
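+ /* The CQ serving this RQ pair has the same index as the eth_dev RQ. */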
+ cq_idx = rq_idx;
vnic_cq_clean(&enic->cq[cq_idx]);
vnic_cq_init(&enic->cq[cq_idx],
old_mtu = eth_dev->data->mtu;
config_mtu = enic->config.mtu;
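+ /* Only the primary process is allowed to reconfigure the MTU. */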
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return -E_RTE_SECONDARY;
+
if (new_mtu > enic->max_mtu) {
dev_err(enic,
"MTU not updated: requested (%u) greater than max (%u)\n",
}
}
- /* replace Rx funciton with a no-op to avoid getting stale pkts */
+ /* replace Rx function with a no-op to avoid getting stale pkts */
eth_dev->rx_pkt_burst = enic_dummy_recv_pkts;
rte_mb();
dev_err(enic, "See the ENIC PMD guide for more information.\n");
return -EINVAL;
}
+ /* Queue counts may be zero. rte_zmalloc returns NULL in that case. */
+ enic->cq = rte_zmalloc("enic_vnic_cq", sizeof(struct vnic_cq) *
+ enic->conf_cq_count, 8);
+ enic->rq = rte_zmalloc("enic_vnic_rq", sizeof(struct vnic_rq) *
+ enic->conf_rq_count, 8);
+ enic->wq = rte_zmalloc("enic_vnic_wq", sizeof(struct vnic_wq) *
+ enic->conf_wq_count, 8);
+ if (enic->conf_cq_count > 0 && enic->cq == NULL) {
+ dev_err(enic, "failed to allocate vnic_cq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_rq_count > 0 && enic->rq == NULL) {
+ dev_err(enic, "failed to allocate vnic_rq, aborting.\n");
+ return -1;
+ }
+ if (enic->conf_wq_count > 0 && enic->wq == NULL) {
+ dev_err(enic, "failed to allocate vnic_wq, aborting.\n");
+ return -1;
+ }
/* Get the supported filters */
enic_fdir_info(enic);
vnic_dev_set_reset_flag(enic->vdev, 0);
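+ /* Initialize tracking for flow API (rte_flow) rules on this port. */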
+ LIST_INIT(&enic->flows);
+ rte_spinlock_init(&enic->flows_lock);
+
/* set up link status checking */
vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */
dev_debug(enic, " Initializing ENIC PMD\n");
+ /* if this is a secondary process the hardware is already initialized */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
enic->bar0.vaddr = (void *)pdev->mem_resource[0].addr;
enic->bar0.len = pdev->mem_resource[0].len;