X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=drivers%2Fnet%2Fena%2Fena_ethdev.c;h=4e52656798ec481166d10e264388419fbd64da0f;hb=6e7cbd63706f3435b9d9a2057a37db1da01db9a7;hp=02af67a24bd16f17f5a33390ab7be00ad011de96;hpb=97f17497d162afdb82c8704bf097f0fee3724b2e;p=deb_dpdk.git

diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c
index 02af67a2..4e526567 100644
--- a/drivers/net/ena/ena_ethdev.c
+++ b/drivers/net/ena/ena_ethdev.c
@@ -33,10 +33,14 @@
 #include <rte_ether.h>
 #include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
 #include <rte_tcp.h>
 #include <rte_atomic.h>
 #include <rte_dev.h>
 #include <rte_errno.h>
+#include <rte_version.h>
+#include <rte_eal_memconfig.h>
+#include <rte_net.h>
 
 #include "ena_ethdev.h"
 #include "ena_logs.h"
@@ -49,6 +53,10 @@
 #include <ena_admin_defs.h>
 #include <ena_eth_io_defs.h>
 
+#define DRV_MODULE_VER_MAJOR	1
+#define DRV_MODULE_VER_MINOR	0
+#define DRV_MODULE_VER_SUBMINOR	0
+
 #define ENA_IO_TXQ_IDX(q)	(2 * (q))
 #define ENA_IO_RXQ_IDX(q)	(2 * (q) + 1)
 /*reverse version of ENA_IO_RXQ_IDX*/
@@ -72,6 +80,89 @@
 #define ENA_RX_RSS_TABLE_LOG_SIZE	7
 #define ENA_RX_RSS_TABLE_SIZE	(1 << ENA_RX_RSS_TABLE_LOG_SIZE)
 #define ENA_HASH_KEY_SIZE	40
+#define ENA_ETH_SS_STATS	0xFF
+#define ETH_GSTRING_LEN	32
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+enum ethtool_stringset {
+	ETH_SS_TEST = 0,
+	ETH_SS_STATS,
+};
+
+struct ena_stats {
+	char name[ETH_GSTRING_LEN];
+	int stat_offset;
+};
+
+#define ENA_STAT_ENA_COM_ENTRY(stat) { \
+	.name = #stat, \
+	.stat_offset = offsetof(struct ena_com_stats_admin, stat) \
+}
+
+#define ENA_STAT_ENTRY(stat, stat_type) { \
+	.name = #stat, \
+	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
+}
+
+#define ENA_STAT_RX_ENTRY(stat) \
+	ENA_STAT_ENTRY(stat, rx)
+
+#define ENA_STAT_TX_ENTRY(stat) \
+	ENA_STAT_ENTRY(stat, tx)
+
+#define ENA_STAT_GLOBAL_ENTRY(stat) \
+	ENA_STAT_ENTRY(stat, dev)
+
+static const struct ena_stats ena_stats_global_strings[] = {
+	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
+	ENA_STAT_GLOBAL_ENTRY(io_suspend),
+	ENA_STAT_GLOBAL_ENTRY(io_resume),
+	ENA_STAT_GLOBAL_ENTRY(wd_expired),
+	ENA_STAT_GLOBAL_ENTRY(interface_up),
+	ENA_STAT_GLOBAL_ENTRY(interface_down),
+	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
+};
+
+static const struct ena_stats ena_stats_tx_strings[] = {
+	ENA_STAT_TX_ENTRY(cnt),
+	ENA_STAT_TX_ENTRY(bytes),
+	ENA_STAT_TX_ENTRY(queue_stop),
+	ENA_STAT_TX_ENTRY(queue_wakeup),
+	ENA_STAT_TX_ENTRY(dma_mapping_err),
+	ENA_STAT_TX_ENTRY(linearize),
+	ENA_STAT_TX_ENTRY(linearize_failed),
+	ENA_STAT_TX_ENTRY(tx_poll),
+	ENA_STAT_TX_ENTRY(doorbells),
+	ENA_STAT_TX_ENTRY(prepare_ctx_err),
+	ENA_STAT_TX_ENTRY(missing_tx_comp),
+	ENA_STAT_TX_ENTRY(bad_req_id),
+};
+
+static const struct ena_stats ena_stats_rx_strings[] = {
+	ENA_STAT_RX_ENTRY(cnt),
+	ENA_STAT_RX_ENTRY(bytes),
+	ENA_STAT_RX_ENTRY(refil_partial),
+	ENA_STAT_RX_ENTRY(bad_csum),
+	ENA_STAT_RX_ENTRY(page_alloc_fail),
+	ENA_STAT_RX_ENTRY(skb_alloc_fail),
+	ENA_STAT_RX_ENTRY(dma_mapping_err),
+	ENA_STAT_RX_ENTRY(bad_desc_num),
+	ENA_STAT_RX_ENTRY(small_copy_len_pkt),
+};
+
+static const struct ena_stats ena_stats_ena_com_strings[] = {
+	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
+	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
+	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
+	ENA_STAT_ENA_COM_ENTRY(out_of_space),
+	ENA_STAT_ENA_COM_ENTRY(no_completion),
+};
+
+#define ENA_STATS_ARRAY_GLOBAL	ARRAY_SIZE(ena_stats_global_strings)
+#define ENA_STATS_ARRAY_TX	ARRAY_SIZE(ena_stats_tx_strings)
+#define ENA_STATS_ARRAY_RX	ARRAY_SIZE(ena_stats_rx_strings)
+#define ENA_STATS_ARRAY_ENA_COM	ARRAY_SIZE(ena_stats_ena_com_strings)
 
 /** Vendor ID used by Amazon devices */
 #define PCI_VENDOR_ID_AMAZON	0x1D0F
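The string tables added above follow the kernel ethtool model: each entry pairs a printable counter name with the counter's byte offset inside the matching stats structure, so a single generic loop can dump any group. A minimal, self-contained illustration of the pattern — the three-field `ena_stats_tx` below is a made-up subset for the sketch, not the driver's real layout (that lives in ena_ethdev.h):

    #include <inttypes.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative subset only; the real struct lives in ena_ethdev.h. */
    struct ena_stats_tx {
            uint64_t cnt;
            uint64_t bytes;
            uint64_t doorbells;
    };

    struct stat_desc {
            const char *name;
            int stat_offset;
    };

    static const struct stat_desc tx_descs[] = {
            { "cnt",       offsetof(struct ena_stats_tx, cnt) },
            { "bytes",     offsetof(struct ena_stats_tx, bytes) },
            { "doorbells", offsetof(struct ena_stats_tx, doorbells) },
    };

    static void dump_tx_stats(const struct ena_stats_tx *stats)
    {
            size_t i;

            for (i = 0; i < sizeof(tx_descs) / sizeof(tx_descs[0]); i++) {
                    /* Read each counter through its recorded byte offset. */
                    uint64_t v = *(const uint64_t *)
                            ((const char *)stats + tx_descs[i].stat_offset);
                    printf("tx_%s: %" PRIu64 "\n", tx_descs[i].name, v);
            }
    }

The offsets are captured once at compile time via offsetof(), which is exactly what the ENA_STAT_*_ENTRY macros above do.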
@@ -79,12 +170,18 @@
 #define PCI_DEVICE_ID_ENA_VF	0xEC20
 #define PCI_DEVICE_ID_ENA_LLQ_VF	0xEC21
 
-static struct rte_pci_id pci_id_ena_map[] = {
-#define RTE_PCI_DEV_ID_DECL_ENA(vend, dev) {RTE_PCI_DEVICE(vend, dev)},
+#define ENA_TX_OFFLOAD_MASK	(\
+	PKT_TX_L4_MASK |       \
+	PKT_TX_IP_CKSUM |      \
+	PKT_TX_TCP_SEG)
 
-	RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF)
-	RTE_PCI_DEV_ID_DECL_ENA(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF)
-	{.device_id = 0},
+#define ENA_TX_OFFLOAD_NOTSUP_MASK	\
+	(PKT_TX_OFFLOAD_MASK ^ ENA_TX_OFFLOAD_MASK)
+
+static const struct rte_pci_id pci_id_ena_map[] = {
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_VF) },
+	{ RTE_PCI_DEVICE(PCI_VENDOR_ID_AMAZON, PCI_DEVICE_ID_ENA_LLQ_VF) },
+	{ .device_id = 0 },
 };
 
 static int ena_device_init(struct ena_com_dev *ena_dev,
@@ -92,6 +189,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 static int ena_dev_configure(struct rte_eth_dev *dev);
 static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 				  uint16_t nb_pkts);
+static uint16_t eth_ena_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+				  uint16_t nb_pkts);
 static int ena_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
 			      uint16_t nb_desc, unsigned int socket_id,
 			      const struct rte_eth_txconf *tx_conf);
@@ -106,7 +205,7 @@ static void ena_init_rings(struct ena_adapter *adapter);
 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu);
 static int ena_start(struct rte_eth_dev *dev);
 static void ena_close(struct rte_eth_dev *dev);
-static void ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
+static int ena_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats);
 static void ena_rx_queue_release_all(struct rte_eth_dev *dev);
 static void ena_tx_queue_release_all(struct rte_eth_dev *dev);
 static void ena_rx_queue_release(void *queue);
@@ -114,12 +213,12 @@ static void ena_tx_queue_release(void *queue);
 static void ena_rx_queue_release_bufs(struct ena_ring *ring);
 static void ena_tx_queue_release_bufs(struct ena_ring *ring);
 static int ena_link_update(struct rte_eth_dev *dev,
-			   __rte_unused int wait_to_complete);
+			   int wait_to_complete);
 static int ena_queue_restart(struct ena_ring *ring);
 static int ena_queue_restart_all(struct rte_eth_dev *dev,
 				 enum ena_ring_type ring_type);
 static void ena_stats_restart(struct rte_eth_dev *dev);
-static void ena_infos_get(__rte_unused struct rte_eth_dev *dev,
+static void ena_infos_get(struct rte_eth_dev *dev,
 			  struct rte_eth_dev_info *dev_info);
 static int ena_rss_reta_update(struct rte_eth_dev *dev,
 			       struct rte_eth_rss_reta_entry64 *reta_conf,
@@ -127,8 +226,9 @@ static int ena_rss_reta_update(struct rte_eth_dev *dev,
 static int ena_rss_reta_query(struct rte_eth_dev *dev,
 			      struct rte_eth_rss_reta_entry64 *reta_conf,
 			      uint16_t reta_size);
+static int ena_get_sset_count(struct rte_eth_dev *dev, int sset);
 
-static struct eth_dev_ops ena_dev_ops = {
+static const struct eth_dev_ops ena_dev_ops = {
 	.dev_configure        = ena_dev_configure,
 	.dev_infos_get        = ena_infos_get,
 	.rx_queue_setup       = ena_rx_queue_setup,
@@ -144,20 +244,33 @@ static struct eth_dev_ops ena_dev_ops = {
 	.reta_query           = ena_rss_reta_query,
 };
 
+#define NUMA_NO_NODE	SOCKET_ID_ANY
+
+static inline int ena_cpu_to_node(int cpu)
+{
+	struct rte_config *config = rte_eal_get_configuration();
+
+	if (likely(cpu < RTE_MAX_MEMZONE))
+		return config->mem_config->memzone[cpu].socket_id;
+
+	return NUMA_NO_NODE;
+}
+
 static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 				       struct ena_com_rx_ctx *ena_rx_ctx)
 {
 	uint64_t ol_flags = 0;
+	uint32_t packet_type = 0;
 
 	if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_TCP)
-		ol_flags |= PKT_TX_TCP_CKSUM;
+		packet_type |= RTE_PTYPE_L4_TCP;
 	else if (ena_rx_ctx->l4_proto == ENA_ETH_IO_L4_PROTO_UDP)
-		ol_flags |= PKT_TX_UDP_CKSUM;
+		packet_type |= RTE_PTYPE_L4_UDP;
 
 	if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV4)
-		ol_flags |= PKT_TX_IPV4;
+		packet_type |= RTE_PTYPE_L3_IPV4;
 	else if (ena_rx_ctx->l3_proto == ENA_ETH_IO_L3_PROTO_IPV6)
-		ol_flags |= PKT_TX_IPV6;
+		packet_type |= RTE_PTYPE_L3_IPV6;
 
 	if (unlikely(ena_rx_ctx->l4_csum_err))
 		ol_flags |= PKT_RX_L4_CKSUM_BAD;
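With this change the RX path stops abusing TX checksum flags: the parsed protocol now lands in `mbuf->packet_type`, and `ol_flags` carries only receive-side checksum verdicts. A hedged sketch of how an application might consume both fields after `rte_eth_rx_burst()` (DPDK 17.11-era flag names):

    #include <rte_mbuf.h>

    /* Drop mbufs whose checksums the NIC flagged as bad; count TCP packets. */
    static int filter_rx_mbuf(struct rte_mbuf *m, uint64_t *tcp_cnt)
    {
            if (m->ol_flags & (PKT_RX_L4_CKSUM_BAD | PKT_RX_IP_CKSUM_BAD)) {
                    rte_pktmbuf_free(m);
                    return -1;
            }

            if ((m->packet_type & RTE_PTYPE_L4_MASK) == RTE_PTYPE_L4_TCP)
                    (*tcp_cnt)++;

            return 0;
    }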
@@ -165,6 +278,7 @@ static inline void ena_rx_mbuf_prepare(struct rte_mbuf *mbuf,
 		ol_flags |= PKT_RX_IP_CKSUM_BAD;
 
 	mbuf->ol_flags = ol_flags;
+	mbuf->packet_type = packet_type;
 }
 
 static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
@@ -226,6 +340,100 @@ static inline void ena_tx_mbuf_prepare(struct rte_mbuf *mbuf,
 	}
 }
 
+static void ena_config_host_info(struct ena_com_dev *ena_dev)
+{
+	struct ena_admin_host_info *host_info;
+	int rc;
+
+	/* Allocate only the host info */
+	rc = ena_com_allocate_host_info(ena_dev);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "Cannot allocate host info\n");
+		return;
+	}
+
+	host_info = ena_dev->host_attr.host_info;
+
+	host_info->os_type = ENA_ADMIN_OS_DPDK;
+	host_info->kernel_ver = RTE_VERSION;
+	snprintf((char *)host_info->kernel_ver_str,
+		 sizeof(host_info->kernel_ver_str),
+		 "%s", rte_version());
+	host_info->os_dist = RTE_VERSION;
+	snprintf((char *)host_info->os_dist_str,
+		 sizeof(host_info->os_dist_str),
+		 "%s", rte_version());
+	host_info->driver_version =
+		(DRV_MODULE_VER_MAJOR) |
+		(DRV_MODULE_VER_MINOR << ENA_ADMIN_HOST_INFO_MINOR_SHIFT) |
+		(DRV_MODULE_VER_SUBMINOR <<
			ENA_ADMIN_HOST_INFO_SUB_MINOR_SHIFT);
+
+	rc = ena_com_set_host_attributes(ena_dev);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "Cannot set host attributes\n");
+		if (rc != -EPERM)
+			goto err;
+	}
+
+	return;
+
+err:
+	ena_com_delete_host_info(ena_dev);
+}
+
+static int
+ena_get_sset_count(struct rte_eth_dev *dev, int sset)
+{
+	if (sset != ETH_SS_STATS)
+		return -EOPNOTSUPP;
+
+	/* Workaround for clang:
+	 * touch internal structures to prevent
+	 * compiler error
+	 */
+	ENA_TOUCH(ena_stats_global_strings);
+	ENA_TOUCH(ena_stats_tx_strings);
+	ENA_TOUCH(ena_stats_rx_strings);
+	ENA_TOUCH(ena_stats_ena_com_strings);
+
+	return dev->data->nb_tx_queues *
+		(ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX) +
+		ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
+}
+
+static void ena_config_debug_area(struct ena_adapter *adapter)
+{
+	u32 debug_area_size;
+	int rc, ss_count;
+
+	ss_count = ena_get_sset_count(adapter->rte_dev, ETH_SS_STATS);
+	if (ss_count <= 0) {
+		RTE_LOG(ERR, PMD, "SS count is negative\n");
+		return;
+	}
+
+	/* allocate 32 bytes for each string and 64bit for the value */
+	debug_area_size = ss_count * ETH_GSTRING_LEN + sizeof(u64) * ss_count;
+
+	rc = ena_com_allocate_debug_area(&adapter->ena_dev, debug_area_size);
+	if (rc) {
+		RTE_LOG(ERR, PMD, "Cannot allocate debug area\n");
+		return;
+	}
+
+	rc = ena_com_set_host_attributes(&adapter->ena_dev);
+	if (rc) {
+		RTE_LOG(WARNING, PMD, "Cannot set host attributes\n");
+		if (rc != -EPERM)
+			goto err;
+	}
+
+	return;
+err:
+	ena_com_delete_debug_area(&adapter->ena_dev);
+}
+
 static void ena_close(struct rte_eth_dev *dev)
 {
 	struct ena_adapter *adapter =
@@ -475,26 +683,23 @@ static void ena_rx_queue_release_bufs(struct ena_ring *ring)
 			ring->rx_buffer_info[ring->next_to_clean & ring_mask];
 
 		if (m)
-			__rte_mbuf_raw_free(m);
+			rte_mbuf_raw_free(m);
 
-		ring->next_to_clean =
-			ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+		ring->next_to_clean++;
 	}
 }
 
 static void ena_tx_queue_release_bufs(struct ena_ring *ring)
 {
-	unsigned int ring_mask = ring->ring_size - 1;
+	unsigned int i;
 
-	while (ring->next_to_clean != ring->next_to_use) {
-		struct ena_tx_buffer *tx_buf =
-			&ring->tx_buffer_info[ring->next_to_clean & ring_mask];
+	for (i = 0; i < ring->ring_size; ++i) {
+		struct ena_tx_buffer *tx_buf = &ring->tx_buffer_info[i];
 
 		if (tx_buf->mbuf)
 			rte_pktmbuf_free(tx_buf->mbuf);
 
-		ring->next_to_clean =
-			ENA_CIRC_INC(ring->next_to_clean, 1, ring->ring_size);
+		ring->next_to_clean++;
 	}
 }
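These hunks are the first of several in this patch that drop the `ENA_CIRC_*` helpers in favor of free-running `uint16_t` indices that are masked only when touching the ring array. Because the ring size is a power of 2 and unsigned arithmetic wraps modulo 2^16, `next_to_use - next_to_clean` stays correct across counter overflow. A small standalone illustration of the invariant (not driver code):

    #include <assert.h>
    #include <stdint.h>

    #define RING_SIZE 8 /* must be a power of 2 */

    int main(void)
    {
            uint16_t next_to_use = UINT16_MAX;       /* about to wrap */
            uint16_t next_to_clean = UINT16_MAX - 2;

            /* Unsigned subtraction yields the occupancy despite the wrap. */
            uint16_t in_use = next_to_use - next_to_clean;
            assert(in_use == 2);

            /* Slots are addressed through the mask, never the raw counter. */
            assert((next_to_use & (RING_SIZE - 1)) < RING_SIZE);

            next_to_use++; /* wraps to 0 */
            in_use = next_to_use - next_to_clean;
            assert(in_use == 3); /* 0 - 0xFFFD == 3 (mod 2^16) */

            return 0;
    }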
@@ -504,7 +709,7 @@ static int ena_link_update(struct rte_eth_dev *dev,
 	struct rte_eth_link *link = &dev->data->dev_link;
 
 	link->link_status = 1;
-	link->link_speed = ETH_SPEED_NUM_10G;
+	link->link_speed = ETH_SPEED_NUM_NONE;
 	link->link_duplex = ETH_LINK_FULL_DUPLEX;
 
 	return 0;
@@ -538,7 +743,7 @@ static int ena_queue_restart_all(struct rte_eth_dev *dev,
 
 			if (rc) {
 				PMD_INIT_LOG(ERR,
-					     "failed to restart queue %d type(%d)\n",
+					     "failed to restart queue %d type(%d)",
 					     i, ring_type);
 				return -1;
 			}
@@ -564,7 +769,7 @@ static int ena_check_valid_conf(struct ena_adapter *adapter)
 	uint32_t max_frame_len = ena_get_mtu_conf(adapter);
 
 	if (max_frame_len > adapter->max_mtu) {
-		PMD_INIT_LOG(ERR, "Unsupported MTU of %d\n", max_frame_len);
+		PMD_INIT_LOG(ERR, "Unsupported MTU of %d", max_frame_len);
 		return -1;
 	}
 
@@ -591,7 +796,7 @@ ena_calc_queue_size(struct ena_com_dev *ena_dev,
 		queue_size = rte_align32pow2(queue_size >> 1);
 
 	if (queue_size == 0) {
-		PMD_INIT_LOG(ERR, "Invalid queue size\n");
+		PMD_INIT_LOG(ERR, "Invalid queue size");
 		return -EFAULT;
 	}
 
@@ -605,11 +810,10 @@ static void ena_stats_restart(struct rte_eth_dev *dev)
 
 	rte_atomic64_init(&adapter->drv_stats->ierrors);
 	rte_atomic64_init(&adapter->drv_stats->oerrors);
-	rte_atomic64_init(&adapter->drv_stats->imcasts);
 	rte_atomic64_init(&adapter->drv_stats->rx_nombuf);
 }
 
-static void ena_stats_get(struct rte_eth_dev *dev,
+static int ena_stats_get(struct rte_eth_dev *dev,
 			  struct rte_eth_stats *stats)
 {
 	struct ena_admin_basic_stats ena_stats;
@@ -619,13 +823,13 @@ static void ena_stats_get(struct rte_eth_dev *dev,
 	int rc;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
-		return;
+		return -ENOTSUP;
 
 	memset(&ena_stats, 0, sizeof(ena_stats));
 	rc = ena_com_get_dev_basic_stats(ena_dev, &ena_stats);
 	if (unlikely(rc)) {
 		RTE_LOG(ERR, PMD, "Could not retrieve statistics from ENA");
-		return;
+		return rc;
 	}
 
 	/* Set of basic statistics from ENA */
@@ -643,8 +847,8 @@ static void ena_stats_get(struct rte_eth_dev *dev,
 	/* Driver related stats */
 	stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors);
 	stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors);
-	stats->imcasts = rte_atomic64_read(&adapter->drv_stats->imcasts);
 	stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf);
+	return 0;
 }
 
 static int ena_mtu_set(struct rte_eth_dev *dev, uint16_t mtu)
@@ -703,7 +907,7 @@ static int ena_start(struct rte_eth_dev *dev)
 		return rc;
 
 	if (adapter->rte_dev->data->dev_conf.rxmode.mq_mode &
-	    ETH_MQ_RX_RSS_FLAG) {
+	    ETH_MQ_RX_RSS_FLAG && adapter->rte_dev->data->nb_rx_queues > 0) {
 		rc = ena_rss_init_default(adapter);
 		if (rc)
 			return rc;
@@ -718,7 +922,7 @@ static int ena_queue_restart(struct ena_ring *ring)
 
 static int ena_queue_restart(struct ena_ring *ring)
 {
-	int rc;
+	int rc, bufs_num;
 
 	ena_assert_msg(ring->configured == 1,
 		       "Trying to restart unconfigured queue\n");
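`ena_stats_get()` changing from `void` to `int` matches the ethdev op signature used from DPDK 17.11 onward, so failures (secondary process, admin-queue error) propagate instead of silently leaving zeroed counters. On the application side the status surfaces through `rte_eth_stats_get()`; a hedged sketch:

    #include <inttypes.h>
    #include <stdio.h>

    #include <rte_ethdev.h>

    static void print_port_stats(uint16_t port_id)
    {
            struct rte_eth_stats stats;

            if (rte_eth_stats_get(port_id, &stats) != 0) {
                    printf("port %u: stats unavailable\n", port_id);
                    return;
            }

            printf("port %u: ipackets=%" PRIu64 " ierrors=%" PRIu64
                   " rx_nombuf=%" PRIu64 "\n",
                   port_id, stats.ipackets, stats.ierrors, stats.rx_nombuf);
    }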
@@ -729,9 +933,10 @@ static int ena_queue_restart(struct ena_ring *ring)
 	if (ring->type == ENA_RING_TYPE_TX)
 		return 0;
 
-	rc = ena_populate_rx_queue(ring, ring->ring_size - 1);
-	if ((unsigned int)rc != ring->ring_size - 1) {
-		PMD_INIT_LOG(ERR, "Failed to populate rx ring !\n");
+	bufs_num = ring->ring_size - 1;
+	rc = ena_populate_rx_queue(ring, bufs_num);
+	if (rc != bufs_num) {
+		PMD_INIT_LOG(ERR, "Failed to populate rx ring!");
 		return (-1);
 	}
 
@@ -744,6 +949,10 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 			      __rte_unused unsigned int socket_id,
 			      __rte_unused const struct rte_eth_txconf *tx_conf)
 {
+	struct ena_com_create_io_ctx ctx =
+		/* policy set to _HOST just to satisfy icc compiler */
+		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
+		  ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 };
 	struct ena_ring *txq = NULL;
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
@@ -761,6 +970,13 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
+	if (!rte_is_power_of_2(nb_desc)) {
+		RTE_LOG(ERR, PMD,
+			"Unsupported size of TX queue: %d is not a power of 2.",
+			nb_desc);
+		return -EINVAL;
+	}
+
 	if (nb_desc > adapter->tx_ring_size) {
 		RTE_LOG(ERR, PMD,
 			"Unsupported size of TX queue (max size: %d)\n",
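The new power-of-2 check exists because the masked-index scheme relies on `ring_size - 1` being an all-ones bit mask. An application can normalize its requested descriptor count before calling `rte_eth_tx_queue_setup()`; a hedged sketch using the same DPDK helper the driver uses in `ena_calc_queue_size()`:

    #include <rte_common.h>

    /* Round a requested descriptor count up to the next power of 2 and
     * clamp it to the device limit (assumed here to be a power of 2). */
    static uint16_t normalize_nb_desc(uint16_t requested, uint16_t max_desc)
    {
            uint32_t n = rte_align32pow2(requested);

            if (n > max_desc)
                    n = max_desc;

            return (uint16_t)n;
    }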
@@ -769,11 +985,15 @@
 	}
 
 	ena_qid = ENA_IO_TXQ_IDX(queue_idx);
-	rc = ena_com_create_io_queue(ena_dev, ena_qid,
-				     ENA_COM_IO_QUEUE_DIRECTION_TX,
-				     ena_dev->tx_mem_queue_type,
-				     -1 /* admin interrupts is not used */,
-				     nb_desc);
+
+	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_TX;
+	ctx.qid = ena_qid;
+	ctx.msix_vector = -1; /* admin interrupts not used */
+	ctx.mem_queue_type = ena_dev->tx_mem_queue_type;
+	ctx.queue_size = adapter->tx_ring_size;
+	ctx.numa_node = ena_cpu_to_node(queue_idx);
+
+	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc) {
 		RTE_LOG(ERR, PMD,
 			"failed to create io TX queue #%d (qid:%d) rc: %d\n",
@@ -782,6 +1002,17 @@
 	txq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
 	txq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
 
+	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+				     &txq->ena_com_io_sq,
+				     &txq->ena_com_io_cq);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Failed to get TX queue handlers. TX queue num %d rc: %d\n",
+			queue_idx, rc);
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		goto err;
+	}
+
 	txq->port_id = dev->data->port_id;
 	txq->next_to_clean = 0;
 	txq->next_to_use = 0;
@@ -810,7 +1041,7 @@ static int ena_tx_queue_setup(struct rte_eth_dev *dev,
 	/* Store pointer to this queue in upper layer */
 	txq->configured = 1;
 	dev->data->tx_queues[queue_idx] = txq;
-
+err:
 	return rc;
 }
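The positional `{ ENA_ADMIN_PLACEMENT_POLICY_HOST, ENA_COM_IO_QUEUE_DIRECTION_TX, 0, 0, 0, 0 }` initializer above (kept positional to placate icc, per the comment) depends on field order. The same intent with C99 designated initializers would look roughly like the sketch below — the field names are inferred from the assignments later in the function, so treat this as illustrative rather than the verbatim `ena_com_create_io_ctx` layout:

    struct ena_com_create_io_ctx ctx = {
            .mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST,
            .direction = ENA_COM_IO_QUEUE_DIRECTION_TX,
            /* qid, msix_vector, queue_size and numa_node start zeroed and
             * are filled in right before ena_com_create_io_queue(). */
    };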
@@ -821,6 +1052,10 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 			      __rte_unused const struct rte_eth_rxconf *rx_conf,
 			      struct rte_mempool *mp)
 {
+	struct ena_com_create_io_ctx ctx =
+		/* policy set to _HOST just to satisfy icc compiler */
+		{ ENA_ADMIN_PLACEMENT_POLICY_HOST,
+		  ENA_COM_IO_QUEUE_DIRECTION_RX, 0, 0, 0, 0 };
 	struct ena_adapter *adapter =
 		(struct ena_adapter *)(dev->data->dev_private);
 	struct ena_ring *rxq = NULL;
@@ -836,6 +1071,13 @@ static int ena_rx_queue_setup(struct rte_eth_dev *dev,
 		return -1;
 	}
 
+	if (!rte_is_power_of_2(nb_desc)) {
+		RTE_LOG(ERR, PMD,
+			"Unsupported size of RX queue: %d is not a power of 2.",
+			nb_desc);
+		return -EINVAL;
+	}
+
 	if (nb_desc > adapter->rx_ring_size) {
 		RTE_LOG(ERR, PMD,
 			"Unsupported size of RX queue (max size: %d)\n",
@@ -844,11 +1086,15 @@
 	}
 
 	ena_qid = ENA_IO_RXQ_IDX(queue_idx);
-	rc = ena_com_create_io_queue(ena_dev, ena_qid,
-				     ENA_COM_IO_QUEUE_DIRECTION_RX,
-				     ENA_ADMIN_PLACEMENT_POLICY_HOST,
-				     -1 /* admin interrupts not used */,
-				     nb_desc);
+
+	ctx.qid = ena_qid;
+	ctx.direction = ENA_COM_IO_QUEUE_DIRECTION_RX;
+	ctx.mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
+	ctx.msix_vector = -1; /* admin interrupts not used */
+	ctx.queue_size = adapter->rx_ring_size;
+	ctx.numa_node = ena_cpu_to_node(queue_idx);
+
+	rc = ena_com_create_io_queue(ena_dev, &ctx);
 	if (rc)
 		RTE_LOG(ERR, PMD, "failed to create io RX queue #%d rc: %d\n",
 			queue_idx, rc);
@@ -856,6 +1102,16 @@
 	rxq->ena_com_io_cq = &ena_dev->io_cq_queues[ena_qid];
 	rxq->ena_com_io_sq = &ena_dev->io_sq_queues[ena_qid];
 
+	rc = ena_com_get_io_handlers(ena_dev, ena_qid,
+				     &rxq->ena_com_io_sq,
+				     &rxq->ena_com_io_cq);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Failed to get RX queue handlers. RX queue num %d rc: %d\n",
+			queue_idx, rc);
+		ena_com_destroy_io_queue(ena_dev, ena_qid);
+	}
+
 	rxq->port_id = dev->data->port_id;
 	rxq->next_to_clean = 0;
 	rxq->next_to_use = 0;
@@ -881,23 +1137,25 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 {
 	unsigned int i;
 	int rc;
-	unsigned int ring_size = rxq->ring_size;
-	unsigned int ring_mask = ring_size - 1;
-	int next_to_use = rxq->next_to_use & ring_mask;
+	uint16_t ring_size = rxq->ring_size;
+	uint16_t ring_mask = ring_size - 1;
+	uint16_t next_to_use = rxq->next_to_use;
+	uint16_t in_use;
 	struct rte_mbuf **mbufs = &rxq->rx_buffer_info[0];
 
 	if (unlikely(!count))
 		return 0;
 
-	ena_assert_msg((((ENA_CIRC_COUNT(rxq->next_to_use, rxq->next_to_clean,
-					 rxq->ring_size)) +
-			 count) < rxq->ring_size), "bad ring state");
+	in_use = rxq->next_to_use - rxq->next_to_clean;
+	ena_assert_msg(((in_use + count) < ring_size), "bad ring state");
 
-	count = RTE_MIN(count, ring_size - next_to_use);
+	count = RTE_MIN(count,
+			(uint16_t)(ring_size - (next_to_use & ring_mask)));
 
 	/* get resources for incoming packets */
 	rc = rte_mempool_get_bulk(rxq->mb_pool,
-				  (void **)(&mbufs[next_to_use]), count);
+				  (void **)(&mbufs[next_to_use & ring_mask]),
+				  count);
 	if (unlikely(rc < 0)) {
 		rte_atomic64_inc(&rxq->adapter->drv_stats->rx_nombuf);
 		PMD_RX_LOG(DEBUG, "there are not enough free buffers");
@@ -905,27 +1163,34 @@ static int ena_populate_rx_queue(struct ena_ring *rxq, unsigned int count)
 	for (i = 0; i < count; i++) {
-		struct rte_mbuf *mbuf = mbufs[next_to_use];
+		uint16_t next_to_use_masked = next_to_use & ring_mask;
+		struct rte_mbuf *mbuf = mbufs[next_to_use_masked];
 		struct ena_com_buf ebuf;
 
 		rte_prefetch0(mbufs[((next_to_use + 4) & ring_mask)]);
 		/* prepare physical address for DMA transaction */
-		ebuf.paddr = mbuf->buf_physaddr + RTE_PKTMBUF_HEADROOM;
+		ebuf.paddr = mbuf->buf_iova + RTE_PKTMBUF_HEADROOM;
 		ebuf.len = mbuf->buf_len - RTE_PKTMBUF_HEADROOM;
 		/* pass resource to device */
 		rc = ena_com_add_single_rx_desc(rxq->ena_com_io_sq,
-						&ebuf, next_to_use);
+						&ebuf, next_to_use_masked);
 		if (unlikely(rc)) {
+			rte_mempool_put_bulk(rxq->mb_pool, (void **)(&mbuf),
+					     count - i);
 			RTE_LOG(WARNING, PMD, "failed adding rx desc\n");
 			break;
 		}
-		next_to_use = ENA_RX_RING_IDX_NEXT(next_to_use, ring_size);
+		next_to_use++;
 	}
 
-	rte_wmb();
-	rxq->next_to_use = next_to_use;
-	/* let HW know that it can fill buffers with data */
-	ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
+	/* When we submitted free resources to device... */
+	if (i > 0) {
+		/* ...let HW know that it can fill buffers with data */
+		rte_wmb();
+		ena_com_write_sq_doorbell(rxq->ena_com_io_sq);
+
+		rxq->next_to_use = next_to_use;
+	}
 
 	return i;
 }
@@ -934,6 +1199,7 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 			   struct ena_com_dev_get_features_ctx *get_feat_ctx)
 {
 	int rc;
+	bool readless_supported;
 
 	/* Initialize mmio registers */
 	rc = ena_com_mmio_reg_read_request_init(ena_dev);
@@ -942,6 +1208,14 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 		return rc;
 	}
 
+	/* The PCIe configuration space revision id indicates whether
+	 * mmio reg read is disabled.
+	 */
+	readless_supported =
+		!(((struct rte_pci_device *)ena_dev->dmadev)->id.class_id
+		  & ENA_MMIO_DISABLE_REG_READ);
+	ena_com_set_mmio_read_mode(ena_dev, readless_supported);
+
 	/* reset device */
 	rc = ena_com_dev_reset(ena_dev);
 	if (rc) {
@@ -972,6 +1246,8 @@ static int ena_device_init(struct ena_com_dev *ena_dev,
 	 */
 	ena_com_set_admin_polling_mode(ena_dev, true);
 
+	ena_config_host_info(ena_dev);
+
 	/* Get Device Attributes and features */
 	rc = ena_com_get_dev_attr_feat(ena_dev, get_feat_ctx);
 	if (rc) {
@@ -1008,16 +1284,17 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	eth_dev->dev_ops = &ena_dev_ops;
 	eth_dev->rx_pkt_burst = &eth_ena_recv_pkts;
 	eth_dev->tx_pkt_burst = &eth_ena_xmit_pkts;
+	eth_dev->tx_pkt_prepare = &eth_ena_prep_pkts;
 	adapter->rte_eth_dev_data = eth_dev->data;
 	adapter->rte_dev = eth_dev;
 
 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
 		return 0;
 
-	pci_dev = eth_dev->pci_dev;
+	pci_dev = RTE_ETH_DEV_TO_PCI(eth_dev);
 	adapter->pdev = pci_dev;
 
-	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d\n",
+	PMD_INIT_LOG(INFO, "Initializing %x:%x:%x.%d",
 		     pci_dev->addr.domain,
 		     pci_dev->addr.bus,
 		     pci_dev->addr.devid,
@@ -1034,7 +1311,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	else if (adapter->regs)
 		ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
 	else
-		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)\n",
+		PMD_INIT_LOG(CRIT, "Failed to access registers BAR(%d)",
 			     ENA_REGS_BAR);
 
 	ena_dev->reg_bar = adapter->regs;
@@ -1048,7 +1325,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	/* device specific initialization routine */
 	rc = ena_device_init(ena_dev, &get_feat_ctx);
 	if (rc) {
-		PMD_INIT_LOG(CRIT, "Failed to init ENA device\n");
+		PMD_INIT_LOG(CRIT, "Failed to init ENA device");
 		return -1;
 	}
 
@@ -1056,7 +1333,7 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 		if (get_feat_ctx.max_queues.max_llq_num == 0) {
 			PMD_INIT_LOG(ERR,
 				     "Trying to use LLQ but llq_num is 0.\n"
-				     "Fall back into regular queues.\n");
+				     "Fall back into regular queues.");
 			ena_dev->tx_mem_queue_type =
 				ENA_ADMIN_PLACEMENT_POLICY_HOST;
 			adapter->num_queues =
@@ -1079,9 +1356,15 @@ static int eth_ena_dev_init(struct rte_eth_dev *eth_dev)
 	/* prepare ring structures */
 	ena_init_rings(adapter);
 
+	ena_config_debug_area(adapter);
+
 	/* Set max MTU for this device */
 	adapter->max_mtu = get_feat_ctx.dev_attr.max_mtu;
 
+	/* set device support for TSO */
+	adapter->tso4_supported = get_feat_ctx.offload.tx &
+				  ENA_ADMIN_FEATURE_OFFLOAD_DESC_TSO_IPV4_MASK;
+
 	/* Copy MAC address and point DPDK to it */
 	eth_dev->data->mac_addrs = (struct ether_addr *)adapter->mac_addr;
 	ether_addr_copy((struct ether_addr *)get_feat_ctx.dev_attr.mac_addr,
@@ -1108,7 +1391,7 @@ static int ena_dev_configure(struct rte_eth_dev *dev)
 
 	if (!(adapter->state == ENA_ADAPTER_STATE_INIT ||
 	      adapter->state == ENA_ADAPTER_STATE_STOPPED)) {
-		PMD_INIT_LOG(ERR, "Illegal adapter state: %d\n",
+		PMD_INIT_LOG(ERR, "Illegal adapter state: %d",
 			     adapter->state);
 		return -1;
 	}
@@ -1170,6 +1453,8 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 	ena_dev = &adapter->ena_dev;
 	ena_assert_msg(ena_dev != NULL, "Uninitialized device");
 
+	dev_info->pci_dev = RTE_ETH_DEV_TO_PCI(dev);
+
 	dev_info->speed_capa =
 			ETH_LINK_SPEED_1G   |
 			ETH_LINK_SPEED_2_5G |
@@ -1198,7 +1483,7 @@ static void ena_infos_get(struct rte_eth_dev *dev,
 			DEV_TX_OFFLOAD_UDP_CKSUM |
 			DEV_TX_OFFLOAD_TCP_CKSUM;
 
-	if (feat.offload.tx &
+	if (feat.offload.rx_supported &
 	    ENA_ADMIN_FEATURE_OFFLOAD_DESC_RX_L4_IPV4_CSUM_MASK)
 		rx_feat |= DEV_RX_OFFLOAD_IPV4_CKSUM |
			   DEV_RX_OFFLOAD_UDP_CKSUM |
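With `tso4_supported` recorded at init time, and the RX branch fixed to test `feat.offload.rx_supported` rather than the TX feature word, the offloads a port really has are what `ena_infos_get()` reports. An application can verify them before enabling hardware checksums; a hedged 17.11-era sketch:

    #include <rte_ethdev.h>

    /* Returns non-zero when the port advertises IPv4 + TCP TX checksum. */
    static int port_has_tx_cksum(uint16_t port_id)
    {
            const uint64_t wanted =
                    DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM;
            struct rte_eth_dev_info dev_info;

            rte_eth_dev_info_get(port_id, &dev_info);

            return (dev_info.tx_offload_capa & wanted) == wanted;
    }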
@@ -1224,7 +1509,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 	unsigned int ring_size = rx_ring->ring_size;
 	unsigned int ring_mask = ring_size - 1;
 	uint16_t next_to_clean = rx_ring->next_to_clean;
-	int desc_in_use = 0;
+	uint16_t desc_in_use = 0;
 	unsigned int recv_idx = 0;
 	struct rte_mbuf *mbuf = NULL;
 	struct rte_mbuf *mbuf_head = NULL;
@@ -1242,8 +1527,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 		return 0;
 	}
 
-	desc_in_use = ENA_CIRC_COUNT(rx_ring->next_to_use,
-				     next_to_clean, ring_size);
+	desc_in_use = rx_ring->next_to_use - next_to_clean;
 	if (unlikely(nb_pkts > desc_in_use))
 		nb_pkts = desc_in_use;
 
@@ -1284,8 +1568,7 @@ static uint16_t eth_ena_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
 			mbuf_prev = mbuf;
 			segments++;
-			next_to_clean =
-				ENA_RX_RING_IDX_NEXT(next_to_clean, ring_size);
+			next_to_clean++;
 		}
 
 		/* fill mbuf attributes if any */
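The receive loop above links one mbuf per completed descriptor into a chain: `mbuf_head` keeps the aggregate `pkt_len`, and each segment carries its own `data_len`. A short sketch of walking such a chain on the application side:

    #include <rte_mbuf.h>

    /* Sum the segment lengths of a chained mbuf; the result should
     * equal pkt_len of the head segment. */
    static uint32_t chain_bytes(const struct rte_mbuf *m)
    {
            uint32_t bytes = 0;

            for (; m != NULL; m = m->next)
                    bytes += m->data_len;

            return bytes;
    }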
+ */ + + ret = rte_net_intel_cksum_flags_prepare(m, + ol_flags & ~PKT_TX_TCP_SEG); + if (ret != 0) { + rte_errno = ret; + return i; + } + } + + return i; +} + static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { struct ena_ring *tx_ring = (struct ena_ring *)(tx_queue); - unsigned int next_to_use = tx_ring->next_to_use; + uint16_t next_to_use = tx_ring->next_to_use; + uint16_t next_to_clean = tx_ring->next_to_clean; struct rte_mbuf *mbuf; unsigned int ring_size = tx_ring->ring_size; unsigned int ring_mask = ring_size - 1; @@ -1318,7 +1673,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, struct ena_tx_buffer *tx_info; struct ena_com_buf *ebuf; uint16_t rc, req_id, total_tx_descs = 0; - int sent_idx = 0; + uint16_t sent_idx = 0, empty_tx_reqs; int nb_hw_desc; /* Check adapter state */ @@ -1328,10 +1683,14 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, return 0; } + empty_tx_reqs = ring_size - (next_to_use - next_to_clean); + if (nb_pkts > empty_tx_reqs) + nb_pkts = empty_tx_reqs; + for (sent_idx = 0; sent_idx < nb_pkts; sent_idx++) { mbuf = tx_pkts[sent_idx]; - req_id = tx_ring->empty_tx_reqs[next_to_use]; + req_id = tx_ring->empty_tx_reqs[next_to_use & ring_mask]; tx_info = &tx_ring->tx_buffer_info[req_id]; tx_info->mbuf = mbuf; tx_info->num_of_bufs = 0; @@ -1369,7 +1728,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, * consideration pushed header */ if (mbuf->data_len > ena_tx_ctx.header_len) { - ebuf->paddr = mbuf->buf_physaddr + + ebuf->paddr = mbuf->buf_iova + mbuf->data_off + ena_tx_ctx.header_len; ebuf->len = mbuf->data_len - ena_tx_ctx.header_len; @@ -1378,7 +1737,7 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, } while ((mbuf = mbuf->next) != NULL) { - ebuf->paddr = mbuf->buf_physaddr + mbuf->data_off; + ebuf->paddr = mbuf->buf_iova + mbuf->data_off; ebuf->len = mbuf->data_len; ebuf++; tx_info->num_of_bufs++; @@ -1394,12 +1753,17 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, tx_info->tx_descs = nb_hw_desc; - next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size); + next_to_use++; } - /* Let HW do it's best :-) */ - rte_wmb(); - ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq); + /* If there are ready packets to be xmitted... 
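Because `eth_ena_prep_pkts()` is hooked into `tx_pkt_prepare` (see the `eth_ena_dev_init()` hunk above), the pseudo-header checksum fill done by `rte_net_intel_cksum_flags_prepare()` only happens when the application calls `rte_eth_tx_prepare()` ahead of the burst. A hedged usage sketch:

    #include <stdio.h>

    #include <rte_errno.h>
    #include <rte_ethdev.h>

    static uint16_t send_burst(uint16_t port_id, uint16_t queue_id,
                               struct rte_mbuf **pkts, uint16_t n)
    {
            /* Fix up offload metadata; returns how many leading mbufs are OK. */
            uint16_t ready = rte_eth_tx_prepare(port_id, queue_id, pkts, n);

            if (ready != n)
                    printf("mbuf %u rejected, rte_errno=%d\n", ready, rte_errno);

            return rte_eth_tx_burst(port_id, queue_id, pkts, ready);
    }

Mbufs past `ready` remain owned by the caller and must be fixed up, retried, or freed.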
@@ -1394,12 +1753,17 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 
 		tx_info->tx_descs = nb_hw_desc;
 
-		next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use, ring_size);
+		next_to_use++;
 	}
 
-	/* Let HW do it's best :-) */
-	rte_wmb();
-	ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+	/* If there are ready packets to be xmitted... */
+	if (sent_idx > 0) {
+		/* ...let HW do its best :-) */
+		rte_wmb();
+		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
+
+		tx_ring->next_to_use = next_to_use;
+	}
 
 	/* Clear complete packets */
 	while (ena_com_tx_comp_req_id_get(tx_ring->ena_com_io_cq, &req_id) >= 0) {
@@ -1410,46 +1774,45 @@ static uint16_t eth_ena_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
 		/* Free whole mbuf chain */
 		mbuf = tx_info->mbuf;
 		rte_pktmbuf_free(mbuf);
+		tx_info->mbuf = NULL;
 
 		/* Put back descriptor to the ring for reuse */
-		tx_ring->empty_tx_reqs[tx_ring->next_to_clean] = req_id;
-		tx_ring->next_to_clean =
-			ENA_TX_RING_IDX_NEXT(tx_ring->next_to_clean,
-					     tx_ring->ring_size);
+		tx_ring->empty_tx_reqs[next_to_clean & ring_mask] = req_id;
+		next_to_clean++;
 
 		/* If too many descs to clean, leave it for another run */
 		if (unlikely(total_tx_descs > ENA_RING_DESCS_RATIO(ring_size)))
 			break;
 	}
 
-	/* acknowledge completion of sent packets */
-	ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
-	tx_ring->next_to_use = next_to_use;
+	if (total_tx_descs > 0) {
+		/* acknowledge completion of sent packets */
+		ena_com_comp_ack(tx_ring->ena_com_io_sq, total_tx_descs);
+		tx_ring->next_to_clean = next_to_clean;
+	}
+
 	return sent_idx;
 }
 
-static struct eth_driver rte_ena_pmd = {
-	{
-		.name = "rte_ena_pmd",
-		.id_table = pci_id_ena_map,
-		.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
-	},
-	.eth_dev_init = eth_ena_dev_init,
-	.dev_private_size = sizeof(struct ena_adapter),
-};
+static int eth_ena_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+			     struct rte_pci_device *pci_dev)
+{
+	return rte_eth_dev_pci_generic_probe(pci_dev,
+		sizeof(struct ena_adapter), eth_ena_dev_init);
+}
 
-static int
-rte_ena_pmd_init(const char *name __rte_unused,
-		 const char *params __rte_unused)
+static int eth_ena_pci_remove(struct rte_pci_device *pci_dev)
 {
-	rte_eth_driver_register(&rte_ena_pmd);
-	return 0;
-};
+	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
 
-struct rte_driver ena_pmd_drv = {
-	.name = "ena_driver",
-	.type = PMD_PDEV,
-	.init = rte_ena_pmd_init,
+static struct rte_pci_driver rte_ena_pmd = {
+	.id_table = pci_id_ena_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = eth_ena_pci_probe,
+	.remove = eth_ena_pci_remove,
 };
 
-PMD_REGISTER_DRIVER(ena_pmd_drv);
+RTE_PMD_REGISTER_PCI(net_ena, rte_ena_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_ena, pci_id_ena_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_ena, "* igb_uio | uio_pci_generic | vfio-pci");
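The old `PMD_REGISTER_DRIVER` path is replaced by constructor-time PCI registration: `rte_eal_init()` now probes any ENA VF bound to one of the kernel modules named in `RTE_PMD_REGISTER_KMOD_DEP`, and ports simply appear to the application. A hedged 17.11-era sketch of discovering them:

    #include <stdio.h>

    #include <rte_ethdev.h>

    static void list_probed_ports(void)
    {
            uint16_t port_id;

            for (port_id = 0; port_id < rte_eth_dev_count(); port_id++) {
                    struct rte_eth_dev_info dev_info;

                    rte_eth_dev_info_get(port_id, &dev_info);
                    printf("port %u: driver %s\n", port_id,
                           dev_info.driver_name);
            }
    }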