At DPDK queue setup, take device descriptor limits into account. 66/10466/1
author     Konstantin Ananyev <konstantin.ananyev@intel.com>
           Thu, 8 Feb 2018 15:44:20 +0000 (15:44 +0000)
committer  Konstantin Ananyev <konstantin.ananyev@intel.com>
           Thu, 8 Feb 2018 15:50:38 +0000 (15:50 +0000)
Change-Id: Idf7c65a8499f41d2cd53342f91f2b87e902faf58
Signed-off-by: Konstantin Ananyev <konstantin.ananyev@intel.com>
app/nginx/src/tldk/be.c
examples/l4fwd/port.h

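Both files get the same treatment: instead of passing the fixed RX_RING_SIZE/TX_RING_SIZE constants straight to the queue setup calls, the requested descriptor counts are first clamped to the per-device limits reported by rte_eth_dev_info_get(), and tx_free_thresh is derived from the clamped TX ring size. The standalone sketch below (not part of the patch; the function name and the ring-size placeholder values are illustrative only, and error handling is simplified) shows the pattern against the 2018-era ethdev API used here:

	/*
	 * Minimal illustration of the pattern applied in this patch.
	 * RX_RING_SIZE/TX_RING_SIZE are placeholder requests; the real
	 * applications define their own values elsewhere.
	 */
	#include <rte_common.h>
	#include <rte_ethdev.h>
	#include <rte_mempool.h>

	#define RX_RING_SIZE	1024	/* placeholder; may exceed HW limits */
	#define TX_RING_SIZE	1024	/* placeholder; may exceed HW limits */

	static int
	setup_queue_pair(uint16_t port_id, uint16_t queue_id, struct rte_mempool *mp)
	{
		struct rte_eth_dev_info dev_info;
		uint32_t nb_rxd, nb_txd;
		int32_t socket;
		int rc;

		rte_eth_dev_info_get(port_id, &dev_info);
		dev_info.default_rxconf.rx_drop_en = 1;

		/* never request more descriptors than the device supports */
		nb_rxd = RTE_MIN(RX_RING_SIZE, dev_info.rx_desc_lim.nb_max);
		nb_txd = RTE_MIN(TX_RING_SIZE, dev_info.tx_desc_lim.nb_max);

		/* derive the TX free threshold from the actual ring size */
		dev_info.default_txconf.tx_free_thresh = nb_txd / 2;

		socket = rte_eth_dev_socket_id(port_id);

		rc = rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd,
				socket, &dev_info.default_rxconf, mp);
		if (rc < 0)
			return rc;

		return rte_eth_tx_queue_setup(port_id, queue_id, nb_txd,
				socket, &dev_info.default_txconf);
	}

Deriving tx_free_thresh from the clamped nb_txd rather than the compile-time TX_RING_SIZE keeps the threshold consistent when the device reports a smaller maximum descriptor count.
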
app/nginx/src/tldk/be.c
index 4cb1b1c..3d17b60 100644
@@ -293,7 +293,7 @@ be_queue_init(struct tldk_ctx *tcx, const tldk_conf_t *cf)
 {
        int32_t socket, rc;
        uint16_t queue_id;
-       uint32_t port_id, i;
+       uint32_t port_id, i, nb_rxd, nb_txd;
        struct rte_eth_dev_info dev_info;
        const struct tldk_ctx_conf *ctx;
        const struct tldk_port_conf *pcf;
@@ -305,8 +305,12 @@ be_queue_init(struct tldk_ctx *tcx, const tldk_conf_t *cf)
                pcf = &cf->port[port_id];
 
                rte_eth_dev_info_get(port_id, &dev_info);
+
                dev_info.default_rxconf.rx_drop_en = 1;
-               dev_info.default_txconf.tx_free_thresh = TX_RING_SIZE / 2;
+
+               nb_rxd = RTE_MIN(RX_RING_SIZE, dev_info.rx_desc_lim.nb_max);
+               nb_txd = RTE_MIN(TX_RING_SIZE, dev_info.tx_desc_lim.nb_max);
+               dev_info.default_txconf.tx_free_thresh = nb_txd / 2;
 
                if (pcf->tx_offload != 0) {
                        RTE_LOG(ERR, USER1,
@@ -317,7 +321,7 @@ be_queue_init(struct tldk_ctx *tcx, const tldk_conf_t *cf)
 
                socket = rte_eth_dev_socket_id(port_id);
 
-               rc = rte_eth_rx_queue_setup(port_id, queue_id, RX_RING_SIZE,
+               rc = rte_eth_rx_queue_setup(port_id, queue_id, nb_rxd,
                                socket, &dev_info.default_rxconf, tcx->mpool);
                if (rc < 0) {
                        RTE_LOG(ERR, USER1,
@@ -326,7 +330,7 @@ be_queue_init(struct tldk_ctx *tcx, const tldk_conf_t *cf)
                        return rc;
                }
 
-               rc = rte_eth_tx_queue_setup(port_id, queue_id, TX_RING_SIZE,
+               rc = rte_eth_tx_queue_setup(port_id, queue_id, nb_txd,
                                socket, &dev_info.default_txconf);
                if (rc < 0) {
                        RTE_LOG(ERR, USER1,
examples/l4fwd/port.h
index a451f7b..b727b1b 100644
@@ -207,6 +207,7 @@ queue_init(struct netbe_port *uprt, struct rte_mempool *mp)
 {
        int32_t socket, rc;
        uint16_t q;
+       uint32_t nb_rxd, nb_txd;
        struct rte_eth_dev_info dev_info;
 
        rte_eth_dev_info_get(uprt->id, &dev_info);
@@ -215,7 +216,10 @@ queue_init(struct netbe_port *uprt, struct rte_mempool *mp)
 
        dev_info.default_rxconf.rx_drop_en = 1;
 
-       dev_info.default_txconf.tx_free_thresh = TX_RING_SIZE / 2;
+       nb_rxd = RTE_MIN(RX_RING_SIZE, dev_info.rx_desc_lim.nb_max);
+       nb_txd = RTE_MIN(TX_RING_SIZE, dev_info.tx_desc_lim.nb_max);
+
+       dev_info.default_txconf.tx_free_thresh = nb_txd / 2;
        if (uprt->tx_offload != 0) {
                RTE_LOG(ERR, USER1, "%s(%u): enabling full featured TX;\n",
                        __func__, uprt->id);
@@ -223,7 +227,7 @@ queue_init(struct netbe_port *uprt, struct rte_mempool *mp)
        }
 
        for (q = 0; q < uprt->nb_lcore; q++) {
-               rc = rte_eth_rx_queue_setup(uprt->id, q, RX_RING_SIZE,
+               rc = rte_eth_rx_queue_setup(uprt->id, q, nb_rxd,
                        socket, &dev_info.default_rxconf, mp);
                if (rc < 0) {
                        RTE_LOG(ERR, USER1,
@@ -234,7 +238,7 @@ queue_init(struct netbe_port *uprt, struct rte_mempool *mp)
        }
 
        for (q = 0; q < uprt->nb_lcore; q++) {
-               rc = rte_eth_tx_queue_setup(uprt->id, q, TX_RING_SIZE,
+               rc = rte_eth_tx_queue_setup(uprt->id, q, nb_txd,
                        socket, &dev_info.default_txconf);
                if (rc < 0) {
                        RTE_LOG(ERR, USER1,