X-Git-Url: https://gerrit.fd.io/r/gitweb?a=blobdiff_plain;f=app%2Ftest-pmd%2Ftestpmd.c;h=4512ba92d0abe6113f6efcb76ca9f467ddacbac9;hb=43192222b329b3c984687235b0081c7fbfe484ba;hp=06885ceba19d7d97c44c56d677190ddb8fa55725;hpb=8b25d1ad5d2264bdfc2818c7bda74ee2697df6db;p=deb_dpdk.git

diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 06885ceb..4512ba92 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -63,7 +63,6 @@
 #include 
 #include 
 #include 
-#include 
 #include 
 #include 
 #include 
@@ -284,7 +283,7 @@ struct rte_eth_rxmode rx_mode = {
 	.hw_vlan_strip = 1, /**< VLAN strip enabled. */
 	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
 	.jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
-	.hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
+	.hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
 };
 
 struct rte_fdir_conf fdir_conf = {
@@ -441,10 +440,13 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 			mb_size, (unsigned) mb_mempool_cache,
 			sizeof(struct rte_pktmbuf_pool_private),
 			socket_id, 0);
+		if (rte_mp == NULL)
+			goto err;
 
 		if (rte_mempool_populate_anon(rte_mp) == 0) {
 			rte_mempool_free(rte_mp);
 			rte_mp = NULL;
+			goto err;
 		}
 		rte_pktmbuf_pool_init(rte_mp, NULL);
 		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
@@ -455,6 +457,7 @@ mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
 		}
 	}
 
+err:
 	if (rte_mp == NULL) {
 		rte_exit(EXIT_FAILURE,
 			"Creation of mbuf pool for socket %u failed: %s\n",
@@ -515,34 +518,6 @@ init_config(void)
 		fwd_lcores[lc_id]->cpuid_idx = lc_id;
 	}
 
-	/*
-	 * Create pools of mbuf.
-	 * If NUMA support is disabled, create a single pool of mbuf in
-	 * socket 0 memory by default.
-	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
-	 *
-	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
-	 * nb_txd can be configured at run time.
-	 */
-	if (param_total_num_mbufs)
-		nb_mbuf_per_pool = param_total_num_mbufs;
-	else {
-		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
-				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
-
-		if (!numa_support)
-			nb_mbuf_per_pool =
-				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
-	}
-
-	if (!numa_support) {
-		if (socket_num == UMA_NO_CONFIG)
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
-		else
-			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
-						 socket_num);
-	}
-
 	FOREACH_PORT(pid, ports) {
 		port = &ports[pid];
 		rte_eth_dev_info_get(pid, &port->dev_info);
@@ -565,20 +540,37 @@ init_config(void)
 		port->need_reconfig_queues = 1;
 	}
 
+	/*
+	 * Create pools of mbuf.
+	 * If NUMA support is disabled, create a single pool of mbuf in
+	 * socket 0 memory by default.
+	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
+	 *
+	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
+	 * nb_txd can be configured at run time.
+	 */
+	if (param_total_num_mbufs)
+		nb_mbuf_per_pool = param_total_num_mbufs;
+	else {
+		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
+			(nb_lcores * mb_mempool_cache) +
+			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
+		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
+	}
+
 	if (numa_support) {
 		uint8_t i;
-		unsigned int nb_mbuf;
 
-		if (param_total_num_mbufs)
-			nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
-
-		for (i = 0; i < max_socket; i++) {
-			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
-			if (nb_mbuf)
-				mbuf_pool_create(mbuf_data_size,
-						nb_mbuf, i);
-		}
+		for (i = 0; i < max_socket; i++)
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
+	} else {
+		if (socket_num == UMA_NO_CONFIG)
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
+		else
+			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
+					 socket_num);
 	}
+
 	init_port_config();
 
 	/*
@@ -683,18 +675,23 @@ init_fwd_streams(void)
 
 	/* init new */
 	nb_fwd_streams = nb_fwd_streams_new;
-	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
-		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
-	if (fwd_streams == NULL)
-		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
-			"failed\n", nb_fwd_streams);
-
-	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
-		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
-			sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
-		if (fwd_streams[sm_id] == NULL)
-			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
-				" failed\n");
+	if (nb_fwd_streams) {
+		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
+			sizeof(struct fwd_stream *) * nb_fwd_streams,
+			RTE_CACHE_LINE_SIZE);
+		if (fwd_streams == NULL)
+			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
+				" (struct fwd_stream *)) failed\n",
+				nb_fwd_streams);
+
+		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
+			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
+				" struct fwd_stream", sizeof(struct fwd_stream),
+				RTE_CACHE_LINE_SIZE);
+			if (fwd_streams[sm_id] == NULL)
+				rte_exit(EXIT_FAILURE, "rte_zmalloc"
+					"(struct fwd_stream) failed\n");
+		}
 	}
 
 	return 0;
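The pool sizing in the init_config() hunk above can be sketched in isolation. This is a minimal sketch, not testpmd code: create_pool_for_socket() is a hypothetical helper, the constants are stand-ins for testpmd's RTE_TEST_RX_DESC_MAX, RTE_TEST_TX_DESC_MAX, MAX_PKT_BURST and RTE_MAX_ETHPORTS, and rte_pktmbuf_pool_create() is used in place of testpmd's anonymous-memory path for brevity.

#include <stdio.h>
#include <rte_errno.h>
#include <rte_mbuf.h>
#include <rte_mempool.h>

#define RX_DESC_MAX 2048 /* stand-in for RTE_TEST_RX_DESC_MAX */
#define TX_DESC_MAX 2048 /* stand-in for RTE_TEST_TX_DESC_MAX */
#define PKT_BURST     32 /* stand-in for MAX_PKT_BURST */
#define MAX_PORTS     32 /* stand-in for RTE_MAX_ETHPORTS */

static struct rte_mempool *
create_pool_for_socket(unsigned int nb_lcores, unsigned int cache,
		       uint16_t data_room, int socket_id)
{
	char name[RTE_MEMPOOL_NAMESIZE];
	unsigned int nb_mbuf;
	struct rte_mempool *mp;

	/* Same shape as the patched init_config(): descriptors for RX
	 * and TX, per-lcore cache slots and one burst, scaled by the
	 * maximum port count so queue sizes can change at run time. */
	nb_mbuf = RX_DESC_MAX + nb_lcores * cache + TX_DESC_MAX + PKT_BURST;
	nb_mbuf *= MAX_PORTS;

	snprintf(name, sizeof(name), "mbuf_pool_socket_%d", socket_id);
	mp = rte_pktmbuf_pool_create(name, nb_mbuf, cache, 0,
				     data_room, socket_id);
	if (mp == NULL) /* fail loudly, like the new goto err path */
		fprintf(stderr, "mbuf pool for socket %d failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	return mp;
}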
@@ -728,6 +725,9 @@ pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
 			pktnb_stats[1] = pktnb_stats[0];
 			burst_stats[0] = nb_burst;
 			pktnb_stats[0] = nb_pkt;
+		} else if (nb_burst > burst_stats[1]) {
+			burst_stats[1] = nb_burst;
+			pktnb_stats[1] = nb_pkt;
 		}
 	}
 	if (total_burst == 0)
@@ -877,17 +877,35 @@ flush_fwd_rx_queues(void)
 	uint16_t nb_rx;
 	uint16_t i;
 	uint8_t j;
+	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
+	uint64_t timer_period;
+
+	/* convert to number of cycles */
+	timer_period = rte_get_timer_hz(); /* 1 second timeout */
 
 	for (j = 0; j < 2; j++) {
 		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
 			for (rxq = 0; rxq < nb_rxq; rxq++) {
 				port_id = fwd_ports_ids[rxp];
+				/**
+				 * testpmd can get stuck in the do-while loop
+				 * below if rte_eth_rx_burst() always returns
+				 * nonzero packets, so a timer is added to
+				 * exit the loop after one second.
+				 */
+				prev_tsc = rte_rdtsc();
 				do {
 					nb_rx = rte_eth_rx_burst(port_id, rxq,
 						pkts_burst, MAX_PKT_BURST);
 					for (i = 0; i < nb_rx; i++)
 						rte_pktmbuf_free(pkts_burst[i]);
-				} while (nb_rx > 0);
+
+					cur_tsc = rte_rdtsc();
+					diff_tsc = cur_tsc - prev_tsc;
+					timer_tsc += diff_tsc;
+				} while ((nb_rx > 0) &&
+					(timer_tsc < timer_period));
+				timer_tsc = 0;
 			}
 		}
 		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
@@ -965,6 +983,31 @@ launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
 	}
 }
 
+/*
+ * Update the forward ports list.
+ */
+void
+update_fwd_ports(portid_t new_pid)
+{
+	unsigned int i;
+	unsigned int new_nb_fwd_ports = 0;
+	int move = 0;
+
+	for (i = 0; i < nb_fwd_ports; ++i) {
+		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
+			move = 1;
+		else if (move)
+			fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
+		else
+			new_nb_fwd_ports++;
+	}
+	if (new_pid < RTE_MAX_ETHPORTS)
+		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
+
+	nb_fwd_ports = new_nb_fwd_ports;
+	nb_cfg_ports = new_nb_fwd_ports;
+}
+
 /*
  * Launch packet forwarding configuration.
  */
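The array compaction inside update_fwd_ports() above can be shown on its own. A minimal sketch under assumed names: compact_port_ids() and the is_valid callback are hypothetical, standing in for testpmd's port_id_is_invalid(..., DISABLED_WARN) test.

#include <stdbool.h>
#include <stdint.h>

/* Keep valid ids in order and drop invalid ones, returning the new
 * count; mirrors the move/new_nb_fwd_ports logic added above. */
static unsigned int
compact_port_ids(uint16_t *ids, unsigned int n,
		 bool (*is_valid)(uint16_t))
{
	unsigned int i, kept = 0;
	bool move = false;

	for (i = 0; i < n; i++) {
		if (!is_valid(ids[i]))
			move = true;          /* hole found: start shifting */
		else if (move)
			ids[kept++] = ids[i]; /* slide entry over the hole */
		else
			kept++;               /* prefix is already in place */
	}
	return kept;
}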
@@ -1000,10 +1043,6 @@ start_packet_forwarding(int with_tx_first)
 		return;
 	}
 
-	if (init_fwd_streams() < 0) {
-		printf("Fail from init_fwd_streams()\n");
-		return;
-	}
 
 	if(dcb_test) {
 		for (i = 0; i < nb_fwd_ports; i++) {
@@ -1023,10 +1062,11 @@ start_packet_forwarding(int with_tx_first)
 	}
 	test_done = 0;
 
+	fwd_config_setup();
+
 	if(!no_flush_rx)
 		flush_fwd_rx_queues();
 
-	fwd_config_setup();
 	pkt_fwd_config_display(&cur_fwd_config);
 	rxtx_config_display();
@@ -1562,6 +1602,8 @@ attach_port(char *identifier)
 
 	ports[pi].port_status = RTE_PORT_STOPPED;
 
+	update_fwd_ports(pi);
+
 	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
 	printf("Done\n");
 }
@@ -1584,6 +1626,8 @@ detach_port(uint8_t port_id)
 	ports[port_id].enabled = 0;
 	nb_ports = rte_eth_dev_count();
 
+	update_fwd_ports(RTE_MAX_ETHPORTS);
+
 	printf("Port '%s' is detached. Now total ports is %d\n",
 			name, nb_ports);
 	printf("Done\n");
@@ -1808,24 +1852,13 @@ init_port_config(void)
 			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
 		}
 
-		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
+		if (port->dcb_flag == 0) {
 			if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
 			else
 				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
 		}
 
-		if (port->dev_info.max_vfs != 0) {
-			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
-				port->dev_conf.rxmode.mq_mode =
-					ETH_MQ_RX_VMDQ_RSS;
-			else
-				port->dev_conf.rxmode.mq_mode =
-					ETH_MQ_RX_NONE;
-
-			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
-		}
-
 		rxtx_port_config(port);
 
 		rte_eth_macaddr_get(pid, &port->eth_addr);
@@ -1858,7 +1891,10 @@ uint8_t port_is_bonding_slave(portid_t slave_pid)
 	struct rte_port *port;
 
 	port = &ports[slave_pid];
-	return port->slave_flag;
+	if ((rte_eth_devices[slave_pid].data->dev_flags &
+	    RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
+		return 1;
+	return 0;
 }
 
 const uint16_t vlan_tags[] = {
@@ -1901,8 +1937,8 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
 				1 << (i % vmdq_rx_conf->nb_queue_pools);
 		}
 		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
-			vmdq_rx_conf->dcb_tc[i] = i;
-			vmdq_tx_conf->dcb_tc[i] = i;
+			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
+			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 
 		/* set DCB mode of RX and TX of multiple queues */
@@ -1917,9 +1953,9 @@ get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
 		rx_conf->nb_tcs = num_tcs;
 		tx_conf->nb_tcs = num_tcs;
 
-		for (i = 0; i < num_tcs; i++) {
-			rx_conf->dcb_tc[i] = i;
-			tx_conf->dcb_tc[i] = i;
+		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
+			rx_conf->dcb_tc[i] = i % num_tcs;
+			tx_conf->dcb_tc[i] = i % num_tcs;
 		}
 		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
 		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
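The dcb_tc change in the hunk above maps all eight user priorities onto however many traffic classes are configured, wrapping with i % num_tcs instead of writing TC numbers past num_tcs. A tiny stand-alone illustration of that mapping (8 mirrors ETH_DCB_NUM_USER_PRIORITIES):

#include <stdio.h>

int main(void)
{
	const unsigned int num_prio = 8; /* ETH_DCB_NUM_USER_PRIORITIES */
	const unsigned int num_tcs = 4;  /* e.g. a 4-TC configuration */
	unsigned int i;

	/* Priorities 0..7 wrap onto TCs 0..3: 4->0, 5->1, 6->2, 7->3. */
	for (i = 0; i < num_prio; i++)
		printf("priority %u -> tc %u\n", i, i % num_tcs);
	return 0;
}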
@@ -1942,17 +1978,36 @@ init_port_dcb_config(portid_t pid,
 		     uint8_t pfc_en)
 {
 	struct rte_eth_conf port_conf;
-	struct rte_eth_dev_info dev_info;
 	struct rte_port *rte_port;
 	int retval;
 	uint16_t i;
 
-	rte_eth_dev_info_get(pid, &dev_info);
+	rte_port = &ports[pid];
+
+	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
+	/* Enter DCB configuration status */
+	dcb_config = 1;
+
+	/* set configuration of DCB in VT mode and DCB in non-VT mode */
+	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
+	if (retval < 0)
+		return retval;
+	port_conf.rxmode.hw_vlan_filter = 1;
+
+	/**
+	 * Write the configuration into the device.
+	 * Set the numbers of RX & TX queues to 0, so
+	 * the RX & TX queues will not be set up.
+	 */
+	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
+
+	rte_eth_dev_info_get(pid, &rte_port->dev_info);
 
 	/* If dev_info.vmdq_pool_base is greater than 0,
 	 * the queue id of vmdq pools is started after pf queues.
 	 */
-	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
+	if (dcb_mode == DCB_VT_ENABLED &&
+	    rte_port->dev_info.vmdq_pool_base > 0) {
 		printf("VMDQ_DCB multi-queue mode is nonsensical"
 			" for port %d.", pid);
 		return -1;
@@ -1962,13 +2017,18 @@ init_port_dcb_config(portid_t pid,
 	 * and has the same number of rxq and txq in dcb mode
 	 */
 	if (dcb_mode == DCB_VT_ENABLED) {
-		nb_rxq = dev_info.max_rx_queues;
-		nb_txq = dev_info.max_tx_queues;
+		if (rte_port->dev_info.max_vfs > 0) {
+			nb_rxq = rte_port->dev_info.nb_rx_queues;
+			nb_txq = rte_port->dev_info.nb_tx_queues;
+		} else {
+			nb_rxq = rte_port->dev_info.max_rx_queues;
+			nb_txq = rte_port->dev_info.max_tx_queues;
+		}
 	} else {
 		/*if vt is disabled, use all pf queues */
-		if (dev_info.vmdq_pool_base == 0) {
-			nb_rxq = dev_info.max_rx_queues;
-			nb_txq = dev_info.max_tx_queues;
+		if (rte_port->dev_info.vmdq_pool_base == 0) {
+			nb_rxq = rte_port->dev_info.max_rx_queues;
+			nb_txq = rte_port->dev_info.max_tx_queues;
 		} else {
 			nb_rxq = (queueid_t)num_tcs;
 			nb_txq = (queueid_t)num_tcs;
@@ -1977,16 +2037,6 @@ init_port_dcb_config(portid_t pid,
 	}
 
 	rx_free_thresh = 64;
-	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
-	/* Enter DCB configuration status */
-	dcb_config = 1;
-
-	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
-	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
-	if (retval < 0)
-		return retval;
-
-	rte_port = &ports[pid];
 	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
 
 	rxtx_port_config(rte_port);
@@ -2105,6 +2155,7 @@ main(int argc, char** argv)
 			start_packet_forwarding(0);
 		}
 		prompt();
+		pmd_test_exit();
 	} else
 #endif
 	{
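The reordering in init_port_dcb_config() above writes the DCB configuration to the device before reading dev_info back, so fields such as vmdq_pool_base and the VF queue counts are checked against the mode just applied. A minimal sketch of that ordering, assuming the uint8_t port ids used elsewhere in this file; dcb_apply_and_probe() is a hypothetical helper with error handling elided, as in the patch:

#include <rte_ethdev.h>

static void
dcb_apply_and_probe(uint8_t pid, struct rte_eth_conf *conf,
		    struct rte_eth_dev_info *info)
{
	/* Zero RX/TX queues: apply the mode without setting any up. */
	(void)rte_eth_dev_configure(pid, 0, 0, conf);

	/* dev_info now reflects the DCB/VMDq mode just written. */
	rte_eth_dev_info_get(pid, info);
}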