4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <errno.h>
#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
70 #include <rte_interrupts.h>
72 #include <rte_ether.h>
73 #include <rte_ethdev.h>
75 #include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
85 uint16_t verbose_level = 0; /**< Silent by default. */
/* Use the master core for the command line? */
88 uint8_t interactive = 0;
89 uint8_t auto_start = 0;
92 * NUMA support configuration.
93 * When set, the NUMA support attempts to dispatch the allocation of the
94 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
95 * probed ports among the CPU sockets 0 and 1.
96 * Otherwise, all memory is allocated from CPU socket 0.
98 uint8_t numa_support = 0; /**< No numa support by default */
* In UMA mode, all memory is allocated from socket 0 if --socket-num is
* not specified.
104 uint8_t socket_num = UMA_NO_CONFIG;
* Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
* Record the Ethernet address of the peer target ports to which packets are
* forwarded.
* Must be instantiated with the Ethernet addresses of the peer traffic
* generator ports.
117 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
118 portid_t nb_peer_eth_addrs = 0;
121 * Probed Target Environment.
123 struct rte_port *ports; /**< For all probed ethernet ports. */
124 portid_t nb_ports; /**< Number of probed ethernet ports. */
125 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
126 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
129 * Test Forwarding Configuration.
130 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
131 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
133 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
134 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
135 portid_t nb_cfg_ports; /**< Number of configured ports. */
136 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
138 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
139 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
141 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
142 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
145 * Forwarding engines.
147 struct fwd_engine * fwd_engines[] = {
156 #ifdef RTE_LIBRTE_IEEE1588
157 &ieee1588_fwd_engine,
162 struct fwd_config cur_fwd_config;
163 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
164 uint32_t retry_enabled;
165 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
166 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
168 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
169 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
170 * specified on command-line. */
173 * Configuration of packet segments used by the "txonly" processing engine.
175 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
176 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
177 TXONLY_DEF_PACKET_LEN,
179 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
181 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
182 /**< Split policy for packets to TX. */
184 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
185 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means non-DCB. */
188 uint8_t dcb_config = 0;
/* Whether DCB testing is in progress. */
191 uint8_t dcb_test = 0;
194 * Configurable number of RX/TX queues.
196 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
197 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
200 * Configurable number of RX/TX ring descriptors.
202 #define RTE_TEST_RX_DESC_DEFAULT 128
203 #define RTE_TEST_TX_DESC_DEFAULT 512
204 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
205 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
207 #define RTE_PMD_PARAM_UNSET -1
209 * Configurable values of RX and TX ring threshold registers.
212 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
214 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
216 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
218 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
221 * Configurable value of RX free threshold.
223 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
226 * Configurable value of RX drop enable.
228 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
231 * Configurable value of TX free threshold.
233 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
236 * Configurable value of TX RS bit threshold.
238 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
241 * Configurable value of TX queue flags.
243 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
246 * Receive Side Scaling (RSS) configuration.
248 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
251 * Port topology configuration
253 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
* When set, do not flush the RX streams before starting forwarding.
258 uint8_t no_flush_rx = 0; /* flush by default */
* When set, skip the link status check when starting/stopping a port.
263 uint8_t no_link_check = 0; /* check by default */
266 * NIC bypass mode configuration options.
268 #ifdef RTE_NIC_BYPASS
270 /* The NIC bypass watchdog timeout. */
271 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
276 * Ethernet device configuration.
278 struct rte_eth_rxmode rx_mode = {
279 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
281 .header_split = 0, /**< Header Split disabled. */
282 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
283 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
284 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
285 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
286 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
287 .hw_strip_crc = 0, /**< CRC stripping by hardware disabled. */
290 struct rte_fdir_conf fdir_conf = {
291 .mode = RTE_FDIR_MODE_NONE,
292 .pballoc = RTE_FDIR_PBALLOC_64K,
293 .status = RTE_FDIR_REPORT_STATUS,
295 .vlan_tci_mask = 0x0,
297 .src_ip = 0xFFFFFFFF,
298 .dst_ip = 0xFFFFFFFF,
301 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
302 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
304 .src_port_mask = 0xFFFF,
305 .dst_port_mask = 0xFFFF,
306 .mac_addr_byte_mask = 0xFF,
307 .tunnel_type_mask = 1,
308 .tunnel_id_mask = 0xFFFFFFFF,
313 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
315 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
316 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
318 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
319 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
321 uint16_t nb_tx_queue_stats_mappings = 0;
322 uint16_t nb_rx_queue_stats_mappings = 0;
324 unsigned max_socket = 0;
326 /* Forward function declarations */
327 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
328 static void check_all_ports_link_status(uint32_t port_mask);
331 * Check if all the ports are started.
332 * If yes, return positive value. If not, return zero.
334 static int all_ports_started(void);
337 * Find next enabled port
340 find_next_port(portid_t p, struct rte_port *ports, int size)
343 rte_exit(-EINVAL, "failed to find a next port id\n");
345 while ((p < size) && (ports[p].enabled == 0))
351 * Setup default configuration.
354 set_default_fwd_lcores_config(void)
358 unsigned int sock_num;
361 for (i = 0; i < RTE_MAX_LCORE; i++) {
362 sock_num = rte_lcore_to_socket_id(i) + 1;
363 if (sock_num > max_socket) {
364 if (sock_num > RTE_MAX_NUMA_NODES)
365 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
366 max_socket = sock_num;
368 if (!rte_lcore_is_enabled(i))
370 if (i == rte_get_master_lcore())
372 fwd_lcores_cpuids[nb_lc++] = i;
374 nb_lcores = (lcoreid_t) nb_lc;
375 nb_cfg_lcores = nb_lcores;
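/*
 * Give every possible peer port a default locally administered MAC address
 * whose last byte is the port index.
 */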
380 set_def_peer_eth_addrs(void)
384 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
385 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
386 peer_eth_addrs[i].addr_bytes[5] = i;
391 set_default_fwd_ports_config(void)
395 for (pt_id = 0; pt_id < nb_ports; pt_id++)
396 fwd_ports_ids[pt_id] = pt_id;
398 nb_cfg_ports = nb_ports;
399 nb_fwd_ports = nb_ports;
403 set_def_fwd_config(void)
405 set_default_fwd_lcores_config();
406 set_def_peer_eth_addrs();
407 set_default_fwd_ports_config();
411 * Configuration initialisation done once at init time.
414 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
415 unsigned int socket_id)
417 char pool_name[RTE_MEMPOOL_NAMESIZE];
418 struct rte_mempool *rte_mp = NULL;
421 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
422 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
425 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
426 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
428 #ifdef RTE_LIBRTE_PMD_XENVIRT
429 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
430 (unsigned) mb_mempool_cache,
431 sizeof(struct rte_pktmbuf_pool_private),
432 rte_pktmbuf_pool_init, NULL,
433 rte_pktmbuf_init, NULL,
437 /* if the former XEN allocation failed fall back to normal allocation */
438 if (rte_mp == NULL) {
440 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
441 mb_size, (unsigned) mb_mempool_cache,
442 sizeof(struct rte_pktmbuf_pool_private),
445 if (rte_mempool_populate_anon(rte_mp) == 0) {
446 rte_mempool_free(rte_mp);
449 rte_pktmbuf_pool_init(rte_mp, NULL);
450 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
452 /* wrapper to rte_mempool_create() */
453 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
454 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
458 if (rte_mp == NULL) {
459 rte_exit(EXIT_FAILURE,
460 "Creation of mbuf pool for socket %u failed: %s\n",
461 socket_id, rte_strerror(rte_errno));
462 } else if (verbose_level > 0) {
463 rte_mempool_dump(stdout, rte_mp);
* Check whether the given socket id is valid in NUMA mode;
* return 0 if valid, -1 otherwise.
472 check_socket_id(const unsigned int socket_id)
474 static int warning_once = 0;
476 if (socket_id >= max_socket) {
477 if (!warning_once && numa_support)
478 printf("Warning: NUMA should be configured manually by"
479 " using --port-numa-config and"
480 " --ring-numa-config parameters along with"
492 struct rte_port *port;
493 struct rte_mempool *mbp;
494 unsigned int nb_mbuf_per_pool;
496 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
498 memset(port_per_socket,0,RTE_MAX_NUMA_NODES);
499 /* Configuration of logical cores. */
500 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
501 sizeof(struct fwd_lcore *) * nb_lcores,
502 RTE_CACHE_LINE_SIZE);
503 if (fwd_lcores == NULL) {
504 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
505 "failed\n", nb_lcores);
507 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
508 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
509 sizeof(struct fwd_lcore),
510 RTE_CACHE_LINE_SIZE);
511 if (fwd_lcores[lc_id] == NULL) {
512 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
515 fwd_lcores[lc_id]->cpuid_idx = lc_id;
519 * Create pools of mbuf.
520 * If NUMA support is disabled, create a single pool of mbuf in
521 * socket 0 memory by default.
522 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
524 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
525 * nb_txd can be configured at run time.
527 if (param_total_num_mbufs)
528 nb_mbuf_per_pool = param_total_num_mbufs;
530 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
531 + RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
535 (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
539 if (socket_num == UMA_NO_CONFIG)
540 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
542 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
546 FOREACH_PORT(pid, ports) {
548 rte_eth_dev_info_get(pid, &port->dev_info);
551 if (port_numa[pid] != NUMA_NO_CONFIG)
552 port_per_socket[port_numa[pid]]++;
554 uint32_t socket_id = rte_eth_dev_socket_id(pid);
556 /* if socket_id is invalid, set to 0 */
557 if (check_socket_id(socket_id) < 0)
559 port_per_socket[socket_id]++;
563 /* set flag to initialize port/queue */
564 port->need_reconfig = 1;
565 port->need_reconfig_queues = 1;
570 unsigned int nb_mbuf;
572 if (param_total_num_mbufs)
573 nb_mbuf_per_pool = nb_mbuf_per_pool/nb_ports;
575 for (i = 0; i < max_socket; i++) {
576 nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
578 mbuf_pool_create(mbuf_data_size,
* Record which mbuf pool each logical core should use, if needed.
587 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
588 mbp = mbuf_pool_find(
589 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
592 mbp = mbuf_pool_find(0);
593 fwd_lcores[lc_id]->mbp = mbp;
596 /* Configuration of packet forwarding streams. */
597 if (init_fwd_streams() < 0)
598 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
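/*
 * Refresh the device info of a newly attached port and flag it for a full
 * port and queue reconfiguration on the given socket.
 */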
605 reconfig(portid_t new_port_id, unsigned socket_id)
607 struct rte_port *port;
609 /* Reconfiguration of Ethernet ports. */
610 port = &ports[new_port_id];
611 rte_eth_dev_info_get(new_port_id, &port->dev_info);
613 /* set flag to initialize port/queue */
614 port->need_reconfig = 1;
615 port->need_reconfig_queues = 1;
616 port->socket_id = socket_id;
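/*
 * Allocate nb_ports * max(nb_rxq, nb_txq) forwarding streams, after checking
 * that the requested numbers of RX/TX queues fit the device limits.
 */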
623 init_fwd_streams(void)
626 struct rte_port *port;
627 streamid_t sm_id, nb_fwd_streams_new;
630 /* set socket id according to numa or not */
631 FOREACH_PORT(pid, ports) {
633 if (nb_rxq > port->dev_info.max_rx_queues) {
634 printf("Fail: nb_rxq(%d) is greater than "
635 "max_rx_queues(%d)\n", nb_rxq,
636 port->dev_info.max_rx_queues);
639 if (nb_txq > port->dev_info.max_tx_queues) {
640 printf("Fail: nb_txq(%d) is greater than "
641 "max_tx_queues(%d)\n", nb_txq,
642 port->dev_info.max_tx_queues);
646 if (port_numa[pid] != NUMA_NO_CONFIG)
647 port->socket_id = port_numa[pid];
649 port->socket_id = rte_eth_dev_socket_id(pid);
651 /* if socket_id is invalid, set to 0 */
652 if (check_socket_id(port->socket_id) < 0)
657 if (socket_num == UMA_NO_CONFIG)
660 port->socket_id = socket_num;
664 q = RTE_MAX(nb_rxq, nb_txq);
666 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
669 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
670 if (nb_fwd_streams_new == nb_fwd_streams)
673 if (fwd_streams != NULL) {
674 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
675 if (fwd_streams[sm_id] == NULL)
677 rte_free(fwd_streams[sm_id]);
678 fwd_streams[sm_id] = NULL;
680 rte_free(fwd_streams);
685 nb_fwd_streams = nb_fwd_streams_new;
686 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
687 sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
688 if (fwd_streams == NULL)
689 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
690 "failed\n", nb_fwd_streams);
692 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
693 fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
694 sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
695 if (fwd_streams[sm_id] == NULL)
696 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
703 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
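/*
 * Display the repartition of the packet bursts of one direction (RX or TX):
 * the share of the total taken by the two most frequent burst sizes.
 */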
705 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
707 unsigned int total_burst;
708 unsigned int nb_burst;
709 unsigned int burst_stats[3];
710 uint16_t pktnb_stats[3];
712 int burst_percent[3];
715 * First compute the total number of packet bursts and the
716 * two highest numbers of bursts of the same number of packets.
719 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
720 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
721 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
722 nb_burst = pbs->pkt_burst_spread[nb_pkt];
725 total_burst += nb_burst;
726 if (nb_burst > burst_stats[0]) {
727 burst_stats[1] = burst_stats[0];
728 pktnb_stats[1] = pktnb_stats[0];
729 burst_stats[0] = nb_burst;
730 pktnb_stats[0] = nb_pkt;
733 if (total_burst == 0)
735 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
736 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
737 burst_percent[0], (int) pktnb_stats[0]);
738 if (burst_stats[0] == total_burst) {
742 if (burst_stats[0] + burst_stats[1] == total_burst) {
743 printf(" + %d%% of %d pkts]\n",
744 100 - burst_percent[0], pktnb_stats[1]);
747 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
748 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
749 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
750 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
753 printf(" + %d%% of %d pkts + %d%% of others]\n",
754 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
756 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
759 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
761 struct rte_port *port;
764 static const char *fwd_stats_border = "----------------------";
766 port = &ports[port_id];
767 printf("\n %s Forward statistics for port %-2d %s\n",
768 fwd_stats_border, port_id, fwd_stats_border);
770 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
771 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
773 stats->ipackets, stats->imissed,
774 (uint64_t) (stats->ipackets + stats->imissed));
776 if (cur_fwd_eng == &csum_fwd_engine)
777 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
778 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
779 if ((stats->ierrors + stats->rx_nombuf) > 0) {
780 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
781 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
784 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
786 stats->opackets, port->tx_dropped,
787 (uint64_t) (stats->opackets + port->tx_dropped));
790 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
792 stats->ipackets, stats->imissed,
793 (uint64_t) (stats->ipackets + stats->imissed));
795 if (cur_fwd_eng == &csum_fwd_engine)
796 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
797 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
798 if ((stats->ierrors + stats->rx_nombuf) > 0) {
799 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
800 printf(" RX-nombufs: %14"PRIu64"\n",
804 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
806 stats->opackets, port->tx_dropped,
807 (uint64_t) (stats->opackets + port->tx_dropped));
810 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
812 pkt_burst_stats_display("RX",
813 &port->rx_stream->rx_burst_stats);
815 pkt_burst_stats_display("TX",
816 &port->tx_stream->tx_burst_stats);
819 if (port->rx_queue_stats_mapping_enabled) {
821 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
822 printf(" Stats reg %2d RX-packets:%14"PRIu64
823 " RX-errors:%14"PRIu64
824 " RX-bytes:%14"PRIu64"\n",
825 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
829 if (port->tx_queue_stats_mapping_enabled) {
830 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
831 printf(" Stats reg %2d TX-packets:%14"PRIu64
832 " TX-bytes:%14"PRIu64"\n",
833 i, stats->q_opackets[i], stats->q_obytes[i]);
837 printf(" %s--------------------------------%s\n",
838 fwd_stats_border, fwd_stats_border);
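/*
 * Display the counters of a single forwarding stream; streams without any
 * activity are skipped.
 */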
842 fwd_stream_stats_display(streamid_t stream_id)
844 struct fwd_stream *fs;
845 static const char *fwd_top_stats_border = "-------";
847 fs = fwd_streams[stream_id];
848 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
849 (fs->fwd_dropped == 0))
851 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
852 "TX Port=%2d/Queue=%2d %s\n",
853 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
854 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
855 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
856 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
858 /* if checksum mode */
859 if (cur_fwd_eng == &csum_fwd_engine) {
860 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
861 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
864 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
865 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
866 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
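/*
 * Drain the packets left in the RX queues of the forwarding ports so that a
 * new run starts from empty queues; two passes separated by a short delay
 * catch late arrivals.
 */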
871 flush_fwd_rx_queues(void)
873 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
881 for (j = 0; j < 2; j++) {
882 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
883 for (rxq = 0; rxq < nb_rxq; rxq++) {
884 port_id = fwd_ports_ids[rxp];
886 nb_rx = rte_eth_rx_burst(port_id, rxq,
887 pkts_burst, MAX_PKT_BURST);
888 for (i = 0; i < nb_rx; i++)
889 rte_pktmbuf_free(pkts_burst[i]);
rte_delay_ms(10); /* wait 10 milliseconds before retrying */
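/*
 * Run the given forwarding function on every stream assigned to this lcore
 * until the lcore is told to stop.
 */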
898 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
900 struct fwd_stream **fsm;
904 fsm = &fwd_streams[fc->stream_idx];
905 nb_fs = fc->stream_nb;
907 for (sm_id = 0; sm_id < nb_fs; sm_id++)
908 (*pkt_fwd)(fsm[sm_id]);
909 } while (! fc->stopped);
913 start_pkt_forward_on_core(void *fwd_arg)
915 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
916 cur_fwd_config.fwd_eng->packet_fwd);
921 * Run the TXONLY packet forwarding engine to send a single burst of packets.
922 * Used to start communication flows in network loopback test configurations.
925 run_one_txonly_burst_on_core(void *fwd_arg)
927 struct fwd_lcore *fwd_lc;
928 struct fwd_lcore tmp_lcore;
930 fwd_lc = (struct fwd_lcore *) fwd_arg;
932 tmp_lcore.stopped = 1;
933 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
938 * Launch packet forwarding:
939 * - Setup per-port forwarding context.
940 * - launch logical cores with their forwarding configuration.
943 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
945 port_fwd_begin_t port_fwd_begin;
950 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
951 if (port_fwd_begin != NULL) {
952 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
953 (*port_fwd_begin)(fwd_ports_ids[i]);
955 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
956 lc_id = fwd_lcores_cpuids[i];
957 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
958 fwd_lcores[i]->stopped = 0;
959 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
960 fwd_lcores[i], lc_id);
962 printf("launch lcore %u failed - diag=%d\n",
969 * Launch packet forwarding configuration.
972 start_packet_forwarding(int with_tx_first)
974 port_fwd_begin_t port_fwd_begin;
975 port_fwd_end_t port_fwd_end;
976 struct rte_port *port;
981 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
982 rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");
984 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
985 rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");
987 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
988 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
989 (!nb_rxq || !nb_txq))
990 rte_exit(EXIT_FAILURE,
991 "Either rxq or txq are 0, cannot use %s fwd mode\n",
992 cur_fwd_eng->fwd_mode_name);
994 if (all_ports_started() == 0) {
995 printf("Not all ports were started\n");
998 if (test_done == 0) {
999 printf("Packet forwarding already started\n");
1003 if (init_fwd_streams() < 0) {
1004 printf("Fail from init_fwd_streams()\n");
1009 for (i = 0; i < nb_fwd_ports; i++) {
1010 pt_id = fwd_ports_ids[i];
1011 port = &ports[pt_id];
1012 if (!port->dcb_flag) {
1013 printf("In DCB mode, all forwarding ports must "
1014 "be configured in this mode.\n");
1018 if (nb_fwd_lcores == 1) {
1019 printf("In DCB mode,the nb forwarding cores "
1020 "should be larger than 1.\n");
1027 flush_fwd_rx_queues();
1030 pkt_fwd_config_display(&cur_fwd_config);
1031 rxtx_config_display();
1033 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1034 pt_id = fwd_ports_ids[i];
1035 port = &ports[pt_id];
1036 rte_eth_stats_get(pt_id, &port->stats);
1037 port->tx_dropped = 0;
1039 map_port_queue_stats_mapping_registers(pt_id, port);
1041 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1042 fwd_streams[sm_id]->rx_packets = 0;
1043 fwd_streams[sm_id]->tx_packets = 0;
1044 fwd_streams[sm_id]->fwd_dropped = 0;
1045 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1046 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1048 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1049 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1050 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1051 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1052 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1054 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1055 fwd_streams[sm_id]->core_cycles = 0;
1058 if (with_tx_first) {
1059 port_fwd_begin = tx_only_engine.port_fwd_begin;
1060 if (port_fwd_begin != NULL) {
1061 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1062 (*port_fwd_begin)(fwd_ports_ids[i]);
1064 while (with_tx_first--) {
1065 launch_packet_forwarding(
1066 run_one_txonly_burst_on_core);
1067 rte_eal_mp_wait_lcore();
1069 port_fwd_end = tx_only_engine.port_fwd_end;
1070 if (port_fwd_end != NULL) {
1071 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1072 (*port_fwd_end)(fwd_ports_ids[i]);
1075 launch_packet_forwarding(start_pkt_forward_on_core);
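/*
 * Stop the forwarding lcores, collect the per-stream and per-port counters
 * accumulated during the run and display the forwarding statistics.
 */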
1079 stop_packet_forwarding(void)
1081 struct rte_eth_stats stats;
1082 struct rte_port *port;
1083 port_fwd_end_t port_fwd_end;
1088 uint64_t total_recv;
1089 uint64_t total_xmit;
1090 uint64_t total_rx_dropped;
1091 uint64_t total_tx_dropped;
1092 uint64_t total_rx_nombuf;
1093 uint64_t tx_dropped;
1094 uint64_t rx_bad_ip_csum;
1095 uint64_t rx_bad_l4_csum;
1096 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1097 uint64_t fwd_cycles;
1099 static const char *acc_stats_border = "+++++++++++++++";
1102 printf("Packet forwarding not started\n");
1105 printf("Telling cores to stop...");
1106 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1107 fwd_lcores[lc_id]->stopped = 1;
1108 printf("\nWaiting for lcores to finish...\n");
1109 rte_eal_mp_wait_lcore();
1110 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1111 if (port_fwd_end != NULL) {
1112 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1113 pt_id = fwd_ports_ids[i];
1114 (*port_fwd_end)(pt_id);
1117 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1120 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1121 if (cur_fwd_config.nb_fwd_streams >
1122 cur_fwd_config.nb_fwd_ports) {
1123 fwd_stream_stats_display(sm_id);
1124 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1125 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1127 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1129 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1132 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1133 tx_dropped = (uint64_t) (tx_dropped +
1134 fwd_streams[sm_id]->fwd_dropped);
1135 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1138 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1139 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1140 fwd_streams[sm_id]->rx_bad_ip_csum);
1141 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1145 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1146 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1147 fwd_streams[sm_id]->rx_bad_l4_csum);
1148 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1151 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1152 fwd_cycles = (uint64_t) (fwd_cycles +
1153 fwd_streams[sm_id]->core_cycles);
1158 total_rx_dropped = 0;
1159 total_tx_dropped = 0;
1160 total_rx_nombuf = 0;
1161 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1162 pt_id = fwd_ports_ids[i];
1164 port = &ports[pt_id];
1165 rte_eth_stats_get(pt_id, &stats);
1166 stats.ipackets -= port->stats.ipackets;
1167 port->stats.ipackets = 0;
1168 stats.opackets -= port->stats.opackets;
1169 port->stats.opackets = 0;
1170 stats.ibytes -= port->stats.ibytes;
1171 port->stats.ibytes = 0;
1172 stats.obytes -= port->stats.obytes;
1173 port->stats.obytes = 0;
1174 stats.imissed -= port->stats.imissed;
1175 port->stats.imissed = 0;
1176 stats.oerrors -= port->stats.oerrors;
1177 port->stats.oerrors = 0;
1178 stats.rx_nombuf -= port->stats.rx_nombuf;
1179 port->stats.rx_nombuf = 0;
1181 total_recv += stats.ipackets;
1182 total_xmit += stats.opackets;
1183 total_rx_dropped += stats.imissed;
1184 total_tx_dropped += port->tx_dropped;
1185 total_rx_nombuf += stats.rx_nombuf;
1187 fwd_port_stats_display(pt_id, &stats);
1189 printf("\n %s Accumulated forward statistics for all ports"
1191 acc_stats_border, acc_stats_border);
1192 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1194 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1196 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1197 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1198 if (total_rx_nombuf > 0)
1199 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1200 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1202 acc_stats_border, acc_stats_border);
1203 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1205 printf("\n CPU cycles/packet=%u (total cycles="
1206 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1207 (unsigned int)(fwd_cycles / total_recv),
1208 fwd_cycles, total_recv);
1210 printf("\nDone.\n");
1215 dev_set_link_up(portid_t pid)
1217 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1218 printf("\nSet link up fail.\n");
1222 dev_set_link_down(portid_t pid)
1224 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1225 printf("\nSet link down fail.\n");
1229 all_ports_started(void)
1232 struct rte_port *port;
1234 FOREACH_PORT(pi, ports) {
1236 /* Check if there is a port which is not started */
1237 if ((port->port_status != RTE_PORT_STARTED) &&
1238 (port->slave_flag == 0))
/* All (non-slave) ports are started. */
1247 all_ports_stopped(void)
1250 struct rte_port *port;
1252 FOREACH_PORT(pi, ports) {
1254 if ((port->port_status != RTE_PORT_STOPPED) &&
1255 (port->slave_flag == 0))
1263 port_is_started(portid_t port_id)
1265 if (port_id_is_invalid(port_id, ENABLED_WARN))
1268 if (ports[port_id].port_status != RTE_PORT_STARTED)
1275 port_is_closed(portid_t port_id)
1277 if (port_id_is_invalid(port_id, ENABLED_WARN))
1280 if (ports[port_id].port_status != RTE_PORT_CLOSED)
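/*
 * Configure and start one port, or all ports when RTE_PORT_ALL is given,
 * (re)creating its RX/TX queues when the port is flagged for reconfiguration.
 */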
1287 start_port(portid_t pid)
1289 int diag, need_check_link_status = -1;
1292 struct rte_port *port;
1293 struct ether_addr mac_addr;
1295 if (port_id_is_invalid(pid, ENABLED_WARN))
1300 FOREACH_PORT(pi, ports) {
1301 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1304 need_check_link_status = 0;
1306 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1307 RTE_PORT_HANDLING) == 0) {
1308 printf("Port %d is now not stopped\n", pi);
1312 if (port->need_reconfig > 0) {
1313 port->need_reconfig = 0;
1315 printf("Configuring Port %d (socket %u)\n", pi,
1317 /* configure port */
1318 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1321 if (rte_atomic16_cmpset(&(port->port_status),
1322 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1323 printf("Port %d can not be set back "
1324 "to stopped\n", pi);
1325 printf("Fail to configure port %d\n", pi);
1326 /* try to reconfigure port next time */
1327 port->need_reconfig = 1;
1331 if (port->need_reconfig_queues > 0) {
1332 port->need_reconfig_queues = 0;
1333 /* setup tx queues */
1334 for (qi = 0; qi < nb_txq; qi++) {
1335 if ((numa_support) &&
1336 (txring_numa[pi] != NUMA_NO_CONFIG))
1337 diag = rte_eth_tx_queue_setup(pi, qi,
1338 nb_txd,txring_numa[pi],
1341 diag = rte_eth_tx_queue_setup(pi, qi,
1342 nb_txd,port->socket_id,
1348 /* Fail to setup tx queue, return */
1349 if (rte_atomic16_cmpset(&(port->port_status),
1351 RTE_PORT_STOPPED) == 0)
1352 printf("Port %d can not be set back "
1353 "to stopped\n", pi);
1354 printf("Fail to configure port %d tx queues\n", pi);
1355 /* try to reconfigure queues next time */
1356 port->need_reconfig_queues = 1;
1359 /* setup rx queues */
1360 for (qi = 0; qi < nb_rxq; qi++) {
1361 if ((numa_support) &&
1362 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1363 struct rte_mempool * mp =
1364 mbuf_pool_find(rxring_numa[pi]);
1366 printf("Failed to setup RX queue:"
1367 "No mempool allocation"
1368 " on the socket %d\n",
1373 diag = rte_eth_rx_queue_setup(pi, qi,
1374 nb_rxd,rxring_numa[pi],
1375 &(port->rx_conf),mp);
1377 struct rte_mempool *mp =
1378 mbuf_pool_find(port->socket_id);
1380 printf("Failed to setup RX queue:"
1381 "No mempool allocation"
1382 " on the socket %d\n",
1386 diag = rte_eth_rx_queue_setup(pi, qi,
1387 nb_rxd,port->socket_id,
1388 &(port->rx_conf), mp);
1393 /* Fail to setup rx queue, return */
1394 if (rte_atomic16_cmpset(&(port->port_status),
1396 RTE_PORT_STOPPED) == 0)
1397 printf("Port %d can not be set back "
1398 "to stopped\n", pi);
1399 printf("Fail to configure port %d rx queues\n", pi);
1400 /* try to reconfigure queues next time */
1401 port->need_reconfig_queues = 1;
1406 if (rte_eth_dev_start(pi) < 0) {
1407 printf("Fail to start port %d\n", pi);
1409 /* Fail to setup rx queue, return */
1410 if (rte_atomic16_cmpset(&(port->port_status),
1411 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1412 printf("Port %d can not be set back to "
1417 if (rte_atomic16_cmpset(&(port->port_status),
1418 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1419 printf("Port %d can not be set into started\n", pi);
1421 rte_eth_macaddr_get(pi, &mac_addr);
1422 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1423 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1424 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1425 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1427 /* at least one port started, need checking link status */
1428 need_check_link_status = 1;
1431 if (need_check_link_status == 1 && !no_link_check)
1432 check_all_ports_link_status(RTE_PORT_ALL);
1433 else if (need_check_link_status == 0)
1434 printf("Please stop the ports first\n");
1441 stop_port(portid_t pid)
1444 struct rte_port *port;
1445 int need_check_link_status = 0;
1452 if (port_id_is_invalid(pid, ENABLED_WARN))
1455 printf("Stopping ports...\n");
1457 FOREACH_PORT(pi, ports) {
1458 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1461 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1462 printf("Please remove port %d from forwarding configuration.\n", pi);
1466 if (port_is_bonding_slave(pi)) {
1467 printf("Please remove port %d from bonded device.\n", pi);
1472 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1473 RTE_PORT_HANDLING) == 0)
1476 rte_eth_dev_stop(pi);
1478 if (rte_atomic16_cmpset(&(port->port_status),
1479 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1480 printf("Port %d can not be set into stopped\n", pi);
1481 need_check_link_status = 1;
1483 if (need_check_link_status && !no_link_check)
1484 check_all_ports_link_status(RTE_PORT_ALL);
1490 close_port(portid_t pid)
1493 struct rte_port *port;
1495 if (port_id_is_invalid(pid, ENABLED_WARN))
1498 printf("Closing ports...\n");
1500 FOREACH_PORT(pi, ports) {
1501 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1504 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1505 printf("Please remove port %d from forwarding configuration.\n", pi);
1509 if (port_is_bonding_slave(pi)) {
1510 printf("Please remove port %d from bonded device.\n", pi);
1515 if (rte_atomic16_cmpset(&(port->port_status),
1516 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1517 printf("Port %d is already closed\n", pi);
1521 if (rte_atomic16_cmpset(&(port->port_status),
1522 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1523 printf("Port %d is now not stopped\n", pi);
1527 rte_eth_dev_close(pi);
1529 if (rte_atomic16_cmpset(&(port->port_status),
1530 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1531 printf("Port %d cannot be set to closed\n", pi);
1538 attach_port(char *identifier)
1541 unsigned int socket_id;
1543 printf("Attaching a new port...\n");
1545 if (identifier == NULL) {
1546 printf("Invalid parameters are specified\n");
1550 if (rte_eth_dev_attach(identifier, &pi))
1553 ports[pi].enabled = 1;
1554 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1555 /* if socket_id is invalid, set to 0 */
1556 if (check_socket_id(socket_id) < 0)
1558 reconfig(pi, socket_id);
1559 rte_eth_promiscuous_enable(pi);
1561 nb_ports = rte_eth_dev_count();
1563 ports[pi].port_status = RTE_PORT_STOPPED;
1565 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1570 detach_port(uint8_t port_id)
1572 char name[RTE_ETH_NAME_MAX_LEN];
1574 printf("Detaching a port...\n");
1576 if (!port_is_closed(port_id)) {
1577 printf("Please close port first\n");
1581 if (rte_eth_dev_detach(port_id, name))
1584 ports[port_id].enabled = 0;
1585 nb_ports = rte_eth_dev_count();
1587 printf("Port '%s' is detached. Now total ports is %d\n",
1599 stop_packet_forwarding();
1601 if (ports != NULL) {
1603 FOREACH_PORT(pt_id, ports) {
1604 printf("\nShutting down port %d...\n", pt_id);
1610 printf("\nBye...\n");
1613 typedef void (*cmd_func_t)(void);
1614 struct pmd_test_command {
1615 const char *cmd_name;
1616 cmd_func_t cmd_func;
1619 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports for up to 9 s, and print the final status. */
1623 check_all_ports_link_status(uint32_t port_mask)
1625 #define CHECK_INTERVAL 100 /* 100ms */
1626 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1627 uint8_t portid, count, all_ports_up, print_flag = 0;
1628 struct rte_eth_link link;
1630 printf("Checking link statuses...\n");
1632 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1634 FOREACH_PORT(portid, ports) {
1635 if ((port_mask & (1 << portid)) == 0)
1637 memset(&link, 0, sizeof(link));
1638 rte_eth_link_get_nowait(portid, &link);
1639 /* print link status if flag set */
1640 if (print_flag == 1) {
1641 if (link.link_status)
1642 printf("Port %d Link Up - speed %u "
1643 "Mbps - %s\n", (uint8_t)portid,
1644 (unsigned)link.link_speed,
1645 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1646 ("full-duplex") : ("half-duplex\n"));
1648 printf("Port %d Link Down\n",
1652 /* clear all_ports_up flag if any link down */
1653 if (link.link_status == ETH_LINK_DOWN) {
1658 /* after finally printing all link status, get out */
1659 if (print_flag == 1)
1662 if (all_ports_up == 0) {
1664 rte_delay_ms(CHECK_INTERVAL);
1667 /* set the print_flag if all ports up or timeout */
1668 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1675 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1679 uint8_t mapping_found = 0;
1681 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1682 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1683 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1684 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1685 tx_queue_stats_mappings[i].queue_id,
1686 tx_queue_stats_mappings[i].stats_counter_id);
1693 port->tx_queue_stats_mapping_enabled = 1;
1698 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1702 uint8_t mapping_found = 0;
1704 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1705 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1706 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1707 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1708 rx_queue_stats_mappings[i].queue_id,
1709 rx_queue_stats_mappings[i].stats_counter_id);
1716 port->rx_queue_stats_mapping_enabled = 1;
1721 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1725 diag = set_tx_queue_stats_mapping_registers(pi, port);
1727 if (diag == -ENOTSUP) {
1728 port->tx_queue_stats_mapping_enabled = 0;
1729 printf("TX queue stats mapping not supported port id=%d\n", pi);
1732 rte_exit(EXIT_FAILURE,
1733 "set_tx_queue_stats_mapping_registers "
1734 "failed for port id=%d diag=%d\n",
1738 diag = set_rx_queue_stats_mapping_registers(pi, port);
1740 if (diag == -ENOTSUP) {
1741 port->rx_queue_stats_mapping_enabled = 0;
1742 printf("RX queue stats mapping not supported port id=%d\n", pi);
1745 rte_exit(EXIT_FAILURE,
1746 "set_rx_queue_stats_mapping_registers "
1747 "failed for port id=%d diag=%d\n",
1753 rxtx_port_config(struct rte_port *port)
1755 port->rx_conf = port->dev_info.default_rxconf;
1756 port->tx_conf = port->dev_info.default_txconf;
1758 /* Check if any RX/TX parameters have been passed */
1759 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1760 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1762 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1763 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1765 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1766 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1768 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1769 port->rx_conf.rx_free_thresh = rx_free_thresh;
1771 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1772 port->rx_conf.rx_drop_en = rx_drop_en;
1774 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1775 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1777 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1778 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1780 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1781 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1783 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1784 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1786 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1787 port->tx_conf.tx_free_thresh = tx_free_thresh;
1789 if (txq_flags != RTE_PMD_PARAM_UNSET)
1790 port->tx_conf.txq_flags = txq_flags;
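/*
 * Build the default configuration of every probed port: RX mode, flow
 * director, RSS and multi-queue mode, then read back its MAC address.
 */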
1794 init_port_config(void)
1797 struct rte_port *port;
1799 FOREACH_PORT(pid, ports) {
1801 port->dev_conf.rxmode = rx_mode;
1802 port->dev_conf.fdir_conf = fdir_conf;
1804 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1805 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1807 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1808 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1811 if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
1812 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1813 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1815 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1818 if (port->dev_info.max_vfs != 0) {
1819 if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1820 port->dev_conf.rxmode.mq_mode =
1823 port->dev_conf.rxmode.mq_mode =
1826 port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
1829 rxtx_port_config(port);
1831 rte_eth_macaddr_get(pid, &port->eth_addr);
1833 map_port_queue_stats_mapping_registers(pid, port);
1834 #ifdef RTE_NIC_BYPASS
1835 rte_eth_dev_bypass_init(pid);
1840 void set_port_slave_flag(portid_t slave_pid)
1842 struct rte_port *port;
1844 port = &ports[slave_pid];
1845 port->slave_flag = 1;
1848 void clear_port_slave_flag(portid_t slave_pid)
1850 struct rte_port *port;
1852 port = &ports[slave_pid];
1853 port->slave_flag = 0;
1856 uint8_t port_is_bonding_slave(portid_t slave_pid)
1858 struct rte_port *port;
1860 port = &ports[slave_pid];
1861 return port->slave_flag;
1864 const uint16_t vlan_tags[] = {
1865 0, 1, 2, 3, 4, 5, 6, 7,
1866 8, 9, 10, 11, 12, 13, 14, 15,
1867 16, 17, 18, 19, 20, 21, 22, 23,
1868 24, 25, 26, 27, 28, 29, 30, 31
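/*
 * Fill an rte_eth_conf structure for DCB, either combined with
 * virtualization (VMDQ+DCB, one pool per VLAN tag above) or as plain DCB
 * with RSS.
 */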
1872 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1873 enum dcb_mode_enable dcb_mode,
1874 enum rte_eth_nb_tcs num_tcs,
1880 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1881 * given above, and the number of traffic classes available for use.
1883 if (dcb_mode == DCB_VT_ENABLED) {
1884 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
&eth_conf->rx_adv_conf.vmdq_dcb_conf;
1886 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
/* VMDQ+DCB RX and TX configurations */
1890 vmdq_rx_conf->enable_default_pool = 0;
1891 vmdq_rx_conf->default_pool = 0;
1892 vmdq_rx_conf->nb_queue_pools =
1893 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1894 vmdq_tx_conf->nb_queue_pools =
1895 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1897 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1898 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1899 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1900 vmdq_rx_conf->pool_map[i].pools =
1901 1 << (i % vmdq_rx_conf->nb_queue_pools);
1903 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1904 vmdq_rx_conf->dcb_tc[i] = i;
1905 vmdq_tx_conf->dcb_tc[i] = i;
1908 /* set DCB mode of RX and TX of multiple queues */
1909 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1910 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1912 struct rte_eth_dcb_rx_conf *rx_conf =
&eth_conf->rx_adv_conf.dcb_rx_conf;
1914 struct rte_eth_dcb_tx_conf *tx_conf =
&eth_conf->tx_adv_conf.dcb_tx_conf;
1917 rx_conf->nb_tcs = num_tcs;
1918 tx_conf->nb_tcs = num_tcs;
1920 for (i = 0; i < num_tcs; i++) {
1921 rx_conf->dcb_tc[i] = i;
1922 tx_conf->dcb_tc[i] = i;
1924 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1925 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1926 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1930 eth_conf->dcb_capability_en =
1931 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1933 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
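/*
 * Reconfigure one port for DCB: derive the RX/TX queue counts from the
 * requested mode, build the DCB configuration and install the VLAN filters.
 */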
1939 init_port_dcb_config(portid_t pid,
1940 enum dcb_mode_enable dcb_mode,
1941 enum rte_eth_nb_tcs num_tcs,
1944 struct rte_eth_conf port_conf;
1945 struct rte_eth_dev_info dev_info;
1946 struct rte_port *rte_port;
1950 rte_eth_dev_info_get(pid, &dev_info);
/* If dev_info.vmdq_pool_base is greater than 0,
 * the queue ids of the VMDq pools start after the PF queues.
1955 if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
1956 printf("VMDQ_DCB multi-queue mode is nonsensical"
1957 " for port %d.", pid);
/* Assume the ports in testpmd have the same DCB capability
 * and the same number of rxq and txq in DCB mode.
1964 if (dcb_mode == DCB_VT_ENABLED) {
1965 nb_rxq = dev_info.max_rx_queues;
1966 nb_txq = dev_info.max_tx_queues;
/* If VT is disabled, use all PF queues. */
1969 if (dev_info.vmdq_pool_base == 0) {
1970 nb_rxq = dev_info.max_rx_queues;
1971 nb_txq = dev_info.max_tx_queues;
1973 nb_rxq = (queueid_t)num_tcs;
1974 nb_txq = (queueid_t)num_tcs;
1978 rx_free_thresh = 64;
1980 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1981 /* Enter DCB configuration status */
/* Set the configuration of DCB in VT mode and DCB in non-VT mode. */
1985 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1989 rte_port = &ports[pid];
1990 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
1992 rxtx_port_config(rte_port);
1994 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
1995 for (i = 0; i < RTE_DIM(vlan_tags); i++)
1996 rx_vft_set(pid, vlan_tags[i], 1);
1998 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
1999 map_port_queue_stats_mapping_registers(pid, rte_port);
2001 rte_port->dcb_flag = 1;
2011 /* Configuration of Ethernet ports. */
2012 ports = rte_zmalloc("testpmd: ports",
2013 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2014 RTE_CACHE_LINE_SIZE);
2015 if (ports == NULL) {
2016 rte_exit(EXIT_FAILURE,
2017 "rte_zmalloc(%d struct rte_port) failed\n",
/* enable the allocated ports */
2022 for (pid = 0; pid < nb_ports; pid++)
2023 ports[pid].enabled = 1;
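/*
 * On SIGINT/SIGTERM, shut down packet capture when it is enabled and re-raise
 * the signal with the default handler so that the expected exit status is
 * preserved.
 */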
2034 signal_handler(int signum)
2036 if (signum == SIGINT || signum == SIGTERM) {
2037 printf("\nSignal %d received, preparing to exit...\n",
2039 #ifdef RTE_LIBRTE_PDUMP
2040 /* uninitialize packet capture framework */
2044 /* exit with the expected status */
2045 signal(signum, SIG_DFL);
2046 kill(getpid(), signum);
2051 main(int argc, char** argv)
2056 signal(SIGINT, signal_handler);
2057 signal(SIGTERM, signal_handler);
2059 diag = rte_eal_init(argc, argv);
2061 rte_panic("Cannot init EAL\n");
2063 #ifdef RTE_LIBRTE_PDUMP
2064 /* initialize packet capture framework */
2065 rte_pdump_init(NULL);
2068 nb_ports = (portid_t) rte_eth_dev_count();
2070 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2072 /* allocate port structures, and init them */
2075 set_def_fwd_config();
2077 rte_panic("Empty set of forwarding logical cores - check the "
2078 "core mask supplied in the command parameters\n");
2083 launch_args_parse(argc, argv);
2085 if (!nb_rxq && !nb_txq)
2086 printf("Warning: Either rx or tx queues should be non-zero\n");
2088 if (nb_rxq > 1 && nb_rxq > nb_txq)
2089 printf("Warning: nb_rxq=%d enables RSS configuration, "
2090 "but nb_txq=%d will prevent to fully test it.\n",
2094 if (start_port(RTE_PORT_ALL) != 0)
2095 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2097 /* set all ports to promiscuous mode by default */
2098 FOREACH_PORT(port_id, ports)
2099 rte_eth_promiscuous_enable(port_id);
2101 #ifdef RTE_LIBRTE_CMDLINE
2102 if (interactive == 1) {
2104 printf("Start automatic packet forwarding\n");
2105 start_packet_forwarding(0);
2114 printf("No commandline core given, start packet forwarding\n");
2115 start_packet_forwarding(0);
2116 printf("Press enter to exit\n");
2117 rc = read(0, &c, 1);