4 * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include <sys/types.h>
44 #include <sys/queue.h>
51 #include <rte_common.h>
52 #include <rte_errno.h>
53 #include <rte_byteorder.h>
55 #include <rte_debug.h>
56 #include <rte_cycles.h>
57 #include <rte_memory.h>
58 #include <rte_memcpy.h>
59 #include <rte_memzone.h>
60 #include <rte_launch.h>
62 #include <rte_per_lcore.h>
63 #include <rte_lcore.h>
64 #include <rte_atomic.h>
65 #include <rte_branch_prediction.h>
66 #include <rte_mempool.h>
67 #include <rte_malloc.h>
69 #include <rte_interrupts.h>
71 #include <rte_ether.h>
72 #include <rte_ethdev.h>
74 #include <rte_string_fns.h>
75 #ifdef RTE_LIBRTE_PMD_XENVIRT
76 #include <rte_eth_xenvirt.h>
78 #ifdef RTE_LIBRTE_PDUMP
79 #include <rte_pdump.h>
84 uint16_t verbose_level = 0; /**< Silent by default. */
86 /* use the master core for the command line? */
87 uint8_t interactive = 0;
88 uint8_t auto_start = 0;
91 * NUMA support configuration.
92 * When set, the NUMA support attempts to dispatch the allocation of the
93 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
94 * probed ports among the CPU sockets 0 and 1.
95 * Otherwise, all memory is allocated from CPU socket 0.
97 uint8_t numa_support = 0; /**< No numa support by default */
100 * In UMA mode, all memory is allocated from socket 0 if --socket-num is not specified.
103 uint8_t socket_num = UMA_NO_CONFIG;
106 * Use ANONYMOUS mapped memory (which might not be physically contiguous) for mbufs.
111 * Record the Ethernet address of peer target ports to which packets are forwarded.
113 * Must be instantiated with the Ethernet addresses of peer traffic generator ports.
116 struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
117 portid_t nb_peer_eth_addrs = 0;
120 * Probed Target Environment.
122 struct rte_port *ports; /**< For all probed ethernet ports. */
123 portid_t nb_ports; /**< Number of probed ethernet ports. */
124 struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
125 lcoreid_t nb_lcores; /**< Number of probed logical cores. */
128 * Test Forwarding Configuration.
129 * nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
130 * nb_fwd_ports <= nb_cfg_ports <= nb_ports
132 lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
133 lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
134 portid_t nb_cfg_ports; /**< Number of configured ports. */
135 portid_t nb_fwd_ports; /**< Number of forwarding ports. */
137 unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
138 portid_t fwd_ports_ids[RTE_MAX_ETHPORTS]; /**< Port ids configuration. */
140 struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
141 streamid_t nb_fwd_streams; /**< Is equal to (nb_ports * nb_rxq). */
144 * Forwarding engines.
146 struct fwd_engine * fwd_engines[] = {
155 #ifdef RTE_LIBRTE_IEEE1588
156 &ieee1588_fwd_engine,
161 struct fwd_config cur_fwd_config;
162 struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
163 uint32_t retry_enabled;
164 uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
165 uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
167 uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
168 uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
169 * specified on command-line. */
172 * Configuration of packet segments used by the "txonly" processing engine.
174 uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
175 uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
176 TXONLY_DEF_PACKET_LEN,
178 uint8_t tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */
180 enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
181 /**< Split policy for packets to TX. */
183 uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
184 uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
186 /* whether the current configuration is in DCB mode; 0 means it is not */
187 uint8_t dcb_config = 0;
189 /* whether DCB is in testing status */
190 uint8_t dcb_test = 0;
193 * Configurable number of RX/TX queues.
195 queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
196 queueid_t nb_txq = 1; /**< Number of TX queues per port. */
199 * Configurable number of RX/TX ring descriptors.
201 #define RTE_TEST_RX_DESC_DEFAULT 128
202 #define RTE_TEST_TX_DESC_DEFAULT 512
203 uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
204 uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
206 #define RTE_PMD_PARAM_UNSET -1
208 * Configurable values of RX and TX ring threshold registers.
211 int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
212 int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
213 int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;
215 int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
216 int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
217 int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
220 * Configurable value of RX free threshold.
222 int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;
225 * Configurable value of RX drop enable.
227 int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;
230 * Configurable value of TX free threshold.
232 int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;
235 * Configurable value of TX RS bit threshold.
237 int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;
240 * Configurable value of TX queue flags.
242 int32_t txq_flags = RTE_PMD_PARAM_UNSET;
245 * Receive Side Scaling (RSS) configuration.
247 uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */
250 * Port topology configuration
252 uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
255 * Avoid flushing all the RX streams before starting forwarding.
257 uint8_t no_flush_rx = 0; /* flush by default */
260 * Avoid checking the link status when starting/stopping a port.
262 uint8_t no_link_check = 0; /* check by default */
265 * NIC bypass mode configuration options.
267 #ifdef RTE_NIC_BYPASS
269 /* The NIC bypass watchdog timeout. */
270 uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
275 * Ethernet device configuration.
277 struct rte_eth_rxmode rx_mode = {
278 .max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
280 .header_split = 0, /**< Header Split disabled. */
281 .hw_ip_checksum = 0, /**< IP checksum offload disabled. */
282 .hw_vlan_filter = 1, /**< VLAN filtering enabled. */
283 .hw_vlan_strip = 1, /**< VLAN strip enabled. */
284 .hw_vlan_extend = 0, /**< Extended VLAN disabled. */
285 .jumbo_frame = 0, /**< Jumbo Frame Support disabled. */
286 .hw_strip_crc = 1, /**< CRC stripping by hardware enabled. */
289 struct rte_fdir_conf fdir_conf = {
290 .mode = RTE_FDIR_MODE_NONE,
291 .pballoc = RTE_FDIR_PBALLOC_64K,
292 .status = RTE_FDIR_REPORT_STATUS,
294 .vlan_tci_mask = 0x0,
296 .src_ip = 0xFFFFFFFF,
297 .dst_ip = 0xFFFFFFFF,
300 .src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
301 .dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
303 .src_port_mask = 0xFFFF,
304 .dst_port_mask = 0xFFFF,
305 .mac_addr_byte_mask = 0xFF,
306 .tunnel_type_mask = 1,
307 .tunnel_id_mask = 0xFFFFFFFF,
312 volatile int test_done = 1; /* stop packet forwarding when set to 1. */
314 struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
315 struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];
317 struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
318 struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;
320 uint16_t nb_tx_queue_stats_mappings = 0;
321 uint16_t nb_rx_queue_stats_mappings = 0;
323 unsigned max_socket = 0;
325 /* Forward function declarations */
326 static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
327 static void check_all_ports_link_status(uint32_t port_mask);
330 * Check if all the ports are started.
331 * If yes, return positive value. If not, return zero.
333 static int all_ports_started(void);
336 * Find next enabled port
339 find_next_port(portid_t p, struct rte_port *ports, int size)
342 rte_exit(-EINVAL, "failed to find a next port id\n");
344 while ((p < size) && (ports[p].enabled == 0))
350 * Setup default configuration.
353 set_default_fwd_lcores_config(void)
357 unsigned int sock_num;
360 for (i = 0; i < RTE_MAX_LCORE; i++) {
361 sock_num = rte_lcore_to_socket_id(i) + 1;
362 if (sock_num > max_socket) {
363 if (sock_num > RTE_MAX_NUMA_NODES)
364 rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
365 max_socket = sock_num;
367 if (!rte_lcore_is_enabled(i))
369 if (i == rte_get_master_lcore())
371 fwd_lcores_cpuids[nb_lc++] = i;
373 nb_lcores = (lcoreid_t) nb_lc;
374 nb_cfg_lcores = nb_lcores;
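/*
 * Set default peer Ethernet addresses: locally administered addresses
 * whose last byte is the index of the peer port.
 */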
379 set_def_peer_eth_addrs(void)
383 for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
384 peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
385 peer_eth_addrs[i].addr_bytes[5] = i;
390 set_default_fwd_ports_config(void)
394 for (pt_id = 0; pt_id < nb_ports; pt_id++)
395 fwd_ports_ids[pt_id] = pt_id;
397 nb_cfg_ports = nb_ports;
398 nb_fwd_ports = nb_ports;
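/*
 * Build the default forwarding configuration: lcores, peer addresses
 * and port list.
 */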
402 set_def_fwd_config(void)
404 set_default_fwd_lcores_config();
405 set_def_peer_eth_addrs();
406 set_default_fwd_ports_config();
410 * Configuration initialisation done once at init time.
413 mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
414 unsigned int socket_id)
416 char pool_name[RTE_MEMPOOL_NAMESIZE];
417 struct rte_mempool *rte_mp = NULL;
420 mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
421 mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
424 "create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
425 pool_name, nb_mbuf, mbuf_seg_size, socket_id);
427 #ifdef RTE_LIBRTE_PMD_XENVIRT
428 rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
429 (unsigned) mb_mempool_cache,
430 sizeof(struct rte_pktmbuf_pool_private),
431 rte_pktmbuf_pool_init, NULL,
432 rte_pktmbuf_init, NULL,
436 /* if the former XEN allocation failed, fall back to normal allocation */
437 if (rte_mp == NULL) {
439 rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
440 mb_size, (unsigned) mb_mempool_cache,
441 sizeof(struct rte_pktmbuf_pool_private),
446 if (rte_mempool_populate_anon(rte_mp) == 0) {
447 rte_mempool_free(rte_mp);
451 rte_pktmbuf_pool_init(rte_mp, NULL);
452 rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
454 /* wrapper to rte_mempool_create() */
455 rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
456 mb_mempool_cache, 0, mbuf_seg_size, socket_id);
461 if (rte_mp == NULL) {
462 rte_exit(EXIT_FAILURE,
463 "Creation of mbuf pool for socket %u failed: %s\n",
464 socket_id, rte_strerror(rte_errno));
465 } else if (verbose_level > 0) {
466 rte_mempool_dump(stdout, rte_mp);
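/*
 * Illustrative use (see init_config() below): with NUMA support enabled,
 * one mbuf pool is created per detected socket, e.g.
 *
 *	for (i = 0; i < max_socket; i++)
 *		mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
 */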
471 * Check whether the given socket id is valid in NUMA mode;
472 * if valid, return 0, else return -1
475 check_socket_id(const unsigned int socket_id)
477 static int warning_once = 0;
479 if (socket_id >= max_socket) {
480 if (!warning_once && numa_support)
481 printf("Warning: NUMA should be configured manually by"
482 " using --port-numa-config and"
483 " --ring-numa-config parameters along with"
495 struct rte_port *port;
496 struct rte_mempool *mbp;
497 unsigned int nb_mbuf_per_pool;
499 uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
501 memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);
502 /* Configuration of logical cores. */
503 fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
504 sizeof(struct fwd_lcore *) * nb_lcores,
505 RTE_CACHE_LINE_SIZE);
506 if (fwd_lcores == NULL) {
507 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
508 "failed\n", nb_lcores);
510 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
511 fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
512 sizeof(struct fwd_lcore),
513 RTE_CACHE_LINE_SIZE);
514 if (fwd_lcores[lc_id] == NULL) {
515 rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
518 fwd_lcores[lc_id]->cpuid_idx = lc_id;
521 FOREACH_PORT(pid, ports) {
523 rte_eth_dev_info_get(pid, &port->dev_info);
526 if (port_numa[pid] != NUMA_NO_CONFIG)
527 port_per_socket[port_numa[pid]]++;
529 uint32_t socket_id = rte_eth_dev_socket_id(pid);
531 /* if socket_id is invalid, set to 0 */
532 if (check_socket_id(socket_id) < 0)
534 port_per_socket[socket_id]++;
538 /* set flag to initialize port/queue */
539 port->need_reconfig = 1;
540 port->need_reconfig_queues = 1;
544 * Create pools of mbuf.
545 * If NUMA support is disabled, create a single pool of mbuf in
546 * socket 0 memory by default.
547 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
549 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
550 * nb_txd can be configured at run time.
552 if (param_total_num_mbufs)
553 nb_mbuf_per_pool = param_total_num_mbufs;
555 nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
556 (nb_lcores * mb_mempool_cache) +
557 RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
558 nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
564 for (i = 0; i < max_socket; i++)
565 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
567 if (socket_num == UMA_NO_CONFIG)
568 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
570 mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
577 * Record which mbuf pool each logical core should use, if needed.
579 for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
580 mbp = mbuf_pool_find(
581 rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));
584 mbp = mbuf_pool_find(0);
585 fwd_lcores[lc_id]->mbp = mbp;
588 /* Configuration of packet forwarding streams. */
589 if (init_fwd_streams() < 0)
590 rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
597 reconfig(portid_t new_port_id, unsigned socket_id)
599 struct rte_port *port;
601 /* Reconfiguration of Ethernet ports. */
602 port = &ports[new_port_id];
603 rte_eth_dev_info_get(new_port_id, &port->dev_info);
605 /* set flag to initialize port/queue */
606 port->need_reconfig = 1;
607 port->need_reconfig_queues = 1;
608 port->socket_id = socket_id;
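/*
 * Allocate one forwarding stream per (port, queue) pair, after checking
 * that nb_rxq and nb_txq fit within each port's limits. Existing streams
 * are freed and reallocated whenever the stream count changes.
 */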
615 init_fwd_streams(void)
618 struct rte_port *port;
619 streamid_t sm_id, nb_fwd_streams_new;
622 /* set the socket id according to whether NUMA is enabled */
623 FOREACH_PORT(pid, ports) {
625 if (nb_rxq > port->dev_info.max_rx_queues) {
626 printf("Fail: nb_rxq(%d) is greater than "
627 "max_rx_queues(%d)\n", nb_rxq,
628 port->dev_info.max_rx_queues);
631 if (nb_txq > port->dev_info.max_tx_queues) {
632 printf("Fail: nb_txq(%d) is greater than "
633 "max_tx_queues(%d)\n", nb_txq,
634 port->dev_info.max_tx_queues);
638 if (port_numa[pid] != NUMA_NO_CONFIG)
639 port->socket_id = port_numa[pid];
641 port->socket_id = rte_eth_dev_socket_id(pid);
643 /* if socket_id is invalid, set to 0 */
644 if (check_socket_id(port->socket_id) < 0)
649 if (socket_num == UMA_NO_CONFIG)
652 port->socket_id = socket_num;
656 q = RTE_MAX(nb_rxq, nb_txq);
658 printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
661 nb_fwd_streams_new = (streamid_t)(nb_ports * q);
662 if (nb_fwd_streams_new == nb_fwd_streams)
665 if (fwd_streams != NULL) {
666 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
667 if (fwd_streams[sm_id] == NULL)
669 rte_free(fwd_streams[sm_id]);
670 fwd_streams[sm_id] = NULL;
672 rte_free(fwd_streams);
677 nb_fwd_streams = nb_fwd_streams_new;
678 if (nb_fwd_streams) {
679 fwd_streams = rte_zmalloc("testpmd: fwd_streams",
680 sizeof(struct fwd_stream *) * nb_fwd_streams,
681 RTE_CACHE_LINE_SIZE);
682 if (fwd_streams == NULL)
683 rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
684 " (struct fwd_stream *)) failed\n",
687 for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
688 fwd_streams[sm_id] = rte_zmalloc("testpmd:"
689 " struct fwd_stream", sizeof(struct fwd_stream),
690 RTE_CACHE_LINE_SIZE);
691 if (fwd_streams[sm_id] == NULL)
692 rte_exit(EXIT_FAILURE, "rte_zmalloc"
693 "(struct fwd_stream) failed\n");
700 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
702 pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
704 unsigned int total_burst;
705 unsigned int nb_burst;
706 unsigned int burst_stats[3];
707 uint16_t pktnb_stats[3];
709 int burst_percent[3];
712 * First compute the total number of packet bursts and the
713 * two highest numbers of bursts of the same number of packets.
716 burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
717 pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
718 for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
719 nb_burst = pbs->pkt_burst_spread[nb_pkt];
722 total_burst += nb_burst;
723 if (nb_burst > burst_stats[0]) {
724 burst_stats[1] = burst_stats[0];
725 pktnb_stats[1] = pktnb_stats[0];
726 burst_stats[0] = nb_burst;
727 pktnb_stats[0] = nb_pkt;
728 } else if (nb_burst > burst_stats[1]) {
729 burst_stats[1] = nb_burst;
730 pktnb_stats[1] = nb_pkt;
733 if (total_burst == 0)
735 burst_percent[0] = (burst_stats[0] * 100) / total_burst;
736 printf(" %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
737 burst_percent[0], (int) pktnb_stats[0]);
738 if (burst_stats[0] == total_burst) {
742 if (burst_stats[0] + burst_stats[1] == total_burst) {
743 printf(" + %d%% of %d pkts]\n",
744 100 - burst_percent[0], pktnb_stats[1]);
747 burst_percent[1] = (burst_stats[1] * 100) / total_burst;
748 burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
749 if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
750 printf(" + %d%% of others]\n", 100 - burst_percent[0]);
753 printf(" + %d%% of %d pkts + %d%% of others]\n",
754 burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
756 #endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
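/*
 * Display the forwarding statistics accumulated on a port since packet
 * forwarding was started.
 */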
759 fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
761 struct rte_port *port;
764 static const char *fwd_stats_border = "----------------------";
766 port = &ports[port_id];
767 printf("\n %s Forward statistics for port %-2d %s\n",
768 fwd_stats_border, port_id, fwd_stats_border);
770 if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
771 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
773 stats->ipackets, stats->imissed,
774 (uint64_t) (stats->ipackets + stats->imissed));
776 if (cur_fwd_eng == &csum_fwd_engine)
777 printf(" Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
778 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
779 if ((stats->ierrors + stats->rx_nombuf) > 0) {
780 printf(" RX-error: %-"PRIu64"\n", stats->ierrors);
781 printf(" RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
784 printf(" TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
786 stats->opackets, port->tx_dropped,
787 (uint64_t) (stats->opackets + port->tx_dropped));
790 printf(" RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
792 stats->ipackets, stats->imissed,
793 (uint64_t) (stats->ipackets + stats->imissed));
795 if (cur_fwd_eng == &csum_fwd_engine)
796 printf(" Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
797 port->rx_bad_ip_csum, port->rx_bad_l4_csum);
798 if ((stats->ierrors + stats->rx_nombuf) > 0) {
799 printf(" RX-error:%"PRIu64"\n", stats->ierrors);
800 printf(" RX-nombufs: %14"PRIu64"\n",
804 printf(" TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
806 stats->opackets, port->tx_dropped,
807 (uint64_t) (stats->opackets + port->tx_dropped));
810 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
812 pkt_burst_stats_display("RX",
813 &port->rx_stream->rx_burst_stats);
815 pkt_burst_stats_display("TX",
816 &port->tx_stream->tx_burst_stats);
819 if (port->rx_queue_stats_mapping_enabled) {
821 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
822 printf(" Stats reg %2d RX-packets:%14"PRIu64
823 " RX-errors:%14"PRIu64
824 " RX-bytes:%14"PRIu64"\n",
825 i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
829 if (port->tx_queue_stats_mapping_enabled) {
830 for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
831 printf(" Stats reg %2d TX-packets:%14"PRIu64
832 " TX-bytes:%14"PRIu64"\n",
833 i, stats->q_opackets[i], stats->q_obytes[i]);
837 printf(" %s--------------------------------%s\n",
838 fwd_stats_border, fwd_stats_border);
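/*
 * Display the per-stream statistics (RX port/queue -> TX port/queue),
 * skipping streams that handled no packets.
 */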
842 fwd_stream_stats_display(streamid_t stream_id)
844 struct fwd_stream *fs;
845 static const char *fwd_top_stats_border = "-------";
847 fs = fwd_streams[stream_id];
848 if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
849 (fs->fwd_dropped == 0))
851 printf("\n %s Forward Stats for RX Port=%2d/Queue=%2d -> "
852 "TX Port=%2d/Queue=%2d %s\n",
853 fwd_top_stats_border, fs->rx_port, fs->rx_queue,
854 fs->tx_port, fs->tx_queue, fwd_top_stats_border);
855 printf(" RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
856 fs->rx_packets, fs->tx_packets, fs->fwd_dropped);
858 /* if checksum mode */
859 if (cur_fwd_eng == &csum_fwd_engine) {
860 printf(" RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
861 "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
864 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
865 pkt_burst_stats_display("RX", &fs->rx_burst_stats);
866 pkt_burst_stats_display("TX", &fs->tx_burst_stats);
871 flush_fwd_rx_queues(void)
873 struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
880 uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
881 uint64_t timer_period;
883 /* convert to number of cycles */
884 timer_period = rte_get_timer_hz(); /* 1 second timeout */
886 for (j = 0; j < 2; j++) {
887 for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
888 for (rxq = 0; rxq < nb_rxq; rxq++) {
889 port_id = fwd_ports_ids[rxp];
891 * testpmd can get stuck in the do-while loop below
892 * if rte_eth_rx_burst() always returns a nonzero number of
893 * packets, so a timer is added to exit this loop
894 * after the 1-second timer expires.
896 prev_tsc = rte_rdtsc();
898 nb_rx = rte_eth_rx_burst(port_id, rxq,
899 pkts_burst, MAX_PKT_BURST);
900 for (i = 0; i < nb_rx; i++)
901 rte_pktmbuf_free(pkts_burst[i]);
903 cur_tsc = rte_rdtsc();
904 diff_tsc = cur_tsc - prev_tsc;
905 timer_tsc += diff_tsc;
906 } while ((nb_rx > 0) &&
907 (timer_tsc < timer_period));
911 rte_delay_ms(10); /* wait 10 milliseconds before retrying */
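/*
 * Forwarding loop of one logical core: repeatedly apply the packet
 * forwarding callback to each of the core's streams until stopped.
 */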
916 run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
918 struct fwd_stream **fsm;
922 fsm = &fwd_streams[fc->stream_idx];
923 nb_fs = fc->stream_nb;
925 for (sm_id = 0; sm_id < nb_fs; sm_id++)
926 (*pkt_fwd)(fsm[sm_id]);
927 } while (! fc->stopped);
931 start_pkt_forward_on_core(void *fwd_arg)
933 run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
934 cur_fwd_config.fwd_eng->packet_fwd);
939 * Run the TXONLY packet forwarding engine to send a single burst of packets.
940 * Used to start communication flows in network loopback test configurations.
943 run_one_txonly_burst_on_core(void *fwd_arg)
945 struct fwd_lcore *fwd_lc;
946 struct fwd_lcore tmp_lcore;
948 fwd_lc = (struct fwd_lcore *) fwd_arg;
950 tmp_lcore.stopped = 1;
951 run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
956 * Launch packet forwarding:
957 * - Set up the per-port forwarding context.
958 * - Launch the logical cores with their forwarding configuration.
961 launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
963 port_fwd_begin_t port_fwd_begin;
968 port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
969 if (port_fwd_begin != NULL) {
970 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
971 (*port_fwd_begin)(fwd_ports_ids[i]);
973 for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
974 lc_id = fwd_lcores_cpuids[i];
975 if ((interactive == 0) || (lc_id != rte_lcore_id())) {
976 fwd_lcores[i]->stopped = 0;
977 diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
978 fwd_lcores[i], lc_id);
980 printf("launch lcore %u failed - diag=%d\n",
987 * Update the forward ports list.
990 update_fwd_ports(portid_t new_pid)
993 unsigned int new_nb_fwd_ports = 0;
996 for (i = 0; i < nb_fwd_ports; ++i) {
997 if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
1000 fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
1004 if (new_pid < RTE_MAX_ETHPORTS)
1005 fwd_ports_ids[new_nb_fwd_ports++] = new_pid;
1007 nb_fwd_ports = new_nb_fwd_ports;
1008 nb_cfg_ports = new_nb_fwd_ports;
1012 * Launch packet forwarding configuration.
1015 start_packet_forwarding(int with_tx_first)
1017 port_fwd_begin_t port_fwd_begin;
1018 port_fwd_end_t port_fwd_end;
1019 struct rte_port *port;
1024 if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
1025 rte_exit(EXIT_FAILURE, "rxq is 0, cannot use rxonly fwd mode\n");
1027 if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
1028 rte_exit(EXIT_FAILURE, "txq is 0, cannot use txonly fwd mode\n");
1030 if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
1031 strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
1032 (!nb_rxq || !nb_txq))
1033 rte_exit(EXIT_FAILURE,
1034 "Either rxq or txq are 0, cannot use %s fwd mode\n",
1035 cur_fwd_eng->fwd_mode_name);
1037 if (all_ports_started() == 0) {
1038 printf("Not all ports were started\n");
1041 if (test_done == 0) {
1042 printf("Packet forwarding already started\n");
1048 for (i = 0; i < nb_fwd_ports; i++) {
1049 pt_id = fwd_ports_ids[i];
1050 port = &ports[pt_id];
1051 if (!port->dcb_flag) {
1052 printf("In DCB mode, all forwarding ports must "
1053 "be configured in this mode.\n");
1057 if (nb_fwd_lcores == 1) {
1058 printf("In DCB mode,the nb forwarding cores "
1059 "should be larger than 1.\n");
1068 flush_fwd_rx_queues();
1070 pkt_fwd_config_display(&cur_fwd_config);
1071 rxtx_config_display();
1073 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1074 pt_id = fwd_ports_ids[i];
1075 port = &ports[pt_id];
1076 rte_eth_stats_get(pt_id, &port->stats);
1077 port->tx_dropped = 0;
1079 map_port_queue_stats_mapping_registers(pt_id, port);
1081 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1082 fwd_streams[sm_id]->rx_packets = 0;
1083 fwd_streams[sm_id]->tx_packets = 0;
1084 fwd_streams[sm_id]->fwd_dropped = 0;
1085 fwd_streams[sm_id]->rx_bad_ip_csum = 0;
1086 fwd_streams[sm_id]->rx_bad_l4_csum = 0;
1088 #ifdef RTE_TEST_PMD_RECORD_BURST_STATS
1089 memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
1090 sizeof(fwd_streams[sm_id]->rx_burst_stats));
1091 memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
1092 sizeof(fwd_streams[sm_id]->tx_burst_stats));
1094 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1095 fwd_streams[sm_id]->core_cycles = 0;
1098 if (with_tx_first) {
1099 port_fwd_begin = tx_only_engine.port_fwd_begin;
1100 if (port_fwd_begin != NULL) {
1101 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1102 (*port_fwd_begin)(fwd_ports_ids[i]);
1104 while (with_tx_first--) {
1105 launch_packet_forwarding(
1106 run_one_txonly_burst_on_core);
1107 rte_eal_mp_wait_lcore();
1109 port_fwd_end = tx_only_engine.port_fwd_end;
1110 if (port_fwd_end != NULL) {
1111 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
1112 (*port_fwd_end)(fwd_ports_ids[i]);
1115 launch_packet_forwarding(start_pkt_forward_on_core);
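/*
 * Stop packet forwarding on all lcores, wait for them to finish, then
 * collect and display the per-stream and per-port statistics.
 */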
1119 stop_packet_forwarding(void)
1121 struct rte_eth_stats stats;
1122 struct rte_port *port;
1123 port_fwd_end_t port_fwd_end;
1128 uint64_t total_recv;
1129 uint64_t total_xmit;
1130 uint64_t total_rx_dropped;
1131 uint64_t total_tx_dropped;
1132 uint64_t total_rx_nombuf;
1133 uint64_t tx_dropped;
1134 uint64_t rx_bad_ip_csum;
1135 uint64_t rx_bad_l4_csum;
1136 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1137 uint64_t fwd_cycles;
1139 static const char *acc_stats_border = "+++++++++++++++";
1142 printf("Packet forwarding not started\n");
1145 printf("Telling cores to stop...");
1146 for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
1147 fwd_lcores[lc_id]->stopped = 1;
1148 printf("\nWaiting for lcores to finish...\n");
1149 rte_eal_mp_wait_lcore();
1150 port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
1151 if (port_fwd_end != NULL) {
1152 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1153 pt_id = fwd_ports_ids[i];
1154 (*port_fwd_end)(pt_id);
1157 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1160 for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
1161 if (cur_fwd_config.nb_fwd_streams >
1162 cur_fwd_config.nb_fwd_ports) {
1163 fwd_stream_stats_display(sm_id);
1164 ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
1165 ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
1167 ports[fwd_streams[sm_id]->tx_port].tx_stream =
1169 ports[fwd_streams[sm_id]->rx_port].rx_stream =
1172 tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
1173 tx_dropped = (uint64_t) (tx_dropped +
1174 fwd_streams[sm_id]->fwd_dropped);
1175 ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;
1178 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
1179 rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
1180 fwd_streams[sm_id]->rx_bad_ip_csum);
1181 ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
1185 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
1186 rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
1187 fwd_streams[sm_id]->rx_bad_l4_csum);
1188 ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
1191 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1192 fwd_cycles = (uint64_t) (fwd_cycles +
1193 fwd_streams[sm_id]->core_cycles);
1198 total_rx_dropped = 0;
1199 total_tx_dropped = 0;
1200 total_rx_nombuf = 0;
1201 for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
1202 pt_id = fwd_ports_ids[i];
1204 port = &ports[pt_id];
1205 rte_eth_stats_get(pt_id, &stats);
1206 stats.ipackets -= port->stats.ipackets;
1207 port->stats.ipackets = 0;
1208 stats.opackets -= port->stats.opackets;
1209 port->stats.opackets = 0;
1210 stats.ibytes -= port->stats.ibytes;
1211 port->stats.ibytes = 0;
1212 stats.obytes -= port->stats.obytes;
1213 port->stats.obytes = 0;
1214 stats.imissed -= port->stats.imissed;
1215 port->stats.imissed = 0;
1216 stats.oerrors -= port->stats.oerrors;
1217 port->stats.oerrors = 0;
1218 stats.rx_nombuf -= port->stats.rx_nombuf;
1219 port->stats.rx_nombuf = 0;
1221 total_recv += stats.ipackets;
1222 total_xmit += stats.opackets;
1223 total_rx_dropped += stats.imissed;
1224 total_tx_dropped += port->tx_dropped;
1225 total_rx_nombuf += stats.rx_nombuf;
1227 fwd_port_stats_display(pt_id, &stats);
1229 printf("\n %s Accumulated forward statistics for all ports"
1231 acc_stats_border, acc_stats_border);
1232 printf(" RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
1234 " TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
1236 total_recv, total_rx_dropped, total_recv + total_rx_dropped,
1237 total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
1238 if (total_rx_nombuf > 0)
1239 printf(" RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
1240 printf(" %s++++++++++++++++++++++++++++++++++++++++++++++"
1242 acc_stats_border, acc_stats_border);
1243 #ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
1245 printf("\n CPU cycles/packet=%u (total cycles="
1246 "%"PRIu64" / total RX packets=%"PRIu64")\n",
1247 (unsigned int)(fwd_cycles / total_recv),
1248 fwd_cycles, total_recv);
1250 printf("\nDone.\n");
1255 dev_set_link_up(portid_t pid)
1257 if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
1258 printf("\nSet link up fail.\n");
1262 dev_set_link_down(portid_t pid)
1264 if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
1265 printf("\nSet link down fail.\n");
1269 all_ports_started(void)
1272 struct rte_port *port;
1274 FOREACH_PORT(pi, ports) {
1276 /* Check if there is a port which is not started */
1277 if ((port->port_status != RTE_PORT_STARTED) &&
1278 (port->slave_flag == 0))
1282 /* all ports are started */
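/*
 * Check whether all the (non-slave) ports are stopped;
 * the counterpart of all_ports_started().
 */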
1287 all_ports_stopped(void)
1290 struct rte_port *port;
1292 FOREACH_PORT(pi, ports) {
1294 if ((port->port_status != RTE_PORT_STOPPED) &&
1295 (port->slave_flag == 0))
1303 port_is_started(portid_t port_id)
1305 if (port_id_is_invalid(port_id, ENABLED_WARN))
1308 if (ports[port_id].port_status != RTE_PORT_STARTED)
1315 port_is_closed(portid_t port_id)
1317 if (port_id_is_invalid(port_id, ENABLED_WARN))
1320 if (ports[port_id].port_status != RTE_PORT_CLOSED)
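/*
 * Start the given port, or all ports when pid is RTE_PORT_ALL:
 * (re)configure the device and its RX/TX queues if needed, start it
 * and print its MAC address.
 */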
1327 start_port(portid_t pid)
1329 int diag, need_check_link_status = -1;
1332 struct rte_port *port;
1333 struct ether_addr mac_addr;
1335 if (port_id_is_invalid(pid, ENABLED_WARN))
1340 FOREACH_PORT(pi, ports) {
1341 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1344 need_check_link_status = 0;
1346 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
1347 RTE_PORT_HANDLING) == 0) {
1348 printf("Port %d is now not stopped\n", pi);
1352 if (port->need_reconfig > 0) {
1353 port->need_reconfig = 0;
1355 printf("Configuring Port %d (socket %u)\n", pi,
1357 /* configure port */
1358 diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
1361 if (rte_atomic16_cmpset(&(port->port_status),
1362 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1363 printf("Port %d can not be set back "
1364 "to stopped\n", pi);
1365 printf("Fail to configure port %d\n", pi);
1366 /* try to reconfigure port next time */
1367 port->need_reconfig = 1;
1371 if (port->need_reconfig_queues > 0) {
1372 port->need_reconfig_queues = 0;
1373 /* setup tx queues */
1374 for (qi = 0; qi < nb_txq; qi++) {
1375 if ((numa_support) &&
1376 (txring_numa[pi] != NUMA_NO_CONFIG))
1377 diag = rte_eth_tx_queue_setup(pi, qi,
1378 nb_txd,txring_numa[pi],
1381 diag = rte_eth_tx_queue_setup(pi, qi,
1382 nb_txd,port->socket_id,
1388 /* failed to set up the TX queue; return */
1389 if (rte_atomic16_cmpset(&(port->port_status),
1391 RTE_PORT_STOPPED) == 0)
1392 printf("Port %d can not be set back "
1393 "to stopped\n", pi);
1394 printf("Fail to configure port %d tx queues\n", pi);
1395 /* try to reconfigure queues next time */
1396 port->need_reconfig_queues = 1;
1399 /* setup rx queues */
1400 for (qi = 0; qi < nb_rxq; qi++) {
1401 if ((numa_support) &&
1402 (rxring_numa[pi] != NUMA_NO_CONFIG)) {
1403 struct rte_mempool * mp =
1404 mbuf_pool_find(rxring_numa[pi]);
1406 printf("Failed to setup RX queue:"
1407 "No mempool allocation"
1408 " on the socket %d\n",
1413 diag = rte_eth_rx_queue_setup(pi, qi,
1414 nb_rxd,rxring_numa[pi],
1415 &(port->rx_conf),mp);
1417 struct rte_mempool *mp =
1418 mbuf_pool_find(port->socket_id);
1420 printf("Failed to setup RX queue:"
1421 "No mempool allocation"
1422 " on the socket %d\n",
1426 diag = rte_eth_rx_queue_setup(pi, qi,
1427 nb_rxd,port->socket_id,
1428 &(port->rx_conf), mp);
1433 /* failed to set up the RX queue; return */
1434 if (rte_atomic16_cmpset(&(port->port_status),
1436 RTE_PORT_STOPPED) == 0)
1437 printf("Port %d can not be set back "
1438 "to stopped\n", pi);
1439 printf("Fail to configure port %d rx queues\n", pi);
1440 /* try to reconfigure queues next time */
1441 port->need_reconfig_queues = 1;
1446 if (rte_eth_dev_start(pi) < 0) {
1447 printf("Fail to start port %d\n", pi);
1449 /* Fail to setup rx queue, return */
1450 if (rte_atomic16_cmpset(&(port->port_status),
1451 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1452 printf("Port %d can not be set back to "
1457 if (rte_atomic16_cmpset(&(port->port_status),
1458 RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
1459 printf("Port %d can not be set into started\n", pi);
1461 rte_eth_macaddr_get(pi, &mac_addr);
1462 printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
1463 mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
1464 mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
1465 mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);
1467 /* at least one port started, need checking link status */
1468 need_check_link_status = 1;
1471 if (need_check_link_status == 1 && !no_link_check)
1472 check_all_ports_link_status(RTE_PORT_ALL);
1473 else if (need_check_link_status == 0)
1474 printf("Please stop the ports first\n");
1481 stop_port(portid_t pid)
1484 struct rte_port *port;
1485 int need_check_link_status = 0;
1492 if (port_id_is_invalid(pid, ENABLED_WARN))
1495 printf("Stopping ports...\n");
1497 FOREACH_PORT(pi, ports) {
1498 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1501 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1502 printf("Please remove port %d from forwarding configuration.\n", pi);
1506 if (port_is_bonding_slave(pi)) {
1507 printf("Please remove port %d from bonded device.\n", pi);
1512 if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
1513 RTE_PORT_HANDLING) == 0)
1516 rte_eth_dev_stop(pi);
1518 if (rte_atomic16_cmpset(&(port->port_status),
1519 RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
1520 printf("Port %d can not be set into stopped\n", pi);
1521 need_check_link_status = 1;
1523 if (need_check_link_status && !no_link_check)
1524 check_all_ports_link_status(RTE_PORT_ALL);
1530 close_port(portid_t pid)
1533 struct rte_port *port;
1535 if (port_id_is_invalid(pid, ENABLED_WARN))
1538 printf("Closing ports...\n");
1540 FOREACH_PORT(pi, ports) {
1541 if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
1544 if (port_is_forwarding(pi) != 0 && test_done == 0) {
1545 printf("Please remove port %d from forwarding configuration.\n", pi);
1549 if (port_is_bonding_slave(pi)) {
1550 printf("Please remove port %d from bonded device.\n", pi);
1555 if (rte_atomic16_cmpset(&(port->port_status),
1556 RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
1557 printf("Port %d is already closed\n", pi);
1561 if (rte_atomic16_cmpset(&(port->port_status),
1562 RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
1563 printf("Port %d is now not stopped\n", pi);
1567 rte_eth_dev_close(pi);
1569 if (rte_atomic16_cmpset(&(port->port_status),
1570 RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
1571 printf("Port %d cannot be set to closed\n", pi);
1578 attach_port(char *identifier)
1581 unsigned int socket_id;
1583 printf("Attaching a new port...\n");
1585 if (identifier == NULL) {
1586 printf("Invalid parameters are specified\n");
1590 if (rte_eth_dev_attach(identifier, &pi))
1593 ports[pi].enabled = 1;
1594 socket_id = (unsigned)rte_eth_dev_socket_id(pi);
1595 /* if socket_id is invalid, set to 0 */
1596 if (check_socket_id(socket_id) < 0)
1598 reconfig(pi, socket_id);
1599 rte_eth_promiscuous_enable(pi);
1601 nb_ports = rte_eth_dev_count();
1603 ports[pi].port_status = RTE_PORT_STOPPED;
1605 update_fwd_ports(pi);
1607 printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
1612 detach_port(uint8_t port_id)
1614 char name[RTE_ETH_NAME_MAX_LEN];
1616 printf("Detaching a port...\n");
1618 if (!port_is_closed(port_id)) {
1619 printf("Please close port first\n");
1623 if (rte_eth_dev_detach(port_id, name))
1626 ports[port_id].enabled = 0;
1627 nb_ports = rte_eth_dev_count();
1629 update_fwd_ports(RTE_MAX_ETHPORTS);
1631 printf("Port '%s' is detached. Now total ports is %d\n",
1643 stop_packet_forwarding();
1645 if (ports != NULL) {
1647 FOREACH_PORT(pt_id, ports) {
1648 printf("\nShutting down port %d...\n", pt_id);
1654 printf("\nBye...\n");
1657 typedef void (*cmd_func_t)(void);
1658 struct pmd_test_command {
1659 const char *cmd_name;
1660 cmd_func_t cmd_func;
1663 #define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
1665 /* Check the link status of all ports for up to 9 s, and print the final status */
1667 check_all_ports_link_status(uint32_t port_mask)
1669 #define CHECK_INTERVAL 100 /* 100ms */
1670 #define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
1671 uint8_t portid, count, all_ports_up, print_flag = 0;
1672 struct rte_eth_link link;
1674 printf("Checking link statuses...\n");
1676 for (count = 0; count <= MAX_CHECK_TIME; count++) {
1678 FOREACH_PORT(portid, ports) {
1679 if ((port_mask & (1 << portid)) == 0)
1681 memset(&link, 0, sizeof(link));
1682 rte_eth_link_get_nowait(portid, &link);
1683 /* print link status if flag set */
1684 if (print_flag == 1) {
1685 if (link.link_status)
1686 printf("Port %d Link Up - speed %u "
1687 "Mbps - %s\n", (uint8_t)portid,
1688 (unsigned)link.link_speed,
1689 (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
1690 ("full-duplex") : ("half-duplex\n"));
1692 printf("Port %d Link Down\n",
1696 /* clear all_ports_up flag if any link down */
1697 if (link.link_status == ETH_LINK_DOWN) {
1702 /* after finally printing all link status, get out */
1703 if (print_flag == 1)
1706 if (all_ports_up == 0) {
1708 rte_delay_ms(CHECK_INTERVAL);
1711 /* set the print_flag if all ports up or timeout */
1712 if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
1719 set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1723 uint8_t mapping_found = 0;
1725 for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
1726 if ((tx_queue_stats_mappings[i].port_id == port_id) &&
1727 (tx_queue_stats_mappings[i].queue_id < nb_txq )) {
1728 diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
1729 tx_queue_stats_mappings[i].queue_id,
1730 tx_queue_stats_mappings[i].stats_counter_id);
1737 port->tx_queue_stats_mapping_enabled = 1;
1742 set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
1746 uint8_t mapping_found = 0;
1748 for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
1749 if ((rx_queue_stats_mappings[i].port_id == port_id) &&
1750 (rx_queue_stats_mappings[i].queue_id < nb_rxq )) {
1751 diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
1752 rx_queue_stats_mappings[i].queue_id,
1753 rx_queue_stats_mappings[i].stats_counter_id);
1760 port->rx_queue_stats_mapping_enabled = 1;
1765 map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
1769 diag = set_tx_queue_stats_mapping_registers(pi, port);
1771 if (diag == -ENOTSUP) {
1772 port->tx_queue_stats_mapping_enabled = 0;
1773 printf("TX queue stats mapping not supported port id=%d\n", pi);
1776 rte_exit(EXIT_FAILURE,
1777 "set_tx_queue_stats_mapping_registers "
1778 "failed for port id=%d diag=%d\n",
1782 diag = set_rx_queue_stats_mapping_registers(pi, port);
1784 if (diag == -ENOTSUP) {
1785 port->rx_queue_stats_mapping_enabled = 0;
1786 printf("RX queue stats mapping not supported port id=%d\n", pi);
1789 rte_exit(EXIT_FAILURE,
1790 "set_rx_queue_stats_mapping_registers "
1791 "failed for port id=%d diag=%d\n",
1797 rxtx_port_config(struct rte_port *port)
1799 port->rx_conf = port->dev_info.default_rxconf;
1800 port->tx_conf = port->dev_info.default_txconf;
1802 /* Check if any RX/TX parameters have been passed */
1803 if (rx_pthresh != RTE_PMD_PARAM_UNSET)
1804 port->rx_conf.rx_thresh.pthresh = rx_pthresh;
1806 if (rx_hthresh != RTE_PMD_PARAM_UNSET)
1807 port->rx_conf.rx_thresh.hthresh = rx_hthresh;
1809 if (rx_wthresh != RTE_PMD_PARAM_UNSET)
1810 port->rx_conf.rx_thresh.wthresh = rx_wthresh;
1812 if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
1813 port->rx_conf.rx_free_thresh = rx_free_thresh;
1815 if (rx_drop_en != RTE_PMD_PARAM_UNSET)
1816 port->rx_conf.rx_drop_en = rx_drop_en;
1818 if (tx_pthresh != RTE_PMD_PARAM_UNSET)
1819 port->tx_conf.tx_thresh.pthresh = tx_pthresh;
1821 if (tx_hthresh != RTE_PMD_PARAM_UNSET)
1822 port->tx_conf.tx_thresh.hthresh = tx_hthresh;
1824 if (tx_wthresh != RTE_PMD_PARAM_UNSET)
1825 port->tx_conf.tx_thresh.wthresh = tx_wthresh;
1827 if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
1828 port->tx_conf.tx_rs_thresh = tx_rs_thresh;
1830 if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
1831 port->tx_conf.tx_free_thresh = tx_free_thresh;
1833 if (txq_flags != RTE_PMD_PARAM_UNSET)
1834 port->tx_conf.txq_flags = txq_flags;
1838 init_port_config(void)
1841 struct rte_port *port;
1843 FOREACH_PORT(pid, ports) {
1845 port->dev_conf.rxmode = rx_mode;
1846 port->dev_conf.fdir_conf = fdir_conf;
1848 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1849 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
1851 port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
1852 port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
1855 if (port->dcb_flag == 0) {
1856 if( port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
1857 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
1859 port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
1862 rxtx_port_config(port);
1864 rte_eth_macaddr_get(pid, &port->eth_addr);
1866 map_port_queue_stats_mapping_registers(pid, port);
1867 #ifdef RTE_NIC_BYPASS
1868 rte_eth_dev_bypass_init(pid);
1873 void set_port_slave_flag(portid_t slave_pid)
1875 struct rte_port *port;
1877 port = &ports[slave_pid];
1878 port->slave_flag = 1;
1881 void clear_port_slave_flag(portid_t slave_pid)
1883 struct rte_port *port;
1885 port = &ports[slave_pid];
1886 port->slave_flag = 0;
1889 uint8_t port_is_bonding_slave(portid_t slave_pid)
1891 struct rte_port *port;
1893 port = &ports[slave_pid];
1894 if ((rte_eth_devices[slave_pid].data->dev_flags &
1895 RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
1900 const uint16_t vlan_tags[] = {
1901 0, 1, 2, 3, 4, 5, 6, 7,
1902 8, 9, 10, 11, 12, 13, 14, 15,
1903 16, 17, 18, 19, 20, 21, 22, 23,
1904 24, 25, 26, 27, 28, 29, 30, 31
1908 get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
1909 enum dcb_mode_enable dcb_mode,
1910 enum rte_eth_nb_tcs num_tcs,
1916 * Builds up the correct configuration for dcb+vt based on the vlan tags array
1917 * given above, and the number of traffic classes available for use.
1919 if (dcb_mode == DCB_VT_ENABLED) {
1920 struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
1921 &eth_conf->rx_adv_conf.vmdq_dcb_conf;
1922 struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
1923 &eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;
1925 /* VMDQ+DCB RX and TX configurations */
1926 vmdq_rx_conf->enable_default_pool = 0;
1927 vmdq_rx_conf->default_pool = 0;
1928 vmdq_rx_conf->nb_queue_pools =
1929 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1930 vmdq_tx_conf->nb_queue_pools =
1931 (num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
1933 vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
1934 for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
1935 vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
1936 vmdq_rx_conf->pool_map[i].pools =
1937 1 << (i % vmdq_rx_conf->nb_queue_pools);
1939 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1940 vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
1941 vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
1944 /* set DCB mode of RX and TX of multiple queues */
1945 eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
1946 eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
1948 struct rte_eth_dcb_rx_conf *rx_conf =
1949 &eth_conf->rx_adv_conf.dcb_rx_conf;
1950 struct rte_eth_dcb_tx_conf *tx_conf =
1951 &eth_conf->tx_adv_conf.dcb_tx_conf;
1953 rx_conf->nb_tcs = num_tcs;
1954 tx_conf->nb_tcs = num_tcs;
1956 for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
1957 rx_conf->dcb_tc[i] = i % num_tcs;
1958 tx_conf->dcb_tc[i] = i % num_tcs;
1960 eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
1961 eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
1962 eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
1966 eth_conf->dcb_capability_en =
1967 ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
1969 eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;
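/*
 * Apply a DCB configuration (with or without VT) to the given port and
 * adjust the global number of RX/TX queues accordingly.
 */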
1975 init_port_dcb_config(portid_t pid,
1976 enum dcb_mode_enable dcb_mode,
1977 enum rte_eth_nb_tcs num_tcs,
1980 struct rte_eth_conf port_conf;
1981 struct rte_port *rte_port;
1985 rte_port = &ports[pid];
1987 memset(&port_conf, 0, sizeof(struct rte_eth_conf));
1988 /* Enter DCB configuration status */
1991 /* set configuration of DCB in VT mode and DCB in non-VT mode */
1992 retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
1995 port_conf.rxmode.hw_vlan_filter = 1;
1998 * Write the configuration into the device.
1999 * Set the numbers of RX & TX queues to 0, so
2000 * the RX & TX queues will not be setup.
2002 (void)rte_eth_dev_configure(pid, 0, 0, &port_conf);
2004 rte_eth_dev_info_get(pid, &rte_port->dev_info);
2006 /* If dev_info.vmdq_pool_base is greater than 0,
2007 * the queue ids of the VMDQ pools start after the PF queues.
2009 if (dcb_mode == DCB_VT_ENABLED &&
2010 rte_port->dev_info.vmdq_pool_base > 0) {
2011 printf("VMDQ_DCB multi-queue mode is nonsensical"
2012 " for port %d.", pid);
2016 /* Assume the ports in testpmd have the same DCB capability
2017 * and the same number of rxq and txq in DCB mode
2019 if (dcb_mode == DCB_VT_ENABLED) {
2020 if (rte_port->dev_info.max_vfs > 0) {
2021 nb_rxq = rte_port->dev_info.nb_rx_queues;
2022 nb_txq = rte_port->dev_info.nb_tx_queues;
2024 nb_rxq = rte_port->dev_info.max_rx_queues;
2025 nb_txq = rte_port->dev_info.max_tx_queues;
2028 /* if VT is disabled, use all PF queues */
2029 if (rte_port->dev_info.vmdq_pool_base == 0) {
2030 nb_rxq = rte_port->dev_info.max_rx_queues;
2031 nb_txq = rte_port->dev_info.max_tx_queues;
2033 nb_rxq = (queueid_t)num_tcs;
2034 nb_txq = (queueid_t)num_tcs;
2038 rx_free_thresh = 64;
2040 memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));
2042 rxtx_port_config(rte_port);
2044 rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
2045 for (i = 0; i < RTE_DIM(vlan_tags); i++)
2046 rx_vft_set(pid, vlan_tags[i], 1);
2048 rte_eth_macaddr_get(pid, &rte_port->eth_addr);
2049 map_port_queue_stats_mapping_registers(pid, rte_port);
2051 rte_port->dcb_flag = 1;
2061 /* Configuration of Ethernet ports. */
2062 ports = rte_zmalloc("testpmd: ports",
2063 sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
2064 RTE_CACHE_LINE_SIZE);
2065 if (ports == NULL) {
2066 rte_exit(EXIT_FAILURE,
2067 "rte_zmalloc(%d struct rte_port) failed\n",
2071 /* enable allocated ports */
2072 for (pid = 0; pid < nb_ports; pid++)
2073 ports[pid].enabled = 1;
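/*
 * On SIGINT/SIGTERM: clean up (e.g. the pdump framework), restore the
 * default handler and re-raise the signal so that the expected exit
 * status is reported.
 */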
2084 signal_handler(int signum)
2086 if (signum == SIGINT || signum == SIGTERM) {
2087 printf("\nSignal %d received, preparing to exit...\n",
2089 #ifdef RTE_LIBRTE_PDUMP
2090 /* uninitialize packet capture framework */
2094 /* exit with the expected status */
2095 signal(signum, SIG_DFL);
2096 kill(getpid(), signum);
2101 main(int argc, char** argv)
2106 signal(SIGINT, signal_handler);
2107 signal(SIGTERM, signal_handler);
2109 diag = rte_eal_init(argc, argv);
2111 rte_panic("Cannot init EAL\n");
2113 #ifdef RTE_LIBRTE_PDUMP
2114 /* initialize packet capture framework */
2115 rte_pdump_init(NULL);
2118 nb_ports = (portid_t) rte_eth_dev_count();
2120 RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");
2122 /* allocate port structures, and init them */
2125 set_def_fwd_config();
2127 rte_panic("Empty set of forwarding logical cores - check the "
2128 "core mask supplied in the command parameters\n");
2133 launch_args_parse(argc, argv);
2135 if (!nb_rxq && !nb_txq)
2136 printf("Warning: Either rx or tx queues should be non-zero\n");
2138 if (nb_rxq > 1 && nb_rxq > nb_txq)
2139 printf("Warning: nb_rxq=%d enables RSS configuration, "
2140 "but nb_txq=%d will prevent to fully test it.\n",
2144 if (start_port(RTE_PORT_ALL) != 0)
2145 rte_exit(EXIT_FAILURE, "Start ports failed\n");
2147 /* set all ports to promiscuous mode by default */
2148 FOREACH_PORT(port_id, ports)
2149 rte_eth_promiscuous_enable(port_id);
2151 #ifdef RTE_LIBRTE_CMDLINE
2152 if (interactive == 1) {
2154 printf("Start automatic packet forwarding\n");
2155 start_packet_forwarding(0);
2165 printf("No commandline core given, start packet forwarding\n");
2166 start_packet_forwarding(0);
2167 printf("Press enter to exit\n");
2168 rc = read(0, &c, 1);