/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <time.h>
#include <sys/types.h>
#include <errno.h>

#include <sys/queue.h>

#include <stdint.h>
#include <unistd.h>
#include <inttypes.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */
/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (may not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;
/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;        /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */
/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};
struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN, /**< First segment carries the full default length. */
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */
uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB test mode is enabled. */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
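/*
 * Illustration (not part of the original source): both defaults can
 * normally be overridden, either on the command line, e.g.
 *
 *   testpmd -c 0xf -n 4 -- --rxd=512 --txd=1024
 *
 * or from the interactive prompt, e.g. "port config all rxd 512".
 */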
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */
/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

/*
 * Find next enabled port.
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}
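/*
 * Sketch (not part of this file): the FOREACH_PORT() macro used below is
 * declared in testpmd.h and is typically built on this helper, along the
 * lines of:
 *
 *   #define FOREACH_PORT(p, ports) \
 *           for (p = find_next_port(0, ports, RTE_MAX_ETHPORTS); \
 *                p < RTE_MAX_ETHPORTS; \
 *                p = find_next_port(p + 1, ports, RTE_MAX_ETHPORTS))
 *
 * so loops silently skip ports whose "enabled" flag is cleared.
 */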
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
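/*
 * Illustration (not part of the original source): assuming
 * ETHER_LOCAL_ADMIN_ADDR is the locally-administered byte 0x02, port 3
 * gets the default peer address 02:00:00:00:00:03. Real peer addresses
 * are normally supplied with the --eth-peer command-line option instead.
 */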
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);

			/* populate with anonymous memory; drop the pool on failure */
			if (rte_mp != NULL &&
			    rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
			} else if (rte_mp != NULL) {
				rte_pktmbuf_pool_init(rte_mp, NULL);
				rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
			}
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			 "Creation of mbuf pool for socket %u failed: %s\n",
			 socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
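/*
 * Usage sketch (illustration, not part of the original source): with the
 * default segment size, a call such as
 *
 *   mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE, 8192, 0);
 *
 * builds one pool of 8192 mbufs in socket 0 memory, named by
 * mbuf_poolname_build() (e.g. "mbuf_pool_socket_0") so that
 * mbuf_pool_find(0) can locate it later.
 */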
/*
 * Check whether a given socket id is valid in NUMA mode.
 * If valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, sizeof(port_per_socket));

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}
	/*
	 * Create pools of mbufs.
	 * If NUMA support is disabled, create a single pool of mbufs in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbufs in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
				   (nb_lcores * mb_mempool_cache) +
				   RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
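	/*
	 * Worked example (illustration, not part of the original source):
	 * assuming RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048,
	 * MAX_PKT_BURST = 512, the default mempool cache of 250 and 4
	 * forwarding lcores, the per-port estimate is
	 * 2048 + (4 * 250) + 2048 + 512 = 5608 mbufs, which is then
	 * multiplied by RTE_MAX_ETHPORTS to cover every possible port.
	 */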
	if (numa_support) {
		uint8_t i;

		for (i = 0; i < max_socket; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, i);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
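/*
 * Illustration (not part of the original source): with 2 ports,
 * nb_rxq = 4 and nb_txq = 2, q = RTE_MAX(4, 2) = 4, so
 * nb_fwd_streams = 2 * 4 = 8 stream structures are allocated.
 */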
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
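/*
 * Example output (illustration, not part of the original source):
 *
 *   RX-bursts : 1024 [87% of 32 pkts + 10% of 16 pkts + 3% of others]
 *
 * i.e. most polls returned a full 32-packet burst and a few returned 16.
 */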
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() always returns
				 * nonzero packets. So a timer is added to
				 * exit this loop after a 1 second timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
					/* advance the reference so only the
					 * elapsed delta is accumulated */
					prev_tsc = cur_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
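/*
 * Note (illustration, not part of the original source): the flush runs two
 * passes with a 10 ms pause in between, presumably so packets still in
 * flight inside the NIC or switch fabric when the first pass drains the
 * rings are caught by the second pass.
 */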
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
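/*
 * Note (illustration, not part of the original source): copying the lcore
 * context and pre-setting stopped = 1 makes the do/while loop in
 * run_pkt_fwd_on_lcore() execute exactly one iteration, i.e. one TX burst
 * per stream, without touching the real lcore's state.
 */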
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
				run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf  = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes   -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes   -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed  -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors  -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
					RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d cannot be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Failed to start port, set back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d cannot be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d cannot be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d cannot be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (rte_eth_dev_detach(port_id, name))
		return;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	printf("Done\n");
}

void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		FOREACH_PORT(pt_id, ports) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
					       (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					       ("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1))
			print_flag = 1;
	}
}
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
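		/*
		 * Illustration (not part of the original source): with
		 * num_tcs == ETH_4_TCS there are 32 pools, so the map entry
		 * for vlan_tags[5] == 5 directs that VLAN to pool 5
		 * (pools bit 1 << 5), and each of the 8 user priorities is
		 * assigned one-to-one to a traffic class.
		 */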
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
		       " for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}

	/* enable allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent fully testing it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}