/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <inttypes.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line ? */
uint8_t interactive = 0;
uint8_t auto_start = 0;

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 0; /**< No numa support by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (which might not be physically contiguous)
 * for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */

/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
	&mac_fwd_engine,
	&mac_swap_engine,
	&flow_gen_engine,
	&rx_only_engine,
	&tx_only_engine,
	&csum_fwd_engine,
	&icmp_echo_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
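
/*
 * Illustrative sketch (not part of the original file): burst_tx_delay_time
 * and burst_tx_retry_num drive the TX retry behaviour when retry is
 * enabled -- re-offer the unsent tail of a burst a bounded number of times,
 * pausing between attempts. The "example_" helper name is hypothetical.
 */
static inline uint16_t
example_tx_burst_with_retry(uint8_t port, uint16_t queue,
			    struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_tx, retry;

	nb_tx = rte_eth_tx_burst(port, queue, pkts, nb_pkts);
	for (retry = 0; retry < burst_tx_retry_num && nb_tx < nb_pkts;
	     retry++) {
		rte_delay_us(burst_tx_delay_time); /* back off, then retry */
		nb_tx += rte_eth_tx_burst(port, queue,
					  &pkts[nb_tx], nb_pkts - nb_tx);
	}
	return nb_tx; /* the caller frees any mbufs that were never sent */
}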
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */

/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */
/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is currently being tested. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */
int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking link status when starting/stopping a port.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
#endif

/* default period is 1 second */
static uint64_t timer_period = 1;
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 0, /**< CRC stripping by hardware disabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned max_socket = 0;

/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Find next enabled port.
 */
portid_t
find_next_port(portid_t p, struct rte_port *ports, int size)
{
	if (ports == NULL)
		rte_exit(-EINVAL, "failed to find a next port id\n");

	while ((p < size) && (ports[p].enabled == 0))
		p++;
	return p;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i) + 1;
		if (sock_num > max_socket) {
			if (sock_num > RTE_MAX_NUMA_NODES)
				rte_exit(EXIT_FAILURE, "Total sockets greater than %u\n", RTE_MAX_NUMA_NODES);
			max_socket = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);

			if (rte_mp != NULL &&
			    rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
			}
			/* guard against allocation failure before
			 * initializing the pool and its objects */
			if (rte_mp != NULL) {
				rte_pktmbuf_pool_init(rte_mp, NULL);
				rte_mempool_obj_iter(rte_mp,
						rte_pktmbuf_init, NULL);
			}
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
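
/*
 * Illustrative sketch (not part of the original file): once a per-socket
 * pool exists, mbufs are drawn from it with the standard
 * rte_pktmbuf_alloc()/rte_pktmbuf_free() pair. The name lookup below
 * mirrors what mbuf_pool_find() does; the "example_" helper name is
 * hypothetical.
 */
static struct rte_mbuf *
example_alloc_one_mbuf(unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *mp;

	/* rebuild the per-socket pool name and look the pool up */
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));
	mp = rte_mempool_lookup(pool_name);
	if (mp == NULL)
		return NULL;
	/* the caller releases the buffer with rte_pktmbuf_free() */
	return rte_pktmbuf_alloc(mp);
}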
/*
 * Check if the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (socket_id >= max_socket) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX + (nb_lcores * mb_mempool_cache)
				+ RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;

		if (!numa_support)
			nb_mbuf_per_pool =
				(nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
	}

	if (!numa_support) {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	if (numa_support) {
		uint8_t i;
		unsigned int nb_mbuf;

		if (param_total_num_mbufs)
			nb_mbuf_per_pool = nb_mbuf_per_pool / nb_ports;

		for (i = 0; i < max_socket; i++) {
			nb_mbuf = (nb_mbuf_per_pool * RTE_MAX_ETHPORTS);
			if (nb_mbuf)
				mbuf_pool_create(mbuf_data_size,
						 nb_mbuf, i);
		}
	}
	init_port_config();

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
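
/*
 * Worked example (illustrative, not part of the original file): with the
 * sizing formula above and the usual testpmd.h defaults -- assumed here to
 * be RTE_TEST_RX_DESC_MAX = RTE_TEST_TX_DESC_MAX = 2048, MAX_PKT_BURST =
 * 512 and a per-lcore cache of 250, none of which are visible in this
 * file -- two forwarding lcores give:
 *
 *   nb_mbuf_per_pool = 2048 + (2 * 250) + 2048 + 512 = 5108
 *
 * i.e. enough mbufs for full RX and TX rings, every lcore cache, plus one
 * in-flight burst; without NUMA support this is then multiplied by
 * RTE_MAX_ETHPORTS so a single pool can serve every port.
 */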
static void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
				"max_rx_queues(%d)\n", nb_rxq,
				port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
				"max_tx_queues(%d)\n", nb_txq,
				port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
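
/*
 * Worked example (illustrative, not part of the original file): streams
 * scale with ports times the larger of the two queue counts. With
 * nb_ports = 2, nb_rxq = 4 and nb_txq = 2, q = RTE_MAX(4, 2) = 4, so
 * nb_fwd_streams becomes 2 * 4 = 8 and eight fwd_stream slots are
 * allocated above.
 */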
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
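
/*
 * Illustrative sketch (not part of the original file): forwarding engines
 * feed the histogram displayed above with one increment per
 * rte_eth_rx_burst()/rte_eth_tx_burst() call, indexed by the burst size
 * that the call returned. The "example_" helper name is hypothetical.
 */
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static inline void
example_record_rx_burst(struct fwd_stream *fs, uint16_t nb_rx)
{
	/* one more burst of exactly nb_rx packets was received */
	fs->rx_burst_stats.pkt_burst_spread[nb_rx]++;
}
#endif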
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u  Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	} else {
		printf("\n");
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t nb_rx;
	uint16_t i;
	uint8_t j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period_cycles;

	/* convert to number of cycles; keep the result in a local so that
	 * repeated calls do not keep scaling the global timer_period */
	timer_period_cycles = timer_period * rte_get_timer_hz();

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the below do/while
				 * loop if rte_eth_rx_burst() always returns a
				 * nonzero number of packets, so a timer is
				 * used to exit the loop once 1s has elapsed.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
					/* avoid double counting elapsed time
					 * on the next iteration */
					prev_tsc = cur_tsc;
				} while ((nb_rx > 0) &&
					(timer_tsc < timer_period_cycles));
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
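
/*
 * Illustrative sketch (not part of the original file): timer_period holds
 * seconds until flush_fwd_rx_queues() converts it to TSC cycles. The same
 * pattern turns any wall-clock budget into a cycle budget; the "example_"
 * helper name is hypothetical.
 */
static inline uint64_t
example_seconds_to_cycles(uint64_t seconds)
{
	/* rte_get_timer_hz() returns the timer's cycles-per-second rate */
	return seconds * rte_get_timer_hz();
}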
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;

	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
	} while (! fc->stopped);
}

static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
		strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
		(!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			"Either rxq or txq are 0, cannot use %s fwd mode\n",
			cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
					"be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
				"should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	fwd_config_setup();
	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
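
/*
 * Illustrative sketch (not part of the original file): the per-run counters
 * printed above rely on a snapshot-and-subtract pattern -- rte_eth_stats
 * counters are cumulative since device start, so a run's delta is "current
 * value minus the snapshot taken in start_packet_forwarding()". The
 * "example_" helper name is hypothetical.
 */
static uint64_t
example_rx_delta(portid_t pt_id)
{
	struct rte_eth_stats now;

	rte_eth_stats_get(pt_id, &now);
	/* packets received since the snapshot stored in port->stats */
	return now.ipackets - ports[pt_id].stats.ipackets;
}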
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}

static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	FOREACH_PORT(pi, ports) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
					port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						&(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
					(txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
					(rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
							"No mempool allocation"
							" on the socket %d\n",
							port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
							"to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}
		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to start port, set it back to stopped */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
						"stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
				mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
				mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
				mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}
	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}

void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	FOREACH_PORT(pi, ports) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	ports[pi].enabled = 1;
	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}

void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (rte_eth_dev_detach(port_id, name))
		return;

	ports[port_id].enabled = 0;
	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
			name, nb_ports);
	printf("Done\n");
	return;
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		FOREACH_PORT(pt_id, ports) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		FOREACH_PORT(portid, ports) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
						"Mbps - %s\n", (uint8_t)portid,
						(unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
						(uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
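
/*
 * Illustrative sketch (not part of the original file): the same
 * poll-with-timeout idea applied to a single port. The "example_" helper
 * name is hypothetical.
 */
static int
example_wait_link_up(uint8_t portid, int max_ms)
{
	struct rte_eth_link link;

	while (max_ms > 0) {
		memset(&link, 0, sizeof(link));
		rte_eth_link_get_nowait(portid, &link); /* non-blocking */
		if (link.link_status)
			return 0; /* link came up */
		rte_delay_ms(CHECK_INTERVAL);
		max_ms -= CHECK_INTERVAL;
	}
	return -1; /* timed out */
}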
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
				(tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
				(rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}

static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_tx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
					"set_rx_queue_stats_mapping_registers "
					"failed for port id=%d diag=%d\n",
					pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
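
/*
 * Illustrative sketch (not part of the original file): the sentinel pattern
 * used above in one place -- RTE_PMD_PARAM_UNSET (-1) means "keep the PMD
 * default taken from dev_info"; only values given on the command line
 * overwrite it. The "example_" helper name is hypothetical.
 */
static void
example_apply_override(struct rte_eth_txconf *txconf, int32_t cli_value)
{
	if (cli_value != RTE_PMD_PARAM_UNSET)
		txconf->tx_free_thresh = (uint16_t)cli_value;
}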
static void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	FOREACH_PORT(pid, ports) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0 && port->dev_info.max_vfs == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		if (port->dev_info.max_vfs != 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_VMDQ_RSS;
			else
				port->dev_conf.rxmode.mq_mode =
					ETH_MQ_RX_NONE;

			port->dev_conf.txmode.mq_mode = ETH_MQ_TX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif
	}
}

void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}

const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < num_tcs; i++) {
			rx_conf->dcb_tc[i] = i;
			tx_conf->dcb_tc[i] = i;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_eth_dev_info_get(pid, &dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED && dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
			" for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		nb_rxq = dev_info.max_rx_queues;
		nb_txq = dev_info.max_tx_queues;
	} else {
		/*if vt is disabled, use all pf queues */
		if (dev_info.vmdq_pool_base == 0) {
			nb_rxq = dev_info.max_rx_queues;
			nb_txq = dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/*set configuration of DCB in vt mode and DCB in non-vt mode*/
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;

	rte_port = &ports[pid];
	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	portid_t pid;

	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
				"rte_zmalloc(%d struct rte_port) failed\n",
				RTE_MAX_ETHPORTS);
	}

	/* enabled allocated ports */
	for (pid = 0; pid < nb_ports; pid++)
		ports[pid].enabled = 1;
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
				signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	FOREACH_PORT(port_id, ports)
		rte_eth_promiscuous_enable(port_id);

#ifdef RTE_LIBRTE_CMDLINE
	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}