/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *   All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_PMD_XENVIRT
#include <rte_eth_xenvirt.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* use master core for command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};

/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not set.
 */
uint8_t socket_num = UMA_NO_CONFIG;

/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;

/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * RTE_MAX(nb_rxq, nb_txq)). */
/*
 * Forwarding engines.
 */
struct fwd_engine *fwd_engines[] = {
	&io_fwd_engine,
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;

uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0; /**< number of mbufs in all pools - if
                                     * specified on command-line. */
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN,
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Whether the current configuration is in DCB mode; 0 means it is not. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;

/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */

#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;

/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration.
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */

/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Avoid checking link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification.
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */

/*
 * Display or mask ether events.
 * Default to all events except VF_MBOX.
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
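/*
 * Illustrative note (added, not in the original sources): each event type
 * owns one bit of event_print_mask, so whether an event should be printed
 * is tested the same way eth_event_callback() does further below:
 *
 *	if (event_print_mask & (UINT32_C(1) << type))
 *		printf("\nPort %" PRIu8 ": %s event\n", port_id, event_desc[type]);
 */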
/*
 * NIC bypass mode configuration options.
 */
#ifdef RTE_NIC_BYPASS

/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline.
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
};

struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0x0,
		.ipv4_mask = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static void eth_event_callback(uint8_t port_id,
			       enum rte_eth_event_type type,
			       void *param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);
/*
 * Helper function to check if a socket is already discovered.
 * If it is, return zero; if it is new, return a positive value.
 */
static int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (!rte_lcore_is_enabled(i))
			continue;
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}

static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;

	for (pt_id = 0; pt_id < nb_ports; pt_id++)
		fwd_ports_ids[pt_id] = pt_id;

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}

void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned int nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

#ifdef RTE_LIBRTE_PMD_XENVIRT
	rte_mp = rte_mempool_gntalloc_create(pool_name, nb_mbuf, mb_size,
		(unsigned) mb_mempool_cache,
		sizeof(struct rte_pktmbuf_pool_private),
		rte_pktmbuf_pool_init, NULL,
		rte_pktmbuf_init, NULL,
		socket_id, 0);
#endif

	/* if the former XEN allocation failed fall back to normal allocation */
	if (rte_mp == NULL) {
		if (mp_anon != 0) {
			rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
				mb_size, (unsigned) mb_mempool_cache,
				sizeof(struct rte_pktmbuf_pool_private),
				socket_id, 0);
			if (rte_mp == NULL)
				goto err;

			if (rte_mempool_populate_anon(rte_mp) == 0) {
				rte_mempool_free(rte_mp);
				rte_mp = NULL;
				goto err;
			}
			rte_pktmbuf_pool_init(rte_mp, NULL);
			rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
		} else {
			/* wrapper to rte_mempool_create() */
			rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
				mb_mempool_cache, 0, mbuf_seg_size, socket_id);
		}
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
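/*
 * Usage sketch (illustrative only; the mbuf count is an assumption): create
 * one pool of 8192 mbufs with the default data room size on NUMA socket 0,
 * as init_config() below does for each discovered socket:
 *
 *	mbuf_pool_create(DEFAULT_MBUF_DATA_SIZE, 8192, 0);
 */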
/*
 * Check whether a given socket ID is valid in NUMA mode.
 * If valid, return 0; otherwise return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	if (numa_support) {
		memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
		memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	}

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(socket_id) < 0)
					socket_id = 0;
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}

	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}

	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
	}

	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");
}
void
reconfig(portid_t new_port_id, unsigned int socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/* if socket_id is invalid, set to 0 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = 0;
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	fwd_streams = rte_zmalloc("testpmd: fwd_streams",
		sizeof(struct fwd_stream *) * nb_fwd_streams, RTE_CACHE_LINE_SIZE);
	if (fwd_streams == NULL)
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_stream *)) "
			 "failed\n", nb_fwd_streams);

	for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id] = rte_zmalloc("testpmd: struct fwd_stream",
				sizeof(struct fwd_stream), RTE_CACHE_LINE_SIZE);
		if (fwd_streams[sm_id] == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_stream)"
				 " failed\n");
	}

	return 0;
}
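/*
 * Worked example (illustrative, derived from the code above): with
 * nb_ports = 2, nb_rxq = 4 and nb_txq = 2, q = RTE_MAX(4, 2) = 4, so
 * 2 * 4 = 8 forwarding streams are allocated, one per port/queue pair.
 */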
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;

	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) &&
	    (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64
		       "RX-total: %-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64
			       " Bad-l4csum: %-14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64
		       "TX-total: %-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64
		       " RX-total:%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64
		       " TX-total:%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i],
			       stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u Rx- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */
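	/*
	 * Descriptive note (added): the RX queues are drained in two passes
	 * with a 10 ms pause in between, so packets still in flight during
	 * the first pass can be caught by the second one.
	 */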
	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/*
				 * testpmd can get stuck in the do-while loop
				 * below if rte_eth_rx_burst() keeps returning
				 * packets, so a timer is used to exit the
				 * loop after a 1 s timeout.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
				timer_tsc = 0;
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
		    bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
				     idx_port < cnt_ports;
				     idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
							       idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
		    latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif

	} while (! fc->stopped);
}
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
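	/*
	 * Descriptive note (added): the lcore context is copied and marked
	 * as already stopped, so run_pkt_fwd_on_lcore() executes its
	 * do-while body exactly once, i.e. a single burst per stream.
	 */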
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);

	return 0;
}
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t   pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (init_fwd_streams() < 0) {
		printf("Fail from init_fwd_streams()\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
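/*
 * Usage sketch (illustrative): start_packet_forwarding(1) corresponds to
 * the interactive "start tx_first" command; each pending TXONLY iteration
 * sends one burst on every forwarding port before the configured engine
 * takes over.
 */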
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	int i;
	portid_t   pt_id;
	streamid_t sm_id;
	lcoreid_t  lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf  += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}
	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up((uint8_t)pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down((uint8_t)pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				    RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool *mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							     event_type,
							     eth_event_callback,
							     NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
				       event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	return 0;
}
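/*
 * Descriptive note (added): port state transitions above rely on
 * rte_atomic16_cmpset(), so a port only moves
 * RTE_PORT_STOPPED -> RTE_PORT_HANDLING -> RTE_PORT_STARTED when no other
 * context changed its status in between; stop_port() below walks the same
 * states in reverse.
 */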
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to 0 */
	if (check_socket_id(socket_id) < 0)
		socket_id = 0;
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
}
void
detach_port(uint8_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name))
		return;

	nb_ports = rte_eth_dev_count();

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}

typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9 s and print the final result */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	uint8_t portid, count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf("Port %d Link Up - speed %u "
					       "Mbps - %s\n", (uint8_t)portid,
					       (unsigned)link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n",
					       (uint8_t)portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
static void
rmv_event_callback(void *arg)
{
	struct rte_eth_dev *dev;
	struct rte_devargs *da;
	char name[32] = "";
	uint8_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];
	da = dev->device->devargs;

	stop_port(port_id);
	close_port(port_id);
	if (da->type == RTE_DEVTYPE_VIRTUAL)
		snprintf(name, sizeof(name), "%s", da->virt.drv_name);
	else if (da->type == RTE_DEVTYPE_WHITELISTED_PCI)
		rte_pci_device_name(&da->pci.addr, name, sizeof(name));
	printf("removing device %s\n", name);
	rte_eal_dev_detach(name);
	dev->state = RTE_ETH_DEV_UNUSED;
}
/* This function is used by the interrupt thread */
static void
eth_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu8 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu8 ": %s event\n", port_id,
		       event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
}
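/*
 * Descriptive note (added): RTE_ETH_EVENT_INTR_RMV is delivered on the
 * interrupt thread, so the actual detach work in rmv_event_callback() is
 * deferred by 100 ms through rte_eal_alarm_set() rather than being done
 * directly in the callback.
 */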
static int
set_tx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(uint8_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(uint8_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
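/*
 * Illustrative example (the exact flag values are assumptions): starting
 * testpmd with "--rx-free-thresh=32" leaves the PMD defaults in place for
 * everything else, since only parameters different from
 * RTE_PMD_PARAM_UNSET are copied over the defaults above.
 */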
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#ifdef RTE_NIC_BYPASS
		rte_eth_dev_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	return port->slave_flag;
}
const uint16_t vlan_tags[] = {
	 0,  1,  2,  3,  4,  5,  6,  7,
	 8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;

	/*
	 * Builds up the correct configuration for dcb+vt based on the vlan tags array
	 * given above, and the number of traffic classes available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i;
			vmdq_tx_conf->dcb_tc[i] = i;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf.rss_hf = rss_hf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
			ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
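/*
 * Worked example (illustrative, derived from the loop above): with
 * num_tcs = ETH_4_TCS, the 8 user priorities are mapped to traffic
 * classes as i % 4, i.e. priorities 0..7 -> TCs 0,1,2,3,0,1,2,3.
 */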
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(&port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	(void)rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
		       " for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	uint8_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent it from being fully tested.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
			       " returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
		       latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				 "Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
	} else
#endif
	{
		char c;
		int rc;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(0);
		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}