/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <stdio.h>
#include <string.h>
#include <signal.h>
#include <errno.h>
#include <inttypes.h>
#include <unistd.h>
#include <limits.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/queue.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_byteorder.h>
#include <rte_log.h>
#include <rte_debug.h>
#include <rte_cycles.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_alarm.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
#include <rte_mempool.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_dev.h>
#include <rte_string_fns.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
#ifdef RTE_LIBRTE_PDUMP
#include <rte_pdump.h>
#endif
#include <rte_metrics.h>
#ifdef RTE_LIBRTE_BITRATE
#include <rte_bitrate.h>
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
#include <rte_latencystats.h>
#endif

#include "testpmd.h"
uint16_t verbose_level = 0; /**< Silent by default. */

/* Use the master core for the command line? */
uint8_t interactive = 0;
uint8_t auto_start = 0;
char cmdline_filename[PATH_MAX] = {0};
/*
 * NUMA support configuration.
 * When set, the NUMA support attempts to dispatch the allocation of the
 * RX and TX memory rings, and of the DMA memory buffers (mbufs) for the
 * probed ports among the CPU sockets 0 and 1.
 * Otherwise, all memory is allocated from CPU socket 0.
 */
uint8_t numa_support = 1; /**< numa enabled by default */

/*
 * In UMA mode, all memory is allocated from socket 0 if --socket-num is
 * not specified.
 */
uint8_t socket_num = UMA_NO_CONFIG;
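/*
 * Illustrative example: running testpmd with "--no-numa --socket-num=1"
 * takes the UMA path but forces every mbuf pool allocation onto CPU
 * socket 1 instead of the socket-0 default (see init_config() below).
 */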
/*
 * Use ANONYMOUS mapped memory (might not be physically contiguous) for mbufs.
 */
uint8_t mp_anon = 0;

/*
 * Record the Ethernet address of peer target ports to which packets are
 * forwarded.
 * Must be instantiated with the Ethernet addresses of peer traffic generator
 * ports.
 */
struct ether_addr peer_eth_addrs[RTE_MAX_ETHPORTS];
portid_t nb_peer_eth_addrs = 0;
/*
 * Probed Target Environment.
 */
struct rte_port *ports;	       /**< For all probed ethernet ports. */
portid_t nb_ports;             /**< Number of probed ethernet ports. */
struct fwd_lcore **fwd_lcores; /**< For all probed logical cores. */
lcoreid_t nb_lcores;           /**< Number of probed logical cores. */

/*
 * Test Forwarding Configuration.
 *    nb_fwd_lcores <= nb_cfg_lcores <= nb_lcores
 *    nb_fwd_ports  <= nb_cfg_ports  <= nb_ports
 */
lcoreid_t nb_cfg_lcores; /**< Number of configured logical cores. */
lcoreid_t nb_fwd_lcores; /**< Number of forwarding logical cores. */
portid_t  nb_cfg_ports;  /**< Number of configured ports. */
portid_t  nb_fwd_ports;  /**< Number of forwarding ports. */

unsigned int fwd_lcores_cpuids[RTE_MAX_LCORE]; /**< CPU ids configuration. */
portid_t fwd_ports_ids[RTE_MAX_ETHPORTS];      /**< Port ids configuration. */

struct fwd_stream **fwd_streams; /**< For each RX queue of each port. */
streamid_t nb_fwd_streams;       /**< Is equal to (nb_ports * nb_rxq). */
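/*
 * Worked example (illustrative): with 2 probed ports and nb_rxq = 4,
 * init_fwd_streams() below allocates nb_ports * RTE_MAX(nb_rxq, nb_txq)
 * = 2 * 4 = 8 forwarding streams.
 */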
/*
 * Forwarding engines.
 */
struct fwd_engine * fwd_engines[] = {
	&io_fwd_engine,
	&tx_only_engine,
	&csum_fwd_engine,
#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
	&softnic_tm_engine,
	&softnic_tm_bypass_engine,
#endif
#ifdef RTE_LIBRTE_IEEE1588
	&ieee1588_fwd_engine,
#endif
	NULL,
};

struct fwd_config cur_fwd_config;
struct fwd_engine *cur_fwd_eng = &io_fwd_engine; /**< IO mode by default. */
uint32_t retry_enabled;
uint32_t burst_tx_delay_time = BURST_TX_WAIT_US;
uint32_t burst_tx_retry_num = BURST_TX_RETRIES;
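/*
 * With the defaults above, a TX burst that cannot be fully transmitted is
 * retried up to burst_tx_retry_num times, waiting burst_tx_delay_time
 * microseconds between attempts (only when retry is enabled for a queue).
 */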
uint16_t mbuf_data_size = DEFAULT_MBUF_DATA_SIZE; /**< Mbuf data space size. */
uint32_t param_total_num_mbufs = 0;  /**< number of mbufs in all pools - if
                                      * specified on command-line. */
uint16_t stats_period; /**< Period to show statistics (disabled by default) */

/*
 * In a container it may be impossible to terminate from outside a process
 * running with the 'stats-period' option. Set a flag to exit the
 * stats-period loop once SIGINT/SIGTERM has been received.
 */
uint8_t f_quit;
/*
 * Configuration of packet segments used by the "txonly" processing engine.
 */
uint16_t tx_pkt_length = TXONLY_DEF_PACKET_LEN; /**< TXONLY packet length. */
uint16_t tx_pkt_seg_lengths[RTE_MAX_SEGS_PER_PKT] = {
	TXONLY_DEF_PACKET_LEN, /* tx_pkt_seg_lengths[0] */
};
uint8_t  tx_pkt_nb_segs = 1; /**< Number of segments in TXONLY packets */

enum tx_pkt_split tx_pkt_split = TX_PKT_SPLIT_OFF;
/**< Split policy for packets to TX. */

uint16_t nb_pkt_per_burst = DEF_PKT_BURST; /**< Number of packets per burst. */
uint16_t mb_mempool_cache = DEF_MBUF_CACHE; /**< Size of mbuf mempool cache. */

/* Current configuration is in DCB or not; 0 means it is not in DCB mode. */
uint8_t dcb_config = 0;

/* Whether DCB is in testing status. */
uint8_t dcb_test = 0;
/*
 * Configurable number of RX/TX queues.
 */
queueid_t nb_rxq = 1; /**< Number of RX queues per port. */
queueid_t nb_txq = 1; /**< Number of TX queues per port. */

/*
 * Configurable number of RX/TX ring descriptors.
 */
#define RTE_TEST_RX_DESC_DEFAULT 128
#define RTE_TEST_TX_DESC_DEFAULT 512
uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT; /**< Number of RX descriptors. */
uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT; /**< Number of TX descriptors. */
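/*
 * Note: these are per-queue ring sizes; they are normally overridden from
 * the command line (e.g. the --rxd and --txd testpmd options) and can be
 * changed before ports are (re)started.
 */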
#define RTE_PMD_PARAM_UNSET -1
/*
 * Configurable values of RX and TX ring threshold registers.
 */

int8_t rx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t rx_wthresh = RTE_PMD_PARAM_UNSET;

int8_t tx_pthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_hthresh = RTE_PMD_PARAM_UNSET;
int8_t tx_wthresh = RTE_PMD_PARAM_UNSET;
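/*
 * RTE_PMD_PARAM_UNSET (-1) means "keep the PMD's default":
 * rxtx_port_config() further below copies the driver defaults first and
 * only overwrites the fields that were explicitly set on the command line.
 */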
/*
 * Configurable value of RX free threshold.
 */
int16_t rx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of RX drop enable.
 */
int8_t rx_drop_en = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX free threshold.
 */
int16_t tx_free_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX RS bit threshold.
 */
int16_t tx_rs_thresh = RTE_PMD_PARAM_UNSET;

/*
 * Configurable value of TX queue flags.
 */
int32_t txq_flags = RTE_PMD_PARAM_UNSET;
/*
 * Receive Side Scaling (RSS) configuration.
 */
uint64_t rss_hf = ETH_RSS_IP; /* RSS IP by default. */

/*
 * Port topology configuration
 */
uint16_t port_topology = PORT_TOPOLOGY_PAIRED; /* Ports are paired by default */
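/*
 * In the default "paired" topology, port 0 forwards to port 1 and vice
 * versa (2<->3, 4<->5, ...); testpmd also supports, e.g., a "chained"
 * topology selectable with the --port-topology option.
 */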
/*
 * Avoid flushing all the RX streams before starting forwarding.
 */
uint8_t no_flush_rx = 0; /* flush by default */

/*
 * Flow API isolated mode.
 */
uint8_t flow_isolate_all;

/*
 * Avoid checking link status when starting/stopping ports.
 */
uint8_t no_link_check = 0; /* check by default */

/*
 * Enable link status change notification
 */
uint8_t lsc_interrupt = 1; /* enabled by default */

/*
 * Enable device removal notification.
 */
uint8_t rmv_interrupt = 1; /* enabled by default */
/*
 * Display or mask ether events
 * Default to all events except VF_MBOX
 */
uint32_t event_print_mask = (UINT32_C(1) << RTE_ETH_EVENT_UNKNOWN) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_LSC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_QUEUE_STATE) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RESET) |
			    (UINT32_C(1) << RTE_ETH_EVENT_MACSEC) |
			    (UINT32_C(1) << RTE_ETH_EVENT_INTR_RMV);
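/*
 * eth_event_callback() below prints an event only when its bit is set
 * here, i.e. when (event_print_mask & (UINT32_C(1) << type)) is non-zero;
 * VF_MBOX is the only event type left out of the default mask.
 */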
/*
 * NIC bypass mode configuration options.
 */

#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
/* The NIC bypass watchdog timeout. */
uint32_t bypass_timeout = RTE_PMD_IXGBE_BYPASS_TMT_OFF;
#endif

#ifdef RTE_LIBRTE_LATENCY_STATS

/*
 * Set when latency stats is enabled in the commandline
 */
uint8_t latencystats_enabled;

/*
 * Lcore ID to serve latency statistics.
 */
lcoreid_t latencystats_lcore_id = -1;

#endif
/*
 * Ethernet device configuration.
 */
struct rte_eth_rxmode rx_mode = {
	.max_rx_pkt_len = ETHER_MAX_LEN, /**< Default maximum frame length. */
	.split_hdr_size = 0,
	.header_split   = 0, /**< Header Split disabled. */
	.hw_ip_checksum = 0, /**< IP checksum offload disabled. */
	.hw_vlan_filter = 1, /**< VLAN filtering enabled. */
	.hw_vlan_strip  = 1, /**< VLAN strip enabled. */
	.hw_vlan_extend = 0, /**< Extended VLAN disabled. */
	.jumbo_frame    = 0, /**< Jumbo Frame Support disabled. */
	.hw_strip_crc   = 1, /**< CRC stripping by hardware enabled. */
	.hw_timestamp   = 0, /**< HW timestamping disabled. */
};
struct rte_fdir_conf fdir_conf = {
	.mode = RTE_FDIR_MODE_NONE,
	.pballoc = RTE_FDIR_PBALLOC_64K,
	.status = RTE_FDIR_REPORT_STATUS,
	.mask = {
		.vlan_tci_mask = 0xEFFF,
		.ipv4_mask     = {
			.src_ip = 0xFFFFFFFF,
			.dst_ip = 0xFFFFFFFF,
		},
		.ipv6_mask     = {
			.src_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
			.dst_ip = {0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF},
		},
		.src_port_mask = 0xFFFF,
		.dst_port_mask = 0xFFFF,
		.mac_addr_byte_mask = 0xFF,
		.tunnel_type_mask = 1,
		.tunnel_id_mask = 0xFFFFFFFF,
	},
	.drop_queue = 127,
};
volatile int test_done = 1; /* stop packet forwarding when set to 1. */

struct queue_stats_mappings tx_queue_stats_mappings_array[MAX_TX_QUEUE_STATS_MAPPINGS];
struct queue_stats_mappings rx_queue_stats_mappings_array[MAX_RX_QUEUE_STATS_MAPPINGS];

struct queue_stats_mappings *tx_queue_stats_mappings = tx_queue_stats_mappings_array;
struct queue_stats_mappings *rx_queue_stats_mappings = rx_queue_stats_mappings_array;

uint16_t nb_tx_queue_stats_mappings = 0;
uint16_t nb_rx_queue_stats_mappings = 0;

/*
 * Display zero values by default for xstats
 */
uint8_t xstats_hide_zero;
unsigned int num_sockets = 0;
unsigned int socket_ids[RTE_MAX_NUMA_NODES];

#ifdef RTE_LIBRTE_BITRATE
/* Bitrate statistics */
struct rte_stats_bitrates *bitrate_data;
lcoreid_t bitrate_lcore_id;
uint8_t bitrate_enabled;
#endif

struct gro_status gro_ports[RTE_MAX_ETHPORTS];
uint8_t gro_flush_cycles = GRO_DEFAULT_FLUSH_CYCLES;
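/*
 * gro_flush_cycles controls how many forwarding iterations reassembled
 * packets may be held before the GRO context is flushed; the per-lcore GRO
 * contexts themselves are created in init_config() below.
 */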
/* Forward function declarations */
static void map_port_queue_stats_mapping_registers(portid_t pi,
						   struct rte_port *port);
static void check_all_ports_link_status(uint32_t port_mask);
static int eth_event_callback(portid_t port_id,
			      enum rte_eth_event_type type,
			      void *param, void *ret_param);

/*
 * Check if all the ports are started.
 * If yes, return positive value. If not, return zero.
 */
static int all_ports_started(void);

struct gso_status gso_ports[RTE_MAX_ETHPORTS];
uint16_t gso_max_segment_size = ETHER_MAX_LEN - ETHER_CRC_LEN;
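/*
 * ETHER_MAX_LEN - ETHER_CRC_LEN = 1518 - 4 = 1514 bytes, i.e. GSO output
 * segments default to the largest standard Ethernet frame minus the CRC,
 * which the hardware appends on transmit.
 */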
/*
 * Helper function to check whether a socket id is new (not yet discovered).
 * Returns a positive value if the socket id is new, zero otherwise.
 */
int
new_socket_id(unsigned int socket_id)
{
	unsigned int i;

	for (i = 0; i < num_sockets; i++) {
		if (socket_ids[i] == socket_id)
			return 0;
	}
	return 1;
}
/*
 * Setup default configuration.
 */
static void
set_default_fwd_lcores_config(void)
{
	unsigned int i;
	unsigned int nb_lc;
	unsigned int sock_num;

	nb_lc = 0;
	for (i = 0; i < RTE_MAX_LCORE; i++) {
		if (!rte_lcore_is_enabled(i))
			continue;
		sock_num = rte_lcore_to_socket_id(i);
		if (new_socket_id(sock_num)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = sock_num;
		}
		if (i == rte_get_master_lcore())
			continue;
		fwd_lcores_cpuids[nb_lc++] = i;
	}
	nb_lcores = (lcoreid_t) nb_lc;
	nb_cfg_lcores = nb_lcores;
	nb_fwd_lcores = 1;
}
static void
set_def_peer_eth_addrs(void)
{
	portid_t i;

	for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
		peer_eth_addrs[i].addr_bytes[0] = ETHER_LOCAL_ADMIN_ADDR;
		peer_eth_addrs[i].addr_bytes[5] = i;
	}
}
static void
set_default_fwd_ports_config(void)
{
	portid_t pt_id;
	int i = 0;

	RTE_ETH_FOREACH_DEV(pt_id) {
		fwd_ports_ids[i++] = pt_id;

		/* Update sockets info according to the attached device */
		int socket_id = rte_eth_dev_socket_id(pt_id);
		if (socket_id >= 0 && new_socket_id(socket_id)) {
			if (num_sockets >= RTE_MAX_NUMA_NODES) {
				rte_exit(EXIT_FAILURE,
					 "Total sockets greater than %u\n",
					 RTE_MAX_NUMA_NODES);
			}
			socket_ids[num_sockets++] = socket_id;
		}
	}

	nb_cfg_ports = nb_ports;
	nb_fwd_ports = nb_ports;
}
void
set_def_fwd_config(void)
{
	set_default_fwd_lcores_config();
	set_def_peer_eth_addrs();
	set_default_fwd_ports_config();
}
/*
 * Configuration initialisation done once at init time.
 */
static void
mbuf_pool_create(uint16_t mbuf_seg_size, unsigned nb_mbuf,
		 unsigned int socket_id)
{
	char pool_name[RTE_MEMPOOL_NAMESIZE];
	struct rte_mempool *rte_mp = NULL;
	uint32_t mb_size;

	mb_size = sizeof(struct rte_mbuf) + mbuf_seg_size;
	mbuf_poolname_build(socket_id, pool_name, sizeof(pool_name));

	RTE_LOG(INFO, USER1,
		"create a new mbuf pool <%s>: n=%u, size=%u, socket=%u\n",
		pool_name, nb_mbuf, mbuf_seg_size, socket_id);

	if (mp_anon != 0) {
		rte_mp = rte_mempool_create_empty(pool_name, nb_mbuf,
			mb_size, (unsigned) mb_mempool_cache,
			sizeof(struct rte_pktmbuf_pool_private),
			socket_id, 0);
		if (rte_mp == NULL)
			goto err;

		if (rte_mempool_populate_anon(rte_mp) == 0) {
			rte_mempool_free(rte_mp);
			rte_mp = NULL;
			goto err;
		}
		rte_pktmbuf_pool_init(rte_mp, NULL);
		rte_mempool_obj_iter(rte_mp, rte_pktmbuf_init, NULL);
	} else {
		/* wrapper to rte_mempool_create() */
		rte_mp = rte_pktmbuf_pool_create(pool_name, nb_mbuf,
			mb_mempool_cache, 0, mbuf_seg_size, socket_id);
	}

err:
	if (rte_mp == NULL) {
		rte_exit(EXIT_FAILURE,
			"Creation of mbuf pool for socket %u failed: %s\n",
			socket_id, rte_strerror(rte_errno));
	} else if (verbose_level > 0) {
		rte_mempool_dump(stdout, rte_mp);
	}
}
/*
 * Check whether the given socket id is valid in NUMA mode;
 * if valid, return 0, else return -1.
 */
static int
check_socket_id(const unsigned int socket_id)
{
	static int warning_once = 0;

	if (new_socket_id(socket_id)) {
		if (!warning_once && numa_support)
			printf("Warning: NUMA should be configured manually by"
			       " using --port-numa-config and"
			       " --ring-numa-config parameters along with"
			       " --numa.\n");
		warning_once = 1;
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of RX queues.
 * *pid returns the id of the port with the smallest value of
 * max_rx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_rxq(portid_t *pid)
{
	queueid_t allowed_max_rxq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_rx_queues < allowed_max_rxq) {
			allowed_max_rxq = dev_info.max_rx_queues;
			*pid = pi;
		}
	}
	return allowed_max_rxq;
}
/*
 * Check whether the requested number of RX queues is valid.
 * It is valid if it does not exceed the maximum number of RX queues
 * of any port; return 0 if valid, -1 otherwise.
 */
int
check_nb_rxq(queueid_t rxq)
{
	queueid_t allowed_max_rxq;
	portid_t pid = 0;

	allowed_max_rxq = get_allowed_max_nb_rxq(&pid);
	if (rxq > allowed_max_rxq) {
		printf("Fail: input rxq (%u) can't be greater "
		       "than max_rx_queues (%u) of port %u\n",
		       rxq, allowed_max_rxq, pid);
		return -1;
	}
	return 0;
}
/*
 * Get the allowed maximum number of TX queues.
 * *pid returns the id of the port with the smallest value of
 * max_tx_queues among all ports.
 */
queueid_t
get_allowed_max_nb_txq(portid_t *pid)
{
	queueid_t allowed_max_txq = MAX_QUEUE_ID;
	portid_t pi;
	struct rte_eth_dev_info dev_info;

	RTE_ETH_FOREACH_DEV(pi) {
		rte_eth_dev_info_get(pi, &dev_info);
		if (dev_info.max_tx_queues < allowed_max_txq) {
			allowed_max_txq = dev_info.max_tx_queues;
			*pid = pi;
		}
	}
	return allowed_max_txq;
}
/*
 * Check whether the requested number of TX queues is valid.
 * It is valid if it does not exceed the maximum number of TX queues
 * of any port; return 0 if valid, -1 otherwise.
 */
int
check_nb_txq(queueid_t txq)
{
	queueid_t allowed_max_txq;
	portid_t pid = 0;

	allowed_max_txq = get_allowed_max_nb_txq(&pid);
	if (txq > allowed_max_txq) {
		printf("Fail: input txq (%u) can't be greater "
		       "than max_tx_queues (%u) of port %u\n",
		       txq, allowed_max_txq, pid);
		return -1;
	}
	return 0;
}
static void
init_config(void)
{
	portid_t pid;
	struct rte_port *port;
	struct rte_mempool *mbp;
	unsigned int nb_mbuf_per_pool;
	lcoreid_t  lc_id;
	uint8_t port_per_socket[RTE_MAX_NUMA_NODES];
	struct rte_gro_param gro_param;
	uint32_t gso_types;

	memset(port_per_socket, 0, RTE_MAX_NUMA_NODES);

	/* Configuration of logical cores. */
	fwd_lcores = rte_zmalloc("testpmd: fwd_lcores",
				 sizeof(struct fwd_lcore *) * nb_lcores,
				 RTE_CACHE_LINE_SIZE);
	if (fwd_lcores == NULL) {
		rte_exit(EXIT_FAILURE, "rte_zmalloc(%d (struct fwd_lcore *)) "
			 "failed\n", nb_lcores);
	}
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		fwd_lcores[lc_id] = rte_zmalloc("testpmd: struct fwd_lcore",
						sizeof(struct fwd_lcore),
						RTE_CACHE_LINE_SIZE);
		if (fwd_lcores[lc_id] == NULL) {
			rte_exit(EXIT_FAILURE, "rte_zmalloc(struct fwd_lcore) "
				 "failed\n");
		}
		fwd_lcores[lc_id]->cpuid_idx = lc_id;
	}

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		rte_eth_dev_info_get(pid, &port->dev_info);

		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port_per_socket[port_numa[pid]]++;
			else {
				uint32_t socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(socket_id) < 0)
					socket_id = socket_ids[0];
				port_per_socket[socket_id]++;
			}
		}

		/* set flag to initialize port/queue */
		port->need_reconfig = 1;
		port->need_reconfig_queues = 1;
	}
	/*
	 * Create pools of mbuf.
	 * If NUMA support is disabled, create a single pool of mbuf in
	 * socket 0 memory by default.
	 * Otherwise, create a pool of mbuf in the memory of sockets 0 and 1.
	 *
	 * Use the maximum value of nb_rxd and nb_txd here, then nb_rxd and
	 * nb_txd can be configured at run time.
	 */
	if (param_total_num_mbufs)
		nb_mbuf_per_pool = param_total_num_mbufs;
	else {
		nb_mbuf_per_pool = RTE_TEST_RX_DESC_MAX +
			(nb_lcores * mb_mempool_cache) +
			RTE_TEST_TX_DESC_MAX + MAX_PKT_BURST;
		nb_mbuf_per_pool *= RTE_MAX_ETHPORTS;
	}
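	/*
	 * Worked example with illustrative numbers: if RTE_TEST_RX_DESC_MAX
	 * and RTE_TEST_TX_DESC_MAX were 2048 each, with 4 lcores and a
	 * mempool cache of 250, each pool would hold
	 * (2048 + 4 * 250 + 2048 + MAX_PKT_BURST) mbufs per possible port,
	 * i.e. enough to fill every RX/TX ring plus the per-lcore caches.
	 */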
	if (numa_support) {
		uint8_t i;

		for (i = 0; i < num_sockets; i++)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_ids[i]);
	} else {
		if (socket_num == UMA_NO_CONFIG)
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool, 0);
		else
			mbuf_pool_create(mbuf_data_size, nb_mbuf_per_pool,
					 socket_num);
	}

	init_port_config();

	gso_types = DEV_TX_OFFLOAD_TCP_TSO | DEV_TX_OFFLOAD_VXLAN_TNL_TSO |
		DEV_TX_OFFLOAD_GRE_TNL_TSO;
	/*
	 * Records which Mbuf pool to use by each logical core, if needed.
	 */
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		mbp = mbuf_pool_find(
			rte_lcore_to_socket_id(fwd_lcores_cpuids[lc_id]));

		if (mbp == NULL)
			mbp = mbuf_pool_find(0);
		fwd_lcores[lc_id]->mbp = mbp;
		/* initialize GSO context */
		fwd_lcores[lc_id]->gso_ctx.direct_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.indirect_pool = mbp;
		fwd_lcores[lc_id]->gso_ctx.gso_types = gso_types;
		fwd_lcores[lc_id]->gso_ctx.gso_size = ETHER_MAX_LEN -
			ETHER_CRC_LEN;
		fwd_lcores[lc_id]->gso_ctx.flag = 0;
	}
	/* Configuration of packet forwarding streams. */
	if (init_fwd_streams() < 0)
		rte_exit(EXIT_FAILURE, "FAIL from init_fwd_streams()\n");

	fwd_config_setup();

	/* create a gro context for each lcore */
	gro_param.gro_types = RTE_GRO_TCP_IPV4;
	gro_param.max_flow_num = GRO_MAX_FLUSH_CYCLES;
	gro_param.max_item_per_flow = MAX_PKT_BURST;
	for (lc_id = 0; lc_id < nb_lcores; lc_id++) {
		gro_param.socket_id = rte_lcore_to_socket_id(
				fwd_lcores_cpuids[lc_id]);
		fwd_lcores[lc_id]->gro_ctx = rte_gro_ctx_create(&gro_param);
		if (fwd_lcores[lc_id]->gro_ctx == NULL) {
			rte_exit(EXIT_FAILURE,
				 "rte_gro_ctx_create() failed\n");
		}
	}
}
void
reconfig(portid_t new_port_id, unsigned socket_id)
{
	struct rte_port *port;

	/* Reconfiguration of Ethernet ports. */
	port = &ports[new_port_id];
	rte_eth_dev_info_get(new_port_id, &port->dev_info);

	/* set flag to initialize port/queue */
	port->need_reconfig = 1;
	port->need_reconfig_queues = 1;
	port->socket_id = socket_id;

	init_port_config();
}
int
init_fwd_streams(void)
{
	portid_t pid;
	struct rte_port *port;
	streamid_t sm_id, nb_fwd_streams_new;
	queueid_t q;

	/* set socket id according to numa or not */
	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		if (nb_rxq > port->dev_info.max_rx_queues) {
			printf("Fail: nb_rxq(%d) is greater than "
			       "max_rx_queues(%d)\n", nb_rxq,
			       port->dev_info.max_rx_queues);
			return -1;
		}
		if (nb_txq > port->dev_info.max_tx_queues) {
			printf("Fail: nb_txq(%d) is greater than "
			       "max_tx_queues(%d)\n", nb_txq,
			       port->dev_info.max_tx_queues);
			return -1;
		}
		if (numa_support) {
			if (port_numa[pid] != NUMA_NO_CONFIG)
				port->socket_id = port_numa[pid];
			else {
				port->socket_id = rte_eth_dev_socket_id(pid);

				/*
				 * if socket_id is invalid,
				 * set to the first available socket.
				 */
				if (check_socket_id(port->socket_id) < 0)
					port->socket_id = socket_ids[0];
			}
		} else {
			if (socket_num == UMA_NO_CONFIG)
				port->socket_id = 0;
			else
				port->socket_id = socket_num;
		}
	}

	q = RTE_MAX(nb_rxq, nb_txq);
	if (q == 0) {
		printf("Fail: Cannot allocate fwd streams as number of queues is 0\n");
		return -1;
	}
	nb_fwd_streams_new = (streamid_t)(nb_ports * q);
	if (nb_fwd_streams_new == nb_fwd_streams)
		return 0;
	/* clear the old */
	if (fwd_streams != NULL) {
		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			if (fwd_streams[sm_id] == NULL)
				continue;
			rte_free(fwd_streams[sm_id]);
			fwd_streams[sm_id] = NULL;
		}
		rte_free(fwd_streams);
		fwd_streams = NULL;
	}

	/* init new */
	nb_fwd_streams = nb_fwd_streams_new;
	if (nb_fwd_streams) {
		fwd_streams = rte_zmalloc("testpmd: fwd_streams",
			sizeof(struct fwd_stream *) * nb_fwd_streams,
			RTE_CACHE_LINE_SIZE);
		if (fwd_streams == NULL)
			rte_exit(EXIT_FAILURE, "rte_zmalloc(%d"
				 " (struct fwd_stream *)) failed\n",
				 nb_fwd_streams);

		for (sm_id = 0; sm_id < nb_fwd_streams; sm_id++) {
			fwd_streams[sm_id] = rte_zmalloc("testpmd:"
				" struct fwd_stream", sizeof(struct fwd_stream),
				RTE_CACHE_LINE_SIZE);
			if (fwd_streams[sm_id] == NULL)
				rte_exit(EXIT_FAILURE, "rte_zmalloc"
					 "(struct fwd_stream) failed\n");
		}
	}

	return 0;
}
#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
static void
pkt_burst_stats_display(const char *rx_tx, struct pkt_burst_stats *pbs)
{
	unsigned int total_burst;
	unsigned int nb_burst;
	unsigned int burst_stats[3];
	uint16_t pktnb_stats[3];
	uint16_t nb_pkt;
	int burst_percent[3];

	/*
	 * First compute the total number of packet bursts and the
	 * two highest numbers of bursts of the same number of packets.
	 */
	total_burst = 0;
	burst_stats[0] = burst_stats[1] = burst_stats[2] = 0;
	pktnb_stats[0] = pktnb_stats[1] = pktnb_stats[2] = 0;
	for (nb_pkt = 0; nb_pkt < MAX_PKT_BURST; nb_pkt++) {
		nb_burst = pbs->pkt_burst_spread[nb_pkt];
		if (nb_burst == 0)
			continue;
		total_burst += nb_burst;
		if (nb_burst > burst_stats[0]) {
			burst_stats[1] = burst_stats[0];
			pktnb_stats[1] = pktnb_stats[0];
			burst_stats[0] = nb_burst;
			pktnb_stats[0] = nb_pkt;
		} else if (nb_burst > burst_stats[1]) {
			burst_stats[1] = nb_burst;
			pktnb_stats[1] = nb_pkt;
		}
	}
	if (total_burst == 0)
		return;
	burst_percent[0] = (burst_stats[0] * 100) / total_burst;
	printf("  %s-bursts : %u [%d%% of %d pkts", rx_tx, total_burst,
	       burst_percent[0], (int) pktnb_stats[0]);
	if (burst_stats[0] == total_burst) {
		printf("]\n");
		return;
	}
	if (burst_stats[0] + burst_stats[1] == total_burst) {
		printf(" + %d%% of %d pkts]\n",
		       100 - burst_percent[0], pktnb_stats[1]);
		return;
	}
	burst_percent[1] = (burst_stats[1] * 100) / total_burst;
	burst_percent[2] = 100 - (burst_percent[0] + burst_percent[1]);
	if ((burst_percent[1] == 0) || (burst_percent[2] == 0)) {
		printf(" + %d%% of others]\n", 100 - burst_percent[0]);
		return;
	}
	printf(" + %d%% of %d pkts + %d%% of others]\n",
	       burst_percent[1], (int) pktnb_stats[1], burst_percent[2]);
}
#endif /* RTE_TEST_PMD_RECORD_BURST_STATS */
static void
fwd_port_stats_display(portid_t port_id, struct rte_eth_stats *stats)
{
	struct rte_port *port;
	uint8_t i;
	static const char *fwd_stats_border = "----------------------";

	port = &ports[port_id];
	printf("\n  %s Forward statistics for port %-2d %s\n",
	       fwd_stats_border, port_id, fwd_stats_border);

	if ((!port->rx_queue_stats_mapping_enabled) && (!port->tx_queue_stats_mapping_enabled)) {
		printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
		       "%-"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum: %-14"PRIu64" Bad-l4csum: %-14"PRIu64" \n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error: %-"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %-14"PRIu64"\n", stats->rx_nombuf);
		}

		printf("  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
		       "%-"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	} else {
		printf("  RX-packets: %14"PRIu64" RX-dropped:%14"PRIu64" RX-total:"
		       "%14"PRIu64"\n",
		       stats->ipackets, stats->imissed,
		       (uint64_t) (stats->ipackets + stats->imissed));

		if (cur_fwd_eng == &csum_fwd_engine)
			printf("  Bad-ipcsum:%14"PRIu64" Bad-l4csum:%14"PRIu64"\n",
			       port->rx_bad_ip_csum, port->rx_bad_l4_csum);
		if ((stats->ierrors + stats->rx_nombuf) > 0) {
			printf("  RX-error:%"PRIu64"\n", stats->ierrors);
			printf("  RX-nombufs: %14"PRIu64"\n",
			       stats->rx_nombuf);
		}

		printf("  TX-packets: %14"PRIu64" TX-dropped:%14"PRIu64" TX-total:"
		       "%14"PRIu64"\n",
		       stats->opackets, port->tx_dropped,
		       (uint64_t) (stats->opackets + port->tx_dropped));
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	if (port->rx_stream)
		pkt_burst_stats_display("RX",
			&port->rx_stream->rx_burst_stats);
	if (port->tx_stream)
		pkt_burst_stats_display("TX",
			&port->tx_stream->tx_burst_stats);
#endif

	if (port->rx_queue_stats_mapping_enabled) {
		printf("\n");
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d RX-packets:%14"PRIu64
			       " RX-errors:%14"PRIu64
			       " RX-bytes:%14"PRIu64"\n",
			       i, stats->q_ipackets[i], stats->q_errors[i], stats->q_ibytes[i]);
		}
		printf("\n");
	}
	if (port->tx_queue_stats_mapping_enabled) {
		for (i = 0; i < RTE_ETHDEV_QUEUE_STAT_CNTRS; i++) {
			printf("  Stats reg %2d TX-packets:%14"PRIu64
			       " TX-bytes:%14"PRIu64"\n",
			       i, stats->q_opackets[i], stats->q_obytes[i]);
		}
	}

	printf("  %s--------------------------------%s\n",
	       fwd_stats_border, fwd_stats_border);
}
static void
fwd_stream_stats_display(streamid_t stream_id)
{
	struct fwd_stream *fs;
	static const char *fwd_top_stats_border = "-------";

	fs = fwd_streams[stream_id];
	if ((fs->rx_packets == 0) && (fs->tx_packets == 0) &&
	    (fs->fwd_dropped == 0))
		return;
	printf("\n  %s Forward Stats for RX Port=%2d/Queue=%2d -> "
	       "TX Port=%2d/Queue=%2d %s\n",
	       fwd_top_stats_border, fs->rx_port, fs->rx_queue,
	       fs->tx_port, fs->tx_queue, fwd_top_stats_border);
	printf("  RX-packets: %-14u TX-packets: %-14u TX-dropped: %-14u",
	       fs->rx_packets, fs->tx_packets, fs->fwd_dropped);

	/* if checksum mode */
	if (cur_fwd_eng == &csum_fwd_engine) {
		printf("  RX- bad IP checksum: %-14u RX- bad L4 checksum: "
		       "%-14u\n", fs->rx_bad_ip_csum, fs->rx_bad_l4_csum);
	}

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
	pkt_burst_stats_display("RX", &fs->rx_burst_stats);
	pkt_burst_stats_display("TX", &fs->tx_burst_stats);
#endif
}
static void
flush_fwd_rx_queues(void)
{
	struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
	portid_t  rxp;
	portid_t  port_id;
	queueid_t rxq;
	uint16_t  nb_rx;
	uint16_t  i;
	uint8_t   j;
	uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
	uint64_t timer_period;

	/* convert to number of cycles */
	timer_period = rte_get_timer_hz(); /* 1 second timeout */

	for (j = 0; j < 2; j++) {
		for (rxp = 0; rxp < cur_fwd_config.nb_fwd_ports; rxp++) {
			for (rxq = 0; rxq < nb_rxq; rxq++) {
				port_id = fwd_ports_ids[rxp];
				/**
				 * testpmd can get stuck in the below do-while
				 * loop if rte_eth_rx_burst() always returns
				 * nonzero packets. So a timer is added to exit
				 * this loop after the 1sec timer expires.
				 */
				prev_tsc = rte_rdtsc();
				do {
					nb_rx = rte_eth_rx_burst(port_id, rxq,
						pkts_burst, MAX_PKT_BURST);
					for (i = 0; i < nb_rx; i++)
						rte_pktmbuf_free(pkts_burst[i]);

					cur_tsc = rte_rdtsc();
					diff_tsc = cur_tsc - prev_tsc;
					timer_tsc += diff_tsc;
				} while ((nb_rx > 0) &&
					 (timer_tsc < timer_period));
			}
		}
		rte_delay_ms(10); /* wait 10 milli-seconds before retrying */
	}
}
static void
run_pkt_fwd_on_lcore(struct fwd_lcore *fc, packet_fwd_t pkt_fwd)
{
	struct fwd_stream **fsm;
	streamid_t nb_fs;
	streamid_t sm_id;
#ifdef RTE_LIBRTE_BITRATE
	uint64_t tics_per_1sec;
	uint64_t tics_datum;
	uint64_t tics_current;
	uint8_t idx_port, cnt_ports;

	cnt_ports = rte_eth_dev_count();
	tics_datum = rte_rdtsc();
	tics_per_1sec = rte_get_timer_hz();
#endif
	fsm = &fwd_streams[fc->stream_idx];
	nb_fs = fc->stream_nb;
	do {
		for (sm_id = 0; sm_id < nb_fs; sm_id++)
			(*pkt_fwd)(fsm[sm_id]);
#ifdef RTE_LIBRTE_BITRATE
		if (bitrate_enabled != 0 &&
				bitrate_lcore_id == rte_lcore_id()) {
			tics_current = rte_rdtsc();
			if (tics_current - tics_datum >= tics_per_1sec) {
				/* Periodic bitrate calculation */
				for (idx_port = 0;
						idx_port < cnt_ports;
						idx_port++)
					rte_stats_bitrate_calc(bitrate_data,
						idx_port);
				tics_datum = tics_current;
			}
		}
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		if (latencystats_enabled != 0 &&
				latencystats_lcore_id == rte_lcore_id())
			rte_latencystats_update();
#endif
	} while (! fc->stopped);
}
static int
start_pkt_forward_on_core(void *fwd_arg)
{
	run_pkt_fwd_on_lcore((struct fwd_lcore *) fwd_arg,
			     cur_fwd_config.fwd_eng->packet_fwd);
	return 0;
}
/*
 * Run the TXONLY packet forwarding engine to send a single burst of packets.
 * Used to start communication flows in network loopback test configurations.
 */
static int
run_one_txonly_burst_on_core(void *fwd_arg)
{
	struct fwd_lcore *fwd_lc;
	struct fwd_lcore tmp_lcore;

	fwd_lc = (struct fwd_lcore *) fwd_arg;
	tmp_lcore = *fwd_lc;
	tmp_lcore.stopped = 1;
	run_pkt_fwd_on_lcore(&tmp_lcore, tx_only_engine.packet_fwd);
	return 0;
}
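/*
 * Note: the lcore context is copied and its "stopped" flag is pre-set to 1,
 * so the do/while loop in run_pkt_fwd_on_lcore() executes exactly one pass
 * over the streams, i.e. a single burst is sent.
 */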
/*
 * Launch packet forwarding:
 *     - Setup per-port forwarding context.
 *     - launch logical cores with their forwarding configuration.
 */
static void
launch_packet_forwarding(lcore_function_t *pkt_fwd_on_lcore)
{
	port_fwd_begin_t port_fwd_begin;
	unsigned int i;
	unsigned int lc_id;
	int diag;

	port_fwd_begin = cur_fwd_config.fwd_eng->port_fwd_begin;
	if (port_fwd_begin != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
			(*port_fwd_begin)(fwd_ports_ids[i]);
	}
	for (i = 0; i < cur_fwd_config.nb_fwd_lcores; i++) {
		lc_id = fwd_lcores_cpuids[i];
		if ((interactive == 0) || (lc_id != rte_lcore_id())) {
			fwd_lcores[i]->stopped = 0;
			diag = rte_eal_remote_launch(pkt_fwd_on_lcore,
						     fwd_lcores[i], lc_id);
			if (diag != 0)
				printf("launch lcore %u failed - diag=%d\n",
				       lc_id, diag);
		}
	}
}
/*
 * Update the forward ports list.
 */
void
update_fwd_ports(portid_t new_pid)
{
	unsigned int i;
	unsigned int new_nb_fwd_ports = 0;

	for (i = 0; i < nb_fwd_ports; ++i) {
		if (port_id_is_invalid(fwd_ports_ids[i], DISABLED_WARN))
			continue;

		fwd_ports_ids[new_nb_fwd_ports++] = fwd_ports_ids[i];
	}
	if (new_pid < RTE_MAX_ETHPORTS)
		fwd_ports_ids[new_nb_fwd_ports++] = new_pid;

	nb_fwd_ports = new_nb_fwd_ports;
	nb_cfg_ports = new_nb_fwd_ports;
}
/*
 * Launch packet forwarding configuration.
 */
void
start_packet_forwarding(int with_tx_first)
{
	port_fwd_begin_t port_fwd_begin;
	port_fwd_end_t port_fwd_end;
	struct rte_port *port;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;

	if (strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") == 0 && !nb_rxq)
		rte_exit(EXIT_FAILURE, "rxq are 0, cannot use rxonly fwd mode\n");

	if (strcmp(cur_fwd_eng->fwd_mode_name, "txonly") == 0 && !nb_txq)
		rte_exit(EXIT_FAILURE, "txq are 0, cannot use txonly fwd mode\n");

	if ((strcmp(cur_fwd_eng->fwd_mode_name, "rxonly") != 0 &&
	     strcmp(cur_fwd_eng->fwd_mode_name, "txonly") != 0) &&
	    (!nb_rxq || !nb_txq))
		rte_exit(EXIT_FAILURE,
			 "Either rxq or txq are 0, cannot use %s fwd mode\n",
			 cur_fwd_eng->fwd_mode_name);

	if (all_ports_started() == 0) {
		printf("Not all ports were started\n");
		return;
	}
	if (test_done == 0) {
		printf("Packet forwarding already started\n");
		return;
	}

	if (dcb_test) {
		for (i = 0; i < nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			port = &ports[pt_id];
			if (!port->dcb_flag) {
				printf("In DCB mode, all forwarding ports must "
				       "be configured in this mode.\n");
				return;
			}
		}
		if (nb_fwd_lcores == 1) {
			printf("In DCB mode, the number of forwarding cores "
			       "should be larger than 1.\n");
			return;
		}
	}
	test_done = 0;

	fwd_config_setup();

	if (!no_flush_rx)
		flush_fwd_rx_queues();

	pkt_fwd_config_display(&cur_fwd_config);
	rxtx_config_display();

	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];
		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &port->stats);
		port->tx_dropped = 0;

		map_port_queue_stats_mapping_registers(pt_id, port);
	}
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		fwd_streams[sm_id]->rx_packets = 0;
		fwd_streams[sm_id]->tx_packets = 0;
		fwd_streams[sm_id]->fwd_dropped = 0;
		fwd_streams[sm_id]->rx_bad_ip_csum = 0;
		fwd_streams[sm_id]->rx_bad_l4_csum = 0;

#ifdef RTE_TEST_PMD_RECORD_BURST_STATS
		memset(&fwd_streams[sm_id]->rx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->rx_burst_stats));
		memset(&fwd_streams[sm_id]->tx_burst_stats, 0,
		       sizeof(fwd_streams[sm_id]->tx_burst_stats));
#endif
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_streams[sm_id]->core_cycles = 0;
#endif
	}
	if (with_tx_first) {
		port_fwd_begin = tx_only_engine.port_fwd_begin;
		if (port_fwd_begin != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_begin)(fwd_ports_ids[i]);
		}
		while (with_tx_first--) {
			launch_packet_forwarding(
					run_one_txonly_burst_on_core);
			rte_eal_mp_wait_lcore();
		}
		port_fwd_end = tx_only_engine.port_fwd_end;
		if (port_fwd_end != NULL) {
			for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
				(*port_fwd_end)(fwd_ports_ids[i]);
		}
	}
	launch_packet_forwarding(start_pkt_forward_on_core);
}
void
stop_packet_forwarding(void)
{
	struct rte_eth_stats stats;
	struct rte_port *port;
	port_fwd_end_t port_fwd_end;
	unsigned int i;
	portid_t pt_id;
	streamid_t sm_id;
	lcoreid_t lc_id;
	uint64_t total_recv;
	uint64_t total_xmit;
	uint64_t total_rx_dropped;
	uint64_t total_tx_dropped;
	uint64_t total_rx_nombuf;
	uint64_t tx_dropped;
	uint64_t rx_bad_ip_csum;
	uint64_t rx_bad_l4_csum;
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	uint64_t fwd_cycles;
#endif
	static const char *acc_stats_border = "+++++++++++++++";

	if (test_done) {
		printf("Packet forwarding not started\n");
		return;
	}
	printf("Telling cores to stop...");
	for (lc_id = 0; lc_id < cur_fwd_config.nb_fwd_lcores; lc_id++)
		fwd_lcores[lc_id]->stopped = 1;
	printf("\nWaiting for lcores to finish...\n");
	rte_eal_mp_wait_lcore();
	port_fwd_end = cur_fwd_config.fwd_eng->port_fwd_end;
	if (port_fwd_end != NULL) {
		for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
			pt_id = fwd_ports_ids[i];
			(*port_fwd_end)(pt_id);
		}
	}
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	fwd_cycles = 0;
#endif
	for (sm_id = 0; sm_id < cur_fwd_config.nb_fwd_streams; sm_id++) {
		if (cur_fwd_config.nb_fwd_streams >
		    cur_fwd_config.nb_fwd_ports) {
			fwd_stream_stats_display(sm_id);
			ports[fwd_streams[sm_id]->tx_port].tx_stream = NULL;
			ports[fwd_streams[sm_id]->rx_port].rx_stream = NULL;
		} else {
			ports[fwd_streams[sm_id]->tx_port].tx_stream =
				fwd_streams[sm_id];
			ports[fwd_streams[sm_id]->rx_port].rx_stream =
				fwd_streams[sm_id];
		}
		tx_dropped = ports[fwd_streams[sm_id]->tx_port].tx_dropped;
		tx_dropped = (uint64_t) (tx_dropped +
					 fwd_streams[sm_id]->fwd_dropped);
		ports[fwd_streams[sm_id]->tx_port].tx_dropped = tx_dropped;

		rx_bad_ip_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum;
		rx_bad_ip_csum = (uint64_t) (rx_bad_ip_csum +
					     fwd_streams[sm_id]->rx_bad_ip_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_ip_csum =
			rx_bad_ip_csum;

		rx_bad_l4_csum =
			ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum;
		rx_bad_l4_csum = (uint64_t) (rx_bad_l4_csum +
					     fwd_streams[sm_id]->rx_bad_l4_csum);
		ports[fwd_streams[sm_id]->rx_port].rx_bad_l4_csum =
			rx_bad_l4_csum;

#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
		fwd_cycles = (uint64_t) (fwd_cycles +
					 fwd_streams[sm_id]->core_cycles);
#endif
	}
	total_recv = 0;
	total_xmit = 0;
	total_rx_dropped = 0;
	total_tx_dropped = 0;
	total_rx_nombuf = 0;
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++) {
		pt_id = fwd_ports_ids[i];

		port = &ports[pt_id];
		rte_eth_stats_get(pt_id, &stats);
		stats.ipackets -= port->stats.ipackets;
		port->stats.ipackets = 0;
		stats.opackets -= port->stats.opackets;
		port->stats.opackets = 0;
		stats.ibytes -= port->stats.ibytes;
		port->stats.ibytes = 0;
		stats.obytes -= port->stats.obytes;
		port->stats.obytes = 0;
		stats.imissed -= port->stats.imissed;
		port->stats.imissed = 0;
		stats.oerrors -= port->stats.oerrors;
		port->stats.oerrors = 0;
		stats.rx_nombuf -= port->stats.rx_nombuf;
		port->stats.rx_nombuf = 0;

		total_recv += stats.ipackets;
		total_xmit += stats.opackets;
		total_rx_dropped += stats.imissed;
		total_tx_dropped += port->tx_dropped;
		total_rx_nombuf += stats.rx_nombuf;

		fwd_port_stats_display(pt_id, &stats);
	}

	printf("\n  %s Accumulated forward statistics for all ports"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
	printf("  RX-packets: %-14"PRIu64" RX-dropped: %-14"PRIu64"RX-total: "
	       "%-"PRIu64"\n"
	       "  TX-packets: %-14"PRIu64" TX-dropped: %-14"PRIu64"TX-total: "
	       "%-"PRIu64"\n",
	       total_recv, total_rx_dropped, total_recv + total_rx_dropped,
	       total_xmit, total_tx_dropped, total_xmit + total_tx_dropped);
	if (total_rx_nombuf > 0)
		printf("  RX-nombufs: %-14"PRIu64"\n", total_rx_nombuf);
	printf("  %s++++++++++++++++++++++++++++++++++++++++++++++"
	       "%s\n",
	       acc_stats_border, acc_stats_border);
#ifdef RTE_TEST_PMD_RECORD_CORE_CYCLES
	if (total_recv > 0)
		printf("\n  CPU cycles/packet=%u (total cycles="
		       "%"PRIu64" / total RX packets=%"PRIu64")\n",
		       (unsigned int)(fwd_cycles / total_recv),
		       fwd_cycles, total_recv);
#endif
	printf("\nDone.\n");
	test_done = 1;
}
void
dev_set_link_up(portid_t pid)
{
	if (rte_eth_dev_set_link_up(pid) < 0)
		printf("\nSet link up fail.\n");
}

void
dev_set_link_down(portid_t pid)
{
	if (rte_eth_dev_set_link_down(pid) < 0)
		printf("\nSet link down fail.\n");
}
static int
all_ports_started(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		/* Check if there is a port which is not started */
		if ((port->port_status != RTE_PORT_STARTED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	/* No port is not started */
	return 1;
}

int
all_ports_stopped(void)
{
	portid_t pi;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pi) {
		port = &ports[pi];
		if ((port->port_status != RTE_PORT_STOPPED) &&
		    (port->slave_flag == 0))
			return 0;
	}

	return 1;
}

int
port_is_started(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_STARTED)
		return 0;

	return 1;
}

static int
port_is_closed(portid_t port_id)
{
	if (port_id_is_invalid(port_id, ENABLED_WARN))
		return 0;

	if (ports[port_id].port_status != RTE_PORT_CLOSED)
		return 0;

	return 1;
}
int
start_port(portid_t pid)
{
	int diag, need_check_link_status = -1;
	portid_t pi;
	queueid_t qi;
	struct rte_port *port;
	struct ether_addr mac_addr;
	enum rte_eth_event_type event_type;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return 0;

	if (dcb_config)
		dcb_test = 1;
	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		need_check_link_status = 0;
		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STOPPED,
					RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->need_reconfig > 0) {
			port->need_reconfig = 0;

			if (flow_isolate_all) {
				int ret = port_flow_isolate(pi, 1);
				if (ret) {
					printf("Failed to apply isolated"
					       " mode on port %d\n", pi);
					return -1;
				}
			}

			printf("Configuring Port %d (socket %u)\n", pi,
			       port->socket_id);
			/* configure port */
			diag = rte_eth_dev_configure(pi, nb_rxq, nb_txq,
						     &(port->dev_conf));
			if (diag != 0) {
				if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d\n", pi);
				/* try to reconfigure port next time */
				port->need_reconfig = 1;
				return -1;
			}
		}
		if (port->need_reconfig_queues > 0) {
			port->need_reconfig_queues = 0;
			/* setup tx queues */
			for (qi = 0; qi < nb_txq; qi++) {
				if ((numa_support) &&
				    (txring_numa[pi] != NUMA_NO_CONFIG))
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, txring_numa[pi],
						&(port->tx_conf));
				else
					diag = rte_eth_tx_queue_setup(pi, qi,
						nb_txd, port->socket_id,
						&(port->tx_conf));

				if (diag == 0)
					continue;

				/* Fail to setup tx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d tx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
			/* setup rx queues */
			for (qi = 0; qi < nb_rxq; qi++) {
				if ((numa_support) &&
				    (rxring_numa[pi] != NUMA_NO_CONFIG)) {
					struct rte_mempool * mp =
						mbuf_pool_find(rxring_numa[pi]);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       rxring_numa[pi]);
						return -1;
					}

					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, rxring_numa[pi],
						&(port->rx_conf), mp);
				} else {
					struct rte_mempool *mp =
						mbuf_pool_find(port->socket_id);
					if (mp == NULL) {
						printf("Failed to setup RX queue:"
						       "No mempool allocation"
						       " on the socket %d\n",
						       port->socket_id);
						return -1;
					}
					diag = rte_eth_rx_queue_setup(pi, qi,
						nb_rxd, port->socket_id,
						&(port->rx_conf), mp);
				}
				if (diag == 0)
					continue;

				/* Fail to setup rx queue, return */
				if (rte_atomic16_cmpset(&(port->port_status),
							RTE_PORT_HANDLING,
							RTE_PORT_STOPPED) == 0)
					printf("Port %d can not be set back "
					       "to stopped\n", pi);
				printf("Fail to configure port %d rx queues\n", pi);
				/* try to reconfigure queues next time */
				port->need_reconfig_queues = 1;
				return -1;
			}
		}

		/* register eth event callbacks */
		for (event_type = RTE_ETH_EVENT_UNKNOWN;
		     event_type < RTE_ETH_EVENT_MAX;
		     event_type++) {
			diag = rte_eth_dev_callback_register(pi,
							     event_type,
							     eth_event_callback,
							     NULL);
			if (diag) {
				printf("Failed to setup event callback for event %d\n",
				       event_type);
				return -1;
			}
		}

		/* start port */
		if (rte_eth_dev_start(pi) < 0) {
			printf("Fail to start port %d\n", pi);

			/* Fail to setup rx queue, return */
			if (rte_atomic16_cmpset(&(port->port_status),
				RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
				printf("Port %d can not be set back to "
				       "stopped\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STARTED) == 0)
			printf("Port %d can not be set into started\n", pi);

		rte_eth_macaddr_get(pi, &mac_addr);
		printf("Port %d: %02X:%02X:%02X:%02X:%02X:%02X\n", pi,
		       mac_addr.addr_bytes[0], mac_addr.addr_bytes[1],
		       mac_addr.addr_bytes[2], mac_addr.addr_bytes[3],
		       mac_addr.addr_bytes[4], mac_addr.addr_bytes[5]);

		/* at least one port started, need checking link status */
		need_check_link_status = 1;
	}

	if (need_check_link_status == 1 && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);
	else if (need_check_link_status == 0)
		printf("Please stop the ports first\n");

	printf("Done\n");
	return 0;
}
void
stop_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;
	int need_check_link_status = 0;

	if (dcb_test) {
		dcb_test = 0;
		dcb_config = 0;
	}

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Stopping ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status), RTE_PORT_STARTED,
					RTE_PORT_HANDLING) == 0)
			continue;

		rte_eth_dev_stop(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_STOPPED) == 0)
			printf("Port %d can not be set into stopped\n", pi);
		need_check_link_status = 1;
	}
	if (need_check_link_status && !no_link_check)
		check_all_ports_link_status(RTE_PORT_ALL);

	printf("Done\n");
}
void
close_port(portid_t pid)
{
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Closing ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n", pi);
			continue;
		}

		port = &ports[pi];
		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_CLOSED, RTE_PORT_CLOSED) == 1) {
			printf("Port %d is already closed\n", pi);
			continue;
		}

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_STOPPED, RTE_PORT_HANDLING) == 0) {
			printf("Port %d is now not stopped\n", pi);
			continue;
		}

		if (port->flow_list)
			port_flow_flush(pi);
		rte_eth_dev_close(pi);

		if (rte_atomic16_cmpset(&(port->port_status),
			RTE_PORT_HANDLING, RTE_PORT_CLOSED) == 0)
			printf("Port %d cannot be set to closed\n", pi);
	}

	printf("Done\n");
}
void
reset_port(portid_t pid)
{
	int diag;
	portid_t pi;
	struct rte_port *port;

	if (port_id_is_invalid(pid, ENABLED_WARN))
		return;

	printf("Resetting ports...\n");

	RTE_ETH_FOREACH_DEV(pi) {
		if (pid != pi && pid != (portid_t)RTE_PORT_ALL)
			continue;

		if (port_is_forwarding(pi) != 0 && test_done == 0) {
			printf("Please remove port %d from forwarding "
			       "configuration.\n", pi);
			continue;
		}

		if (port_is_bonding_slave(pi)) {
			printf("Please remove port %d from bonded device.\n",
			       pi);
			continue;
		}

		diag = rte_eth_dev_reset(pi);
		if (diag == 0) {
			port = &ports[pi];
			port->need_reconfig = 1;
			port->need_reconfig_queues = 1;
		} else {
			printf("Failed to reset port %d. diag=%d\n", pi, diag);
		}
	}

	printf("Done\n");
}
void
attach_port(char *identifier)
{
	portid_t pi = 0;
	unsigned int socket_id;

	printf("Attaching a new port...\n");

	if (identifier == NULL) {
		printf("Invalid parameters are specified\n");
		return;
	}

	if (rte_eth_dev_attach(identifier, &pi))
		return;

	socket_id = (unsigned)rte_eth_dev_socket_id(pi);
	/* if socket_id is invalid, set to the first available socket. */
	if (check_socket_id(socket_id) < 0)
		socket_id = socket_ids[0];
	reconfig(pi, socket_id);
	rte_eth_promiscuous_enable(pi);

	nb_ports = rte_eth_dev_count();

	ports[pi].port_status = RTE_PORT_STOPPED;

	update_fwd_ports(pi);

	printf("Port %d is attached. Now total ports is %d\n", pi, nb_ports);
	printf("Done\n");
}
void
detach_port(portid_t port_id)
{
	char name[RTE_ETH_NAME_MAX_LEN];

	printf("Detaching a port...\n");

	if (!port_is_closed(port_id)) {
		printf("Please close port first\n");
		return;
	}

	if (ports[port_id].flow_list)
		port_flow_flush(port_id);

	if (rte_eth_dev_detach(port_id, name)) {
		RTE_LOG(ERR, USER1, "Failed to detach port '%s'\n", name);
		return;
	}

	nb_ports = rte_eth_dev_count();

	update_fwd_ports(RTE_MAX_ETHPORTS);

	printf("Port '%s' is detached. Now total ports is %d\n",
	       name, nb_ports);
	printf("Done\n");
}
void
pmd_test_exit(void)
{
	portid_t pt_id;

	if (test_done == 0)
		stop_packet_forwarding();

	if (ports != NULL) {
		no_link_check = 1;
		RTE_ETH_FOREACH_DEV(pt_id) {
			printf("\nShutting down port %d...\n", pt_id);
			fflush(stdout);
			stop_port(pt_id);
			close_port(pt_id);
		}
	}
	printf("\nBye...\n");
}
typedef void (*cmd_func_t)(void);
struct pmd_test_command {
	const char *cmd_name;
	cmd_func_t cmd_func;
};

#define PMD_TEST_CMD_NB (sizeof(pmd_test_menu) / sizeof(pmd_test_menu[0]))
/* Check the link status of all ports in up to 9s, and print them finally */
static void
check_all_ports_link_status(uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
	portid_t portid;
	uint8_t count, all_ports_up, print_flag = 0;
	struct rte_eth_link link;

	printf("Checking link statuses...\n");
	fflush(stdout);
	for (count = 0; count <= MAX_CHECK_TIME; count++) {
		all_ports_up = 1;
		RTE_ETH_FOREACH_DEV(portid) {
			if ((port_mask & (1 << portid)) == 0)
				continue;
			memset(&link, 0, sizeof(link));
			rte_eth_link_get_nowait(portid, &link);
			/* print link status if flag set */
			if (print_flag == 1) {
				if (link.link_status)
					printf(
					"Port%d Link Up. speed %u Mbps- %s\n",
					portid, link.link_speed,
				(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
					("full-duplex") : ("half-duplex"));
				else
					printf("Port %d Link Down\n", portid);
				continue;
			}
			/* clear all_ports_up flag if any link down */
			if (link.link_status == ETH_LINK_DOWN) {
				all_ports_up = 0;
				break;
			}
		}
		/* after finally printing all link status, get out */
		if (print_flag == 1)
			break;

		if (all_ports_up == 0) {
			fflush(stdout);
			rte_delay_ms(CHECK_INTERVAL);
		}

		/* set the print_flag if all ports up or timeout */
		if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
			print_flag = 1;
		}
	}
}
static void
rmv_event_callback(void *arg)
{
	int org_no_link_check = no_link_check;
	struct rte_eth_dev *dev;
	portid_t port_id = (intptr_t)arg;

	RTE_ETH_VALID_PORTID_OR_RET(port_id);
	dev = &rte_eth_devices[port_id];

	no_link_check = 1;
	stop_port(port_id);
	no_link_check = org_no_link_check;
	close_port(port_id);
	printf("removing device %s\n", dev->device->name);
	if (rte_eal_dev_detach(dev->device))
		RTE_LOG(ERR, USER1, "Failed to detach device %s\n",
			dev->device->name);
}
/* This function is used by the interrupt thread */
static int
eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
		   void *ret_param)
{
	static const char * const event_desc[] = {
		[RTE_ETH_EVENT_UNKNOWN] = "Unknown",
		[RTE_ETH_EVENT_INTR_LSC] = "LSC",
		[RTE_ETH_EVENT_QUEUE_STATE] = "Queue state",
		[RTE_ETH_EVENT_INTR_RESET] = "Interrupt reset",
		[RTE_ETH_EVENT_VF_MBOX] = "VF Mbox",
		[RTE_ETH_EVENT_MACSEC] = "MACsec",
		[RTE_ETH_EVENT_INTR_RMV] = "device removal",
		[RTE_ETH_EVENT_MAX] = NULL,
	};

	RTE_SET_USED(param);
	RTE_SET_USED(ret_param);

	if (type >= RTE_ETH_EVENT_MAX) {
		fprintf(stderr, "\nPort %" PRIu16 ": %s called upon invalid event %d\n",
			port_id, __func__, type);
		fflush(stderr);
	} else if (event_print_mask & (UINT32_C(1) << type)) {
		printf("\nPort %" PRIu16 ": %s event\n", port_id,
		       event_desc[type]);
		fflush(stdout);
	}

	switch (type) {
	case RTE_ETH_EVENT_INTR_RMV:
		if (rte_eal_alarm_set(100000,
				rmv_event_callback, (void *)(intptr_t)port_id))
			fprintf(stderr, "Could not set up deferred device removal\n");
		break;
	default:
		break;
	}
	return 0;
}
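/*
 * The removal itself is deferred through a 100000 us (100 ms) EAL alarm
 * rather than done inline: this callback runs in the interrupt thread, and
 * detaching the device from within its own event callback is not safe.
 */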
static int
set_tx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_tx_queue_stats_mappings; i++) {
		if ((tx_queue_stats_mappings[i].port_id == port_id) &&
		    (tx_queue_stats_mappings[i].queue_id < nb_txq)) {
			diag = rte_eth_dev_set_tx_queue_stats_mapping(port_id,
					tx_queue_stats_mappings[i].queue_id,
					tx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->tx_queue_stats_mapping_enabled = 1;
	return 0;
}

static int
set_rx_queue_stats_mapping_registers(portid_t port_id, struct rte_port *port)
{
	uint16_t i;
	int diag;
	uint8_t mapping_found = 0;

	for (i = 0; i < nb_rx_queue_stats_mappings; i++) {
		if ((rx_queue_stats_mappings[i].port_id == port_id) &&
		    (rx_queue_stats_mappings[i].queue_id < nb_rxq)) {
			diag = rte_eth_dev_set_rx_queue_stats_mapping(port_id,
					rx_queue_stats_mappings[i].queue_id,
					rx_queue_stats_mappings[i].stats_counter_id);
			if (diag != 0)
				return diag;
			mapping_found = 1;
		}
	}
	if (mapping_found)
		port->rx_queue_stats_mapping_enabled = 1;
	return 0;
}
static void
map_port_queue_stats_mapping_registers(portid_t pi, struct rte_port *port)
{
	int diag = 0;

	diag = set_tx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->tx_queue_stats_mapping_enabled = 0;
			printf("TX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_tx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}

	diag = set_rx_queue_stats_mapping_registers(pi, port);
	if (diag != 0) {
		if (diag == -ENOTSUP) {
			port->rx_queue_stats_mapping_enabled = 0;
			printf("RX queue stats mapping not supported port id=%d\n", pi);
		} else
			rte_exit(EXIT_FAILURE,
				 "set_rx_queue_stats_mapping_registers "
				 "failed for port id=%d diag=%d\n",
				 pi, diag);
	}
}
static void
rxtx_port_config(struct rte_port *port)
{
	port->rx_conf = port->dev_info.default_rxconf;
	port->tx_conf = port->dev_info.default_txconf;

	/* Check if any RX/TX parameters have been passed */
	if (rx_pthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.pthresh = rx_pthresh;

	if (rx_hthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.hthresh = rx_hthresh;

	if (rx_wthresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_thresh.wthresh = rx_wthresh;

	if (rx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_free_thresh = rx_free_thresh;

	if (rx_drop_en != RTE_PMD_PARAM_UNSET)
		port->rx_conf.rx_drop_en = rx_drop_en;

	if (tx_pthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.pthresh = tx_pthresh;

	if (tx_hthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.hthresh = tx_hthresh;

	if (tx_wthresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_thresh.wthresh = tx_wthresh;

	if (tx_rs_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_rs_thresh = tx_rs_thresh;

	if (tx_free_thresh != RTE_PMD_PARAM_UNSET)
		port->tx_conf.tx_free_thresh = tx_free_thresh;

	if (txq_flags != RTE_PMD_PARAM_UNSET)
		port->tx_conf.txq_flags = txq_flags;
}
void
init_port_config(void)
{
	portid_t pid;
	struct rte_port *port;

	RTE_ETH_FOREACH_DEV(pid) {
		port = &ports[pid];
		port->dev_conf.rxmode = rx_mode;
		port->dev_conf.fdir_conf = fdir_conf;
		if (nb_rxq > 1) {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = rss_hf;
		} else {
			port->dev_conf.rx_adv_conf.rss_conf.rss_key = NULL;
			port->dev_conf.rx_adv_conf.rss_conf.rss_hf = 0;
		}

		if (port->dcb_flag == 0) {
			if (port->dev_conf.rx_adv_conf.rss_conf.rss_hf != 0)
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
			else
				port->dev_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
		}

		rxtx_port_config(port);

		rte_eth_macaddr_get(pid, &port->eth_addr);

		map_port_queue_stats_mapping_registers(pid, port);
#if defined RTE_LIBRTE_IXGBE_PMD && defined RTE_LIBRTE_IXGBE_BYPASS
		rte_pmd_ixgbe_bypass_init(pid);
#endif

		if (lsc_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_LSC))
			port->dev_conf.intr_conf.lsc = 1;
		if (rmv_interrupt &&
		    (rte_eth_devices[pid].data->dev_flags &
		     RTE_ETH_DEV_INTR_RMV))
			port->dev_conf.intr_conf.rmv = 1;

#if defined RTE_LIBRTE_PMD_SOFTNIC && defined RTE_LIBRTE_SCHED
		/* Detect softnic port */
		if (!strcmp(port->dev_info.driver_name, "net_softnic")) {
			port->softnic_enable = 1;
			memset(&port->softport, 0, sizeof(struct softnic_port));

			if (!strcmp(cur_fwd_eng->fwd_mode_name, "tm"))
				port->softport.tm_flag = 1;
		}
#endif
	}
}
void set_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 1;
}

void clear_port_slave_flag(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	port->slave_flag = 0;
}

uint8_t port_is_bonding_slave(portid_t slave_pid)
{
	struct rte_port *port;

	port = &ports[slave_pid];
	if ((rte_eth_devices[slave_pid].data->dev_flags &
	     RTE_ETH_DEV_BONDED_SLAVE) || (port->slave_flag == 1))
		return 1;
	return 0;
}
const uint16_t vlan_tags[] = {
	0,  1,  2,  3,  4,  5,  6,  7,
	8,  9, 10, 11, 12, 13, 14, 15,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31
};
static int
get_eth_dcb_conf(portid_t pid, struct rte_eth_conf *eth_conf,
		 enum dcb_mode_enable dcb_mode,
		 enum rte_eth_nb_tcs num_tcs,
		 uint8_t pfc_en)
{
	uint8_t i;
	int32_t rc;
	struct rte_eth_rss_conf rss_conf;

	/*
	 * Build up the correct configuration for dcb+vt based on the vlan
	 * tags array given above, and the number of traffic classes
	 * available for use.
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf =
				&eth_conf->rx_adv_conf.vmdq_dcb_conf;
		struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf =
				&eth_conf->tx_adv_conf.vmdq_dcb_tx_conf;

		/* VMDQ+DCB RX and TX configurations */
		vmdq_rx_conf->enable_default_pool = 0;
		vmdq_rx_conf->default_pool = 0;
		vmdq_rx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);
		vmdq_tx_conf->nb_queue_pools =
			(num_tcs == ETH_4_TCS ? ETH_32_POOLS : ETH_16_POOLS);

		vmdq_rx_conf->nb_pool_maps = vmdq_rx_conf->nb_queue_pools;
		for (i = 0; i < vmdq_rx_conf->nb_pool_maps; i++) {
			vmdq_rx_conf->pool_map[i].vlan_id = vlan_tags[i];
			vmdq_rx_conf->pool_map[i].pools =
				1 << (i % vmdq_rx_conf->nb_queue_pools);
		}
		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			vmdq_rx_conf->dcb_tc[i] = i % num_tcs;
			vmdq_tx_conf->dcb_tc[i] = i % num_tcs;
		}

		/* set DCB mode of RX and TX of multiple queues */
		eth_conf->rxmode.mq_mode = ETH_MQ_RX_VMDQ_DCB;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_VMDQ_DCB;
	} else {
		struct rte_eth_dcb_rx_conf *rx_conf =
				&eth_conf->rx_adv_conf.dcb_rx_conf;
		struct rte_eth_dcb_tx_conf *tx_conf =
				&eth_conf->tx_adv_conf.dcb_tx_conf;

		rc = rte_eth_dev_rss_hash_conf_get(pid, &rss_conf);
		if (rc != 0)
			return rc;

		rx_conf->nb_tcs = num_tcs;
		tx_conf->nb_tcs = num_tcs;

		for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES; i++) {
			rx_conf->dcb_tc[i] = i % num_tcs;
			tx_conf->dcb_tc[i] = i % num_tcs;
		}

		eth_conf->rxmode.mq_mode = ETH_MQ_RX_DCB_RSS;
		eth_conf->rx_adv_conf.rss_conf = rss_conf;
		eth_conf->txmode.mq_mode = ETH_MQ_TX_DCB;
	}

	if (pfc_en)
		eth_conf->dcb_capability_en =
				ETH_DCB_PG_SUPPORT | ETH_DCB_PFC_SUPPORT;
	else
		eth_conf->dcb_capability_en = ETH_DCB_PG_SUPPORT;

	return 0;
}
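/*
 * Example: with num_tcs = ETH_4_TCS in VT mode, 32 VMDq pools are
 * configured and the 8 user priorities map round-robin onto the 4 traffic
 * classes (priority i -> TC i % 4), while vlan_tags[i] steers VLAN i to
 * pool i % nb_queue_pools.
 */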
int
init_port_dcb_config(portid_t pid,
		     enum dcb_mode_enable dcb_mode,
		     enum rte_eth_nb_tcs num_tcs,
		     uint8_t pfc_en)
{
	struct rte_eth_conf port_conf;
	struct rte_port *rte_port;
	int retval;
	uint16_t i;

	rte_port = &ports[pid];

	memset(&port_conf, 0, sizeof(struct rte_eth_conf));
	/* Enter DCB configuration status */
	dcb_config = 1;

	/* set configuration of DCB in vt mode and DCB in non-vt mode */
	retval = get_eth_dcb_conf(pid, &port_conf, dcb_mode, num_tcs, pfc_en);
	if (retval < 0)
		return retval;
	port_conf.rxmode.hw_vlan_filter = 1;

	/*
	 * Write the configuration into the device.
	 * Set the numbers of RX & TX queues to 0, so
	 * the RX & TX queues will not be setup.
	 */
	rte_eth_dev_configure(pid, 0, 0, &port_conf);

	rte_eth_dev_info_get(pid, &rte_port->dev_info);

	/* If dev_info.vmdq_pool_base is greater than 0,
	 * the queue id of vmdq pools is started after pf queues.
	 */
	if (dcb_mode == DCB_VT_ENABLED &&
	    rte_port->dev_info.vmdq_pool_base > 0) {
		printf("VMDQ_DCB multi-queue mode is nonsensical"
		       " for port %d.", pid);
		return -1;
	}

	/* Assume the ports in testpmd have the same dcb capability
	 * and have the same number of rxq and txq in dcb mode
	 */
	if (dcb_mode == DCB_VT_ENABLED) {
		if (rte_port->dev_info.max_vfs > 0) {
			nb_rxq = rte_port->dev_info.nb_rx_queues;
			nb_txq = rte_port->dev_info.nb_tx_queues;
		} else {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		}
	} else {
		/* if vt is disabled, use all pf queues */
		if (rte_port->dev_info.vmdq_pool_base == 0) {
			nb_rxq = rte_port->dev_info.max_rx_queues;
			nb_txq = rte_port->dev_info.max_tx_queues;
		} else {
			nb_rxq = (queueid_t)num_tcs;
			nb_txq = (queueid_t)num_tcs;
		}
	}
	rx_free_thresh = 64;

	memcpy(&rte_port->dev_conf, &port_conf, sizeof(struct rte_eth_conf));

	rxtx_port_config(rte_port);
	/* VLAN filter */
	rte_port->dev_conf.rxmode.hw_vlan_filter = 1;
	for (i = 0; i < RTE_DIM(vlan_tags); i++)
		rx_vft_set(pid, vlan_tags[i], 1);

	rte_eth_macaddr_get(pid, &rte_port->eth_addr);
	map_port_queue_stats_mapping_registers(pid, rte_port);

	rte_port->dcb_flag = 1;

	return 0;
}
static void
init_port(void)
{
	/* Configuration of Ethernet ports. */
	ports = rte_zmalloc("testpmd: ports",
			    sizeof(struct rte_port) * RTE_MAX_ETHPORTS,
			    RTE_CACHE_LINE_SIZE);
	if (ports == NULL) {
		rte_exit(EXIT_FAILURE,
			 "rte_zmalloc(%d struct rte_port) failed\n",
			 RTE_MAX_ETHPORTS);
	}

	/* Initialize ports NUMA structures */
	memset(port_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(rxring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
	memset(txring_numa, NUMA_NO_CONFIG, RTE_MAX_ETHPORTS);
}
static void
print_stats(void)
{
	uint8_t i;
	const char clr[] = { 27, '[', '2', 'J', '\0' };
	const char top_left[] = { 27, '[', '1', ';', '1', 'H', '\0' };

	/* Clear screen and move to top left */
	printf("%s%s", clr, top_left);

	printf("\nPort statistics ====================================");
	for (i = 0; i < cur_fwd_config.nb_fwd_ports; i++)
		nic_stats_display(fwd_ports_ids[i]);
}
static void
signal_handler(int signum)
{
	if (signum == SIGINT || signum == SIGTERM) {
		printf("\nSignal %d received, preparing to exit...\n",
		       signum);
#ifdef RTE_LIBRTE_PDUMP
		/* uninitialize packet capture framework */
		rte_pdump_uninit();
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
		rte_latencystats_uninit();
#endif
		force_quit();
		/* Set flag to indicate the force termination. */
		f_quit = 1;
		/* exit with the expected status */
		signal(signum, SIG_DFL);
		kill(getpid(), signum);
	}
}
int
main(int argc, char** argv)
{
	int diag;
	portid_t port_id;

	signal(SIGINT, signal_handler);
	signal(SIGTERM, signal_handler);

	diag = rte_eal_init(argc, argv);
	if (diag < 0)
		rte_panic("Cannot init EAL\n");

	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
		RTE_LOG(NOTICE, USER1, "mlockall() failed with error \"%s\"\n",
			strerror(errno));
	}

#ifdef RTE_LIBRTE_PDUMP
	/* initialize packet capture framework */
	rte_pdump_init(NULL);
#endif

	nb_ports = (portid_t) rte_eth_dev_count();
	if (nb_ports == 0)
		RTE_LOG(WARNING, EAL, "No probed ethernet devices\n");

	/* allocate port structures, and init them */
	init_port();

	set_def_fwd_config();
	if (nb_lcores == 0)
		rte_panic("Empty set of forwarding logical cores - check the "
			  "core mask supplied in the command parameters\n");

	/* Bitrate/latency stats disabled by default */
#ifdef RTE_LIBRTE_BITRATE
	bitrate_enabled = 0;
#endif
#ifdef RTE_LIBRTE_LATENCY_STATS
	latencystats_enabled = 0;
#endif

	argc -= diag;
	argv += diag;
	if (argc > 1)
		launch_args_parse(argc, argv);

	if (tx_first && interactive)
		rte_exit(EXIT_FAILURE, "--tx-first cannot be used in "
			 "interactive mode.\n");

	if (tx_first && lsc_interrupt) {
		printf("Warning: lsc_interrupt needs to be off when "
		       "using tx_first. Disabling.\n");
		lsc_interrupt = 0;
	}

	if (!nb_rxq && !nb_txq)
		printf("Warning: Either rx or tx queues should be non-zero\n");

	if (nb_rxq > 1 && nb_rxq > nb_txq)
		printf("Warning: nb_rxq=%d enables RSS configuration, "
		       "but nb_txq=%d will prevent to fully test it.\n",
		       nb_rxq, nb_txq);

	init_config();
	if (start_port(RTE_PORT_ALL) != 0)
		rte_exit(EXIT_FAILURE, "Start ports failed\n");

	/* set all ports to promiscuous mode by default */
	RTE_ETH_FOREACH_DEV(port_id)
		rte_eth_promiscuous_enable(port_id);

	/* Init metrics library */
	rte_metrics_init(rte_socket_id());

#ifdef RTE_LIBRTE_LATENCY_STATS
	if (latencystats_enabled != 0) {
		int ret = rte_latencystats_init(1, NULL);
		if (ret)
			printf("Warning: latencystats init()"
			       " returned error %d\n", ret);
		printf("Latencystats running on lcore %d\n",
		       latencystats_lcore_id);
	}
#endif

	/* Setup bitrate stats */
#ifdef RTE_LIBRTE_BITRATE
	if (bitrate_enabled != 0) {
		bitrate_data = rte_stats_bitrate_create();
		if (bitrate_data == NULL)
			rte_exit(EXIT_FAILURE,
				 "Could not allocate bitrate data.\n");
		rte_stats_bitrate_reg(bitrate_data);
	}
#endif

#ifdef RTE_LIBRTE_CMDLINE
	if (strlen(cmdline_filename) != 0)
		cmdline_read_from_file(cmdline_filename);

	if (interactive == 1) {
		if (auto_start) {
			printf("Start automatic packet forwarding\n");
			start_packet_forwarding(0);
		}
		prompt();
		pmd_test_exit();
	} else
#endif
	{
		char c;
		int rc;

		f_quit = 0;

		printf("No commandline core given, start packet forwarding\n");
		start_packet_forwarding(tx_first);
		if (stats_period != 0) {
			uint64_t prev_time = 0, cur_time, diff_time = 0;
			uint64_t timer_period;

			/* Convert to number of cycles */
			timer_period = stats_period * rte_get_timer_hz();

			while (f_quit == 0) {
				cur_time = rte_get_timer_cycles();
				diff_time += cur_time - prev_time;

				if (diff_time >= timer_period) {
					print_stats();
					/* Reset the timer */
					diff_time = 0;
				}
				/* Sleep to avoid unnecessary checks */
				prev_time = cur_time;
				sleep(1);
			}
		}

		printf("Press enter to exit\n");
		rc = read(0, &c, 1);
		pmd_test_exit();
		if (rc < 0)
			return 1;
	}

	return 0;
}